github.com/sixexorg/magnetic-ring@v0.0.0-20191119090307-31705a21e419/consense/dpoa/server.go

package dpoa

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"

	"github.com/ontio/ontology-eventbus/actor"

	"github.com/sixexorg/magnetic-ring/account"
	"github.com/sixexorg/magnetic-ring/bactor"
	actorTypes "github.com/sixexorg/magnetic-ring/consense/actor"
	"github.com/sixexorg/magnetic-ring/consense/dpoa/comm"
	"github.com/sixexorg/magnetic-ring/core/mainchain/types"
	"github.com/sixexorg/magnetic-ring/events/message"
	"github.com/sixexorg/magnetic-ring/log"
	"github.com/sixexorg/magnetic-ring/node"
	// p2pcomm and p2pmsg alias the same p2pserver/common package; both names
	// are kept because both are used below.
	p2pcomm "github.com/sixexorg/magnetic-ring/p2pserver/common"
	p2pmsg "github.com/sixexorg/magnetic-ring/p2pserver/common"
	"github.com/sixexorg/magnetic-ring/store/mainchain/storages"
)

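// BftAction is an internal command delivered to the server's action loop,
// identifying the action type, the block height it applies to and whether it
// targets an empty block.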
type BftAction struct {
	Type     comm.BftActionType
	BlockNum uint64
	forEmpty bool
}

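// Server is the DPoA consensus engine. It wires together the block store,
// state manager, DPoA round manager, peer pool, message pool, transport and
// timers, and runs as an actor registered under bactor.CONSENSUSACTOR.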
type Server struct {
	p2p        *actorTypes.P2PActor
	ledger     *storages.LedgerStoreImp
	pid        *actor.PID
	account    account.NormalAccount
	accountStr string
	store      *BlockStore
	stateMgr   *StateMgr
	dpoaMgr    *DpoaMgr
	peerpool   *PeerPool
	trans      *TransAction
	timer      *EventTimer
	syncer     *Syncer
	msgpool    *MsgPool
	bftActionC chan *BftAction
	proc       *ProcMode
	rsvCount   uint64

	quitC       chan struct{}
	stopNewHtCh chan struct{}
	quitWg      sync.WaitGroup
	notifyBlock *Feed
	notifyState *Feed
}

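// NewServer creates the consensus server, spawns its actor under the name
// "consensus_dpoa" and registers the resulting PID as the global consensus
// actor.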
func NewServer(account account.NormalAccount, p2p *actor.PID) (*Server, error) {
	server := &Server{
		p2p:         &actorTypes.P2PActor{P2P: p2p},
		ledger:      storages.GetLedgerStore(),
		account:     account,
		notifyBlock: &Feed{},
		notifyState: &Feed{},
		accountStr:  fmt.Sprintf("%x", account.PublicKey().Bytes()),
		quitC:       make(chan struct{}),
	}

	props := actor.FromProducer(func() actor.Actor {
		return server
	})

	pid, err := actor.SpawnNamed(props, "consensus_dpoa")
	if err != nil {
		return nil, err
	}
	server.pid = pid
	bactor.RegistActorPid(bactor.CONSENSUSACTOR, pid)

	return server, nil
}

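// LoadChainConfig populates the default DPoA configuration: every current
// star node except this one plus the earth node become peers, and the local
// account is attached to the config.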
func (self *Server) LoadChainConfig(chainStore *BlockStore) error {
	for _, n := range node.GurStars() {
		if self.accountStr != n {
			DftConfig.Peers = append(DftConfig.Peers, n)
		}
	}
	DftConfig.Peers = append(DftConfig.Peers, node.CurEarth())
	DftConfig.account = self.account
	DftConfig.accountStr = self.accountStr

	return nil
}

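// Start initializes the server and begins consensus processing.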
func (self *Server) Start() error {
	return self.start()
}

func (self *Server) Halt() error {
	return nil
}

func (self *Server) GetPID() *actor.PID {
	return self.pid
}

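// actionLoop consumes BftAction commands until the server is asked to quit.
// A ReBroadcast action re-sends the heartbeat unless the chain has already
// advanced past the action's block number.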
func (self *Server) actionLoop() {
	self.quitWg.Add(1)
	defer self.quitWg.Done()

	for {
		select {
		case action := <-self.bftActionC:
			switch action.Type {
			case comm.FastForward:
			case comm.ReBroadcast:
				blkNum := self.GetCurrentBlockNo()
				if blkNum > action.BlockNum {
					continue
				}
				self.trans.heartbeat()
			}
		case <-self.quitC:
			log.Info("server %s actionLoop quit", self.accountStr)
			return
		}
	}
}

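// initialize builds all consensus sub-components (block store, peer pool,
// message pool, DPoA manager, state manager, transport, syncer and timers),
// starts their loops, and finally tells the p2p actor to register this
// stellar node connection.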
func (self *Server) initialize() error {
	var err error
	if self.store, err = NewBlockStore(self.ledger, self.accountStr, self.p2p.P2P); err != nil {
		return err
	}

	if err := self.LoadChainConfig(self.store); err != nil {
		log.Error("failed to load config: %s", err)
		return fmt.Errorf("failed to load config: %s", err)
	}
	log.Info("chain config loaded from local", "current blockNum:", self.store.db.GetCurrentBlockHeight()+1)

	self.peerpool = NewPeerPool()
	self.msgpool = newMsgPool(self.rsvCount)
	self.dpoaMgr = NewdpoaMgr(DftConfig, self.store, self.msgpool, self.p2p.P2P)
	self.stateMgr = NewStateMgr(self.store, self.notifyState, DftConfig)
	self.trans = NewtransAction(self.msgpool, self.peerpool, self.stateMgr, self.dpoaMgr, self.p2p, DftConfig)
	self.proc = NewprocMode(self.dpoaMgr, self.msgpool, self.trans, self.notifyBlock, self.notifyState, DftConfig)
	self.timer = NewEventTimer()
	self.syncer = newSyncer(self.stateMgr, DftConfig)
	self.bftActionC = make(chan *BftAction, 8)
	self.peerpool.Init(DftConfig.Peers)
	self.syncer.run()
	self.stateMgr.run()
	self.trans.start()
	go self.timerLoop()
	go self.actionLoop()
	self.proc.Start()
	self.stateMgr.StateEventC <- &StateEvent{
		Type: ConfigLoaded,
	}
	log.Info("peer started", "PublicKey:", self.accountStr)

	// TODO: start peer-conn-handlers
	connInfo := p2pcomm.StellarNodeConnInfo{BAdd: true}
	p2ppid, err := bactor.GetActorPid(bactor.P2PACTOR)
	if err != nil {
		log.Error("p2pactor not init", "err", err)
		return err
	}
	p2ppid.Tell(&connInfo)
	return nil
}

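// start performs initialization, arms the peer heartbeat ticker and kicks off
// the main consensus processing loop.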
func (self *Server) start() error {
	if err := self.initialize(); err != nil {
		return fmt.Errorf("dpoa server start failed: %s", err)
	}

	self.timer.startPeerTicker(math.MaxUint32)
	self.proc.Process()

	return nil
}

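// stop shuts the server down: it closes the quit channel, waits for the timer
// and action loops to exit, then stops the syncer and timers and releases the
// pools and the block store.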
func (self *Server) stop() error {
	close(self.quitC)
	self.quitWg.Wait()

	self.syncer.stop()
	self.timer.stop()
	self.msgpool.clean()
	self.store.close()
	self.peerpool.clean()

	return nil
}

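// Receive implements the actor.Actor interface. It dispatches actor lifecycle
// events, consensus control messages, persisted blocks and incoming p2p
// consensus payloads to the corresponding handlers.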
func (srv *Server) Receive(c actor.Context) {
	switch msg := c.Message().(type) {
	case *actor.Restarting:
		log.Info("dpoa actor restarting")
	case *actor.Stopping:
		log.Info("dpoa actor stopping")
	case *actor.Stopped:
		log.Info("dpoa actor stopped")
	case *actor.Started:
		log.Info("dpoa actor started")
	case *actor.Restart:
		log.Info("dpoa actor restart")
	case *actorTypes.StartConsensus:
		log.Info("dpoa actor start consensus")
	case *actorTypes.StopConsensus:
		srv.stop()
	case *message.SaveBlockCompleteMsg:
		log.Info("dpoa actor receives block complete event. block height=%d, numtx=%d",
			msg.Block.Header.Height, len(msg.Block.Transactions))
		srv.handleBlockPersistCompleted(msg.Block)
	case *p2pmsg.ConsensusPayload:
		srv.NewConsensusPayload(msg)
	case *types.Block:
		srv.handleBlockPersistCompleted(msg)
	case *p2pmsg.PeerAnn:
		srv.handleAnn(msg)
	default:
		// Ignore unknown message types.
	}
}

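// handleAnn maps a peer announcement's p2p id back to a consensus peer and,
// if one is found, forwards its connection state and committed height to the
// state manager.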
func (self *Server) handleAnn(ann *p2pcomm.PeerAnn) {
	var peerid string
	for p, v := range self.peerpool.P2pMap {
		if v == ann.PeerId {
			peerid = p
		}
	}
	if len(peerid) == 0 {
		return
	}

	self.stateMgr.StateEventC <- &StateEvent{
		Type: UpdatePeerState,
		peerState: &PeerState{
			peerIdx:           peerid,
			connected:         true,
			committedBlockNum: ann.Height,
		},
	}
}

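// handleBlockPersistCompleted reacts to a block that has been persisted:
// non-earth nodes advance their cached height and may switch into syncing,
// all sub-components are told the block is sealed, and the new block hash and
// height are broadcast to the network.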
func (self *Server) handleBlockPersistCompleted(block *types.Block) {
	log.Info("persist block", "height", block.Header.Height, "hash", block.Hash(),
		"timestamp", block.Header.Timestamp, "payload", len(block.Header.ConsensusPayload))
	curHeight := atomic.LoadUint64(&self.store.curHeight)
	if !self.store.isEarth() && curHeight < block.Header.Height {
		log.Info("advance local consensus height", "curHeight", curHeight, "block height", block.Header.Height)
		atomic.AddUint64(&self.store.curHeight, 1)
		if self.stateMgr.currentState == SyncReady {
			self.stateMgr.currentState = Syncing
			self.proc.state = Syncing
			self.stateMgr.setSyncedReady()
		}
	}
	self.store.onBlockSealed(block.Header.Height)
	self.timer.onBlockSealed(uint32(block.Header.Height))
	self.msgpool.onBlockSealed(block.Header.Height)
	self.notifyBlock.Send(*block)
	self.trans.heartbeat()

	if self.store.isEarth() {
		p := &p2pcomm.EarthNotifyBlk{
			BlkHeight: block.Header.Height,
			BlkHash:   block.Header.Hash(),
		}
		self.p2p.Broadcast(p)
	}
	pp := &p2pmsg.NotifyBlk{
		BlkHeight: block.Header.Height,
		BlkHash:   block.Header.Hash(),
	}
	self.p2p.Broadcast(pp)
}

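// NewConsensusPayload registers the sender of an incoming consensus payload
// in the peer pool (recording its p2p id if it changed) and hands the message
// to the transport layer.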
func (srv *Server) NewConsensusPayload(payload *p2pmsg.ConsensusPayload) {
	peerID := fmt.Sprintf("%x", payload.Owner.Bytes())
	if srv.peerpool.isNewPeer(peerID) {
		srv.peerpool.peerConnected(peerID)
	}
	p2pid, present := srv.peerpool.getP2pId(peerID)
	if !present || p2pid != payload.PeerId {
		srv.peerpool.addP2pId(peerID, payload.PeerId)
	}

	msg := &comm.P2pMsgPayload{
		FromPeer: peerID,
		Payload:  payload,
	}
	srv.trans.recvMsg(peerID, msg)
}

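// getState reports the current server state as tracked by the state manager.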
func (self *Server) getState() ServerState {
	return self.stateMgr.getState()
}

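// timerLoop drains timer events and dispatches them to processTimerEvent
// until the server quits.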
func (self *Server) timerLoop() {
	self.quitWg.Add(1)
	defer self.quitWg.Done()

	for {
		select {
		case evt := <-self.timer.C:
			if err := self.processTimerEvent(evt); err != nil {
				log.Error("failed to process timer evt: %d, err: %s", evt.evtType, err)
			}

		case <-self.quitC:
			log.Info("server %s timerLoop quit", self.accountStr)
			return
		}
	}
}

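// processTimerEvent handles a fired timer event. Only the peer heartbeat
// event is acted on at the moment; all other timeout events fall through
// unhandled.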
func (self *Server) processTimerEvent(evt *TimerEvent) error {
	switch evt.evtType {
	case EventProposalBackoff:
	case EventProposeBlockTimeout:
	case EventRandomBackoff:
	case EventPropose2ndBlockTimeout:
	case EventEndorseBlockTimeout:
	case EventEndorseEmptyBlockTimeout:
	case EventCommitBlockTimeout:
	case EventPeerHeartbeat:
		self.trans.heartbeat()
	case EventTxPool:
	case EventTxBlockTimeout:
	}
	return nil
}

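// GetCommittedBlockNo returns the latest committed block number from the
// block store.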
func (srv *Server) GetCommittedBlockNo() uint64 {
	return srv.store.getLatestBlockNumber()
}

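// GetCurrentBlockNo returns the height of the block currently being worked
// on, i.e. the persisted chain height plus one.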
func (self *Server) GetCurrentBlockNo() uint64 {
	return self.store.db.GetCurrentBlockHeight() + 1
}