github.com/sixexorg/magnetic-ring@v0.0.0-20191119090307-31705a21e419/consense/dpoa/chainsync.go

package dpoa

import (
	"sync"

	"github.com/sixexorg/magnetic-ring/consense/dpoa/comm"
	"github.com/sixexorg/magnetic-ring/log"
)

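// SyncCheckReq pairs a consensus message with the sending peer's index and the
// block number it refers to; it is delivered on Syncer.syncCheckReqC.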
type SyncCheckReq struct {
	msg      comm.ConsensusMsg
	peerIdx  uint64
	blockNum uint64
}

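// BlockSyncReq asks the syncer to fetch blocks [startBlockNum, targetBlockNum]
// from the listed target peers.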
type BlockSyncReq struct {
	targetPeers    []string
	startBlockNum  uint64
	targetBlockNum uint64 // targetBlockNum == 0 is treated as a cancel-sync request
}

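// PeerSyncer tracks per-peer sync state: the next block number to request from
// the peer, the current sync target, and whether its fetch loop is active.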
type PeerSyncer struct {
	lock          sync.Mutex
	peerIdx       string
	nextReqBlkNum uint64
	targetBlkNum  uint64
	active        bool

	//server *Server
	stat   *StateMgr
	msgC   chan comm.ConsensusMsg
}

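// SyncMsg wraps a consensus message together with the peer it came from.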
type SyncMsg struct {
	fromPeer string
	msg      comm.ConsensusMsg
}

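// BlockMsgFromPeer is a block received from a specific peer during syncing.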
type BlockMsgFromPeer struct {
	fromPeer string
	block    *comm.Block
}

type BlockFromPeers map[string]*comm.Block // index by peerId

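// Syncer coordinates block synchronization for the dpoa consensus module. It
// keeps one PeerSyncer per peer, collects blocks received from peers in
// pendingBlocks (indexed by block number), and is driven by the request and
// message channels below until quitC is closed.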
type Syncer struct {
	lock   sync.Mutex
	//server *Server
	stat   *StateMgr
	cfg    *Config

	maxRequestPerPeer int
	nextReqBlkNum     uint64
	targetBlkNum      uint64

	syncCheckReqC  chan *SyncCheckReq
	blockSyncReqC  chan *BlockSyncReq
	syncMsgC       chan *SyncMsg // receive sync msgs from the server
	//blockFromPeerC chan *BlockMsgFromPeer

	peers         map[string]*PeerSyncer
	pendingBlocks map[uint64]BlockFromPeers // index by blockNum
	quitC         chan struct{}
}

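// newSyncer builds a Syncer bound to the given state manager and config, with
// buffered request/message channels and an initialized quit channel.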
func newSyncer(stat *StateMgr, cfg *Config) *Syncer {
	return &Syncer{
		stat:              stat,
		cfg:               cfg,
		maxRequestPerPeer: 4,
		nextReqBlkNum:     1,
		syncCheckReqC:     make(chan *SyncCheckReq, 4),
		blockSyncReqC:     make(chan *BlockSyncReq, 16),
		syncMsgC:          make(chan *SyncMsg, 256),
		//blockFromPeerC:    make(chan *BlockMsgFromPeer, 64),
		peers:             make(map[string]*PeerSyncer),
		pendingBlocks:     make(map[uint64]BlockFromPeers),
		quitC:             make(chan struct{}), // non-nil so the quit case in run() can fire
	}
}

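// stop shuts the syncer down: it closes the quit channel so the run loop can
// exit, closes the request/message channels, and drops all per-peer state.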
func (self *Syncer) stop() {
	self.lock.Lock()
	defer self.lock.Unlock()

	// signal the run loop to exit before tearing down the data channels
	close(self.quitC)

	close(self.syncCheckReqC)
	close(self.blockSyncReqC)
	close(self.syncMsgC)
	//close(self.blockFromPeerC)

	self.peers = make(map[string]*PeerSyncer)
	self.pendingBlocks = make(map[uint64]BlockFromPeers)
}

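// run starts the syncer's event loop in a goroutine. The loop dispatches block
// sync requests to per-peer syncers, forwards incoming sync messages to the
// owning PeerSyncer, and exits when quitC is closed.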
func (self *Syncer) run() {
	//self.server.quitWg.Add(1)
	//defer self.server.quitWg.Done()
	go func() {
		for {
			select {
			case <-self.syncCheckReqC:
			case req := <-self.blockSyncReqC:
				if req == nil {
					// channel closed during shutdown
					continue
				}
				if req.targetBlockNum == 0 {
					// cancel fetcher for peer
					for _, id := range req.targetPeers {
						self.cancelFetcherForPeer(self.peers[id])
					}
					continue
				}

				log.Info("server %v, got sync req(%d, %d) to %v",
					self.cfg.accountStr, req.startBlockNum, req.targetBlockNum, req.targetPeers)
				if req.startBlockNum <= self.stat.store.getLatestBlockNumber() {
					req.startBlockNum = self.stat.store.getLatestBlockNumber() + 1
					log.Info("server %v, sync req start change to %d",
						self.cfg.accountStr, req.startBlockNum)
					if req.startBlockNum > req.targetBlockNum {
						continue
					}
				}
				if err := self.onNewBlockSyncReq(req); err != nil {
					log.Error("server %v failed to handle new block sync req: %s", self.cfg.accountStr, err)
				}

			case syncMsg := <-self.syncMsgC:
				if syncMsg == nil {
					// channel closed during shutdown
					continue
				}
				if p, present := self.peers[syncMsg.fromPeer]; present {
					if p.active {
						p.msgC <- syncMsg.msg
					} else {
						// report err
						p.msgC <- nil
					}
				} else {
					// report error
				}

			case <-self.quitC:
				log.Info("server %v, syncer quit", self.cfg.accountStr)
				return
			}
		}
	}()
}

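// isActive reports whether the syncer still has blocks left to request.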
func (self *Syncer) isActive() bool {
	return self.nextReqBlkNum <= self.targetBlkNum
}

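// startPeerSyncer raises the peer's sync target if needed and, when the peer
// still has blocks to fetch and is not already running, marks it active and
// launches its run loop.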
func (self *Syncer) startPeerSyncer(syncer *PeerSyncer, targetBlkNum uint64) error {

	syncer.lock.Lock()
	defer syncer.lock.Unlock()

	if targetBlkNum > syncer.targetBlkNum {
		syncer.targetBlkNum = targetBlkNum
	}
	if syncer.targetBlkNum >= syncer.nextReqBlkNum && !syncer.active {
		syncer.active = true
		go func() {
			syncer.run()
		}()
	}

	return nil
}

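// cancelFetcherForPeer is meant to stop an in-flight fetch for the given peer;
// the actual cancellation logic is still a TODO.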
func (self *Syncer) cancelFetcherForPeer(peer *PeerSyncer) error {
	if peer == nil {
		return nil
	}

	peer.lock.Lock()
	defer peer.lock.Unlock()

	// TODO

	return nil
}

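// onNewBlockSyncReq merges a new sync request into the syncer's state and
// creates or restarts a PeerSyncer for every target peer (or for all known
// peers when the request names none).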
func (self *Syncer) onNewBlockSyncReq(req *BlockSyncReq) error {
	if req.startBlockNum < self.nextReqBlkNum {
		log.Error("server %v new blockSyncReq startblkNum %d vs %d",
			self.cfg.accountStr, req.startBlockNum, self.nextReqBlkNum)
	}
	if req.targetBlockNum <= self.targetBlkNum {
		return nil
	}
	if self.nextReqBlkNum == 1 {
		self.nextReqBlkNum = req.startBlockNum
	}
	self.targetBlkNum = req.targetBlockNum
	peers := req.targetPeers
	if len(peers) == 0 {
		// no explicit targets: sync from every known peer
		for p := range self.peers {
			peers = append(peers, p)
		}
	}

	for _, peerIdx := range peers {
		if p, present := self.peers[peerIdx]; !present || !p.active {
			nextBlkNum := self.nextReqBlkNum
			if p != nil && p.nextReqBlkNum > nextBlkNum {
				log.Info("server %v, syncer with peer %s start from %d, vs %d",
					self.cfg.accountStr, peerIdx, p.nextReqBlkNum, self.nextReqBlkNum)
				nextBlkNum = p.nextReqBlkNum
			}
			self.peers[peerIdx] = &PeerSyncer{
				peerIdx:       peerIdx,
				nextReqBlkNum: nextBlkNum,
				targetBlkNum:  self.targetBlkNum,
				active:        false,
				//server:        self.server,
				stat:          self.stat,
				msgC:          make(chan comm.ConsensusMsg, 4),
			}
		}
		p := self.peers[peerIdx]
		self.startPeerSyncer(p, self.targetBlkNum)
	}

	return nil
}

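// run is the per-peer block fetch loop; it is currently a no-op stub.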
func (self *PeerSyncer) run() {
}

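// stop deactivates the peer syncer. With force it always stops; otherwise it
// only stops once the peer has passed its sync target. It reports whether the
// syncer was stopped.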
func (self *PeerSyncer) stop(force bool) bool {
	self.lock.Lock()
	defer self.lock.Unlock()
	if force || self.nextReqBlkNum > self.targetBlkNum {
		self.active = false
		return true
	}

	return false
}