github.com/amazechain/amc@v0.1.3/internal/sync/initial-sync/round_robin.go

package initialsync

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/amazechain/amc/api/protocol/types_pb"
	block2 "github.com/amazechain/amc/common/block"
	"github.com/amazechain/amc/utils"
	"github.com/holiman/uint256"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/paulbellamy/ratecounter"
)

const (
	// counterSeconds is the interval, in seconds, over which the average
	// block-processing rate is calculated.
	counterSeconds = 20
)

// batchBlockReceiverFn defines a batch receiving function.
type batchBlockReceiverFn func(chain []block2.IBlock) (int, error)

// roundRobinSync looks at the latest peer statuses and syncs up to the highest known block number.
//
// Step 1 - Sync to the finalized block number.
// Sync with the peers holding the majority view on the best finalized block number
// greater than the node's head state.
func (s *Service) roundRobinSync(highestExpectedBlockNr *uint256.Int) error {
	ctx, cancel := context.WithCancel(s.ctx)
	defer cancel()

	s.counter = ratecounter.NewRateCounter(counterSeconds * time.Second)
	s.highestExpectedBlockNr = highestExpectedBlockNr.Clone()

	// Step 1 - Sync to the end of the finalized block number range.
	return s.syncToFinalizedBlockNr(ctx, highestExpectedBlockNr)
}

// syncToFinalizedBlockNr syncs from the current head to the best known finalized block number.
func (s *Service) syncToFinalizedBlockNr(ctx context.Context, highestExpectedBlockNr *uint256.Int) error {
	if s.cfg.Chain.CurrentBlock().Number64().Cmp(highestExpectedBlockNr) >= 0 {
		// No need to sync: the chain is already at or beyond the finalized block number.
		log.Debug("Already synced to finalized block number")
		return nil
	}

	queue := newBlocksQueue(ctx, &blocksQueueConfig{
		p2p:                    s.cfg.P2P,
		chain:                  s.cfg.Chain,
		highestExpectedBlockNr: highestExpectedBlockNr,
		mode:                   modeStopOnFinalizedEpoch,
	})
	if err := queue.start(); err != nil {
		return err
	}

	// Drain the queue until it closes, handing each fetched batch to the processor.
	for data := range queue.fetchedData {
		s.processFetchedData(ctx, s.cfg.Chain.CurrentBlock().Number64(), data)
	}

	log.Info("Synced to finalized block number - now syncing blocks up to current head",
		"syncedBlockNr", s.cfg.Chain.CurrentBlock().Number64().Uint64(),
		"highestExpectedBlockNr", highestExpectedBlockNr.Uint64())
	if err := queue.stop(); err != nil {
		log.Debug("Error stopping queue", "err", err)
	}

	return nil
}

// processFetchedData processes data received from the queue.
func (s *Service) processFetchedData(ctx context.Context, startBlockNr *uint256.Int, data *blocksQueueFetchedData) {
	defer s.updatePeerScorerStats(data.pid, startBlockNr)

	// Use batch block verification to process and verify batches directly.
	if _, err := s.processBatchedBlocks(ctx, data.blocks, s.cfg.Chain.InsertChain); err != nil {
		log.Warn("Skip processing batched blocks", "err", err)
	}
}

// processBatchedBlocks decodes a batch of protobuf blocks, trims any leading
// blocks the chain has already processed, verifies that the batch connects to
// a known parent, and hands the result to the receiver function.
func (s *Service) processBatchedBlocks(ctx context.Context, blks []*types_pb.Block, bFunc batchBlockReceiverFn) (int, error) {
	if len(blks) == 0 {
		return 0, errors.New("no blocks provided")
	}

	blocks := make([]block2.IBlock, 0, len(blks))
	for _, blk := range blks {
		block := new(block2.Block)
		if err := block.FromProtoMessage(blk); err != nil {
			return 0, err
		}
		blocks = append(blocks, block)
	}

	// Drop leading blocks that the chain has already processed. If the whole
	// batch is stale, report errBlockAlreadyProcessed.
	firstBlock := blocks[0]
	for s.cfg.Chain.CurrentBlock().Number64().Uint64() >= firstBlock.Number64().Uint64() {
		if len(blocks) == 1 {
			return 0, fmt.Errorf("ourCurrentBlockNumber: %d, blockNumber: %d, root: %s: %w", s.cfg.Chain.CurrentBlock().Number64().Uint64(), firstBlock.Number64().Uint64(), firstBlock.Hash(), errBlockAlreadyProcessed)
		}
		blocks = blocks[1:]
		firstBlock = blocks[0]
	}

	if !s.cfg.Chain.HasBlock(firstBlock.ParentHash(), firstBlock.Number64().Uint64()-1) {
		return 0, fmt.Errorf("%w: %s (in processBatchedBlocks, Number=%d)", errParentDoesNotExist, firstBlock.ParentHash(), firstBlock.Number64().Uint64())
	}

	s.logBatchSyncStatus(blks)

	return bFunc(blocks)
}

// updatePeerScorerStats adjusts monitored metrics for a peer.
func (s *Service) updatePeerScorerStats(pid peer.ID, startBlockNr *uint256.Int) {
	if pid == "" {
		return
	}
	headBlockNr := s.cfg.Chain.CurrentBlock().Number64()
	if startBlockNr.Uint64() >= headBlockNr.Uint64() {
		return
	}
	// Credit the peer for every block processed since the batch started.
	if diff := headBlockNr.Uint64() - startBlockNr.Uint64(); diff > 0 {
		scorer := s.cfg.P2P.Peers().Scorers().BlockProviderScorer()
		scorer.IncrementProcessedBlocks(pid, diff)
	}
}

// logBatchSyncStatus logs the current batch sync status and increments the block processing counter.
func (s *Service) logBatchSyncStatus(blks []*types_pb.Block) {
	s.counter.Incr(int64(len(blks)))
	rate := float64(s.counter.Rate()) / counterSeconds
	if rate == 0 {
		rate = 1
	}
	targetNumber, _ := s.cfg.P2P.Peers().BestPeers(1, s.cfg.Chain.CurrentBlock().Number64())
	firstBlock := blks[0]
	firstBlockNumber := utils.ConvertH256ToUint256Int(firstBlock.Header.Number)
	log.Info(
		fmt.Sprintf("Processing block batch of size %d starting from %d - estimated blocks remaining %d",
			len(blks),
			firstBlockNumber.Uint64(),
			new(uint256.Int).Sub(targetNumber, firstBlockNumber).Uint64(),
		),
		"peers", len(s.cfg.P2P.Peers().Connected()),
		"blocksPerSecond", fmt.Sprintf("%.1f", rate),
		"highestExpectedBlockNr", s.highestExpectedBlockNr.Uint64(),
	)
}
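For illustration, a minimal sketch of a custom batchBlockReceiverFn follows. processBatchedBlocks accepts any receiver with this signature (the sync path above wires in s.cfg.Chain.InsertChain), which makes the batch-trimming and parent-existence logic easy to exercise in isolation. The recordingReceiver helper is hypothetical and not part of the repository; it only demonstrates the contract: accept a contiguous chain of decoded blocks and report how many were handled.

package initialsync

import (
	block2 "github.com/amazechain/amc/common/block"
)

// recordingReceiver is a hypothetical batchBlockReceiverFn factory (an
// assumption for this sketch, not repository code). The returned receiver
// counts every block it is handed and reports the whole chain as processed,
// mirroring the (count, error) contract that s.cfg.Chain.InsertChain
// fulfils in the real sync path.
func recordingReceiver(processed *int) batchBlockReceiverFn {
	return func(chain []block2.IBlock) (int, error) {
		*processed += len(chain) // tally blocks delivered by processBatchedBlocks
		return len(chain), nil
	}
}

A test in the same package could then call s.processBatchedBlocks(ctx, blks, recordingReceiver(&n)) and assert on n without touching the real chain.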