package initialsync

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/paulbellamy/ratecounter"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/proto/interfaces"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/sirupsen/logrus"
)

const (
	// counterSeconds is an interval over which an average rate will be calculated.
	counterSeconds = 20
)

// blockReceiverFn defines block receiving function.
type blockReceiverFn func(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error

// batchBlockReceiverFn defines batch receiving function.
type batchBlockReceiverFn func(ctx context.Context, blks []interfaces.SignedBeaconBlock, roots [][32]byte) error

// Round Robin sync looks at the latest peer statuses and syncs up to the highest known epoch.
//
// Step 1 - Sync to finalized epoch.
// Sync with peers having the majority on best finalized epoch greater than node's head state.
//
// Step 2 - Sync to head from finalized epoch.
// Using enough peers (at least, MinimumSyncPeers*2, for example) obtain best non-finalized epoch,
// known to majority of the peers, and keep fetching blocks, up until that epoch is reached.
func (s *Service) roundRobinSync(genesis time.Time) error {
	ctx, cancel := context.WithCancel(s.ctx)
	defer cancel()
	// Disable the skip-slot cache for the duration of initial sync; it is
	// re-enabled once this function returns.
	state.SkipSlotCache.Disable()
	defer state.SkipSlotCache.Enable()

	// Rate counter used by the log helpers to estimate blocks/second.
	s.counter = ratecounter.NewRateCounter(counterSeconds * time.Second)

	// Step 1 - Sync to end of finalized epoch.
	if err := s.syncToFinalizedEpoch(ctx, genesis); err != nil {
		return err
	}

	// Already at head, no need for 2nd phase.
	if s.cfg.Chain.HeadSlot() == helpers.SlotsSince(genesis) {
		return nil
	}

	// Step 2 - sync to head from majority of peers (from no less than MinimumSyncPeers*2 peers)
	// having the same world view on non-finalized epoch.
	return s.syncToNonFinalizedEpoch(ctx, genesis)
}

// syncToFinalizedEpoch sync from head to best known finalized epoch.
func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) error {
	// Target the first slot of the epoch after the peers' highest finalized epoch.
	highestFinalizedSlot, err := helpers.StartSlot(s.highestFinalizedEpoch() + 1)
	if err != nil {
		return err
	}
	if s.cfg.Chain.HeadSlot() >= highestFinalizedSlot {
		// No need to sync, already synced to the finalized slot.
		log.Debug("Already synced to finalized epoch")
		return nil
	}
	queue := newBlocksQueue(ctx, &blocksQueueConfig{
		p2p:                 s.cfg.P2P,
		db:                  s.cfg.DB,
		chain:               s.cfg.Chain,
		highestExpectedSlot: highestFinalizedSlot,
		mode:                modeStopOnFinalizedEpoch,
	})
	if err := queue.start(); err != nil {
		return err
	}

	// Drain fetched batches until the queue closes its channel.
	for data := range queue.fetchedData {
		s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
	}

	log.WithFields(logrus.Fields{
		"syncedSlot": s.cfg.Chain.HeadSlot(),
		"headSlot":   helpers.SlotsSince(genesis),
	}).Info("Synced to finalized epoch - now syncing blocks up to current head")
	if err := queue.stop(); err != nil {
		log.WithError(err).Debug("Error stopping queue")
	}

	return nil
}

// syncToNonFinalizedEpoch sync from head to best known non-finalized epoch supported by majority
// of peers (no less than MinimumSyncPeers*2 peers).
func (s *Service) syncToNonFinalizedEpoch(ctx context.Context, genesis time.Time) error {
	queue := newBlocksQueue(ctx, &blocksQueueConfig{
		p2p:   s.cfg.P2P,
		db:    s.cfg.DB,
		chain: s.cfg.Chain,
		// Unlike the finalized phase, the target here is the current wall-clock slot.
		highestExpectedSlot: helpers.SlotsSince(genesis),
		mode:                modeNonConstrained,
	})
	if err := queue.start(); err != nil {
		return err
	}
	// Drain fetched batches until the queue closes its channel; blocks are
	// processed one at a time in this phase.
	for data := range queue.fetchedData {
		s.processFetchedDataRegSync(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
	}
	log.WithFields(logrus.Fields{
		"syncedSlot": s.cfg.Chain.HeadSlot(),
		"headSlot":   helpers.SlotsSince(genesis),
	}).Info("Synced to head of chain")
	if err := queue.stop(); err != nil {
		log.WithError(err).Debug("Error stopping queue")
	}

	return nil
}

// processFetchedData processes data received from queue.
func (s *Service) processFetchedData(
	ctx context.Context, genesis time.Time, startSlot types.Slot, data *blocksQueueFetchedData) {
	defer s.updatePeerScorerStats(data.pid, startSlot)

	// Use Batch Block Verify to process and verify batches directly.
	if err := s.processBatchedBlocks(ctx, genesis, data.blocks, s.cfg.Chain.ReceiveBlockBatch); err != nil {
		log.WithError(err).Warn("Batch is not processed")
	}
}

// processFetchedDataRegSync processes data received from queue one block at a
// time, via the regular (non-batched) block receiver.
139 func (s *Service) processFetchedDataRegSync( 140 ctx context.Context, genesis time.Time, startSlot types.Slot, data *blocksQueueFetchedData) { 141 defer s.updatePeerScorerStats(data.pid, startSlot) 142 143 blockReceiver := s.cfg.Chain.ReceiveBlock 144 invalidBlocks := 0 145 for _, blk := range data.blocks { 146 if err := s.processBlock(ctx, genesis, blk, blockReceiver); err != nil { 147 switch { 148 case errors.Is(err, errBlockAlreadyProcessed): 149 log.WithError(err).Debug("Block is not processed") 150 invalidBlocks++ 151 case errors.Is(err, errParentDoesNotExist): 152 log.WithError(err).Debug("Block is not processed") 153 invalidBlocks++ 154 default: 155 log.WithError(err).Warn("Block is not processed") 156 } 157 continue 158 } 159 } 160 // Add more visible logging if all blocks cannot be processed. 161 if len(data.blocks) == invalidBlocks { 162 log.WithField("error", "Range had no valid blocks to process").Warn("Range is not processed") 163 } 164 } 165 166 // highestFinalizedEpoch returns the absolute highest finalized epoch of all connected peers. 167 // Note this can be lower than our finalized epoch if we have no peers or peers that are all behind us. 168 func (s *Service) highestFinalizedEpoch() types.Epoch { 169 highest := types.Epoch(0) 170 for _, pid := range s.cfg.P2P.Peers().Connected() { 171 peerChainState, err := s.cfg.P2P.Peers().ChainState(pid) 172 if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch > highest { 173 highest = peerChainState.FinalizedEpoch 174 } 175 } 176 177 return highest 178 } 179 180 // logSyncStatus and increment block processing counter. 
181 func (s *Service) logSyncStatus(genesis time.Time, blk interfaces.BeaconBlock, blkRoot [32]byte) { 182 s.counter.Incr(1) 183 rate := float64(s.counter.Rate()) / counterSeconds 184 if rate == 0 { 185 rate = 1 186 } 187 if helpers.IsEpochStart(blk.Slot()) { 188 timeRemaining := time.Duration(float64(helpers.SlotsSince(genesis)-blk.Slot())/rate) * time.Second 189 log.WithFields(logrus.Fields{ 190 "peers": len(s.cfg.P2P.Peers().Connected()), 191 "blocksPerSecond": fmt.Sprintf("%.1f", rate), 192 }).Infof( 193 "Processing block %s %d/%d - estimated time remaining %s", 194 fmt.Sprintf("0x%s...", hex.EncodeToString(blkRoot[:])[:8]), 195 blk.Slot(), helpers.SlotsSince(genesis), timeRemaining, 196 ) 197 } 198 } 199 200 // logBatchSyncStatus and increments the block processing counter. 201 func (s *Service) logBatchSyncStatus(genesis time.Time, blks []interfaces.SignedBeaconBlock, blkRoot [32]byte) { 202 s.counter.Incr(int64(len(blks))) 203 rate := float64(s.counter.Rate()) / counterSeconds 204 if rate == 0 { 205 rate = 1 206 } 207 firstBlk := blks[0] 208 timeRemaining := time.Duration(float64(helpers.SlotsSince(genesis)-firstBlk.Block().Slot())/rate) * time.Second 209 log.WithFields(logrus.Fields{ 210 "peers": len(s.cfg.P2P.Peers().Connected()), 211 "blocksPerSecond": fmt.Sprintf("%.1f", rate), 212 }).Infof( 213 "Processing block batch of size %d starting from %s %d/%d - estimated time remaining %s", 214 len(blks), fmt.Sprintf("0x%s...", hex.EncodeToString(blkRoot[:])[:8]), 215 firstBlk.Block().Slot(), helpers.SlotsSince(genesis), timeRemaining, 216 ) 217 } 218 219 // processBlock performs basic checks on incoming block, and triggers receiver function. 
func (s *Service) processBlock(
	ctx context.Context,
	genesis time.Time,
	blk interfaces.SignedBeaconBlock,
	blockReceiver blockReceiverFn,
) error {
	blkRoot, err := blk.Block().HashTreeRoot()
	if err != nil {
		return err
	}
	if s.isProcessedBlock(ctx, blk, blkRoot) {
		return fmt.Errorf("slot: %d , root %#x: %w", blk.Block().Slot(), blkRoot, errBlockAlreadyProcessed)
	}

	s.logSyncStatus(genesis, blk.Block(), blkRoot)
	parentRoot := bytesutil.ToBytes32(blk.Block().ParentRoot())
	// A block whose parent is neither in the DB nor in the init-sync cache
	// cannot be attached to the chain yet.
	if !s.cfg.DB.HasBlock(ctx, parentRoot) && !s.cfg.Chain.HasInitSyncBlock(parentRoot) {
		return fmt.Errorf("%w: %#x", errParentDoesNotExist, blk.Block().ParentRoot())
	}
	return blockReceiver(ctx, blk, blkRoot)
}

// processBatchedBlocks trims the already-processed prefix of a batch, verifies
// the remaining blocks form a linear parent-child chain attached to a known
// block, and hands the batch to the given receiver.
func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
	blks []interfaces.SignedBeaconBlock, bFunc batchBlockReceiverFn) error {
	if len(blks) == 0 {
		return errors.New("0 blocks provided into method")
	}
	firstBlock := blks[0]
	blkRoot, err := firstBlock.Block().HashTreeRoot()
	if err != nil {
		return err
	}
	headSlot := s.cfg.Chain.HeadSlot()
	// Skip leading blocks that are at/below head and already known.
	for headSlot >= firstBlock.Block().Slot() && s.isProcessedBlock(ctx, firstBlock, blkRoot) {
		if len(blks) == 1 {
			return errors.New("no good blocks in batch")
		}
		blks = blks[1:]
		firstBlock = blks[0]
		blkRoot, err = firstBlock.Block().HashTreeRoot()
		if err != nil {
			return err
		}
	}
	s.logBatchSyncStatus(genesis, blks, blkRoot)
	// The first remaining block must attach to something we already have.
	parentRoot := bytesutil.ToBytes32(firstBlock.Block().ParentRoot())
	if !s.cfg.DB.HasBlock(ctx, parentRoot) && !s.cfg.Chain.HasInitSyncBlock(parentRoot) {
		return fmt.Errorf("%w: %#x", errParentDoesNotExist, firstBlock.Block().ParentRoot())
	}
	blockRoots := make([][32]byte, len(blks))
	blockRoots[0] = blkRoot
	// Every subsequent block must reference its predecessor's root as parent.
	for i := 1; i < len(blks); i++ {
		b := blks[i]
		if !bytes.Equal(b.Block().ParentRoot(), blockRoots[i-1][:]) {
			return fmt.Errorf("expected linear block list with parent root of %#x but received %#x",
				blockRoots[i-1][:], b.Block().ParentRoot())
		}
		blkRoot, err := b.Block().HashTreeRoot()
		if err != nil {
			return err
		}
		blockRoots[i] = blkRoot
	}
	return bFunc(ctx, blks, blockRoots)
}

// updatePeerScorerStats adjusts monitored metrics for a peer.
func (s *Service) updatePeerScorerStats(pid peer.ID, startSlot types.Slot) {
	if pid == "" {
		return
	}
	headSlot := s.cfg.Chain.HeadSlot()
	if startSlot >= headSlot {
		return
	}
	// NOTE(review): the diff > 0 check looks redundant — startSlot < headSlot is
	// already guaranteed by the guard above (unless HeadSlot moved backwards
	// between the two calls); confirm and simplify.
	if diff := s.cfg.Chain.HeadSlot() - startSlot; diff > 0 {
		scorer := s.cfg.P2P.Peers().Scorers().BlockProviderScorer()
		scorer.IncrementProcessedBlocks(pid, uint64(diff))
	}
}

// isProcessedBlock checks DB and local cache for presence of a given block, to avoid duplicates.
func (s *Service) isProcessedBlock(ctx context.Context, blk interfaces.SignedBeaconBlock, blkRoot [32]byte) bool {
	finalizedSlot, err := helpers.StartSlot(s.cfg.Chain.FinalizedCheckpt().Epoch)
	if err != nil {
		return false
	}
	// If block is before our finalized checkpoint
	// we do not process it.
	if blk.Block().Slot() <= finalizedSlot {
		return true
	}
	blockExistsInDB := s.cfg.DB.HasBlock(ctx, blkRoot) || s.cfg.Chain.HasInitSyncBlock(blkRoot)
	// If block exists in our db and is before or equal to our current head
	// we ignore it.
	if blockExistsInDB && s.cfg.Chain.HeadSlot() >= blk.Block().Slot() {
		return true
	}
	return false
}