package blockchain

import (
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/interfaces"
	"github.com/prysmaticlabs/prysm/shared/attestationutil"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/params"
	"go.opencensus.io/trace"
)

// slotDeadline is a custom deadline for processing state slots in our cache.
// Used by the async next-slot-cache update spawned from onBlock.
const slotDeadline = 5 * time.Second

// depositDeadline is a custom deadline for deposit trie insertion, used by the
// async finalized-deposit insertion spawned on a new finalized checkpoint.
const depositDeadline = 20 * time.Second

// initialSyncBlockCacheSize defines the upper bound for the initial sync block
// cache: two epochs worth of blocks are kept in memory before being flushed to DB.
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)

// onBlock is called when a gossip block is received. It runs regular state transition on the block.
// The block's signing root should be computed before calling this method to avoid redundant
// computation in this method and methods it calls into.
//
// Spec pseudocode definition:
//   def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
//    block = signed_block.message
//    # Parent block must be known
//    assert block.parent_root in store.block_states
//    # Make a copy of the state to avoid mutability issues
//    pre_state = copy(store.block_states[block.parent_root])
//    # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
//    assert get_current_slot(store) >= block.slot
//
//    # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
//    finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
//    assert block.slot > finalized_slot
//    # Check block is a descendant of the finalized block at the checkpoint finalized slot
//    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
//
//    # Check the block is valid and compute the post-state
//    state = pre_state.copy()
//    state_transition(state, signed_block, True)
//    # Add new block to the store
//    store.blocks[hash_tree_root(block)] = block
//    # Add new state for this block to the store
//    store.block_states[hash_tree_root(block)] = state
//
//    # Update justified checkpoint
//    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
//        if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
//            store.best_justified_checkpoint = state.current_justified_checkpoint
//        if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
//            store.justified_checkpoint = state.current_justified_checkpoint
//
//    # Update finalized checkpoint
//    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
//        store.finalized_checkpoint = state.finalized_checkpoint
//
//        # Potentially update justified if different from store
//        if store.justified_checkpoint != state.current_justified_checkpoint:
//            # Update justified if new justified is later than store justified
//            if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
//                store.justified_checkpoint = state.current_justified_checkpoint
//                return
//
//            # Update justified if store justified is not in chain with finalized checkpoint
//            finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
//            ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
//            if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
//                store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
	defer span.End()

	if signed == nil || signed.IsNil() || signed.Block().IsNil() {
		return errors.New("nil block")
	}
	b := signed.Block()

	// Fetch the parent (pre) state; this also validates the block against it.
	preState, err := s.getBlockPreState(ctx, b)
	if err != nil {
		return err
	}

	// Run the full state transition, including signature verification.
	postState, err := state.ExecuteStateTransition(ctx, preState, signed)
	if err != nil {
		return err
	}

	// Persist block + post state and feed them into fork choice.
	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
		return err
	}

	// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
	if featureconfig.Get().EnableNextSlotStateCache {
		go func() {
			// Use a custom deadline here, since this method runs asynchronously.
			// We ignore the parent method's context and instead create a new one
			// with a custom deadline, therefore using the background context instead.
			slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
			defer cancel()
			if err := state.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
				log.WithError(err).Debug("could not update next slot state cache")
			}
		}()
	}

	// Update justified check point if the post state advanced it.
	if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
		if err := s.updateJustified(ctx, postState); err != nil {
			return err
		}
	}

	newFinalized := postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch
	if featureconfig.Get().UpdateHeadTimely {
		// With this flag, the service-level finalized checkpoint is advanced
		// *before* updating head so head selection sees the new finality.
		if newFinalized {
			if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
				return errors.Wrap(err, "could not save new justified")
			}
			s.prevFinalizedCheckpt = s.finalizedCheckpt
			s.finalizedCheckpt = postState.FinalizedCheckpoint()
		}

		// Head update failure is non-fatal; log and continue processing.
		if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
			log.WithError(err).Warn("Could not update head")
		}

		if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
			return err
		}

		// Send notification of the processed block to the state feed.
		s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
			Type: statefeed.BlockProcessed,
			Data: &statefeed.BlockProcessedData{
				Slot:        signed.Block().Slot(),
				BlockRoot:   blockRoot,
				SignedBlock: signed,
				Verified:    true,
			},
		})
	}

	// Update finalized check point.
	if newFinalized {
		if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
			return err
		}
		fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
		// Fork choice nodes at or before the new finalized root can be pruned.
		if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
			return errors.Wrap(err, "could not prune proto array fork choice nodes")
		}
		if !featureconfig.Get().UpdateHeadTimely {
			// Legacy path: justified update implied by finality happens here instead.
			if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
				return errors.Wrap(err, "could not save new justified")
			}
		}
		go func() {
			// Send an event regarding the new finalized checkpoint over a common event feed.
			s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
				Type: statefeed.FinalizedCheckpoint,
				Data: &ethpbv1.EventFinalizedCheckpoint{
					Epoch: postState.FinalizedCheckpoint().Epoch,
					Block: postState.FinalizedCheckpoint().Root,
					State: signed.Block().StateRoot(),
				},
			})

			// Use a custom deadline here, since this method runs asynchronously.
			// We ignore the parent method's context and instead create a new one
			// with a custom deadline, therefore using the background context instead.
			depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
			defer cancel()
			if err := s.insertFinalizedDeposits(depCtx, fRoot); err != nil {
				log.WithError(err).Error("Could not insert finalized deposits.")
			}
		}()

	}

	defer reportAttestationInclusion(b)

	return s.handleEpochBoundary(ctx, postState)
}

// onBlockBatch processes a batch of blocks during initial sync. It runs the
// state transition on each block with signature verification deferred, then
// verifies all collected signatures in one batched BLS verification. It
// returns the finalized and justified checkpoints observed after each block,
// in block order.
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
	blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
	ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
	defer span.End()

	if len(blks) == 0 || len(blockRoots) == 0 {
		return nil, nil, errors.New("no blocks provided")
	}
	if blks[0] == nil || blks[0].IsNil() || blks[0].Block().IsNil() {
		return nil, nil, errors.New("nil block")
	}
	b := blks[0].Block()

	// Retrieve incoming block's pre state.
	if err := s.verifyBlkPreState(ctx, b); err != nil {
		return nil, nil, err
	}
	preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
	if err != nil {
		return nil, nil, err
	}
	if preState == nil || preState.IsNil() {
		return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
	}

	jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
	fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
	// Accumulates the signature sets of every block for one batched verification.
	sigSet := &bls.SignatureSet{
		Signatures: [][]byte{},
		PublicKeys: []bls.PublicKey{},
		Messages:   [][32]byte{},
	}
	var set *bls.SignatureSet
	boundaries := make(map[[32]byte]iface.BeaconState)
	for i, b := range blks {
		// preState is threaded through: each block's post state is the next block's pre state.
		set, preState, err = state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
		if err != nil {
			return nil, nil, err
		}
		// Save potential boundary states.
		if helpers.IsEpochStart(preState.Slot()) {
			boundaries[blockRoots[i]] = preState.Copy()
			if err := s.handleEpochBoundary(ctx, preState); err != nil {
				return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
			}
		}
		jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
		fCheckpoints[i] = preState.FinalizedCheckpoint()
		sigSet.Join(set)
	}
	verify, err := sigSet.Verify()
	if err != nil {
		return nil, nil, err
	}
	if !verify {
		return nil, nil, errors.New("batch block signature verification failed")
	}
	// Only persist boundary states once all signatures have been verified.
	for r, st := range boundaries {
		if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
			return nil, nil, err
		}
	}
	// Also saves the last post state which to be used as pre state for the next batch.
	lastB := blks[len(blks)-1]
	lastBR := blockRoots[len(blockRoots)-1]
	if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
		return nil, nil, err
	}
	if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
		return nil, nil, err
	}
	return fCheckpoints, jCheckpoints, nil
}

// handleBlockAfterBatchVerify handles a block after the block's batch has been verified, where we can
// save blocks, their state summaries and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interfaces.SignedBeaconBlock,
	blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	b := signed.Block()

	s.saveInitSyncBlock(blockRoot, signed)
	if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
		return err
	}
	if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &pb.StateSummary{
		Slot: signed.Block().Slot(),
		Root: blockRoot[:],
	}); err != nil {
		return err
	}

	// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
	if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
		if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
			return err
		}
		s.clearInitSyncBlocks()
	}

	if jCheckpoint.Epoch > s.justifiedCheckpt.Epoch {
		if err := s.updateJustifiedInitSync(ctx, jCheckpoint); err != nil {
			return err
		}
	}

	// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
	if fCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
		if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
			return err
		}
		if featureconfig.Get().UpdateHeadTimely {
			s.prevFinalizedCheckpt = s.finalizedCheckpt
			s.finalizedCheckpt = fCheckpoint
		}
	}
	return nil
}

// handleEpochBoundary performs epoch boundary bookkeeping such as logging epoch
// summaries and warming the committee/proposer-index caches for the next epoch.
func (s *Service) handleEpochBoundary(ctx context.Context, postState iface.BeaconState) error {
	if postState.Slot()+1 == s.nextEpochBoundarySlot {
		// Update caches for the next epoch at epoch boundary slot - 1.
		if err := helpers.UpdateCommitteeCache(postState, helpers.NextEpoch(postState)); err != nil {
			return err
		}
		// Advance a copy of the state by one slot so proposer indices for the
		// next epoch can be computed without mutating postState.
		copied := postState.Copy()
		copied, err := state.ProcessSlots(ctx, copied, copied.Slot()+1)
		if err != nil {
			return err
		}
		if err := helpers.UpdateProposerIndicesInCache(copied); err != nil {
			return err
		}
	} else if postState.Slot() >= s.nextEpochBoundarySlot {
		if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
			return err
		}
		var err error
		s.nextEpochBoundarySlot, err = helpers.StartSlot(helpers.NextEpoch(postState))
		if err != nil {
			return err
		}

		// Update caches at epoch boundary slot.
		// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
		if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
			return err
		}
		if err := helpers.UpdateProposerIndicesInCache(postState); err != nil {
			return err
		}
	}

	return nil
}

// insertBlockAndAttestationsToForkChoiceStore feeds in the block and block's attestations
// to fork choice store. It allows fork choice store to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte,
	st iface.BeaconState) error {
	fCheckpoint := st.FinalizedCheckpoint()
	jCheckpoint := st.CurrentJustifiedCheckpoint()
	if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
		return err
	}
	// Feed in block's attestations to fork choice store.
	for _, a := range blk.Body().Attestations() {
		committee, err := helpers.BeaconCommitteeFromState(st, a.Data.Slot, a.Data.CommitteeIndex)
		if err != nil {
			return err
		}
		indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
		if err != nil {
			return err
		}
		s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
	}
	return nil
}

// insertBlockToForkChoiceStore inserts the block (after back-filling any missing
// ancestors) into the proto array fork choice store.
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock,
	root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
		return err
	}
	// Feed in block to fork choice store.
	if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
		blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), bytesutil.ToBytes32(blk.Body().Graffiti()),
		jCheckpoint.Epoch,
		fCheckpoint.Epoch); err != nil {
		return errors.Wrap(err, "could not process block for proto array fork choice")
	}
	return nil
}

// savePostStateInfo saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st iface.BeaconState, initSync bool) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
	defer span.End()
	if initSync {
		// During initial sync blocks are cached in memory and flushed in bulk.
		s.saveInitSyncBlock(r, b)
	} else if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
		return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
	}
	if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
		return errors.Wrap(err, "could not save state")
	}
	if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
		return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
	}
	return nil
}

// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
418 func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error { 419 if !featureconfig.Get().CorrectlyPruneCanonicalAtts { 420 return nil 421 } 422 423 canonical, err := s.IsCanonical(ctx, r) 424 if err != nil { 425 return err 426 } 427 if !canonical { 428 return nil 429 } 430 431 atts := b.Block().Body().Attestations() 432 for _, att := range atts { 433 if helpers.IsAggregated(att) { 434 if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil { 435 return err 436 } 437 } else { 438 if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil { 439 return err 440 } 441 } 442 } 443 return nil 444 }