github.com/electroneum/electroneum-sc@v0.0.0-20230105223411-3bc1d078281e/core/blockchain.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package core implements the Ethereum consensus protocol. 18 package core 19 20 import ( 21 "errors" 22 "fmt" 23 "io" 24 "math/big" 25 "sort" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/electroneum/electroneum-sc/common" 31 "github.com/electroneum/electroneum-sc/common/mclock" 32 "github.com/electroneum/electroneum-sc/common/prque" 33 "github.com/electroneum/electroneum-sc/consensus" 34 "github.com/electroneum/electroneum-sc/core/rawdb" 35 "github.com/electroneum/electroneum-sc/core/state" 36 "github.com/electroneum/electroneum-sc/core/state/snapshot" 37 "github.com/electroneum/electroneum-sc/core/types" 38 "github.com/electroneum/electroneum-sc/core/vm" 39 "github.com/electroneum/electroneum-sc/ethdb" 40 "github.com/electroneum/electroneum-sc/event" 41 "github.com/electroneum/electroneum-sc/internal/syncx" 42 "github.com/electroneum/electroneum-sc/log" 43 "github.com/electroneum/electroneum-sc/metrics" 44 "github.com/electroneum/electroneum-sc/params" 45 "github.com/electroneum/electroneum-sc/trie" 46 lru "github.com/hashicorp/golang-lru" 47 ) 48 49 var ( 50 headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil) 51 headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil) 52 headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil) 53 headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil) 54 55 accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil) 56 accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil) 57 accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil) 58 accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil) 59 60 storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil) 61 storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil) 62 storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil) 63 storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil) 64 65 snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil) 66 snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil) 67 snapshotCommitTimer = metrics.NewRegisteredTimer("chain/snapshot/commits", nil) 68 69 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 70 blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) 71 blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) 72 blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) 73 74 
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil) 75 blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) 76 blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) 77 blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil) 78 79 blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil) 80 blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil) 81 82 errInsertionInterrupted = errors.New("insertion is interrupted") 83 errChainStopped = errors.New("blockchain is stopped") 84 ) 85 86 const ( 87 bodyCacheLimit = 256 88 blockCacheLimit = 256 89 receiptsCacheLimit = 32 90 txLookupCacheLimit = 1024 91 maxFutureBlocks = 256 92 maxTimeFutureBlocks = 30 93 TriesInMemory = 128 94 95 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 96 // 97 // Changelog: 98 // 99 // - Version 4 100 // The following incompatible database changes were added: 101 // * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted 102 // * the `Bloom` field of receipt is deleted 103 // * the `BlockIndex` and `TxIndex` fields of txlookup are deleted 104 // - Version 5 105 // The following incompatible database changes were added: 106 // * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt 107 // * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the 108 // receipts' corresponding block 109 // - Version 6 110 // The following incompatible database changes were added: 111 // * Transaction lookup information stores the corresponding block number instead of block hash 112 // - Version 7 113 // The following incompatible database changes were added: 114 // * Use freezer as the ancient database to maintain all ancient data 115 // - Version 8 116 // The following incompatible database changes were added: 117 // * New scheme for contract code in order to separate the codes and trie nodes 118 BlockChainVersion uint64 = 8 119 ) 120 121 // CacheConfig contains the configuration values for the trie caching/pruning 122 // that's resident in a blockchain. 123 type CacheConfig struct { 124 TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory 125 TrieCleanJournal string // Disk journal for saving clean cache entries. 126 TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically 127 TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks 128 TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk 129 TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node) 130 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 131 SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory 132 Preimages bool // Whether to store preimage of trie key to the disk 133 134 SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it 135 } 136 137 // defaultCacheConfig are the default caching values if none are specified by the 138 // user (also used during testing). 
139 var defaultCacheConfig = &CacheConfig{
140 	TrieCleanLimit: 256,
141 	TrieDirtyLimit: 256,
142 	TrieTimeLimit:  5 * time.Minute,
143 	SnapshotLimit:  256,
144 	SnapshotWait:   true,
145 }
146 
147 // BlockChain represents the canonical chain given a database with a genesis
148 // block. The BlockChain manages chain imports, reverts and chain reorganisations.
149 //
150 // Importing blocks into the block chain happens according to the set of rules
151 // defined by the two stage Validator. Processing of blocks is done using the
152 // Processor which processes the included transactions. The validation of the state
153 // is done in the second part of the Validator. A failure in either stage aborts
154 // the import.
155 //
156 // The BlockChain also helps in returning blocks from **any** chain included
157 // in the database as well as blocks that represent the canonical chain. It is
158 // important to note that GetBlock can return any block and does not need to be
159 // included in the canonical chain, whereas GetBlockByNumber always represents the
160 // canonical chain.
161 type BlockChain struct {
162 	chainConfig *params.ChainConfig // Chain & network configuration
163 	cacheConfig *CacheConfig        // Cache configuration for pruning
164 
165 	db     ethdb.Database // Low level persistent database to store final content in
166 	snaps  *snapshot.Tree // Snapshot tree for fast trie leaf access
167 	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
168 	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
169 
170 	// txLookupLimit is the maximum number of blocks from head whose tx indices
171 	// are reserved:
172 	//  * 0:   means no limit and regenerate any missing indexes
173 	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
174 	//  * nil: disable tx reindexer/deleter, but still index new blocks
175 	txLookupLimit uint64
176 
177 	hc            *HeaderChain
178 	rmLogsFeed    event.Feed
179 	chainFeed     event.Feed
180 	chainSideFeed event.Feed
181 	chainHeadFeed event.Feed
182 	logsFeed      event.Feed
183 	blockProcFeed event.Feed
184 	scope         event.SubscriptionScope
185 	genesisBlock  *types.Block
186 
187 	// This mutex synchronizes chain write operations.
188 	// Readers don't need to take it, they can just read the database.
189 	chainmu *syncx.ClosableMutex
190 
191 	currentBlock          atomic.Value // Current head of the block chain
192 	currentFastBlock      atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
193 	currentFinalizedBlock atomic.Value // Current finalized head
194 
195 	stateCache    state.Database // State database to reuse between imports (contains state cache)
196 	bodyCache     *lru.Cache     // Cache for the most recent block bodies
197 	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
198 	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
199 	blockCache    *lru.Cache     // Cache for the most recent entire blocks
200 	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
201 	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing
202 
203 	wg   sync.WaitGroup //
204 	quit chan struct{}  // shutdown signal, closed in Stop.
205 running int32 // 0 if chain is running, 1 when stopped 206 procInterrupt int32 // interrupt signaler for block processing 207 208 engine consensus.Engine 209 validator Validator // Block and state validator interface 210 prefetcher Prefetcher 211 processor Processor // Block transaction processor interface 212 forker *ForkChoice 213 vmConfig vm.Config 214 } 215 216 // NewBlockChain returns a fully initialised block chain using information 217 // available in the database. It initialises the default Ethereum Validator 218 // and Processor. 219 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) { 220 if cacheConfig == nil { 221 cacheConfig = defaultCacheConfig 222 } 223 bodyCache, _ := lru.New(bodyCacheLimit) 224 bodyRLPCache, _ := lru.New(bodyCacheLimit) 225 receiptsCache, _ := lru.New(receiptsCacheLimit) 226 blockCache, _ := lru.New(blockCacheLimit) 227 txLookupCache, _ := lru.New(txLookupCacheLimit) 228 futureBlocks, _ := lru.New(maxFutureBlocks) 229 230 bc := &BlockChain{ 231 chainConfig: chainConfig, 232 cacheConfig: cacheConfig, 233 db: db, 234 triegc: prque.New(nil), 235 stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ 236 Cache: cacheConfig.TrieCleanLimit, 237 Journal: cacheConfig.TrieCleanJournal, 238 Preimages: cacheConfig.Preimages, 239 }), 240 quit: make(chan struct{}), 241 chainmu: syncx.NewClosableMutex(), 242 bodyCache: bodyCache, 243 bodyRLPCache: bodyRLPCache, 244 receiptsCache: receiptsCache, 245 blockCache: blockCache, 246 txLookupCache: txLookupCache, 247 futureBlocks: futureBlocks, 248 engine: engine, 249 vmConfig: vmConfig, 250 } 251 bc.forker = NewForkChoice(bc, shouldPreserve) 252 bc.validator = NewBlockValidator(chainConfig, bc, engine) 253 bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) 254 bc.processor = NewStateProcessor(chainConfig, bc, engine) 255 256 var err error 257 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped) 258 if err != nil { 259 return nil, err 260 } 261 bc.genesisBlock = bc.GetBlockByNumber(0) 262 if bc.genesisBlock == nil { 263 return nil, ErrNoGenesis 264 } 265 266 var nilBlock *types.Block 267 bc.currentBlock.Store(nilBlock) 268 bc.currentFastBlock.Store(nilBlock) 269 bc.currentFinalizedBlock.Store(nilBlock) 270 271 // Initialize the chain with ancient data if it isn't empty. 272 var txIndexBlock uint64 273 274 if bc.empty() { 275 rawdb.InitDatabaseFromFreezer(bc.db) 276 // If ancient database is not empty, reconstruct all missing 277 // indices in the background. 278 frozen, _ := bc.db.Ancients() 279 if frozen > 0 { 280 txIndexBlock = frozen 281 } 282 } 283 if err := bc.loadLastState(); err != nil { 284 return nil, err 285 } 286 287 // Make sure the state associated with the block is available 288 head := bc.CurrentBlock() 289 if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { 290 // Head state is missing, before the state recovery, find out the 291 // disk layer point of snapshot(if it's enabled). Make sure the 292 // rewound point is lower than disk layer. 
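	// The snapshot's persisted disk layer corresponds to one specific state
	// root, so the repair below rewinds at least as far back as that root
	// (diskRoot) and, when the root is crossed, records its block number so
	// the snapshot recovery logic further down can pick it up.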
293 var diskRoot common.Hash 294 if bc.cacheConfig.SnapshotLimit > 0 { 295 diskRoot = rawdb.ReadSnapshotRoot(bc.db) 296 } 297 if diskRoot != (common.Hash{}) { 298 log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot) 299 300 snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true) 301 if err != nil { 302 return nil, err 303 } 304 // Chain rewound, persist old snapshot number to indicate recovery procedure 305 if snapDisk != 0 { 306 rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk) 307 } 308 } else { 309 log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash()) 310 if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil { 311 return nil, err 312 } 313 } 314 } 315 316 // Ensure that a previous crash in SetHead doesn't leave extra ancients 317 if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 { 318 var ( 319 needRewind bool 320 low uint64 321 ) 322 // The head full block may be rolled back to a very low height due to 323 // blockchain repair. If the head full block is even lower than the ancient 324 // chain, truncate the ancient store. 325 fullBlock := bc.CurrentBlock() 326 if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 { 327 needRewind = true 328 low = fullBlock.NumberU64() 329 } 330 // In fast sync, it may happen that ancient data has been written to the 331 // ancient store, but the LastFastBlock has not been updated, truncate the 332 // extra data here. 333 fastBlock := bc.CurrentFastBlock() 334 if fastBlock != nil && fastBlock.NumberU64() < frozen-1 { 335 needRewind = true 336 if fastBlock.NumberU64() < low || low == 0 { 337 low = fastBlock.NumberU64() 338 } 339 } 340 if needRewind { 341 log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low) 342 if err := bc.SetHead(low); err != nil { 343 return nil, err 344 } 345 } 346 } 347 // The first thing the node will do is reconstruct the verification data for 348 // the head block (ethash cache or clique voting snapshot). Might as well do 349 // it in advance. 350 bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true) 351 352 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 353 for hash := range BadHashes { 354 if header := bc.GetHeaderByHash(hash); header != nil { 355 // get the canonical block corresponding to the offending header's number 356 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 357 // make sure the headerByNumber (if present) is in our current canonical chain 358 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 359 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 360 if err := bc.SetHead(header.Number.Uint64() - 1); err != nil { 361 return nil, err 362 } 363 log.Error("Chain rewind was successful, resuming normal operation") 364 } 365 } 366 } 367 368 // Load any existing snapshot, regenerating it if loading failed 369 if bc.cacheConfig.SnapshotLimit > 0 { 370 // If the chain was rewound past the snapshot persistent layer (causing 371 // a recovery block number to be persisted to disk), check if we're still 372 // in recovery mode and in that case, don't invalidate the snapshot on a 373 // head mismatch. 
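	// Concretely: a recovery number that is still above the current head means
	// the chain is still below the point where the snapshot's disk layer was
	// generated, so snapshot.New is asked to recover the existing layers
	// instead of invalidating and rebuilding them on the head mismatch.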
374 var recover bool 375 376 head := bc.CurrentBlock() 377 if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() { 378 log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer) 379 recover = true 380 } 381 bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover) 382 } 383 384 // Start future block processor. 385 bc.wg.Add(1) 386 go bc.updateFutureBlocks() 387 388 // Start tx indexer/unindexer. 389 if txLookupLimit != nil { 390 bc.txLookupLimit = *txLookupLimit 391 392 bc.wg.Add(1) 393 go bc.maintainTxIndex(txIndexBlock) 394 } 395 396 // If periodic cache journal is required, spin it up. 397 if bc.cacheConfig.TrieCleanRejournal > 0 { 398 if bc.cacheConfig.TrieCleanRejournal < time.Minute { 399 log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute) 400 bc.cacheConfig.TrieCleanRejournal = time.Minute 401 } 402 triedb := bc.stateCache.TrieDB() 403 bc.wg.Add(1) 404 go func() { 405 defer bc.wg.Done() 406 triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) 407 }() 408 } 409 return bc, nil 410 } 411 412 // empty returns an indicator whether the blockchain is empty. 413 // Note, it's a special case that we connect a non-empty ancient 414 // database with an empty node, so that we can plugin the ancient 415 // into node seamlessly. 416 func (bc *BlockChain) empty() bool { 417 genesis := bc.genesisBlock.Hash() 418 for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} { 419 if hash != genesis { 420 return false 421 } 422 } 423 return true 424 } 425 426 // loadLastState loads the last known chain state from the database. This method 427 // assumes that the chain manager mutex is held. 
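// The head block marker is restored first; the head header and the fast-sync
// head are then restored on top of it, falling back to the head block when
// their own markers are missing, while the finalized block marker is only
// applied if it resolves to a known block.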
428 func (bc *BlockChain) loadLastState() error { 429 // Restore the last known head block 430 head := rawdb.ReadHeadBlockHash(bc.db) 431 if head == (common.Hash{}) { 432 // Corrupt or empty database, init from scratch 433 log.Warn("Empty database, resetting chain") 434 return bc.Reset() 435 } 436 // Make sure the entire head block is available 437 currentBlock := bc.GetBlockByHash(head) 438 if currentBlock == nil { 439 // Corrupt or empty database, init from scratch 440 log.Warn("Head block missing, resetting chain", "hash", head) 441 return bc.Reset() 442 } 443 // Everything seems to be fine, set as the head block 444 bc.currentBlock.Store(currentBlock) 445 headBlockGauge.Update(int64(currentBlock.NumberU64())) 446 447 // Restore the last known head header 448 currentHeader := currentBlock.Header() 449 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 450 if header := bc.GetHeaderByHash(head); header != nil { 451 currentHeader = header 452 } 453 } 454 bc.hc.SetCurrentHeader(currentHeader) 455 456 // Restore the last known head fast block 457 bc.currentFastBlock.Store(currentBlock) 458 headFastBlockGauge.Update(int64(currentBlock.NumberU64())) 459 460 if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 461 if block := bc.GetBlockByHash(head); block != nil { 462 bc.currentFastBlock.Store(block) 463 headFastBlockGauge.Update(int64(block.NumberU64())) 464 } 465 } 466 467 // Restore the last known finalized block 468 if head := rawdb.ReadFinalizedBlockHash(bc.db); head != (common.Hash{}) { 469 if block := bc.GetBlockByHash(head); block != nil { 470 bc.currentFinalizedBlock.Store(block) 471 headFinalizedBlockGauge.Update(int64(block.NumberU64())) 472 } 473 } 474 // Issue a status log for the user 475 currentFastBlock := bc.CurrentFastBlock() 476 currentFinalizedBlock := bc.CurrentFinalizedBlock() 477 478 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 479 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 480 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 481 482 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) 483 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) 484 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) 485 486 if currentFinalizedBlock != nil { 487 finalTd := bc.GetTd(currentFinalizedBlock.Hash(), currentFinalizedBlock.NumberU64()) 488 log.Info("Loaded most recent local finalized block", "number", currentFinalizedBlock.Number(), "hash", currentFinalizedBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalizedBlock.Time()), 0))) 489 } 490 if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil { 491 log.Info("Loaded last fast-sync pivot marker", "number", *pivot) 492 } 493 return nil 494 } 495 496 // SetHead rewinds the local chain to a new head. Depending on whether the node 497 // was fast synced or full synced and in which state, the method will try to 498 // delete minimal data from disk whilst retaining chain consistency. 
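//
// A minimal usage sketch (the block number is purely illustrative):
//
//	// Roll the chain back so block 1000000 becomes the new head; everything
//	// above it is unwound and, where possible, removed from disk.
//	if err := bc.SetHead(1000000); err != nil {
//		log.Error("Chain rewind failed", "err", err)
//	}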
499 func (bc *BlockChain) SetHead(head uint64) error {
500 	_, err := bc.setHeadBeyondRoot(head, common.Hash{}, false)
501 	return err
502 }
503 
504 // SetFinalized sets the finalized block.
505 func (bc *BlockChain) SetFinalized(block *types.Block) {
506 	bc.currentFinalizedBlock.Store(block)
507 	rawdb.WriteFinalizedBlockHash(bc.db, block.Hash())
508 	headFinalizedBlockGauge.Update(int64(block.NumberU64()))
509 }
510 
511 // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
512 // that the rewind must pass the specified state root. This method is meant to be
513 // used when rewinding with snapshots enabled to ensure that we go back further than
514 // the persistent disk layer. Depending on whether the node was fast synced or full,
515 // and in which state, the method will try to delete minimal data from disk whilst
516 // retaining chain consistency.
517 //
518 // The method returns the block number where the requested root cap was found.
519 func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
520 	if !bc.chainmu.TryLock() {
521 		return 0, errChainStopped
522 	}
523 	defer bc.chainmu.Unlock()
524 
525 	// Track the block number of the requested root hash
526 	var rootNumber uint64 // (no root == always 0)
527 
528 	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
529 	// current freezer limit to start nuking if underflown
530 	pivot := rawdb.ReadLastPivotNumber(bc.db)
531 	frozen, _ := bc.db.Ancients()
532 
533 	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
534 		// Rewind the blockchain, ensuring we don't end up with a stateless head
535 		// block. Note, depth equality is permitted to allow using SetHead as a
536 		// chain reparation mechanism without deleting any data!
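		// The rewind below walks back from the requested head one parent at a
		// time until it finds a block whose state is still present (or reaches
		// the genesis / the fast-sync pivot), remembering the block number at
		// which the optional root threshold was crossed.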
537 if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() { 538 newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) 539 if newHeadBlock == nil { 540 log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) 541 newHeadBlock = bc.genesisBlock 542 } else { 543 // Block exists, keep rewinding until we find one with state, 544 // keeping rewinding until we exceed the optional threshold 545 // root hash 546 beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true) 547 548 for { 549 // If a root threshold was requested but not yet crossed, check 550 if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root { 551 beyondRoot, rootNumber = true, newHeadBlock.NumberU64() 552 } 553 if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil { 554 log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) 555 if pivot == nil || newHeadBlock.NumberU64() > *pivot { 556 parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) 557 if parent != nil { 558 newHeadBlock = parent 559 continue 560 } 561 log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash()) 562 newHeadBlock = bc.genesisBlock 563 } else { 564 log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot) 565 newHeadBlock = bc.genesisBlock 566 } 567 } 568 if beyondRoot || newHeadBlock.NumberU64() == 0 { 569 if newHeadBlock.NumberU64() == 0 { 570 // Recommit the genesis state into disk in case the rewinding destination 571 // is genesis block and the relevant state is gone. In the future this 572 // rewinding destination can be the earliest block stored in the chain 573 // if the historical chain pruning is enabled. In that case the logic 574 // needs to be improved here. 575 if !bc.HasState(bc.genesisBlock.Root()) { 576 if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil { 577 log.Crit("Failed to commit genesis state", "err", err) 578 } 579 log.Debug("Recommitted genesis state to disk") 580 } 581 } 582 log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) 583 break 584 } 585 log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) 586 newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding 587 } 588 } 589 rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash()) 590 591 // Degrade the chain markers if they are explicitly reverted. 592 // In theory we should update all in-memory markers in the 593 // last step, however the direction of SetHead is from high 594 // to low, so it's safe to update in-memory markers directly. 
595 			bc.currentBlock.Store(newHeadBlock)
596 			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
597 		}
598 		// Rewind the fast block in a simpleton way to the target head
599 		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
600 			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
601 			// If the block is missing, reset to the genesis state
602 			if newHeadFastBlock == nil {
603 				newHeadFastBlock = bc.genesisBlock
604 			}
605 			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())
606 
607 			// Degrade the chain markers if they are explicitly reverted.
608 			// In theory we should update all in-memory markers in the
609 			// last step, however the direction of SetHead is from high
610 			// to low, so it's safe to update the in-memory markers directly.
611 			bc.currentFastBlock.Store(newHeadFastBlock)
612 			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
613 		}
614 		head := bc.CurrentBlock().NumberU64()
615 
616 		// If SetHead underflowed the freezer threshold and the block processing
617 		// intent afterwards is full block importing, delete the chain segment
618 		// between the stateful-block and the sethead target.
619 		var wipe bool
620 		if head+1 < frozen {
621 			wipe = pivot == nil || head >= *pivot
622 		}
623 		return head, wipe // Only force wipe if full synced
624 	}
625 	// Rewind the header chain, deleting all block bodies until then
626 	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
627 		// Ignore the error here since light client won't hit this path
628 		frozen, _ := bc.db.Ancients()
629 		if num+1 <= frozen {
630 			// Truncate all related data (header, total difficulty, body, receipt
631 			// and canonical hash) from the ancient store.
632 			if err := bc.db.TruncateHead(num); err != nil {
633 				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
634 			}
635 			// Remove the hash <-> number mapping from the active store.
636 			rawdb.DeleteHeaderNumber(db, hash)
637 		} else {
638 			// Remove related body and receipts from the active store.
639 			// The header, total difficulty and canonical hash will be
640 			// removed in the hc.SetHead function.
641 			rawdb.DeleteBody(db, hash, num)
642 			rawdb.DeleteReceipts(db, hash, num)
643 		}
644 		// Todo(rjl493456442) txlookup, bloombits, etc
645 	}
646 	// If SetHead was only called as a chain reparation method, try to skip
647 	// touching the header chain altogether, unless the freezer is broken
648 	if repair {
649 		if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
650 			bc.hc.SetHead(target, updateFn, delFn)
651 		}
652 	} else {
653 		// Rewind the chain to the requested head and keep going backwards until a
654 		// block with a state is found or fast sync pivot is passed
655 		log.Warn("Rewinding blockchain", "target", head)
656 		bc.hc.SetHead(head, updateFn, delFn)
657 	}
658 	// Clear out any stale content from the caches
659 	bc.bodyCache.Purge()
660 	bc.bodyRLPCache.Purge()
661 	bc.receiptsCache.Purge()
662 	bc.blockCache.Purge()
663 	bc.txLookupCache.Purge()
664 	bc.futureBlocks.Purge()
665 
666 	return rootNumber, bc.loadLastState()
667 }
668 
669 // SnapSyncCommitHead sets the current head block to the one defined by the hash
670 // regardless of what the chain contents were prior.
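// It verifies that both the block and its state trie are present before moving
// the head marker, and then asks the snapshot tree to rebuild itself on top of
// the new root rather than reusing any previously generated layers.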
671 func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { 672 // Make sure that both the block as well at its state trie exists 673 block := bc.GetBlockByHash(hash) 674 if block == nil { 675 return fmt.Errorf("non existent block [%x..]", hash[:4]) 676 } 677 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil { 678 return err 679 } 680 681 // If all checks out, manually set the head block. 682 if !bc.chainmu.TryLock() { 683 return errChainStopped 684 } 685 bc.currentBlock.Store(block) 686 headBlockGauge.Update(int64(block.NumberU64())) 687 bc.chainmu.Unlock() 688 689 // Destroy any existing state snapshot and regenerate it in the background, 690 // also resuming the normal maintenance of any previously paused snapshot. 691 if bc.snaps != nil { 692 bc.snaps.Rebuild(block.Root()) 693 } 694 log.Info("Committed new head block", "number", block.Number(), "hash", hash) 695 return nil 696 } 697 698 // Reset purges the entire blockchain, restoring it to its genesis state. 699 func (bc *BlockChain) Reset() error { 700 return bc.ResetWithGenesisBlock(bc.genesisBlock) 701 } 702 703 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 704 // specified genesis state. 705 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 706 // Dump the entire block chain and purge the caches 707 if err := bc.SetHead(0); err != nil { 708 return err 709 } 710 if !bc.chainmu.TryLock() { 711 return errChainStopped 712 } 713 defer bc.chainmu.Unlock() 714 715 // Prepare the genesis block and reinitialise the chain 716 batch := bc.db.NewBatch() 717 rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()) 718 rawdb.WriteBlock(batch, genesis) 719 if err := batch.Write(); err != nil { 720 log.Crit("Failed to write genesis block", "err", err) 721 } 722 bc.writeHeadBlock(genesis) 723 724 // Last update all in-memory chain markers 725 bc.genesisBlock = genesis 726 bc.currentBlock.Store(bc.genesisBlock) 727 headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) 728 bc.hc.SetGenesis(bc.genesisBlock.Header()) 729 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 730 bc.currentFastBlock.Store(bc.genesisBlock) 731 headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) 732 return nil 733 } 734 735 // Export writes the active chain to the given writer. 736 func (bc *BlockChain) Export(w io.Writer) error { 737 return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) 738 } 739 740 // ExportN writes a subset of the active chain to the given writer. 741 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 742 if !bc.chainmu.TryLock() { 743 return errChainStopped 744 } 745 defer bc.chainmu.Unlock() 746 747 if first > last { 748 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 749 } 750 log.Info("Exporting batch of blocks", "count", last-first+1) 751 752 start, reported := time.Now(), time.Now() 753 for nr := first; nr <= last; nr++ { 754 block := bc.GetBlockByNumber(nr) 755 if block == nil { 756 return fmt.Errorf("export failed on #%d: not found", nr) 757 } 758 if err := block.EncodeRLP(w); err != nil { 759 return err 760 } 761 if time.Since(reported) >= statsReportLimit { 762 log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) 763 reported = time.Now() 764 } 765 } 766 return nil 767 } 768 769 // writeHeadBlock injects a new head block into the current block chain. 
This method 770 // assumes that the block is indeed a true head. It will also reset the head 771 // header and the head fast sync block to this very same block if they are older 772 // or if they are on a different side chain. 773 // 774 // Note, this function assumes that the `mu` mutex is held! 775 func (bc *BlockChain) writeHeadBlock(block *types.Block) { 776 // Add the block to the canonical chain number scheme and mark as the head 777 batch := bc.db.NewBatch() 778 rawdb.WriteHeadHeaderHash(batch, block.Hash()) 779 rawdb.WriteHeadFastBlockHash(batch, block.Hash()) 780 rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64()) 781 rawdb.WriteTxLookupEntriesByBlock(batch, block) 782 rawdb.WriteHeadBlockHash(batch, block.Hash()) 783 784 // Flush the whole batch into the disk, exit the node if failed 785 if err := batch.Write(); err != nil { 786 log.Crit("Failed to update chain indexes and markers", "err", err) 787 } 788 // Update all in-memory chain markers in the last step 789 bc.hc.SetCurrentHeader(block.Header()) 790 791 bc.currentFastBlock.Store(block) 792 headFastBlockGauge.Update(int64(block.NumberU64())) 793 794 bc.currentBlock.Store(block) 795 headBlockGauge.Update(int64(block.NumberU64())) 796 } 797 798 // Stop stops the blockchain service. If any imports are currently in progress 799 // it will abort them using the procInterrupt. 800 func (bc *BlockChain) Stop() { 801 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 802 return 803 } 804 805 // Unsubscribe all subscriptions registered from blockchain. 806 bc.scope.Close() 807 808 // Signal shutdown to all goroutines. 809 close(bc.quit) 810 bc.StopInsert() 811 812 // Now wait for all chain modifications to end and persistent goroutines to exit. 813 // 814 // Note: Close waits for the mutex to become available, i.e. any running chain 815 // modification will have exited when Close returns. Since we also called StopInsert, 816 // the mutex should become available quickly. It cannot be taken again after Close has 817 // returned. 818 bc.chainmu.Close() 819 bc.wg.Wait() 820 821 // Ensure that the entirety of the state snapshot is journalled to disk. 822 var snapBase common.Hash 823 if bc.snaps != nil { 824 var err error 825 if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil { 826 log.Error("Failed to journal state snapshot", "err", err) 827 } 828 } 829 830 // Ensure the state of a recent block is also stored to disk before exiting. 
831 // We're writing three different states to catch different restart scenarios: 832 // - HEAD: So we don't need to reprocess any blocks in the general case 833 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 834 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 835 if !bc.cacheConfig.TrieDirtyDisabled { 836 triedb := bc.stateCache.TrieDB() 837 838 for _, offset := range []uint64{0, 1, TriesInMemory - 1} { 839 if number := bc.CurrentBlock().NumberU64(); number > offset { 840 recent := bc.GetBlockByNumber(number - offset) 841 842 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 843 if err := triedb.Commit(recent.Root(), true, nil); err != nil { 844 log.Error("Failed to commit recent state trie", "err", err) 845 } 846 } 847 } 848 if snapBase != (common.Hash{}) { 849 log.Info("Writing snapshot state to disk", "root", snapBase) 850 if err := triedb.Commit(snapBase, true, nil); err != nil { 851 log.Error("Failed to commit recent state trie", "err", err) 852 } 853 } 854 for !bc.triegc.Empty() { 855 triedb.Dereference(bc.triegc.PopItem().(common.Hash)) 856 } 857 if size, _ := triedb.Size(); size != 0 { 858 log.Error("Dangling trie nodes after full cleanup") 859 } 860 } 861 // Ensure all live cached entries be saved into disk, so that we can skip 862 // cache warmup when node restarts. 863 if bc.cacheConfig.TrieCleanJournal != "" { 864 triedb := bc.stateCache.TrieDB() 865 triedb.SaveCache(bc.cacheConfig.TrieCleanJournal) 866 } 867 log.Info("Blockchain stopped") 868 } 869 870 // StopInsert interrupts all insertion methods, causing them to return 871 // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after 872 // calling this method. 873 func (bc *BlockChain) StopInsert() { 874 atomic.StoreInt32(&bc.procInterrupt, 1) 875 } 876 877 // insertStopped returns true after StopInsert has been called. 878 func (bc *BlockChain) insertStopped() bool { 879 return atomic.LoadInt32(&bc.procInterrupt) == 1 880 } 881 882 func (bc *BlockChain) procFutureBlocks() { 883 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 884 for _, hash := range bc.futureBlocks.Keys() { 885 if block, exist := bc.futureBlocks.Peek(hash); exist { 886 blocks = append(blocks, block.(*types.Block)) 887 } 888 } 889 if len(blocks) > 0 { 890 sort.Slice(blocks, func(i, j int) bool { 891 return blocks[i].NumberU64() < blocks[j].NumberU64() 892 }) 893 // Insert one by one as chain insertion needs contiguous ancestry between blocks 894 for i := range blocks { 895 bc.InsertChain(blocks[i : i+1]) 896 } 897 } 898 } 899 900 // WriteStatus status of write 901 type WriteStatus byte 902 903 const ( 904 NonStatTy WriteStatus = iota 905 CanonStatTy 906 SideStatTy 907 ) 908 909 // InsertReceiptChain attempts to complete an already existing header chain with 910 // transaction and receipt data. 911 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) { 912 // We don't require the chainMu here since we want to maximize the 913 // concurrency of header insertion and receipt insertion. 
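	// The incoming batch is split by ancientLimit: blocks at or below the limit
	// are written straight into the freezer via writeAncient, while the rest is
	// written to the active key-value store via writeLive further down.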
914 bc.wg.Add(1) 915 defer bc.wg.Done() 916 917 var ( 918 ancientBlocks, liveBlocks types.Blocks 919 ancientReceipts, liveReceipts []types.Receipts 920 ) 921 // Do a sanity check that the provided chain is actually ordered and linked 922 for i := 0; i < len(blockChain); i++ { 923 if i != 0 { 924 if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { 925 log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), 926 "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) 927 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(), 928 blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) 929 } 930 } 931 if blockChain[i].NumberU64() <= ancientLimit { 932 ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i]) 933 } else { 934 liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i]) 935 } 936 } 937 938 var ( 939 stats = struct{ processed, ignored int32 }{} 940 start = time.Now() 941 size = int64(0) 942 ) 943 944 // updateHead updates the head fast sync block if the inserted blocks are better 945 // and returns an indicator whether the inserted blocks are canonical. 946 updateHead := func(head *types.Block) bool { 947 if !bc.chainmu.TryLock() { 948 return false 949 } 950 defer bc.chainmu.Unlock() 951 952 // Rewind may have occurred, skip in that case. 953 if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 { 954 reorg, err := bc.forker.ReorgNeeded(bc.CurrentFastBlock().Header(), head.Header()) 955 if err != nil { 956 log.Warn("Reorg failed", "err", err) 957 return false 958 } else if !reorg { 959 return false 960 } 961 rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) 962 bc.currentFastBlock.Store(head) 963 headFastBlockGauge.Update(int64(head.NumberU64())) 964 return true 965 } 966 return false 967 } 968 969 // writeAncient writes blockchain and corresponding receipt chain into ancient store. 970 // 971 // this function only accepts canonical chain data. All side chain will be reverted 972 // eventually. 973 writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 974 first := blockChain[0] 975 last := blockChain[len(blockChain)-1] 976 977 // Ensure genesis is in ancients. 978 if first.NumberU64() == 1 { 979 if frozen, _ := bc.db.Ancients(); frozen == 0 { 980 b := bc.genesisBlock 981 td := bc.genesisBlock.Difficulty() 982 writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td) 983 size += writeSize 984 if err != nil { 985 log.Error("Error writing genesis to ancients", "err", err) 986 return 0, err 987 } 988 log.Info("Wrote genesis to ancients") 989 } 990 } 991 // Before writing the blocks to the ancients, we need to ensure that 992 // they correspond to the what the headerchain 'expects'. 993 // We only check the last block/header, since it's a contiguous chain. 994 if !bc.HasHeader(last.Hash(), last.NumberU64()) { 995 return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4]) 996 } 997 998 // Write all chain data to ancients. 
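		// The stored total difficulty of the first block is looked up so it can
		// be handed to rawdb.WriteAncientBlocks alongside the blocks and
		// receipts; presumably the TDs of the later blocks are derived from it,
		// since the batch has already been checked to be contiguous.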
999 td := bc.GetTd(first.Hash(), first.NumberU64()) 1000 writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td) 1001 size += writeSize 1002 if err != nil { 1003 log.Error("Error importing chain data to ancients", "err", err) 1004 return 0, err 1005 } 1006 1007 // Write tx indices if any condition is satisfied: 1008 // * If user requires to reserve all tx indices(txlookuplimit=0) 1009 // * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit) 1010 // * If block number is large enough to be regarded as a recent block 1011 // It means blocks below the ancientLimit-txlookupLimit won't be indexed. 1012 // 1013 // But if the `TxIndexTail` is not nil, e.g. etn-sc is initialized with 1014 // an external ancient database, during the setup, blockchain will start 1015 // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients) 1016 // range. In this case, all tx indices of newly imported blocks should be 1017 // generated. 1018 var batch = bc.db.NewBatch() 1019 for i, block := range blockChain { 1020 if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit { 1021 rawdb.WriteTxLookupEntriesByBlock(batch, block) 1022 } else if rawdb.ReadTxIndexTail(bc.db) != nil { 1023 rawdb.WriteTxLookupEntriesByBlock(batch, block) 1024 } 1025 stats.processed++ 1026 1027 if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 { 1028 size += int64(batch.ValueSize()) 1029 if err = batch.Write(); err != nil { 1030 fastBlock := bc.CurrentFastBlock().NumberU64() 1031 if err := bc.db.TruncateHead(fastBlock + 1); err != nil { 1032 log.Error("Can't truncate ancient store after failed insert", "err", err) 1033 } 1034 return 0, err 1035 } 1036 batch.Reset() 1037 } 1038 } 1039 1040 // Sync the ancient store explicitly to ensure all data has been flushed to disk. 1041 if err := bc.db.Sync(); err != nil { 1042 return 0, err 1043 } 1044 // Update the current fast block because all block data is now present in DB. 1045 previousFastBlock := bc.CurrentFastBlock().NumberU64() 1046 if !updateHead(blockChain[len(blockChain)-1]) { 1047 // We end up here if the header chain has reorg'ed, and the blocks/receipts 1048 // don't match the canonical chain. 1049 if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil { 1050 log.Error("Can't truncate ancient store after failed insert", "err", err) 1051 } 1052 return 0, errSideChainReceipts 1053 } 1054 1055 // Delete block data from the main database. 1056 batch.Reset() 1057 canonHashes := make(map[common.Hash]struct{}) 1058 for _, block := range blockChain { 1059 canonHashes[block.Hash()] = struct{}{} 1060 if block.NumberU64() == 0 { 1061 continue 1062 } 1063 rawdb.DeleteCanonicalHash(batch, block.NumberU64()) 1064 rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64()) 1065 } 1066 // Delete side chain hash-to-number mappings. 1067 for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) { 1068 if _, canon := canonHashes[nh.Hash]; !canon { 1069 rawdb.DeleteHeader(batch, nh.Hash, nh.Number) 1070 } 1071 } 1072 if err := batch.Write(); err != nil { 1073 return 0, err 1074 } 1075 return 0, nil 1076 } 1077 1078 // writeLive writes blockchain and corresponding receipt chain into active store. 
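	// Unlike writeAncient it always records tx lookup entries, flushes to disk in
	// batches of roughly ethdb.IdealBatchSize, and skips the per-block presence
	// check once the first missing block has been encountered.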
1079 writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 1080 skipPresenceCheck := false 1081 batch := bc.db.NewBatch() 1082 for i, block := range blockChain { 1083 // Short circuit insertion if shutting down or processing failed 1084 if bc.insertStopped() { 1085 return 0, errInsertionInterrupted 1086 } 1087 // Short circuit if the owner header is unknown 1088 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 1089 return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4]) 1090 } 1091 if !skipPresenceCheck { 1092 // Ignore if the entire data is already known 1093 if bc.HasBlock(block.Hash(), block.NumberU64()) { 1094 stats.ignored++ 1095 continue 1096 } else { 1097 // If block N is not present, neither are the later blocks. 1098 // This should be true, but if we are mistaken, the shortcut 1099 // here will only cause overwriting of some existing data 1100 skipPresenceCheck = true 1101 } 1102 } 1103 // Write all the data out into the database 1104 rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) 1105 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i]) 1106 rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed 1107 1108 // Write everything belongs to the blocks into the database. So that 1109 // we can ensure all components of body is completed(body, receipts, 1110 // tx indexes) 1111 if batch.ValueSize() >= ethdb.IdealBatchSize { 1112 if err := batch.Write(); err != nil { 1113 return 0, err 1114 } 1115 size += int64(batch.ValueSize()) 1116 batch.Reset() 1117 } 1118 stats.processed++ 1119 } 1120 // Write everything belongs to the blocks into the database. So that 1121 // we can ensure all components of body is completed(body, receipts, 1122 // tx indexes) 1123 if batch.ValueSize() > 0 { 1124 size += int64(batch.ValueSize()) 1125 if err := batch.Write(); err != nil { 1126 return 0, err 1127 } 1128 } 1129 updateHead(blockChain[len(blockChain)-1]) 1130 return 0, nil 1131 } 1132 1133 // Write downloaded chain data and corresponding receipt chain data 1134 if len(ancientBlocks) > 0 { 1135 if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil { 1136 if err == errInsertionInterrupted { 1137 return 0, nil 1138 } 1139 return n, err 1140 } 1141 } 1142 // Write the tx index tail (block number from where we index) before write any live blocks 1143 if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 { 1144 // The tx index tail can only be one of the following two options: 1145 // * 0: all ancient blocks have been indexed 1146 // * ancient-limit: the indices of blocks before ancient-limit are ignored 1147 if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil { 1148 if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit { 1149 rawdb.WriteTxIndexTail(bc.db, 0) 1150 } else { 1151 rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit) 1152 } 1153 } 1154 } 1155 if len(liveBlocks) > 0 { 1156 if n, err := writeLive(liveBlocks, liveReceipts); err != nil { 1157 if err == errInsertionInterrupted { 1158 return 0, nil 1159 } 1160 return n, err 1161 } 1162 } 1163 1164 head := blockChain[len(blockChain)-1] 1165 context := []interface{}{ 1166 "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), 1167 "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)), 1168 "size", common.StorageSize(size), 1169 } 1170 if 
stats.ignored > 0 { 1171 context = append(context, []interface{}{"ignored", stats.ignored}...) 1172 } 1173 log.Info("Imported new block receipts", context...) 1174 1175 return 0, nil 1176 } 1177 1178 var lastWrite uint64 1179 1180 // writeBlockWithoutState writes only the block and its metadata to the database, 1181 // but does not write any state. This is used to construct competing side forks 1182 // up to the point where they exceed the canonical total difficulty. 1183 func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) { 1184 if bc.insertStopped() { 1185 return errInsertionInterrupted 1186 } 1187 1188 batch := bc.db.NewBatch() 1189 rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td) 1190 rawdb.WriteBlock(batch, block) 1191 if err := batch.Write(); err != nil { 1192 log.Crit("Failed to write block into disk", "err", err) 1193 } 1194 return nil 1195 } 1196 1197 // writeKnownBlock updates the head block flag with a known block 1198 // and introduces chain reorg if necessary. 1199 func (bc *BlockChain) writeKnownBlock(block *types.Block) error { 1200 current := bc.CurrentBlock() 1201 if block.ParentHash() != current.Hash() { 1202 if err := bc.reorg(current, block); err != nil { 1203 return err 1204 } 1205 } 1206 bc.writeHeadBlock(block) 1207 return nil 1208 } 1209 1210 // writeBlockWithState writes block, metadata and corresponding state data to the 1211 // database. 1212 func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error { 1213 // Calculate the total difficulty of the block 1214 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 1215 if ptd == nil { 1216 return consensus.ErrUnknownAncestor 1217 } 1218 // Make sure no inconsistent state is leaked during insertion 1219 externTd := new(big.Int).Add(block.Difficulty(), ptd) 1220 1221 // Irrelevant of the canonical status, write the block itself to the database. 1222 // 1223 // Note all the components of block(td, hash->number map, header, body, receipts) 1224 // should be written atomically. BlockBatch is used for containing all components. 1225 blockBatch := bc.db.NewBatch() 1226 rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) 1227 rawdb.WriteBlock(blockBatch, block) 1228 rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) 1229 rawdb.WritePreimages(blockBatch, state.Preimages()) 1230 if err := blockBatch.Write(); err != nil { 1231 log.Crit("Failed to write block into disk", "err", err) 1232 } 1233 // Commit all cached state changes into underlying memory database. 
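	// Commit produces the post-state root; whether that trie actually reaches
	// disk is decided below: archive nodes flush it immediately, while full
	// nodes keep it referenced in memory and only commit or garbage collect it
	// once it falls out of the TriesInMemory window.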
1234 root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) 1235 if err != nil { 1236 return err 1237 } 1238 triedb := bc.stateCache.TrieDB() 1239 1240 // If we're running an archive node, always flush 1241 if bc.cacheConfig.TrieDirtyDisabled { 1242 return triedb.Commit(root, false, nil) 1243 } else { 1244 // Full but not archive node, do proper garbage collection 1245 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 1246 bc.triegc.Push(root, -int64(block.NumberU64())) 1247 1248 if current := block.NumberU64(); current > TriesInMemory { 1249 // If we exceeded our memory allowance, flush matured singleton nodes to disk 1250 var ( 1251 nodes, imgs = triedb.Size() 1252 limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 1253 ) 1254 if nodes > limit || imgs > 4*1024*1024 { 1255 triedb.Cap(limit - ethdb.IdealBatchSize) 1256 } 1257 // Find the next state trie we need to commit 1258 chosen := current - TriesInMemory 1259 1260 // If we exceeded out time allowance, flush an entire trie to disk 1261 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 1262 // If the header is missing (canonical chain behind), we're reorging a low 1263 // diff sidechain. Suspend committing until this operation is completed. 1264 header := bc.GetHeaderByNumber(chosen) 1265 if header == nil { 1266 log.Warn("Reorg in progress, trie commit postponed", "number", chosen) 1267 } else { 1268 // If we're exceeding limits but haven't reached a large enough memory gap, 1269 // warn the user that the system is becoming unstable. 1270 if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 1271 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory) 1272 } 1273 // Flush an entire trie and restart the counters 1274 triedb.Commit(header.Root, true, nil) 1275 lastWrite = chosen 1276 bc.gcproc = 0 1277 } 1278 } 1279 // Garbage collect anything below our required write retention 1280 for !bc.triegc.Empty() { 1281 root, number := bc.triegc.Pop() 1282 if uint64(-number) > chosen { 1283 bc.triegc.Push(root, number) 1284 break 1285 } 1286 triedb.Dereference(root.(common.Hash)) 1287 } 1288 } 1289 } 1290 return nil 1291 } 1292 1293 // WriteBlockAndSetHead writes the given block and all associated state to the database, 1294 // and applies the block as the new chain head. 1295 func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1296 if !bc.chainmu.TryLock() { 1297 return NonStatTy, errChainStopped 1298 } 1299 defer bc.chainmu.Unlock() 1300 1301 return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent) 1302 } 1303 1304 // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead. 1305 // This function expects the chain mutex to be held. 
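// The fork choice (bc.forker) decides whether the block extends or replaces the
// current canonical chain; on a canonical write the chain, log and (optionally)
// chain head feeds are notified, otherwise a ChainSideEvent is emitted.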
1306 func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1307 if err := bc.writeBlockWithState(block, receipts, logs, state); err != nil { 1308 return NonStatTy, err 1309 } 1310 currentBlock := bc.CurrentBlock() 1311 reorg, err := bc.forker.ReorgNeeded(currentBlock.Header(), block.Header()) 1312 if err != nil { 1313 return NonStatTy, err 1314 } 1315 if reorg { 1316 // Reorganise the chain if the parent is not the head block 1317 if block.ParentHash() != currentBlock.Hash() { 1318 if err := bc.reorg(currentBlock, block); err != nil { 1319 return NonStatTy, err 1320 } 1321 } 1322 status = CanonStatTy 1323 } else { 1324 status = SideStatTy 1325 } 1326 // Set new head. 1327 if status == CanonStatTy { 1328 bc.writeHeadBlock(block) 1329 } 1330 bc.futureBlocks.Remove(block.Hash()) 1331 1332 if status == CanonStatTy { 1333 bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs}) 1334 if len(logs) > 0 { 1335 bc.logsFeed.Send(logs) 1336 } 1337 // In theory we should fire a ChainHeadEvent when we inject 1338 // a canonical block, but sometimes we can insert a batch of 1339 // canonicial blocks. Avoid firing too many ChainHeadEvents, 1340 // we will fire an accumulated ChainHeadEvent and disable fire 1341 // event here. 1342 if emitHeadEvent { 1343 bc.chainHeadFeed.Send(ChainHeadEvent{Block: block}) 1344 } 1345 } else { 1346 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1347 } 1348 return status, nil 1349 } 1350 1351 // addFutureBlock checks if the block is within the max allowed window to get 1352 // accepted for future processing, and returns an error if the block is too far 1353 // ahead and was not added. 1354 // 1355 // TODO after the transition, the future block shouldn't be kept. Because 1356 // it's not checked in the Geth side anymore. 1357 func (bc *BlockChain) addFutureBlock(block *types.Block) error { 1358 max := uint64(time.Now().Unix() + maxTimeFutureBlocks) 1359 if block.Time() > max { 1360 return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) 1361 } 1362 if block.Difficulty().Cmp(common.Big0) == 0 { 1363 // Never add PoS blocks into the future queue 1364 return nil 1365 } 1366 bc.futureBlocks.Add(block.Hash(), block) 1367 return nil 1368 } 1369 1370 // InsertChain attempts to insert the given batch of blocks in to the canonical 1371 // chain or, otherwise, create a fork. If an error is returned it will return 1372 // the index number of the failing block as well an error describing what went 1373 // wrong. After insertion is done, all accumulated events will be fired. 1374 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1375 // Sanity check that we have something meaningful to import 1376 if len(chain) == 0 { 1377 return 0, nil 1378 } 1379 bc.blockProcFeed.Send(true) 1380 defer bc.blockProcFeed.Send(false) 1381 1382 // Do a sanity check that the provided chain is actually ordered and linked. 1383 for i := 1; i < len(chain); i++ { 1384 block, prev := chain[i], chain[i-1] 1385 if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() { 1386 log.Error("Non contiguous block insert", 1387 "number", block.Number(), 1388 "hash", block.Hash(), 1389 "parent", block.ParentHash(), 1390 "prevnumber", prev.Number(), 1391 "prevhash", prev.Hash(), 1392 ) 1393 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] 
(parent [%x..])", i-1, prev.NumberU64(), 1394 prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4]) 1395 } 1396 } 1397 // Pre-checks passed, start the full block imports 1398 if !bc.chainmu.TryLock() { 1399 return 0, errChainStopped 1400 } 1401 defer bc.chainmu.Unlock() 1402 return bc.insertChain(chain, true, true) 1403 } 1404 1405 // insertChain is the internal implementation of InsertChain, which assumes that 1406 // 1) chains are contiguous, and 2) the chain mutex is held. 1407 // 1408 // This method is split out so that import batches that require re-injecting 1409 // historical blocks can do so without releasing the lock, which could lead to 1410 // racy behaviour. If a sidechain import is in progress, and the historic state 1411 // is imported, but then a new canon-head is added before the actual sidechain 1412 // completes, then the historic state could be pruned again. 1413 func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) (int, error) { 1414 // If the chain is terminating, don't even bother starting up. 1415 if bc.insertStopped() { 1416 return 0, nil 1417 } 1418 1419 // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) 1420 senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) 1421 1422 var ( 1423 stats = insertStats{startTime: mclock.Now()} 1424 lastCanon *types.Block 1425 ) 1426 // Fire a single chain head event if we've progressed the chain 1427 defer func() { 1428 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1429 bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon}) 1430 } 1431 }() 1432 // Start the parallel header verifier 1433 headers := make([]*types.Header, len(chain)) 1434 seals := make([]bool, len(chain)) 1435 1436 for i, block := range chain { 1437 headers[i] = block.Header() 1438 seals[i] = verifySeals 1439 } 1440 abort, results := bc.engine.VerifyHeaders(bc, headers, seals) 1441 defer close(abort) 1442 1443 // Peek the error for the first block to decide the directing import logic 1444 it := newInsertIterator(chain, results, bc.validator) 1445 block, err := it.next() 1446 1447 // Left-trim all the known blocks that don't need to build a snapshot 1448 if bc.skipBlock(err, it) { 1449 // First block (and state) is known 1450 // 1. We did a roll-back, and should now do a re-import 1451 // 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot 1452 // from the canonical chain, which has not been verified. 1453 // Skip all known blocks that are behind us. 1454 var ( 1455 reorg bool 1456 current = bc.CurrentBlock() 1457 ) 1458 for block != nil && bc.skipBlock(err, it) { 1459 reorg, err = bc.forker.ReorgNeeded(current.Header(), block.Header()) 1460 if err != nil { 1461 return it.index, err 1462 } 1463 if reorg { 1464 // Switch to import mode if the forker says the reorg is necessary 1465 // and also the block is not on the canonical chain. 1466 // In eth2 the forker always returns true for the reorg decision (blindly trusting 1467 // the external consensus engine), but in order to prevent unnecessary 1468 // reorgs when importing known blocks, the special case is handled here.
1469 if block.NumberU64() > current.NumberU64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() { 1470 break 1471 } 1472 } 1473 log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash()) 1474 stats.ignored++ 1475 1476 block, err = it.next() 1477 } 1478 // The remaining blocks are still known blocks, the only scenario here is: 1479 // During the fast sync, the pivot point is already submitted but rollback 1480 // happens. Then the node resets the head full block to a lower height via `rollback` 1481 // and leaves a few known blocks in the database. 1482 // 1483 // When the node runs a fast sync again, it can re-import a batch of known blocks via 1484 // `insertChain` while some of them have a higher total difficulty than the current 1485 // head full block (new pivot point). 1486 for block != nil && bc.skipBlock(err, it) { 1487 log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) 1488 if err := bc.writeKnownBlock(block); err != nil { 1489 return it.index, err 1490 } 1491 lastCanon = block 1492 1493 block, err = it.next() 1494 } 1495 // Falls through to the block import 1496 } 1497 switch { 1498 // First block is pruned 1499 case errors.Is(err, consensus.ErrPrunedAncestor): 1500 if setHead { 1501 // First block is pruned, insert as sidechain and reorg only if TD grows enough 1502 log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash()) 1503 return bc.insertSideChain(block, it) 1504 } else { 1505 // We're post-merge and the parent is pruned, try to recover the parent state 1506 log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash()) 1507 _, err := bc.recoverAncestors(block) 1508 return it.index, err 1509 } 1510 // First block is future, shove it (and all children) to the future queue (unknown ancestor) 1511 case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())): 1512 for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) { 1513 log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash()) 1514 if err := bc.addFutureBlock(block); err != nil { 1515 return it.index, err 1516 } 1517 block, err = it.next() 1518 } 1519 stats.queued += it.processed() 1520 stats.ignored += it.remaining() 1521 1522 // If there are any still remaining, mark as ignored 1523 return it.index, err 1524 1525 // Some other error (except ErrKnownBlock) occurred, abort. 1526 // ErrKnownBlock is allowed here since some known blocks 1527 // still need re-execution to generate snapshots that are missing 1528 case err != nil && !errors.Is(err, ErrKnownBlock): 1529 bc.futureBlocks.Remove(block.Hash()) 1530 stats.ignored += len(it.chain) 1531 bc.reportBlock(block, nil, err) 1532 return it.index, err 1533 } 1534 // No validation errors for the first block (or chain prefix skipped) 1535 var activeState *state.StateDB 1536 defer func() { 1537 // The chain importer is starting and stopping trie prefetchers. If a bad 1538 // block or other error is hit however, an early return may not properly 1539 // terminate the background threads. This defer ensures that we clean up 1540 // any dangling prefetcher, without deferring each and holding on to live refs.
1541 if activeState != nil { 1542 activeState.StopPrefetcher() 1543 } 1544 }() 1545 1546 for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() { 1547 // If the chain is terminating, stop processing blocks 1548 if bc.insertStopped() { 1549 log.Debug("Abort during block processing") 1550 break 1551 } 1552 // If the header is a banned one, straight out abort 1553 if BadHashes[block.Hash()] { 1554 bc.reportBlock(block, nil, ErrBannedHash) 1555 return it.index, ErrBannedHash 1556 } 1557 // If the block is known (in the middle of the chain), it's a special case for 1558 // Clique blocks where they can share state among each other, so importing an 1559 // older block might complete the state of the subsequent one. In this case, 1560 // just skip the block (we already validated it once fully (and crashed), since 1561 // its header and body were already in the database). But if the corresponding 1562 // snapshot layer is missing, forcibly rerun the execution to build it. 1563 if bc.skipBlock(err, it) { 1564 logger := log.Debug 1565 if bc.chainConfig.Clique == nil { 1566 logger = log.Warn 1567 } 1568 logger("Inserted known block", "number", block.Number(), "hash", block.Hash(), 1569 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), 1570 "root", block.Root()) 1571 1572 // Special case. Commit the empty receipt slice if we meet the known 1573 // block in the middle. It can only happen in the clique chain. Whenever 1574 // we insert blocks via `insertSideChain`, we only commit `td`, `header` 1575 // and `body` if it's non-existent. Since we don't have receipts without 1576 // reexecution, there is nothing to commit. But if the sidechain is adopted 1577 // as the canonical chain eventually, it needs to be reexecuted for the missing 1578 // state, and in this special case here (skipping reexecution) we would lose 1579 // the empty receipt entry. 1580 if len(block.Transactions()) == 0 { 1581 rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil) 1582 } else { 1583 log.Error("Please file an issue, skip known block execution without receipt", 1584 "hash", block.Hash(), "number", block.NumberU64()) 1585 } 1586 if err := bc.writeKnownBlock(block); err != nil { 1587 return it.index, err 1588 } 1589 stats.processed++ 1590 1591 // We can assume that logs are empty here, since the only way for consecutive 1592 // Clique blocks to have the same state is if there are no transactions. 1593 lastCanon = block 1594 continue 1595 } 1596 1597 // Retrieve the parent block and its state to execute on top 1598 start := time.Now() 1599 parent := it.previous() 1600 if parent == nil { 1601 parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) 1602 } 1603 statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) 1604 if err != nil { 1605 return it.index, err 1606 } 1607 1608 // Enable prefetching to pull in trie node paths while processing transactions 1609 statedb.StartPrefetcher("chain") 1610 activeState = statedb 1611 1612 // If we have a followup block, run that against the current state to pre-cache 1613 // transactions and probabilistically some of the account/storage trie nodes.
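// The prefetcher below runs against a throwaway copy of the parent state; the followupInterrupt
// flag lets the main import cancel it as soon as the current block has been processed and written.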
1614 var followupInterrupt uint32 1615 if !bc.cacheConfig.TrieCleanNoPrefetch { 1616 if followup, err := it.peek(); followup != nil && err == nil { 1617 throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) 1618 1619 go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { 1620 bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) 1621 1622 blockPrefetchExecuteTimer.Update(time.Since(start)) 1623 if atomic.LoadUint32(interrupt) == 1 { 1624 blockPrefetchInterruptMeter.Mark(1) 1625 } 1626 }(time.Now(), followup, throwaway, &followupInterrupt) 1627 } 1628 } 1629 1630 // Process block using the parent state as reference point 1631 substart := time.Now() 1632 receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) 1633 if err != nil { 1634 bc.reportBlock(block, receipts, err) 1635 atomic.StoreUint32(&followupInterrupt, 1) 1636 return it.index, err 1637 } 1638 1639 // Update the metrics touched during block processing 1640 accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them 1641 storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them 1642 accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them 1643 storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them 1644 snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them 1645 snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them 1646 triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation 1647 trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates 1648 trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates 1649 1650 blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash) 1651 1652 // Validate the state using the default validator 1653 substart = time.Now() 1654 if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { 1655 bc.reportBlock(block, receipts, err) 1656 atomic.StoreUint32(&followupInterrupt, 1) 1657 return it.index, err 1658 } 1659 proctime := time.Since(start) 1660 1661 // Update the metrics touched during block validation 1662 accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them 1663 storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them 1664 blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash)) 1665 1666 // Write the block to the chain and get the status. 
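// Note that the commit timers recorded below are subtracted from the wall-clock write time, so
// blockWriteTimer only measures the block-write overhead itself rather than the state commits.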
1667 substart = time.Now() 1668 var status WriteStatus 1669 if !setHead { 1670 // Don't set the head, only insert the block 1671 err = bc.writeBlockWithState(block, receipts, logs, statedb) 1672 } else { 1673 status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false) 1674 } 1675 atomic.StoreUint32(&followupInterrupt, 1) 1676 if err != nil { 1677 return it.index, err 1678 } 1679 // Update the metrics touched during block commit 1680 accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them 1681 storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them 1682 snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them 1683 1684 blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits) 1685 blockInsertTimer.UpdateSince(start) 1686 1687 // Report the import stats before returning the various results 1688 stats.processed++ 1689 stats.usedGas += usedGas 1690 1691 dirty, _ := bc.stateCache.TrieDB().Size() 1692 stats.report(chain, it.index, dirty, setHead) 1693 1694 if !setHead { 1695 return it.index, nil // Direct block insertion of a single block 1696 } 1697 switch status { 1698 case CanonStatTy: 1699 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), 1700 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), 1701 "elapsed", common.PrettyDuration(time.Since(start)), 1702 "root", block.Root()) 1703 1704 lastCanon = block 1705 1706 // Only count canonical blocks for GC processing time 1707 bc.gcproc += proctime 1708 1709 case SideStatTy: 1710 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), 1711 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1712 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 1713 "root", block.Root()) 1714 1715 default: 1716 // This in theory is impossible, but let's be nice to our future selves and leave 1717 // a log, instead of trying to track down block imports that don't emit logs. 1718 log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(), 1719 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1720 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 1721 "root", block.Root()) 1722 } 1723 } 1724 1725 // Any blocks remaining here? The only ones we care about are the future ones 1726 if block != nil && errors.Is(err, consensus.ErrFutureBlock) { 1727 if err := bc.addFutureBlock(block); err != nil { 1728 return it.index, err 1729 } 1730 block, err = it.next() 1731 1732 for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() { 1733 if err := bc.addFutureBlock(block); err != nil { 1734 return it.index, err 1735 } 1736 stats.queued++ 1737 } 1738 } 1739 stats.ignored += it.remaining() 1740 1741 return it.index, err 1742 } 1743 1744 // insertSideChain is called when an import batch hits upon a pruned ancestor 1745 // error, which happens when a sidechain with a sufficiently old fork-block is 1746 // found. 1747 // 1748 // The method writes all (header-and-body-valid) blocks to disk, then tries to 1749 // switch over to the new chain if the TD exceeds that of the current chain. 1750 // insertSideChain is only used pre-merge.
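// A minimal illustration (hypothetical helper, not part of the original file) of how the
// function below accumulates the external total difficulty of a sidechain segment: start from
// the TD of the fork point and add each sidechain block's difficulty; the forker then compares
// the result against the local head's TD to decide whether to switch over.
func sidechainTdSketch(forkPointTd *big.Int, sideDifficulties []*big.Int) *big.Int {
	externTd := new(big.Int).Set(forkPointTd)
	for _, diff := range sideDifficulties {
		externTd = new(big.Int).Add(externTd, diff) // mirrors externTd = externTd + block.Difficulty() below
	}
	return externTd
}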
1751 func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) { 1752 var ( 1753 externTd *big.Int 1754 lastBlock = block 1755 current = bc.CurrentBlock() 1756 ) 1757 // The first sidechain block error is already verified to be ErrPrunedAncestor. 1758 // Since we don't import them here, we expect ErrUnknownAncestor for the remaining 1759 // ones. Any other error means that the block is invalid, and should not be written 1760 // to disk. 1761 err := consensus.ErrPrunedAncestor 1762 for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() { 1763 // Check the canonical state root for that number 1764 if number := block.NumberU64(); current.NumberU64() >= number { 1765 canonical := bc.GetBlockByNumber(number) 1766 if canonical != nil && canonical.Hash() == block.Hash() { 1767 // Not a sidechain block, this is a re-import of a canon block which has its state pruned 1768 1769 // Collect the TD of the block. Since we know it's a canon one, 1770 // we can get it directly, and not (like further below) use 1771 // the parent and then add the block on top 1772 externTd = bc.GetTd(block.Hash(), block.NumberU64()) 1773 continue 1774 } 1775 if canonical != nil && canonical.Root() == block.Root() { 1776 // This is most likely a shadow-state attack. When a fork is imported into the 1777 // database, and it eventually reaches a block height which is not pruned, we 1778 // just found that the state already exists! This means that the sidechain block 1779 // refers to a state which already exists in our canon chain. 1780 // 1781 // If left unchecked, we would now proceed importing the blocks, without actually 1782 // having verified the state of the previous blocks. 1783 log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root()) 1784 1785 // If someone legitimately side-mines blocks, they would still be imported as usual. However, 1786 // we cannot risk writing unverified blocks to disk when they obviously target the pruning 1787 // mechanism. 1788 return it.index, errors.New("sidechain ghost-state attack") 1789 } 1790 } 1791 if externTd == nil { 1792 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) 1793 } 1794 externTd = new(big.Int).Add(externTd, block.Difficulty()) 1795 1796 if !bc.HasBlock(block.Hash(), block.NumberU64()) { 1797 start := time.Now() 1798 if err := bc.writeBlockWithoutState(block, externTd); err != nil { 1799 return it.index, err 1800 } 1801 log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(), 1802 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1803 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 1804 "root", block.Root()) 1805 } 1806 lastBlock = block 1807 } 1808 // At this point, we've written all sidechain blocks to the database. Loop ended 1809 // either on some other error or all were processed. If there was some other 1810 // error, we can ignore the rest of those blocks.
1811 // 1812 // If the externTd was larger than our local TD, we now need to reimport the previous 1813 // blocks to regenerate the required state 1814 reorg, err := bc.forker.ReorgNeeded(current.Header(), lastBlock.Header()) 1815 if err != nil { 1816 return it.index, err 1817 } 1818 if !reorg { 1819 localTd := bc.GetTd(current.Hash(), current.NumberU64()) 1820 log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd) 1821 return it.index, err 1822 } 1823 // Gather all the sidechain hashes (full blocks may be memory heavy) 1824 var ( 1825 hashes []common.Hash 1826 numbers []uint64 1827 ) 1828 parent := it.previous() 1829 for parent != nil && !bc.HasState(parent.Root) { 1830 hashes = append(hashes, parent.Hash()) 1831 numbers = append(numbers, parent.Number.Uint64()) 1832 1833 parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1) 1834 } 1835 if parent == nil { 1836 return it.index, errors.New("missing parent") 1837 } 1838 // Import all the pruned blocks to make the state available 1839 var ( 1840 blocks []*types.Block 1841 memory common.StorageSize 1842 ) 1843 for i := len(hashes) - 1; i >= 0; i-- { 1844 // Append the next block to our batch 1845 block := bc.GetBlock(hashes[i], numbers[i]) 1846 1847 blocks = append(blocks, block) 1848 memory += block.Size() 1849 1850 // If memory use grew too large, import and continue. Sadly we need to discard 1851 // all raised events and logs from notifications since we're too heavy on the 1852 // memory here. 1853 if len(blocks) >= 2048 || memory > 64*1024*1024 { 1854 log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64()) 1855 if _, err := bc.insertChain(blocks, false, true); err != nil { 1856 return 0, err 1857 } 1858 blocks, memory = blocks[:0], 0 1859 1860 // If the chain is terminating, stop processing blocks 1861 if bc.insertStopped() { 1862 log.Debug("Abort during blocks processing") 1863 return 0, nil 1864 } 1865 } 1866 } 1867 if len(blocks) > 0 { 1868 log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64()) 1869 return bc.insertChain(blocks, false, true) 1870 } 1871 return 0, nil 1872 } 1873 1874 // recoverAncestors finds the closest ancestor with available state and re-execute 1875 // all the ancestor blocks since that. 1876 // recoverAncestors is only used post-merge. 1877 // We return the hash of the latest block that we could correctly validate. 
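// Simplified sketch (hypothetical helper, not part of the original file) of the memory-bounded
// batching used just above: pruned sidechain blocks are re-imported in segments, and a segment
// is flushed as soon as it holds 2048 blocks or roughly 64MB of block data.
func batchSidechainSketch(blockSizes []common.StorageSize) (segments [][]int) {
	var (
		current []int
		memory  common.StorageSize
	)
	for i, size := range blockSizes {
		current = append(current, i) // stash the block's position, mirroring the hash/number lists above
		memory += size
		if len(current) >= 2048 || memory > 64*1024*1024 {
			segments = append(segments, current) // the real code calls bc.insertChain on each segment here
			current, memory = nil, 0
		}
	}
	if len(current) > 0 {
		segments = append(segments, current)
	}
	return segments
}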
1878 func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) { 1879 // Gather all the sidechain hashes (full blocks may be memory heavy) 1880 var ( 1881 hashes []common.Hash 1882 numbers []uint64 1883 parent = block 1884 ) 1885 for parent != nil && !bc.HasState(parent.Root()) { 1886 hashes = append(hashes, parent.Hash()) 1887 numbers = append(numbers, parent.NumberU64()) 1888 parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) 1889 1890 // If the chain is terminating, stop iteration 1891 if bc.insertStopped() { 1892 log.Debug("Abort during blocks iteration") 1893 return common.Hash{}, errInsertionInterrupted 1894 } 1895 } 1896 if parent == nil { 1897 return common.Hash{}, errors.New("missing parent") 1898 } 1899 // Import all the pruned blocks to make the state available 1900 for i := len(hashes) - 1; i >= 0; i-- { 1901 // If the chain is terminating, stop processing blocks 1902 if bc.insertStopped() { 1903 log.Debug("Abort during blocks processing") 1904 return common.Hash{}, errInsertionInterrupted 1905 } 1906 var b *types.Block 1907 if i == 0 { 1908 b = block 1909 } else { 1910 b = bc.GetBlock(hashes[i], numbers[i]) 1911 } 1912 if _, err := bc.insertChain(types.Blocks{b}, false, false); err != nil { 1913 return b.ParentHash(), err 1914 } 1915 } 1916 return block.Hash(), nil 1917 } 1918 1919 // collectLogs collects the logs that were generated or removed during 1920 // the processing of the block that corresponds with the given hash. 1921 // These logs are later announced as deleted or reborn. 1922 func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { 1923 number := bc.hc.GetBlockNumber(hash) 1924 if number == nil { 1925 return nil 1926 } 1927 receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) 1928 1929 var logs []*types.Log 1930 for _, receipt := range receipts { 1931 for _, log := range receipt.Logs { 1932 l := *log 1933 if removed { 1934 l.Removed = true 1935 } 1936 logs = append(logs, &l) 1937 } 1938 } 1939 return logs 1940 } 1941 1942 // mergeLogs returns a merged log slice with specified sort order. 1943 func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log { 1944 var ret []*types.Log 1945 if reverse { 1946 for i := len(logs) - 1; i >= 0; i-- { 1947 ret = append(ret, logs[i]...) 1948 } 1949 } else { 1950 for i := 0; i < len(logs); i++ { 1951 ret = append(ret, logs[i]...) 1952 } 1953 } 1954 return ret 1955 } 1956 1957 // reorg takes two blocks, an old chain and a new chain, reconstructs the 1958 // blocks, inserts them to be part of the new canonical chain and accumulates 1959 // potential missing transactions, posting an event about them. 1960 // Note the new head block won't be processed here, callers need to handle it 1961 // externally.
1962 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 1963 var ( 1964 newChain types.Blocks 1965 oldChain types.Blocks 1966 commonBlock *types.Block 1967 1968 deletedTxs types.Transactions 1969 addedTxs types.Transactions 1970 1971 deletedLogs [][]*types.Log 1972 rebirthLogs [][]*types.Log 1973 ) 1974 // Reduce the longer chain to the same number as the shorter one 1975 if oldBlock.NumberU64() > newBlock.NumberU64() { 1976 // Old chain is longer, gather all transactions and logs as deleted ones 1977 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 1978 oldChain = append(oldChain, oldBlock) 1979 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1980 1981 // Collect deleted logs for notification 1982 logs := bc.collectLogs(oldBlock.Hash(), true) 1983 if len(logs) > 0 { 1984 deletedLogs = append(deletedLogs, logs) 1985 } 1986 } 1987 } else { 1988 // New chain is longer, stash all blocks away for subsequent insertion 1989 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 1990 newChain = append(newChain, newBlock) 1991 } 1992 } 1993 if oldBlock == nil { 1994 return fmt.Errorf("invalid old chain") 1995 } 1996 if newBlock == nil { 1997 return fmt.Errorf("invalid new chain") 1998 } 1999 // Both sides of the reorg are at the same number, reduce both until the common 2000 // ancestor is found 2001 for { 2002 // If the common ancestor was found, bail out 2003 if oldBlock.Hash() == newBlock.Hash() { 2004 commonBlock = oldBlock 2005 break 2006 } 2007 // Remove an old block as well as stash away a new block 2008 oldChain = append(oldChain, oldBlock) 2009 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 2010 2011 // Collect deleted logs for notification 2012 logs := bc.collectLogs(oldBlock.Hash(), true) 2013 if len(logs) > 0 { 2014 deletedLogs = append(deletedLogs, logs) 2015 } 2016 newChain = append(newChain, newBlock) 2017 2018 // Step back with both chains 2019 oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) 2020 if oldBlock == nil { 2021 return fmt.Errorf("invalid old chain") 2022 } 2023 newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 2024 if newBlock == nil { 2025 return fmt.Errorf("invalid new chain") 2026 } 2027 } 2028 // Ensure the user sees large reorgs 2029 if len(oldChain) > 0 && len(newChain) > 0 { 2030 logFn := log.Info 2031 msg := "Chain reorg detected" 2032 if len(oldChain) > 63 { 2033 msg = "Large chain reorg detected" 2034 logFn = log.Warn 2035 } 2036 logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), 2037 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 2038 blockReorgAddMeter.Mark(int64(len(newChain))) 2039 blockReorgDropMeter.Mark(int64(len(oldChain))) 2040 blockReorgMeter.Mark(1) 2041 } else if len(newChain) > 0 { 2042 // Special case happens in the post merge stage that current head is 2043 // the ancestor of new head while these two blocks are not consecutive 2044 log.Info("Extend chain", "add", len(newChain), "number", newChain[0].NumberU64(), "hash", newChain[0].Hash()) 2045 blockReorgAddMeter.Mark(int64(len(newChain))) 2046 } else { 2047 // len(newChain) == 0 && len(oldChain) > 0 2048 // rewind the canonical chain to a lower point. 
2049 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain)) 2050 } 2051 // Insert the new chain (except the head block), in reverse order, 2052 // taking care of the proper incremental order. 2053 for i := len(newChain) - 1; i >= 1; i-- { 2054 // Insert the block in the canonical way, re-writing history 2055 bc.writeHeadBlock(newChain[i]) 2056 2057 // Collect reborn logs due to chain reorg 2058 logs := bc.collectLogs(newChain[i].Hash(), false) 2059 if len(logs) > 0 { 2060 rebirthLogs = append(rebirthLogs, logs) 2061 } 2062 // Collect the newly added transactions. 2063 addedTxs = append(addedTxs, newChain[i].Transactions()...) 2064 } 2065 // Delete useless indexes right now, which include the non-canonical 2066 // transaction indexes and the canonical chain indexes above the new head. 2067 indexesBatch := bc.db.NewBatch() 2068 for _, tx := range types.TxDifference(deletedTxs, addedTxs) { 2069 rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash()) 2070 } 2071 // Delete any canonical number assignments above the new head 2072 number := bc.CurrentBlock().NumberU64() 2073 for i := number + 1; ; i++ { 2074 hash := rawdb.ReadCanonicalHash(bc.db, i) 2075 if hash == (common.Hash{}) { 2076 break 2077 } 2078 rawdb.DeleteCanonicalHash(indexesBatch, i) 2079 } 2080 if err := indexesBatch.Write(); err != nil { 2081 log.Crit("Failed to delete useless indexes", "err", err) 2082 } 2083 // If any logs need to be fired, do it now. In theory we could avoid creating 2084 // this goroutine if there are no events to fire, but realistically that only 2085 // ever happens if we're reorging empty blocks, which will only happen on idle 2086 // networks where performance is not an issue either way. 2087 if len(deletedLogs) > 0 { 2088 bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) 2089 } 2090 if len(rebirthLogs) > 0 { 2091 bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) 2092 } 2093 if len(oldChain) > 0 { 2094 for i := len(oldChain) - 1; i >= 0; i-- { 2095 bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) 2096 } 2097 } 2098 return nil 2099 } 2100 2101 // InsertBlockWithoutSetHead executes the block, runs the necessary verification 2102 // upon it and then persists the block and the associated state into the database. 2103 // The key difference from InsertChain is that it won't update the canonical 2104 // chain. It relies on the additional SetCanonical call to finalize the entire 2105 // procedure. 2106 func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error { 2107 if !bc.chainmu.TryLock() { 2108 return errChainStopped 2109 } 2110 defer bc.chainmu.Unlock() 2111 2112 _, err := bc.insertChain(types.Blocks{block}, true, false) 2113 return err 2114 } 2115 2116 // SetCanonical rewinds the chain to set the new head block as the specified 2117 // block. It's possible that the state of the new head is missing, and it will 2118 // be recovered in this function as well. 2119 func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) { 2120 if !bc.chainmu.TryLock() { 2121 return common.Hash{}, errChainStopped 2122 } 2123 defer bc.chainmu.Unlock() 2124 2125 // Re-execute the reorged chain in case the head state is missing.
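// recoverAncestors (see above) walks back to the closest ancestor whose state is still present
// and re-executes every block from there up to the requested head.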
2126 if !bc.HasState(head.Root()) { 2127 if latestValidHash, err := bc.recoverAncestors(head); err != nil { 2128 return latestValidHash, err 2129 } 2130 log.Info("Recovered head state", "number", head.Number(), "hash", head.Hash()) 2131 } 2132 // Run the reorg if necessary and set the given block as new head. 2133 start := time.Now() 2134 if head.ParentHash() != bc.CurrentBlock().Hash() { 2135 if err := bc.reorg(bc.CurrentBlock(), head); err != nil { 2136 return common.Hash{}, err 2137 } 2138 } 2139 bc.writeHeadBlock(head) 2140 2141 // Emit events 2142 logs := bc.collectLogs(head.Hash(), false) 2143 bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs}) 2144 if len(logs) > 0 { 2145 bc.logsFeed.Send(logs) 2146 } 2147 bc.chainHeadFeed.Send(ChainHeadEvent{Block: head}) 2148 2149 context := []interface{}{ 2150 "number", head.Number(), 2151 "hash", head.Hash(), 2152 "root", head.Root(), 2153 "elapsed", time.Since(start), 2154 } 2155 if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute { 2156 context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) 2157 } 2158 log.Info("Chain head was updated", context...) 2159 return head.Hash(), nil 2160 } 2161 2162 func (bc *BlockChain) updateFutureBlocks() { 2163 futureTimer := time.NewTicker(5 * time.Second) 2164 defer futureTimer.Stop() 2165 defer bc.wg.Done() 2166 for { 2167 select { 2168 case <-futureTimer.C: 2169 bc.procFutureBlocks() 2170 case <-bc.quit: 2171 return 2172 } 2173 } 2174 } 2175 2176 // skipBlock returns 'true', if the block being imported can be skipped over, meaning 2177 // that the block does not need to be processed but can be considered already fully 'done'. 2178 func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { 2179 // We can only ever bypass processing if the only error returned by the validator 2180 // is ErrKnownBlock, which means all checks passed, but we already have the block 2181 // and state. 2182 if !errors.Is(err, ErrKnownBlock) { 2183 return false 2184 } 2185 // If we're not using snapshots, we can skip this, since we have both block 2186 // and (trie-) state 2187 if bc.snaps == nil { 2188 return true 2189 } 2190 var ( 2191 header = it.current() // header can't be nil 2192 parentRoot common.Hash 2193 ) 2194 // If we also have the snapshot-state, we can skip the processing. 2195 if bc.snaps.Snapshot(header.Root) != nil { 2196 return true 2197 } 2198 // In this case, we have the trie-state but not snapshot-state. If the parent 2199 // snapshot-state exists, we need to process this in order to not get a gap 2200 // in the snapshot layers. 2201 // Resolve parent block 2202 if parent := it.previous(); parent != nil { 2203 parentRoot = parent.Root 2204 } else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil { 2205 parentRoot = parent.Root 2206 } 2207 if parentRoot == (common.Hash{}) { 2208 return false // Theoretically impossible case 2209 } 2210 // Parent is also missing snapshot: we can skip this. Otherwise process. 2211 if bc.snaps.Snapshot(parentRoot) == nil { 2212 return true 2213 } 2214 return false 2215 } 2216 2217 // maintainTxIndex is responsible for the construction and deletion of the 2218 // transaction index. 2219 // 2220 // User can use flag `txlookuplimit` to specify a "recentness" block, below 2221 // which ancient tx indices get deleted. If `txlookuplimit` is 0, it means 2222 // all tx indices will be reserved. 
2223 // 2224 // The user can adjust the txlookuplimit value for each launch after fast 2225 // sync, Geth will automatically construct the missing indices and delete 2226 // the extra indices. 2227 func (bc *BlockChain) maintainTxIndex(ancients uint64) { 2228 defer bc.wg.Done() 2229 2230 // Before starting the actual maintenance, we need to handle a special case, 2231 // where user might init Geth with an external ancient database. If so, we 2232 // need to reindex all necessary transactions before starting to process any 2233 // pruning requests. 2234 if ancients > 0 { 2235 var from = uint64(0) 2236 if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit { 2237 from = ancients - bc.txLookupLimit 2238 } 2239 rawdb.IndexTransactions(bc.db, from, ancients, bc.quit) 2240 } 2241 2242 // indexBlocks reindexes or unindexes transactions depending on user configuration 2243 indexBlocks := func(tail *uint64, head uint64, done chan struct{}) { 2244 defer func() { done <- struct{}{} }() 2245 2246 // If the user just upgraded Geth to a new version which supports transaction 2247 // index pruning, write the new tail and remove anything older. 2248 if tail == nil { 2249 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2250 // Nothing to delete, write the tail and return 2251 rawdb.WriteTxIndexTail(bc.db, 0) 2252 } else { 2253 // Prune all stale tx indices and record the tx index tail 2254 rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1, bc.quit) 2255 } 2256 return 2257 } 2258 // If a previous indexing existed, make sure that we fill in any missing entries 2259 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2260 if *tail > 0 { 2261 // It can happen when chain is rewound to a historical point which 2262 // is even lower than the indexes tail, recap the indexing target 2263 // to new head to avoid reading non-existent block bodies. 2264 end := *tail 2265 if end > head+1 { 2266 end = head + 1 2267 } 2268 rawdb.IndexTransactions(bc.db, 0, end, bc.quit) 2269 } 2270 return 2271 } 2272 // Update the transaction index to the new chain state 2273 if head-bc.txLookupLimit+1 < *tail { 2274 // Reindex a part of missing indices and rewind index tail to HEAD-limit 2275 rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit) 2276 } else { 2277 // Unindex a part of stale indices and forward index tail to HEAD-limit 2278 rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit) 2279 } 2280 } 2281 2282 // Any reindexing done, start listening to chain events and moving the index window 2283 var ( 2284 done chan struct{} // Non-nil if background unindexing or reindexing routine is active. 2285 headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed 2286 ) 2287 sub := bc.SubscribeChainHeadEvent(headCh) 2288 if sub == nil { 2289 return 2290 } 2291 defer sub.Unsubscribe() 2292 2293 for { 2294 select { 2295 case head := <-headCh: 2296 if done == nil { 2297 done = make(chan struct{}) 2298 go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done) 2299 } 2300 case <-done: 2301 done = nil 2302 case <-bc.quit: 2303 if done != nil { 2304 log.Info("Waiting background transaction indexer to exit") 2305 <-done 2306 } 2307 return 2308 } 2309 } 2310 } 2311 2312 // reportBlock logs a bad block error. 
2313 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 2314 rawdb.WriteBadBlock(bc.db, block) 2315 2316 var receiptString string 2317 for i, receipt := range receipts { 2318 receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", 2319 i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(), 2320 receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) 2321 } 2322 log.Error(fmt.Sprintf(` 2323 ########## BAD BLOCK ######### 2324 Chain config: %v 2325 2326 Number: %v 2327 Hash: 0x%x 2328 %v 2329 2330 Error: %v 2331 ############################## 2332 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 2333 } 2334 2335 // InsertHeaderChain attempts to insert the given header chain into the local 2336 // chain, possibly creating a reorg. If an error is returned, it will return the 2337 // index number of the failing header as well as an error describing what went wrong. 2338 // 2339 // The checkFreq parameter can be used to fine tune whether nonce verification 2340 // should be done or not. The reason behind the optional check is that some 2341 // of the header retrieval mechanisms already need to verify nonces, as well as 2342 // because nonces can be verified sparsely, not needing to check each. 2343 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 2344 if len(chain) == 0 { 2345 return 0, nil 2346 } 2347 start := time.Now() 2348 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 2349 return i, err 2350 } 2351 2352 if !bc.chainmu.TryLock() { 2353 return 0, errChainStopped 2354 } 2355 defer bc.chainmu.Unlock() 2356 _, err := bc.hc.InsertHeaderChain(chain, start, bc.forker) 2357 return 0, err 2358 }
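// The sketches below are illustrative only and not part of the original file; the helper names
// are hypothetical. The first shows the retention rule that maintainTxIndex applies through
// indexBlocks: with a non-zero txLookupLimit, only transactions in blocks
// [head-txLookupLimit+1, head] stay indexed, and everything below that tail may be unindexed.
func txIndexTailSketch(head, txLookupLimit uint64) uint64 {
	if txLookupLimit == 0 || head < txLookupLimit {
		return 0 // keep the lookup index for the whole chain
	}
	return head - txLookupLimit + 1 // e.g. head=1000, limit=100 keeps blocks 901..1000 indexed
}

// The second sketch shows how a caller might use the index returned by InsertChain (and,
// analogously, InsertHeaderChain) to report the exact block that was rejected.
func importBlocksSketch(bc *BlockChain, blocks types.Blocks) error {
	n, err := bc.InsertChain(blocks)
	if err != nil && n < len(blocks) {
		return fmt.Errorf("block #%d [%x..] rejected: %w", blocks[n].NumberU64(), blocks[n].Hash().Bytes()[:4], err)
	}
	return err
}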