github.com/ccm-chain/ccmchain@v1.0.0/core/blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ccm-chain/ccmchain/common"
	"github.com/ccm-chain/ccmchain/common/mclock"
	"github.com/ccm-chain/ccmchain/common/prque"
	"github.com/ccm-chain/ccmchain/consensus"
	"github.com/ccm-chain/ccmchain/core/rawdb"
	"github.com/ccm-chain/ccmchain/core/state"
	"github.com/ccm-chain/ccmchain/core/state/snapshot"
	"github.com/ccm-chain/ccmchain/core/types"
	"github.com/ccm-chain/ccmchain/core/vm"
	"github.com/ccm-chain/ccmchain/database"
	"github.com/ccm-chain/ccmchain/event"
	"github.com/ccm-chain/ccmchain/log"
	"github.com/ccm-chain/ccmchain/metrics"
	"github.com/ccm-chain/ccmchain/params"
	"github.com/ccm-chain/ccmchain/rlp"
	"github.com/ccm-chain/ccmchain/trie"
	lru "github.com/hashicorp/golang-lru"
)

var (
	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)

	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)

	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	blockReorgMeter     = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
	blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
	blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)

	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)

	errInsertionInterrupted = errors.New("insertion is interrupted")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	receiptsCacheLimit  = 32
	txLookupCacheLimit  = 1024
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
	TriesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//     * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//     * the `Bloom` field of receipt is deleted
	//     * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//   The following incompatible database changes were added:
	//     * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//     * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//       receipts' corresponding block
	// - Version 6
	//   The following incompatible database changes were added:
	//     * Transaction lookup information stores the corresponding block number instead of block hash
	// - Version 7
	//   The following incompatible database changes were added:
	//     * Use freezer as the ancient database to maintain all ancient data
	// - Version 8
	//   The following incompatible database changes were added:
	//     * New scheme for contract code in order to separate the codes and trie nodes
	BlockChainVersion uint64 = 8
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanJournal    string        // Disk journal for saving clean cache entries.
	TrieCleanRejournal  time.Duration // Time interval to dump clean cache to disk periodically
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory

	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}

// defaultCacheConfig is the default caching configuration used if none is
// specified by the user (also used during testing).
var defaultCacheConfig = &CacheConfig{
	TrieCleanLimit: 256,
	TrieDirtyLimit: 256,
	TrieTimeLimit:  5 * time.Minute,
	SnapshotLimit:  256,
	SnapshotWait:   true,
}
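// The CacheConfig above is consumed by NewBlockChain below. As an illustrative
// sketch (the values here are examples, not defaults), an archive-style node
// would disable dirty-trie caching so every state trie is flushed to disk, and
// can skip the snapshot tree entirely by setting SnapshotLimit to zero:
func exampleArchiveCacheConfig() *CacheConfig {
	return &CacheConfig{
		TrieCleanLimit:    512,  // generous clean cache for read-heavy workloads
		TrieDirtyDisabled: true, // archive mode: flush every trie to disk immediately
		SnapshotLimit:     0,    // a non-positive limit skips snapshot.New entirely
	}
}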
// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the blockchain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor, which processes the included transactions. The validation of the
// state is done in the second part of the Validator. Failing validation results
// in aborting the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical chain, whereas GetBlockByNumber always represents
// the canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     database.Database // Low level persistent database to store final content in
	snaps  *snapshot.Tree    // Snapshot tree for fast trie leaf access
	triegc *prque.Prque      // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration     // Accumulates canonical block processing for trie dumping

	// txLookupLimit is the maximum number of blocks from head whose tx indices
	// are reserved:
	//  * 0:   means no limit and regenerate any missing indexes
	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
	//  * nil: disable tx reindexer/deleter, but still index new blocks
	txLookupLimit uint64

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock

	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit          chan struct{}  // blockchain quit channel
	wg            sync.WaitGroup // chain processing wait group for shutting down
	running       int32          // 0 if chain is running, 1 when stopped
	procInterrupt int32          // interrupt signaler for block processing

	engine     consensus.Engine
	validator  Validator  // Block and state validator interface
	prefetcher Prefetcher // Block state prefetcher interface
	processor  Processor  // Block transaction processor interface
	vmConfig   vm.Config

	badBlocks       *lru.Cache                     // Bad block cache
	shouldPreserve  func(*types.Block) bool        // Function used to determine whether to preserve the given block.
	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
}
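// As an illustrative sketch of the txLookupLimit semantics documented on the
// struct field above: for head H and limit N, tx indices are retained for the
// block range [H-N+1, H], while a limit of zero keeps the whole chain indexed.
func exampleTxIndexWindow(head, limit uint64) (first, last uint64) {
	if limit == 0 || head < limit {
		return 0, head // no pruning: every block stays indexed
	}
	return head - limit + 1, head
}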
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db database.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = defaultCacheConfig
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	txLookupCache, _ := lru.New(txLookupCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit, cacheConfig.TrieCleanJournal),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		txLookupCache:  txLookupCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}

	var nilBlock *types.Block
	bc.currentBlock.Store(nilBlock)
	bc.currentFastBlock.Store(nilBlock)

	// Initialize the chain with ancient data if it isn't empty.
	var txIndexBlock uint64

	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
		// If the ancient database is not empty, reconstruct all missing
		// indices in the background.
		frozen, _ := bc.db.Ancients()
		if frozen > 0 {
			txIndexBlock = frozen
		}
	}

	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Make sure the state associated with the block is available
	head := bc.CurrentBlock()
	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
		log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
		if err := bc.SetHead(head.NumberU64()); err != nil {
			return nil, err
		}
	}
	// Ensure that a previous crash in SetHead doesn't leave extra ancients
	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
			needRewind = true
			low = fullBlock.NumberU64()
		}
		// In fast sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated, truncate the
		// extra data here.
		fastBlock := bc.CurrentFastBlock()
		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
			needRewind = true
			if fastBlock.NumberU64() < low || low == 0 {
				low = fastBlock.NumberU64()
			}
		}
		if needRewind {
			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
			if err := bc.SetHead(low); err != nil {
				return nil, err
			}
		}
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (ethash cache or clique voting snapshot). Might as well do
	// it in advance.
	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
					return nil, err
				}
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Load any existing snapshot, regenerating it if loading failed
	if bc.cacheConfig.SnapshotLimit > 0 {
		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
	}
	// Take ownership of this particular state
	go bc.update()
	if txLookupLimit != nil {
		bc.txLookupLimit = *txLookupLimit
		go bc.maintainTxIndex(txIndexBlock)
	}
	// If periodic cache journal is required, spin it up.
	if bc.cacheConfig.TrieCleanRejournal > 0 {
		if bc.cacheConfig.TrieCleanRejournal < time.Minute {
			log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
			bc.cacheConfig.TrieCleanRejournal = time.Minute
		}
		triedb := bc.stateCache.TrieDB()
		bc.wg.Add(1)
		go func() {
			defer bc.wg.Done()
			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
		}()
	}
	return bc, nil
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
	return &bc.vmConfig
}

// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plug the ancient
// store into the node seamlessly.
func (bc *BlockChain) empty() bool {
	genesis := bc.genesisBlock.Hash()
	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
		if hash != genesis {
			return false
		}
	}
	return true
}
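// An illustrative sketch of wiring up a chain with NewBlockChain. The database,
// chain config and engine are assumed to be supplied by the caller; passing a
// nil cacheConfig falls back to defaultCacheConfig, and a nil txLookupLimit
// disables the tx-index reindexer while still indexing new blocks.
func exampleOpenChain(db database.Database, config *params.ChainConfig, engine consensus.Engine) (*BlockChain, error) {
	bc, err := NewBlockChain(db, nil, config, engine, vm.Config{}, nil, nil)
	if err != nil {
		return nil, err
	}
	log.Info("Chain opened", "head", bc.CurrentBlock().NumberU64())
	return bc, nil
}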
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)
	headBlockGauge.Update(int64(currentBlock.NumberU64()))

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))

	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
			headFastBlockGauge.Update(int64(block.NumberU64()))
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
		log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
	}
	return nil
}

// SetHead rewinds the local chain to a new head. Depending on whether the node
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
	// current freezer limit to start nuking if underflown
	pivot := rawdb.ReadLastPivotNumber(bc.db)
	frozen, _ := bc.db.Ancients()

	updateFn := func(db database.KeyValueWriter, header *types.Header) (uint64, bool) {
		// Rewind the blockchain, ensuring we don't end up with a stateless head
		// block. Note, depth equality is permitted to allow using SetHead as a
		// chain reparation mechanism without deleting any data!
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			if newHeadBlock == nil {
				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
				newHeadBlock = bc.genesisBlock
			} else {
				// Block exists, keep rewinding until we find one with state
				for {
					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
							newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
							continue
						} else {
							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
							newHeadBlock = bc.genesisBlock
						}
					}
					log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
					break
				}
			}
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update the in-memory markers directly.
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
		}
		// Rewind the fast block in a simple way to the target head
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If the rewind target is unavailable, reset to the genesis state
			if newHeadFastBlock == nil {
				newHeadFastBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update the in-memory markers directly.
			bc.currentFastBlock.Store(newHeadFastBlock)
			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
		}
		head := bc.CurrentBlock().NumberU64()

		// If SetHead underflowed the freezer threshold and the block processing
		// intent afterwards is full block importing, delete the chain segment
		// between the stateful block and the SetHead target.
		var wipe bool
		if head+1 < frozen {
			wipe = pivot == nil || head >= *pivot
		}
		return head, wipe // Only force wipe if full synced
	}
	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db database.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// Truncate all relative data (header, total difficulty, body, receipt
			// and canonical hash) from the ancient store.
			if err := bc.db.TruncateAncients(num); err != nil {
				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
			}
			// Remove the hash <-> number mapping from the active store.
			rawdb.DeleteHeaderNumber(db, hash)
		} else {
			// Remove relative body and receipts from the active store.
			// The header, total difficulty and canonical hash will be
			// removed in the hc.SetHead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, bloombits, etc
	}
	// If SetHead was only called as a chain reparation method, try to skip
	// touching the header chain altogether, unless the freezer is broken
	if block := bc.CurrentBlock(); block.NumberU64() == head {
		if target, force := updateFn(bc.db, block.Header()); force {
			bc.hc.SetHead(target, updateFn, delFn)
		}
	} else {
		// Rewind the chain to the requested head and keep going backwards until a
		// block with a state is found or the fast sync pivot is passed
		log.Warn("Rewinding blockchain", "target", head)
		bc.hc.SetHead(head, updateFn, delFn)
	}
	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// regardless of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exist
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
		return err
	}
	// If everything checks out, manually set the head block
	bc.chainmu.Lock()
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
	bc.chainmu.Unlock()

	// Destroy any existing state snapshot and regenerate it in the background
	if bc.snaps != nil {
		bc.snaps.Rebuild(block.Root())
	}
	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// Snapshot returns the blockchain snapshot tree. This method is mainly used for
// testing, to make it possible to verify the snapshot after execution.
//
// Warning: There are no guarantees about the safety of using the returned 'snap' if the
// blockchain is simultaneously importing blocks, so take care.
func (bc *BlockChain) Snapshot() *snapshot.Tree {
	return bc.snaps
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	return bc.processor
}
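// An illustrative sketch of rewinding with SetHead. On a full node the rewind
// keeps going below the requested block until one with available state is
// found, so the resulting head is re-read rather than assumed.
func exampleRewind(bc *BlockChain, target uint64) error {
	if err := bc.SetHead(target); err != nil {
		return err
	}
	if head := bc.CurrentBlock().NumberU64(); head != target {
		log.Warn("Rewound beyond requested block", "requested", target, "head", head)
	}
	return nil
}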
// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache, bc.snaps)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
	return bc.stateCache
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	batch := bc.db.NewBatch()
	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
	rawdb.WriteBlock(batch, genesis)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.writeHeadBlock(genesis)

	// Last update all in-memory chain markers
	bc.genesisBlock = genesis
	bc.currentBlock.Store(bc.genesisBlock)
	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)
	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	return nil
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}
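// An illustrative sketch of exporting the most recent blocks with ExportN. The
// writer is assumed to be supplied by the caller (a file, a compressor, a
// network stream); ExportN itself validates the requested bounds.
func exampleExportRecent(bc *BlockChain, w io.Writer, count uint64) error {
	if count == 0 {
		return nil // nothing requested
	}
	head := bc.CurrentBlock().NumberU64()
	first := uint64(0)
	if head >= count {
		first = head - count + 1
	}
	return bc.ExportN(w, first, head)
}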
// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `chainmu` mutex is held!
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	batch := bc.db.NewBatch()
	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
	rawdb.WriteTxLookupEntries(batch, block)
	rawdb.WriteHeadBlockHash(batch, block.Hash())

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		rawdb.WriteHeadHeaderHash(batch, block.Hash())
		rawdb.WriteHeadFastBlockHash(batch, block.Hash())
	}
	// Flush the whole batch to disk, exit the node if it failed
	if err := batch.Write(); err != nil {
		log.Crit("Failed to update chain indexes and markers", "err", err)
	}
	// Update all in-memory chain markers in the last step
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		bc.currentFastBlock.Store(block)
		headFastBlockGauge.Update(int64(block.NumberU64()))
	}
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBody(bc.db, hash, *number)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	return rawdb.HasBody(bc.db, hash, number)
}

// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
	if !bc.HasBlock(hash, number) {
		return false
	}
	if bc.receiptsCache.Contains(hash) {
		return true
	}
	return rawdb.HasReceipts(bc.db, hash, number)
}
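// An illustrative sketch of the availability levels exposed by the checks
// above (HasBlockAndState is defined just below). A fast-synced node may hold
// bodies and receipts without state, so the strongest predicate is tried first.
func exampleAvailability(bc *BlockChain, hash common.Hash, number uint64) string {
	switch {
	case bc.HasBlockAndState(hash, number):
		return "block and state available"
	case bc.HasFastBlock(hash, number):
		return "block and receipts available, state missing"
	case bc.HasBlock(hash, number):
		return "block body available only"
	default:
		return "block unknown"
	}
}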
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and its associated state trie are fully
// present in the database or not, caching the block if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
	if receipts == nil {
		return nil
	}
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}
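// An illustrative sketch of the distinction drawn in the BlockChain doc
// comment: GetBlockByHash answers from any chain in the database, while
// GetBlockByNumber only ever answers from the canonical one, so comparing the
// two tells whether a known block is canonical.
func exampleIsCanonical(bc *BlockChain, hash common.Hash) bool {
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return false // unknown block
	}
	canon := bc.GetBlockByNumber(block.NumberU64())
	return canon != nil && canon.Hash() == hash
}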
// TrieNode retrieves a blob of data associated with a trie node
// either from the ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// ContractCode retrieves a blob of data associated with a contract hash
// either from the ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.ContractCode(common.Hash{}, hash)
}

// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from the ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with the
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
	type codeReader interface {
		ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
	}
	return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}
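// An illustrative sketch of serving raw state data, e.g. to a syncing peer.
// The hashes are assumed to come from a peer request; both lookups fall
// through the ephemeral cache to persistent storage as described above.
func exampleServeState(bc *BlockChain, nodeHash, codeHash common.Hash) (node, code []byte, err error) {
	if node, err = bc.TrieNode(nodeHash); err != nil {
		return nil, nil, err
	}
	if code, err = bc.ContractCode(codeHash); err != nil {
		return nil, nil, err
	}
	return node, code, nil
}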
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	bc.StopInsert()
	bc.wg.Wait()

	// Ensure that the entirety of the state snapshot is journalled to disk.
	var snapBase common.Hash
	if bc.snaps != nil {
		var err error
		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
			log.Error("Failed to journal state snapshot", "err", err)
		}
	}
	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true, nil); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		if snapBase != (common.Hash{}) {
			log.Info("Writing snapshot state to disk", "root", snapBase)
			if err := triedb.Commit(snapBase, true, nil); err != nil {
				log.Error("Failed to commit recent state trie", "err", err)
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	// Ensure all live cached entries are saved to disk, so that we can skip
	// cache warmup when the node restarts.
	if bc.cacheConfig.TrieCleanJournal != "" {
		triedb := bc.stateCache.TrieDB()
		triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
	}
	log.Info("Blockchain stopped")
}

// StopInsert interrupts all insertion methods, causing them to return
// errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
// calling this method.
func (bc *BlockChain) StopInsert() {
	atomic.StoreInt32(&bc.procInterrupt, 1)
}

// insertStopped returns true after StopInsert has been called.
func (bc *BlockChain) insertStopped() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		sort.Slice(blocks, func(i, j int) bool {
			return blocks[i].NumberU64() < blocks[j].NumberU64()
		})
		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status of a block write operation.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// truncateAncient rewinds the blockchain to the specified header and deletes all
// data in the ancient store that exceeds the specified header.
func (bc *BlockChain) truncateAncient(head uint64) error {
	frozen, err := bc.db.Ancients()
	if err != nil {
		return err
	}
	// Short circuit if there is no data to truncate in the ancient store.
	if frozen <= head+1 {
		return nil
	}
	// Truncate all the data in the freezer beyond the specified head
	if err := bc.db.TruncateAncients(head + 1); err != nil {
		return err
	}
	// Clear out any stale content from the header chain caches
	bc.hc.headerCache.Purge()
	bc.hc.tdCache.Purge()
	bc.hc.numberCache.Purge()

	// Clear out any stale content from the block caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	log.Info("Rewound ancient data", "number", head)
	return nil
}

// numberHash is just a container for a number and a hash, to represent a block
type numberHash struct {
	number uint64
	hash   common.Hash
}
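// An illustrative sketch of a shutdown sequence built on Stop above. Stop is
// idempotent thanks to the running flag, so deferring it is safe even when an
// explicit shutdown path has already run.
func exampleRunAndStop(bc *BlockChain) {
	defer bc.Stop() // journals the snapshot and recent tries before exit
	// ... use the chain: insert blocks, serve queries ...
}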
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
	// We don't require the chainmu here since we want to maximize the
	// concurrency of header insertion and receipt insertion.
	bc.wg.Add(1)
	defer bc.wg.Done()

	var (
		ancientBlocks, liveBlocks     types.Blocks
		ancientReceipts, liveReceipts []types.Receipts
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 0; i < len(blockChain); i++ {
		if i != 0 {
			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
			}
		}
		if blockChain[i].NumberU64() <= ancientLimit {
			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
		} else {
			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		size  = 0
	)
	// updateHead updates the head fast sync block if the inserted blocks are better
	// and returns an indicator whether the inserted blocks are canonical.
	updateHead := func(head *types.Block) bool {
		bc.chainmu.Lock()

		// Rewind may have occurred, skip in that case.
		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
			currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
			if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
				rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
				bc.currentFastBlock.Store(head)
				headFastBlockGauge.Update(int64(head.NumberU64()))
				bc.chainmu.Unlock()
				return true
			}
		}
		bc.chainmu.Unlock()
		return false
	}
	// writeAncient writes the blockchain and corresponding receipt chain into the ancient store.
	//
	// This function only accepts canonical chain data. All side chain data will
	// be reverted eventually.
	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
		var (
			previous = bc.CurrentFastBlock()
			batch    = bc.db.NewBatch()
		)
		// If any error occurs before updating the head or we are inserting a side chain,
		// all the data written this time will be rolled back.
		defer func() {
			if previous != nil {
				if err := bc.truncateAncient(previous.NumberU64()); err != nil {
					log.Crit("Truncate ancient store failed", "err", err)
				}
			}
		}()
		var deleted []*numberHash
		for i, block := range blockChain {
			// Short circuit insertion if shutting down or processing failed
			if bc.insertStopped() {
				return 0, errInsertionInterrupted
			}
			// Short circuit insertion if it is required (used in testing only)
			if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
				return i, errors.New("insertion is terminated for testing purpose")
			}
			// Short circuit if the owner header is unknown
			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
			}
			var (
				start  = time.Now()
				logged = time.Now()
				count  int
			)
			// Migrate all ancient blocks. This can happen if someone upgrades from Geth
			// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
			// long term.
			for {
				// We can ignore the error here since light client won't hit this code path.
				frozen, _ := bc.db.Ancients()
				if frozen >= block.NumberU64() {
					break
				}
				h := rawdb.ReadCanonicalHash(bc.db, frozen)
				b := rawdb.ReadBlock(bc.db, h, frozen)
				size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
				count += 1

				// Always keep genesis block in active database.
				if b.NumberU64() != 0 {
					deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
				}
				if time.Since(logged) > 8*time.Second {
					log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
					logged = time.Now()
				}
				// Don't collect too much in-memory, write it out every 100K blocks
				if len(deleted) > 100000 {
					// Sync the ancient store explicitly to ensure all data has been flushed to disk.
					if err := bc.db.Sync(); err != nil {
						return 0, err
					}
					// Wipe out canonical block data.
					for _, nh := range deleted {
						rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
						rawdb.DeleteCanonicalHash(batch, nh.number)
					}
					if err := batch.Write(); err != nil {
						return 0, err
					}
					batch.Reset()
					// Wipe out side chain too.
					for _, nh := range deleted {
						for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
							rawdb.DeleteBlock(batch, hash, nh.number)
						}
					}
					if err := batch.Write(); err != nil {
						return 0, err
					}
					batch.Reset()
					deleted = deleted[:0] // reset the already-flushed deletions
				}
			}
			if count > 0 {
				log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			}
			// Flush data into ancient database.
			size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))

			// Write tx indices if any condition is satisfied:
			//  * If the user requires all tx indices to be reserved (txlookuplimit=0)
			//  * If all ancient tx indices are required to be reserved (txlookuplimit is even higher than ancientlimit)
			//  * If the block number is large enough to be regarded as a recent block
			// It means blocks below ancientLimit-txlookupLimit won't be indexed.
			//
			// But if `TxIndexTail` is not nil, e.g. when Geth is initialized with
			// an external ancient database, the blockchain starts a background
			// routine during setup to re-index everything in the
			// [ancients - txlookupLimit, ancients) range. In this case, all tx
			// indices of newly imported blocks should be generated.
			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
				rawdb.WriteTxLookupEntries(batch, block)
			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
				rawdb.WriteTxLookupEntries(batch, block)
			}
			stats.processed++
		}
		// Flush all tx-lookup index data.
		size += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
		batch.Reset()

		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
		if err := bc.db.Sync(); err != nil {
			return 0, err
		}
		if !updateHead(blockChain[len(blockChain)-1]) {
			return 0, errors.New("side blocks can't be accepted as the ancient chain data")
		}
		previous = nil // disable rollback explicitly

		// Wipe out canonical block data.
		for _, nh := range deleted {
			rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
			rawdb.DeleteCanonicalHash(batch, nh.number)
		}
		for _, block := range blockChain {
			// Always keep genesis block in active database.
			if block.NumberU64() != 0 {
				rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
				rawdb.DeleteCanonicalHash(batch, block.NumberU64())
			}
		}
		if err := batch.Write(); err != nil {
			return 0, err
		}
		batch.Reset()

		// Wipe out side chain too.
		for _, nh := range deleted {
			for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
				rawdb.DeleteBlock(batch, hash, nh.number)
			}
		}
		for _, block := range blockChain {
			// Always keep genesis block in active database.
			if block.NumberU64() != 0 {
				for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
					rawdb.DeleteBlock(batch, hash, block.NumberU64())
				}
			}
		}
		if err := batch.Write(); err != nil {
			return 0, err
		}
		return 0, nil
	}
	// writeLive writes the blockchain and corresponding receipt chain into the active store.
	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
		skipPresenceCheck := false
		batch := bc.db.NewBatch()
		for i, block := range blockChain {
			// Short circuit insertion if shutting down or processing failed
			if bc.insertStopped() {
				return 0, errInsertionInterrupted
			}
			// Short circuit if the owner header is unknown
			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
			}
			if !skipPresenceCheck {
				// Ignore if the entire data is already known
				if bc.HasBlock(block.Hash(), block.NumberU64()) {
					stats.ignored++
					continue
				} else {
					// If block N is not present, neither are the later blocks.
					// This should be true, but if we are mistaken, the shortcut
					// here will only cause overwriting of some existing data.
					skipPresenceCheck = true
				}
			}
			// Write all the data out into the database
			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
			rawdb.WriteTxLookupEntries(batch, block) // Always write tx indices for live blocks, we assume they are needed

			// Write everything that belongs to the blocks into the database, so that
			// we can ensure all components of the body are complete (body, receipts,
			// tx indexes)
			if batch.ValueSize() >= database.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return 0, err
				}
				size += batch.ValueSize()
				batch.Reset()
			}
			stats.processed++
		}
		// Write everything that belongs to the blocks into the database, so that
		// we can ensure all components of the body are complete (body, receipts,
		// tx indexes)
		if batch.ValueSize() > 0 {
			size += batch.ValueSize()
			if err := batch.Write(); err != nil {
				return 0, err
			}
		}
		updateHead(blockChain[len(blockChain)-1])
		return 0, nil
	}
	// Write downloaded chain data and corresponding receipt chain data
	if len(ancientBlocks) > 0 {
		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}
	// Write the tx index tail (block number from where we index) before writing any live blocks
	if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
		// The tx index tail can only be one of the following two options:
		//  * 0: all ancient blocks have been indexed
		//  * ancient-limit: the indices of blocks before ancient-limit are ignored
		if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
				rawdb.WriteTxIndexTail(bc.db, 0)
			} else {
				rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
			}
		}
	}
	if len(liveBlocks) > 0 {
		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}

	head := blockChain[len(blockChain)-1]
	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
		"size", common.StorageSize(size),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

// SetTxLookupLimit updates the txlookup limit to the original one stored in the
// database if the new one mismatches the stored one.
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
	bc.txLookupLimit = limit
}

// TxLookupLimit retrieves the txlookup limit used by the blockchain to prune
// stale transaction indices.
func (bc *BlockChain) TxLookupLimit() uint64 {
	return bc.txLookupLimit
}

var lastWrite uint64
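// An illustrative sketch of completing a fast-synced header chain with
// InsertReceiptChain. Blocks at or below ancientLimit land in the freezer and
// the rest in the active store; on failure, n reports the offending index.
func exampleImportReceipts(bc *BlockChain, blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) error {
	n, err := bc.InsertReceiptChain(blocks, receipts, ancientLimit)
	if err != nil {
		log.Error("Receipt chain import failed", "index", n, "err", err)
	}
	return err
}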
// writeBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	batch := bc.db.NewBatch()
	rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
	rawdb.WriteBlock(batch, block)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write block into disk", "err", err)
	}
	return nil
}

// writeKnownBlock updates the head block flag with a known block
// and introduces a chain reorg if necessary.
func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
	bc.wg.Add(1)
	defer bc.wg.Done()

	current := bc.CurrentBlock()
	if block.ParentHash() != current.Hash() {
		if err := bc.reorg(current, block); err != nil {
			return err
		}
	}
	bc.writeHeadBlock(block)
	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
}
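// An illustrative sketch of handling the WriteStatus returned by
// WriteBlockWithState. The receipts, logs and statedb are assumed to come from
// a prior Processor run over the block.
func exampleCommitBlock(bc *BlockChain, block *types.Block, receipts []*types.Receipt, logs []*types.Log, statedb *state.StateDB) error {
	status, err := bc.WriteBlockWithState(block, receipts, logs, statedb, true)
	if err != nil {
		return err
	}
	switch status {
	case CanonStatTy:
		log.Debug("Block became the canonical head", "number", block.NumberU64())
	case SideStatTy:
		log.Debug("Block stored on a side chain", "number", block.NumberU64())
	}
	return nil
}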
// writeBlockWithState writes the block and all associated state to the
// database, but expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Regardless of the canonical status, write the block itself to the database.
	//
	// Note that all the components of the block (td, hash->number map, header,
	// body, receipts) should be written atomically. blockBatch is used to
	// contain all of them.
	blockBatch := bc.db.NewBatch()
	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
	rawdb.WriteBlock(blockBatch, block)
	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
	rawdb.WritePreimages(blockBatch, state.Preimages())
	if err := blockBatch.Write(); err != nil {
		log.Crit("Failed to write block into disk", "err", err)
	}
	// Commit all cached state changes into the underlying memory database.
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.TrieDirtyDisabled {
		if err := triedb.Commit(root, false, nil); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > TriesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - database.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - TriesInMemory

			// If we exceeded our time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
					}
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true, nil)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}
	// If the total difficulty is higher than our known TD, add the block to the
	// canonical chain. The second clause in the if statement reduces the
	// vulnerability to selfish mining; please refer to
	// http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	// Set new head.
	if status == CanonStatTy {
		bc.writeHeadBlock(block)
	}
	bc.futureBlocks.Remove(block.Hash())

	if status == CanonStatTy {
		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
		if len(logs) > 0 {
			bc.logsFeed.Send(logs)
		}
		// In theory we should fire a ChainHeadEvent when we inject a canonical
		// block, but sometimes we insert a whole batch of canonical blocks. To
		// avoid firing too many ChainHeadEvents, we fire one accumulated event
		// at the end of the batch and suppress the per-block event here.
		if emitHeadEvent {
			bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
		}
	} else {
		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
	}
	return status, nil
}
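// Illustrative sketch (not part of the original source): the canonical-status
// decision made by writeBlockWithState above, extracted as a pure function
// (the name is hypothetical). Higher total difficulty always wins; at equal
// TD the lower block number wins, and exact ties are broken by local miner
// preference plus a coin flip to blunt selfish-mining strategies.
func exampleShouldReorg(externTd, localTd *big.Int, newNumber, currentNumber uint64, preserveCurrent, preserveNew bool) bool {
	switch externTd.Cmp(localTd) {
	case 1: // incoming chain is heavier, always reorg
		return true
	case -1: // incoming chain is lighter, never reorg
		return false
	}
	// Equal total difficulty: prefer the shorter chain, then the locally
	// preserved block, and finally randomise the choice.
	if newNumber != currentNumber {
		return newNumber < currentNumber
	}
	return !preserveCurrent && (preserveNew || mrand.Float64() < 0.5)
}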
// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
	if block.Time() > max {
		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
	}
	bc.futureBlocks.Add(block.Hash(), block)
	return nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned, it will return
// the index number of the failing block as well as an error describing what
// went wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}

	bc.blockProcFeed.Send(true)
	defer bc.blockProcFeed.Send(false)

	// Remove already known canon-blocks
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	return n, err
}
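// Illustrative sketch (not part of the original source): a minimal caller of
// InsertChain. The batch must be ordered by number and linked by parent hash,
// or the contiguity check above rejects the whole batch. Sorting alone does
// not guarantee linkage; it merely satisfies the ordering half. The function
// name is hypothetical.
func exampleImportBatch(bc *BlockChain, blocks types.Blocks) error {
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i].NumberU64() < blocks[j].NumberU64()
	})
	if n, err := bc.InsertChain(blocks); err != nil {
		return fmt.Errorf("import aborted at batch index %d: %w", n, err)
	}
	return nil
}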
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) the chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then a new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again.
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	var (
		stats     = insertStats{startTime: mclock.Now()}
		lastCanon *types.Block
	)
	// Fire a single chain head event if we've progressed the chain
	defer func() {
		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
			bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
		}
	}()
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Peek the error for the first block to decide the direction of the import logic
	it := newInsertIterator(chain, results, bc.validator)

	block, err := it.next()

	// Left-trim all the known blocks
	if err == ErrKnownBlock {
		// The first block (and state) is known:
		// 1. We did a roll-back, and should now do a re-import
		// 2. The block is stored as a sidechain, and is lying about its state root,
		//    passing a state root from the canonical chain which has not been verified.
		// Skip all known blocks that are behind us.
		var (
			current  = bc.CurrentBlock()
			localTd  = bc.GetTd(current.Hash(), current.NumberU64())
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
		)
		for block != nil && err == ErrKnownBlock {
			externTd = new(big.Int).Add(externTd, block.Difficulty())
			if localTd.Cmp(externTd) < 0 {
				break
			}
			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
			stats.ignored++

			block, err = it.next()
		}
		// The remaining blocks are still known blocks, and the only scenario here is:
		// during a fast sync, the pivot point was already submitted but a rollback
		// happened. The node then reset the head full block to a lower height via
		// `rollback` and left a few known blocks in the database.
		//
		// When the node runs a fast sync again, it can re-import a batch of known
		// blocks via `insertChain`, while a part of them have a higher total
		// difficulty than the current head full block (the new pivot point).
		for block != nil && err == ErrKnownBlock {
			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, err
			}
			lastCanon = block

			block, err = it.next()
		}
		// Falls through to the block import
	}
	switch {
	// First block is pruned, insert as a sidechain and reorg only if TD grows enough
	case errors.Is(err, consensus.ErrPrunedAncestor):
		log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
		return bc.insertSideChain(block, it)

	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
	case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
		for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, err
			}
			block, err = it.next()
		}
		stats.queued += it.processed()
		stats.ignored += it.remaining()

		// If there are any still remaining, mark as ignored
		return it.index, err

	// Some other error occurred, abort
	case err != nil:
		bc.futureBlocks.Remove(block.Hash())
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return it.index, err
	}
	// No validation errors for the first block (or chain prefix skipped)
	for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if bc.insertStopped() {
			log.Debug("Abort during block processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, ErrBlacklistedHash
		}
		// If the block is known (in the middle of the chain), it's a special case for
		// Clique blocks where they can share state among each other, so importing an
		// older block might complete the state of the subsequent one. In this case,
		// just skip the block (we already validated it once fully (and crashed), since
		// its header and body were already in the database).
		if err == ErrKnownBlock {
			logger := log.Debug
			if bc.chainConfig.Clique == nil {
				logger = log.Warn
			}
			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"root", block.Root())

			// Special case: commit the empty receipt slice if we meet a known block
			// in the middle of the chain. This can only happen on a Clique chain.
			// Whenever we insert blocks via `insertSideChain`, we only commit `td`,
			// `header` and `body` if they are non-existent. Since we don't have the
			// receipts without re-execution, there is nothing to commit. But if the
			// sidechain is eventually adopted as the canonical chain, it needs to be
			// re-executed for the missing state; in this special case (skipped
			// re-execution) we would otherwise lose the empty receipt entry.
			if len(block.Transactions()) == 0 {
				rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
			} else {
				log.Error("Please file an issue, skip known block execution without receipt",
					"hash", block.Hash(), "number", block.NumberU64())
			}
			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, err
			}
			stats.processed++

			// We can assume that logs are empty here, since the only way for consecutive
			// Clique blocks to have the same state is if there are no transactions.
			lastCanon = block
			continue
		}
		// Retrieve the parent block and its state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
		}
		statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
		if err != nil {
			return it.index, err
		}
		// If we have a followup block, run that against the current state to pre-cache
		// transactions and probabilistically some of the account/storage trie nodes.
		var followupInterrupt uint32
		if !bc.cacheConfig.TrieCleanNoPrefetch {
			if followup, err := it.peek(); followup != nil && err == nil {
				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)

					blockPrefetchExecuteTimer.Update(time.Since(start))
					if atomic.LoadUint32(interrupt) == 1 {
						blockPrefetchInterruptMeter.Mark(1)
					}
				}(time.Now(), followup, throwaway, &followupInterrupt)
			}
		}
		// Process block using the parent state as reference point
		substart := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, err
		}
		// Update the metrics touched during block processing
		accountReadTimer.Update(statedb.AccountReads)                 // Account reads are complete, we can mark them
		storageReadTimer.Update(statedb.StorageReads)                 // Storage reads are complete, we can mark them
		accountUpdateTimer.Update(statedb.AccountUpdates)             // Account updates are complete, we can mark them
		storageUpdateTimer.Update(statedb.StorageUpdates)             // Storage updates are complete, we can mark them
		snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
		snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them

		triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
		trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
		trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates

		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

		// Validate the state using the default validator
		substart = time.Now()
		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, err
		}
		proctime := time.Since(start)

		// Update the metrics touched during block validation
		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them

		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))

		// Write the block to the chain and get the status.
		substart = time.Now()
		status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
		atomic.StoreUint32(&followupInterrupt, 1)
		if err != nil {
			return it.index, err
		}

		// Update the metrics touched during block commit
		accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
		storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
		snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them

		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
		blockInsertTimer.UpdateSince(start)

		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())

		default:
			// This in theory is impossible, but let's be nice to our future selves
			// and leave a log, instead of trying to track down block imports that
			// don't emit logs.
			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
		stats.processed++
		stats.usedGas += usedGas

		dirty, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, dirty)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && errors.Is(err, consensus.ErrFutureBlock) {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, err
		}
		block, err = it.next()

		for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()

	return it.index, err
}
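// Illustrative sketch (not part of the original source): the cancellation
// handshake between insertChain and the prefetcher above. The importer flips
// a shared flag with an atomic store once the real execution has finished (or
// failed), and the speculative goroutine polls the flag between units of
// work. Names here are hypothetical.
func examplePrefetchInterrupt(speculativeWork func() bool) {
	var interrupt uint32
	done := make(chan struct{})
	go func() {
		defer close(done)
		for speculativeWork() { // returns false when there is nothing left to warm up
			if atomic.LoadUint32(&interrupt) == 1 {
				return // the importer won the race, abandon the warm-up
			}
		}
	}()
	atomic.StoreUint32(&interrupt, 1) // real execution finished first
	<-done
}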
// insertSideChain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeds the current chain's.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock()
	)
	// The first sidechain block's error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other error means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.NumberU64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block whose
				// state has been pruned.
				//
				// Collect the TD of the block. Since we know it's a canon one,
				// we can get it directly, and not (like further below) use
				// the parent and then add the block on top
				externTd = bc.GetTd(block.Hash(), block.NumberU64())
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exists! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, errors.New("sidechain ghost-state attack")
			}
		}
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
				return it.index, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to the database. The loop
	// either ended on some other error, or all blocks were processed. If there was
	// some other error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to re-import the
	// previous blocks to regenerate the required state.
	localTd := bc.GetTd(current.Hash(), current.NumberU64())
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
		return it.index, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, err := bc.insertChain(blocks, false); err != nil {
				return 0, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if bc.insertStopped() {
				log.Debug("Abort during blocks processing")
				return 0, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil
}
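// Illustrative sketch (not part of the original source): how a sidechain's
// total difficulty is accumulated above. The TD of a chain head is its
// parent's TD plus the difficulty of every block on top; the fork only wins
// if this sum exceeds the local head's TD. The function name is hypothetical.
func exampleSidechainTd(forkPointTd *big.Int, sidechain types.Blocks) *big.Int {
	td := new(big.Int).Set(forkPointTd) // TD of the common ancestor
	for _, block := range sidechain {
		td.Add(td, block.Difficulty()) // each block contributes its own difficulty
	}
	return td
}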
// reorg takes two blocks, an old chain and a new chain, and reconstructs the
// blocks, inserting them to be part of the new canonical chain. It accumulates
// potentially missing transactions and posts an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block

		deletedTxs types.Transactions
		addedTxs   types.Transactions

		deletedLogs [][]*types.Log
		rebirthLogs [][]*types.Log

		// collectLogs collects the logs that were generated or removed during
		// the processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted or reborn.
		collectLogs = func(hash common.Hash, removed bool) {
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)

			var logs []*types.Log
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					l := *log
					if removed {
						l.Removed = true
					}
					logs = append(logs, &l)
				}
			}
			if len(logs) > 0 {
				if removed {
					deletedLogs = append(deletedLogs, logs)
				} else {
					rebirthLogs = append(rebirthLogs, logs)
				}
			}
		}
		// mergeLogs returns a merged log slice with the specified sort order.
		mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
			var ret []*types.Log
			if reverse {
				for i := len(logs) - 1; i >= 0; i-- {
					ret = append(ret, logs[i]...)
				}
			} else {
				for i := 0; i < len(logs); i++ {
					ret = append(ret, logs[i]...)
				}
			}
			return ret
		}
	)
	// Reduce the longer chain to the same number as the shorter one
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// Old chain is longer, gather all transactions and logs as deleted ones
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash(), true)
		}
	} else {
		// New chain is longer, stash all blocks away for subsequent insertion
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("invalid new chain")
	}
	// Both sides of the reorg are at the same number, reduce both until the common
	// ancestor is found
	for {
		// If the common ancestor was found, bail out
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		// Remove an old block as well as stash away a new block
		oldChain = append(oldChain, oldBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash(), true)

		newChain = append(newChain, newBlock)

		// Step back with both chains
		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("invalid old chain")
		}
		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if newBlock == nil {
			return fmt.Errorf("invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Info
		msg := "Chain reorg detected"
		if len(oldChain) > 63 {
			msg = "Large chain reorg detected"
			logFn = log.Warn
		}
		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
		blockReorgAddMeter.Mark(int64(len(newChain)))
		blockReorgDropMeter.Mark(int64(len(oldChain)))
		blockReorgMeter.Mark(1)
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain (except the head block), in reverse order so that
	// blocks are written in the proper incremental order.
	for i := len(newChain) - 1; i >= 1; i-- {
		// Insert the block in the canonical way, re-writing history
		bc.writeHeadBlock(newChain[i])

		// Collect reborn logs due to chain reorg
		collectLogs(newChain[i].Hash(), false)

		// Collect the newly added transactions.
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// Delete useless indexes right now. This includes the non-canonical
	// transaction indexes and the canonical chain indexes above the new head.
	indexesBatch := bc.db.NewBatch()
	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
		rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
	}
	// Delete any canonical number assignments above the new head
	number := bc.CurrentBlock().NumberU64()
	for i := number + 1; ; i++ {
		hash := rawdb.ReadCanonicalHash(bc.db, i)
		if hash == (common.Hash{}) {
			break
		}
		rawdb.DeleteCanonicalHash(indexesBatch, i)
	}
	if err := indexesBatch.Write(); err != nil {
		log.Crit("Failed to delete useless indexes", "err", err)
	}
	// If any logs need to be fired, do it now. In theory we could avoid firing
	// these events if there is nothing to announce, but realistically that only
	// ever happens if we're reorging empty blocks, which will only happen on idle
	// networks where performance is not an issue either way.
	if len(deletedLogs) > 0 {
		bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
	}
	if len(rebirthLogs) > 0 {
		bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
	}
	if len(oldChain) > 0 {
		for i := len(oldChain) - 1; i >= 0; i-- {
			bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
		}
	}
	return nil
}
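// Illustrative sketch (not part of the original source): the semantics of
// types.TxDifference as used above. Only transactions that live exclusively
// on the old chain lose their lookup entries; anything re-included by the new
// chain keeps its index. The function name is hypothetical.
func exampleDroppedTxs(deleted, added types.Transactions) types.Transactions {
	inAdded := make(map[common.Hash]struct{}, len(added))
	for _, tx := range added {
		inAdded[tx.Hash()] = struct{}{}
	}
	var dropped types.Transactions
	for _, tx := range deleted {
		if _, ok := inAdded[tx.Hash()]; !ok {
			dropped = append(dropped, tx) // on the old chain only, unindex it
		}
	}
	return dropped
}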
func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// maintainTxIndex is responsible for the construction and deletion of the
// transaction index.
//
// The user can use the flag `txlookuplimit` to specify a "recentness" block
// height, below which ancient tx indices get deleted. If `txlookuplimit` is 0,
// all tx indices are retained.
//
// The user can adjust the txlookuplimit value for each launch after a fast
// sync; Geth will automatically construct the missing indices and delete the
// extra ones.
func (bc *BlockChain) maintainTxIndex(ancients uint64) {
	// Before starting the actual maintenance, we need to handle a special case,
	// where the user might init Geth with an external ancient database. If so, we
	// need to reindex all necessary transactions before starting to process any
	// pruning requests.
	if ancients > 0 {
		var from = uint64(0)
		if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit {
			from = ancients - bc.txLookupLimit
		}
		rawdb.IndexTransactions(bc.db, from, ancients)
	}
	// indexBlocks reindexes or unindexes transactions depending on user configuration
	indexBlocks := func(tail *uint64, head uint64, done chan struct{}) {
		defer func() { done <- struct{}{} }()

		// If the user just upgraded Geth to a new version which supports transaction
		// index pruning, write the new tail and remove anything older.
		if tail == nil {
			if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
				// Nothing to delete, write the tail and return
				rawdb.WriteTxIndexTail(bc.db, 0)
			} else {
				// Prune all stale tx indices and record the tx index tail
				rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1)
			}
			return
		}
		// If a previous indexing existed, make sure that we fill in any missing entries
		if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
			if *tail > 0 {
				rawdb.IndexTransactions(bc.db, 0, *tail)
			}
			return
		}
		// Update the transaction index to the new chain state
		if head-bc.txLookupLimit+1 < *tail {
			// Reindex a part of the missing indices and rewind the index tail to HEAD-limit
			rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail)
		} else {
			// Unindex a part of the stale indices and forward the index tail to HEAD-limit
			rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1)
		}
	}
	// Any reindexing done, start listening to chain events and moving the index window
	var (
		done   chan struct{}                  // Non-nil if a background unindexing or reindexing routine is active.
		headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
	)
	sub := bc.SubscribeChainHeadEvent(headCh)
	if sub == nil {
		return
	}
	defer sub.Unsubscribe()

	for {
		select {
		case head := <-headCh:
			if done == nil {
				done = make(chan struct{})
				go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
			}
		case <-done:
			done = nil
		case <-bc.quit:
			return
		}
	}
}
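// Illustrative sketch (not part of the original source): the tail that
// indexBlocks above converges on after each head event. With no limit, or a
// chain shorter than the limit, the whole chain stays indexed (tail 0);
// otherwise only the most recent `txLookupLimit` blocks keep their indices.
// The function name is hypothetical.
func exampleTargetIndexTail(head, txLookupLimit uint64) uint64 {
	if txLookupLimit == 0 || head < txLookupLimit {
		return 0 // index everything from genesis
	}
	return head - txLookupLimit + 1 // keep exactly txLookupLimit indexed blocks
}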
// BadBlocks returns a list of the last 'bad blocks' that the client has seen
// on the network.
func (bc *BlockChain) BadBlocks() []*types.Block {
	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if blk, exist := bc.badBlocks.Peek(hash); exist {
			block := blk.(*types.Block)
			blocks = append(blocks, block)
		}
	}
	return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)

	var receiptString string
	for i, receipt := range receipts {
		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v

Number: %v
Hash: 0x%x
%v

Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return
// the index number of the failing header as well as an error describing what
// went wrong.
//
// The checkFreq parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is that some of
// the header retrieval mechanisms already need to verify nonces, and nonces
// can also be verified sparsely, without checking each one.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()

	whFunc := func(header *types.Header) error {
		_, err := bc.hc.WriteHeader(header)
		return err
	}
	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from
// the database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching
// it if found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetCanonicalHash returns the canonical hash for a given block number.
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
	return bc.hc.GetCanonicalHash(number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// GetTransactionLookup retrieves the lookup entry associated with the given
// transaction hash from the cache or database.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
	// Short circuit if the txlookup is already in the cache, retrieve otherwise
	if lookup, exist := bc.txLookupCache.Get(hash); exist {
		return lookup.(*rawdb.LegacyTxLookupEntry)
	}
	tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
	if tx == nil {
		return nil
	}
	lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
	bc.txLookupCache.Add(hash, lookup)
	return lookup
}

// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
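// Illustrative sketch (not part of the original source): using GetAncestor to
// walk back from a head block. ancestor == 2 asks for the grandparent; the
// maxNonCanonical budget (the value here is arbitrary) bounds how many
// non-canonical blocks may be checked one by one before the walk falls back
// onto the canonical chain. The function name is hypothetical.
func exampleGrandparent(bc *BlockChain, head *types.Block) (common.Hash, uint64) {
	maxNonCanonical := uint64(64) // assumed per-call budget
	return bc.GetAncestor(head.Hash(), head.NumberU64(), 2, &maxNonCanonical)
}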
// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
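// Illustrative sketch (not part of the original source): consuming the feeds
// exposed above. The channel is buffered so a slow consumer does not stall
// the feed, and the subscription is released on exit. The function name is
// hypothetical.
func exampleWatchHead(bc *BlockChain) {
	heads := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(heads)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-heads:
			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case err := <-sub.Err(): // closed on Unsubscribe or feed teardown
			if err != nil {
				log.Warn("Head subscription failed", "err", err)
			}
			return
		}
	}
}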