gitee.com/liu-zhao234568/cntest@v1.0.0/core/blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"gitee.com/liu-zhao234568/cntest/common"
	"gitee.com/liu-zhao234568/cntest/common/mclock"
	"gitee.com/liu-zhao234568/cntest/common/prque"
	"gitee.com/liu-zhao234568/cntest/consensus"
	"gitee.com/liu-zhao234568/cntest/core/rawdb"
	"gitee.com/liu-zhao234568/cntest/core/state"
	"gitee.com/liu-zhao234568/cntest/core/state/snapshot"
	"gitee.com/liu-zhao234568/cntest/core/types"
	"gitee.com/liu-zhao234568/cntest/core/vm"
	"gitee.com/liu-zhao234568/cntest/ethdb"
	"gitee.com/liu-zhao234568/cntest/event"
	"gitee.com/liu-zhao234568/cntest/log"
	"gitee.com/liu-zhao234568/cntest/metrics"
	"gitee.com/liu-zhao234568/cntest/params"
	"gitee.com/liu-zhao234568/cntest/rlp"
	"gitee.com/liu-zhao234568/cntest/trie"
	lru "github.com/hashicorp/golang-lru"
)

var (
	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)

	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)

	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	blockReorgMeter         = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
	blockReorgAddMeter      = metrics.NewRegisteredMeter("chain/reorg/add", nil)
	blockReorgDropMeter     = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
	blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)

	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)

	errInsertionInterrupted = errors.New("insertion is interrupted")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	receiptsCacheLimit  = 32
	txLookupCacheLimit  = 1024
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	TriesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//   * the `Bloom` field of receipt is deleted
	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//   The following incompatible database changes were added:
	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//     receipts' corresponding block
	// - Version 6
	//   The following incompatible database changes were added:
	//   * Transaction lookup information stores the corresponding block number instead of block hash
	// - Version 7
	//   The following incompatible database changes were added:
	//   * Use freezer as the ancient database to maintain all ancient data
	// - Version 8
	//   The following incompatible database changes were added:
	//   * New scheme for contract code in order to separate the codes and trie nodes
	BlockChainVersion uint64 = 8
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanJournal    string        // Disk journal for saving clean cache entries.
	TrieCleanRejournal  time.Duration // Time interval to dump clean cache to disk periodically
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory
	Preimages           bool          // Whether to store preimage of trie key to the disk

	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}

// defaultCacheConfig are the default caching values if none are specified by the
// user (also used during testing).
var defaultCacheConfig = &CacheConfig{
	TrieCleanLimit: 256,
	TrieDirtyLimit: 256,
	TrieTimeLimit:  5 * time.Minute,
	SnapshotLimit:  256,
	SnapshotWait:   true,
}
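
// newArchiveCacheConfig is an illustrative sketch (added for exposition, not
// part of the original file) showing how the fields above combine for an
// archive-style node: trie write caching and GC are disabled, so every
// block's state trie is flushed straight to disk.
func newArchiveCacheConfig() *CacheConfig {
	return &CacheConfig{
		TrieCleanLimit:    256,
		TrieDirtyLimit:    256,
		TrieDirtyDisabled: true, // archive mode: persist every state
		TrieTimeLimit:     5 * time.Minute,
		SnapshotLimit:     256,
		SnapshotWait:      true,
	}
}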

// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the
// state is done in the second part of the Validator. Failing results in
// aborting of the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one, whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	snaps  *snapshot.Tree // Snapshot tree for fast trie leaf access
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	// txLookupLimit is the maximum number of blocks from head whose tx indices
	// are reserved:
	//  * 0:   means no limit and regenerate any missing indexes
	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
	//  * nil: disable tx reindexer/deleter, but still index new blocks
	txLookupLimit uint64

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock

	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit          chan struct{}  // blockchain quit channel
	wg            sync.WaitGroup // chain processing wait group for shutting down
	running       int32          // 0 if chain is running, 1 when stopped
	procInterrupt int32          // interrupt signaler for block processing

	engine     consensus.Engine
	validator  Validator // Block and state validator interface
	prefetcher Prefetcher
	processor  Processor // Block transaction processor interface
	vmConfig   vm.Config

	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = defaultCacheConfig
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	txLookupCache, _ := lru.New(txLookupCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)

	bc := &BlockChain{
		chainConfig: chainConfig,
		cacheConfig: cacheConfig,
		db:          db,
		triegc:      prque.New(nil),
		stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
			Cache:     cacheConfig.TrieCleanLimit,
			Journal:   cacheConfig.TrieCleanJournal,
			Preimages: cacheConfig.Preimages,
		}),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		txLookupCache:  txLookupCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}

	var nilBlock *types.Block
	bc.currentBlock.Store(nilBlock)
	bc.currentFastBlock.Store(nilBlock)

	// Initialize the chain with ancient data if it isn't empty.
	var txIndexBlock uint64

	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
		// If ancient database is not empty, reconstruct all missing
		// indices in the background.
		frozen, _ := bc.db.Ancients()
		if frozen > 0 {
			txIndexBlock = frozen
		}
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Make sure the state associated with the block is available
	head := bc.CurrentBlock()
	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
		// Head state is missing, before the state recovery, find out the
		// disk layer point of snapshot (if it's enabled). Make sure the
		// rewound point is lower than disk layer.
		var diskRoot common.Hash
		if bc.cacheConfig.SnapshotLimit > 0 {
			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
		}
		if diskRoot != (common.Hash{}) {
			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)

			snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
			if err != nil {
				return nil, err
			}
			// Chain rewound, persist old snapshot number to indicate recovery procedure
			if snapDisk != 0 {
				rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
			}
		} else {
			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
			if err := bc.SetHead(head.NumberU64()); err != nil {
				return nil, err
			}
		}
	}
	// Ensure that a previous crash in SetHead doesn't leave extra ancients
	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
			needRewind = true
			low = fullBlock.NumberU64()
		}
		// In fast sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated, truncate the
		// extra data here.
		fastBlock := bc.CurrentFastBlock()
		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
			needRewind = true
			if fastBlock.NumberU64() < low || low == 0 {
				low = fastBlock.NumberU64()
			}
		}
		if needRewind {
			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
			if err := bc.SetHead(low); err != nil {
				return nil, err
			}
		}
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (ethash cache or clique voting snapshot). Might as well do
	// it in advance.
	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
					return nil, err
				}
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Load any existing snapshot, regenerating it if loading failed
	if bc.cacheConfig.SnapshotLimit > 0 {
		// If the chain was rewound past the snapshot persistent layer (causing
		// a recovery block number to be persisted to disk), check if we're still
		// in recovery mode and in that case, don't invalidate the snapshot on a
		// head mismatch.
		var recover bool

		head := bc.CurrentBlock()
		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
			log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
			recover = true
		}
		bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
	}
	// Take ownership of this particular state
	go bc.update()
	if txLookupLimit != nil {
		bc.txLookupLimit = *txLookupLimit

		bc.wg.Add(1)
		go bc.maintainTxIndex(txIndexBlock)
	}
	// If periodic cache journal is required, spin it up.
	if bc.cacheConfig.TrieCleanRejournal > 0 {
		if bc.cacheConfig.TrieCleanRejournal < time.Minute {
			log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
			bc.cacheConfig.TrieCleanRejournal = time.Minute
		}
		triedb := bc.stateCache.TrieDB()
		bc.wg.Add(1)
		go func() {
			defer bc.wg.Done()
			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
		}()
	}
	return bc, nil
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
	return &bc.vmConfig
}

// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plug the ancient
// database into the node seamlessly.
func (bc *BlockChain) empty() bool {
	genesis := bc.genesisBlock.Hash()
	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
		if hash != genesis {
			return false
		}
	}
	return true
}
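
// exampleOpenChain is an illustrative sketch (added for exposition, not part
// of the original file) of wiring NewBlockChain together: a nil cache config
// selects defaultCacheConfig, and a nil txLookupLimit disables the index
// pruner while still indexing new blocks. The database and engine arguments
// are assumed to be supplied by the caller.
func exampleOpenChain(db ethdb.Database, engine consensus.Engine) (*BlockChain, error) {
	return NewBlockChain(db, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
}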

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)
	headBlockGauge.Update(int64(currentBlock.NumberU64()))

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))

	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
			headFastBlockGauge.Update(int64(block.NumberU64()))
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
		log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
	}
	return nil
}

// SetHead rewinds the local chain to a new head. Depending on whether the node
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
	_, err := bc.SetHeadBeyondRoot(head, common.Hash{})
	return err
}
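
// exampleRewind is an illustrative sketch (added for exposition, not part of
// the original file): SetHead is the plain rewind entry point, delegating to
// SetHeadBeyondRoot with an empty root so no state-root threshold applies.
func exampleRewind(bc *BlockChain, target uint64) error {
	// Rewinds headers, bodies and chain markers down to `target`, deleting
	// as little data as consistency allows.
	return bc.SetHead(target)
}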

// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
// persistent disk layer. Depending on whether the node was fast synced or full, and
// in which state, the method will try to delete minimal data from disk whilst
// retaining chain consistency.
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Track the block number of the requested root hash
	var rootNumber uint64 // (no root == always 0)

	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
	// current freezer limit to start nuking if underflown
	pivot := rawdb.ReadLastPivotNumber(bc.db)
	frozen, _ := bc.db.Ancients()

	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
		// Rewind the block chain, ensuring we don't end up with a stateless head
		// block. Note, depth equality is permitted to allow using SetHead as a
		// chain reparation mechanism without deleting any data!
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			if newHeadBlock == nil {
				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
				newHeadBlock = bc.genesisBlock
			} else {
				// Block exists, keep rewinding until we find one with state,
				// and keep rewinding until we exceed the optional threshold
				// root hash
				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)

				for {
					// If a root threshold was requested but not yet crossed, check
					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
					}
					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
							parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
							if parent != nil {
								newHeadBlock = parent
								continue
							}
							log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
							newHeadBlock = bc.genesisBlock
						} else {
							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
							newHeadBlock = bc.genesisBlock
						}
					}
					if beyondRoot || newHeadBlock.NumberU64() == 0 {
						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
						break
					}
					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
				}
			}
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update in-memory markers directly.
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
		}
		// Rewind the fast block in a simpleton way to the target head
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If either blocks reached nil, reset to the genesis state
			if newHeadFastBlock == nil {
				newHeadFastBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update in-memory markers directly.
			bc.currentFastBlock.Store(newHeadFastBlock)
			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
		}
		head := bc.CurrentBlock().NumberU64()

		// If SetHead underflowed the freezer threshold and the block processing
		// intent afterwards is full block importing, delete the chain segment
		// between the stateful-block and the sethead target.
		var wipe bool
		if head+1 < frozen {
			wipe = pivot == nil || head >= *pivot
		}
		return head, wipe // Only force wipe if full synced
	}
	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// Truncate all relative data (header, total difficulty, body, receipt
			// and canonical hash) from ancient store.
			if err := bc.db.TruncateAncients(num); err != nil {
				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
			}
			// Remove the hash <-> number mapping from the active store.
			rawdb.DeleteHeaderNumber(db, hash)
		} else {
			// Remove relative body and receipts from the active store.
			// The header, total difficulty and canonical hash will be
			// removed in the hc.SetHead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, bloombits, etc
	}
	// If SetHead was only called as a chain reparation method, try to skip
	// touching the header chain altogether, unless the freezer is broken
	if block := bc.CurrentBlock(); block.NumberU64() == head {
		if target, force := updateFn(bc.db, block.Header()); force {
			bc.hc.SetHead(target, updateFn, delFn)
		}
	} else {
		// Rewind the chain to the requested head and keep going backwards until a
		// block with a state is found or fast sync pivot is passed
		log.Warn("Rewinding blockchain", "target", head)
		bc.hc.SetHead(head, updateFn, delFn)
	}
	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	return rootNumber, bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// regardless of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exist
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x..]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
		return err
	}
	// If everything checks out, manually set the head block
	bc.chainmu.Lock()
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
	bc.chainmu.Unlock()

	// Destroy any existing state snapshot and regenerate it in the background,
	// also resuming the normal maintenance of any previously paused snapshot.
	if bc.snaps != nil {
		bc.snaps.Rebuild(block.Root())
	}
	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// Snapshots returns the blockchain snapshot tree.
func (bc *BlockChain) Snapshots() *snapshot.Tree {
	return bc.snaps
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache, bc.snaps)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
	return bc.stateCache
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}
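
// exampleBalanceAt is an illustrative sketch (added for exposition, not part
// of the original file) of reading historical state through StateAt: resolve
// a canonical block, open the state at its root, then query an account.
func exampleBalanceAt(bc *BlockChain, number uint64, addr common.Address) (*big.Int, error) {
	block := bc.GetBlockByNumber(number)
	if block == nil {
		return nil, fmt.Errorf("block #%d not found", number)
	}
	statedb, err := bc.StateAt(block.Root())
	if err != nil {
		return nil, err // state likely pruned beyond the TriesInMemory window
	}
	return statedb.GetBalance(addr), nil
}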

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	batch := bc.db.NewBatch()
	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
	rawdb.WriteBlock(batch, genesis)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.writeHeadBlock(genesis)

	// Lastly, update all in-memory chain markers
	bc.genesisBlock = genesis
	bc.currentBlock.Store(bc.genesisBlock)
	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)
	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	return nil
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}

// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	batch := bc.db.NewBatch()
	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
	rawdb.WriteTxLookupEntriesByBlock(batch, block)
	rawdb.WriteHeadBlockHash(batch, block.Hash())

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		rawdb.WriteHeadHeaderHash(batch, block.Hash())
		rawdb.WriteHeadFastBlockHash(batch, block.Hash())
	}
	// Flush the whole batch into the disk, exit the node if failed
	if err := batch.Write(); err != nil {
		log.Crit("Failed to update chain indexes and markers", "err", err)
	}
	// Update all in-memory chain markers in the last step
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		bc.currentFastBlock.Store(block)
		headFastBlockGauge.Update(int64(block.NumberU64()))
	}
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBody(bc.db, hash, *number)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	return rawdb.HasBody(bc.db, hash, number)
}

// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
	if !bc.HasBlock(hash, number) {
		return false
	}
	if bc.receiptsCache.Contains(hash) {
		return true
	}
	return rawdb.HasReceipts(bc.db, hash, number)
}
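
// exampleDecodeBody is an illustrative sketch (added for exposition, not part
// of the original file): the RLP blob returned by GetBodyRLP decodes back into
// the same types.Body that GetBody returns, trading one cache for the other.
func exampleDecodeBody(bc *BlockChain, hash common.Hash) (*types.Body, error) {
	raw := bc.GetBodyRLP(hash)
	if raw == nil {
		return nil, errors.New("body not found")
	}
	body := new(types.Body)
	if err := rlp.DecodeBytes(raw, body); err != nil {
		return nil, err
	}
	return body, nil
}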

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and its associated state trie are fully
// present in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
	if receipts == nil {
		return nil
	}
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.ContractCode(common.Hash{}, hash)
}

// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
	type codeReader interface {
		ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
	}
	return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	bc.StopInsert()
	bc.wg.Wait()

	// Ensure that the entirety of the state snapshot is journalled to disk.
	var snapBase common.Hash
	if bc.snaps != nil {
		var err error
		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
			log.Error("Failed to journal state snapshot", "err", err)
		}
	}
	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true, nil); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		if snapBase != (common.Hash{}) {
			log.Info("Writing snapshot state to disk", "root", snapBase)
			if err := triedb.Commit(snapBase, true, nil); err != nil {
				log.Error("Failed to commit recent state trie", "err", err)
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	// Ensure all live cached entries are saved to disk, so that we can skip
	// cache warmup when the node restarts.
	if bc.cacheConfig.TrieCleanJournal != "" {
		triedb := bc.stateCache.TrieDB()
		triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
	}
	log.Info("Blockchain stopped")
}
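
// exampleInterruptibleInsert is an illustrative sketch (added for exposition,
// not part of the original file) of the cooperative interruption pattern the
// insertion paths below rely on: long-running loops poll insertStopped, which
// flips once StopInsert stores the procInterrupt flag.
func (bc *BlockChain) exampleInterruptibleInsert(blocks types.Blocks) (int, error) {
	for i := range blocks {
		if bc.insertStopped() {
			return i, errInsertionInterrupted
		}
		// ... process blocks[i] here ...
	}
	return len(blocks), nil
}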

// StopInsert interrupts all insertion methods, causing them to return
// errInsertionInterrupted as soon as possible. Insertion is permanently
// disabled after calling this method.
func (bc *BlockChain) StopInsert() {
	atomic.StoreInt32(&bc.procInterrupt, 1)
}

// insertStopped returns true after StopInsert has been called.
func (bc *BlockChain) insertStopped() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		sort.Slice(blocks, func(i, j int) bool {
			return blocks[i].NumberU64() < blocks[j].NumberU64()
		})
		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status of a block write.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// truncateAncient rewinds the blockchain to the specified header and deletes all
// data in the ancient store that exceeds the specified header.
func (bc *BlockChain) truncateAncient(head uint64) error {
	frozen, err := bc.db.Ancients()
	if err != nil {
		return err
	}
	// Short circuit if there is no data to truncate in ancient store.
	if frozen <= head+1 {
		return nil
	}
	// Truncate all the data in the freezer beyond the specified head
	if err := bc.db.TruncateAncients(head + 1); err != nil {
		return err
	}
	// Clear out any stale content from the header caches
	bc.hc.headerCache.Purge()
	bc.hc.tdCache.Purge()
	bc.hc.numberCache.Purge()

	// Clear out any stale content from the block caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	log.Info("Rewind ancient data", "number", head)
	return nil
}

// numberHash is just a container for a number and a hash, to represent a block
type numberHash struct {
	number uint64
	hash   common.Hash
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
	// We don't require the chainMu here since we want to maximize the
	// concurrency of header insertion and receipt insertion.
	bc.wg.Add(1)
	defer bc.wg.Done()

	var (
		ancientBlocks, liveBlocks     types.Blocks
		ancientReceipts, liveReceipts []types.Receipts
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 0; i < len(blockChain); i++ {
		if i != 0 {
			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(),
					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
			}
		}
		if blockChain[i].NumberU64() <= ancientLimit {
			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
		} else {
			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		size  = 0
	)
	// updateHead updates the head fast sync block if the inserted blocks are better
	// and returns an indicator whether the inserted blocks are canonical.
	updateHead := func(head *types.Block) bool {
		bc.chainmu.Lock()

		// Rewind may have occurred, skip in that case.
		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
			currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
			if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
				rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
				bc.currentFastBlock.Store(head)
				headFastBlockGauge.Update(int64(head.NumberU64()))
				bc.chainmu.Unlock()
				return true
			}
		}
		bc.chainmu.Unlock()
		return false
	}
	// writeAncient writes blockchain and corresponding receipt chain into ancient store.
	//
	// this function only accepts canonical chain data. All side chain will be reverted
	// eventually.
	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
		var (
			previous = bc.CurrentFastBlock()
			batch    = bc.db.NewBatch()
		)
		// If any error occurs before updating the head or we are inserting a side chain,
		// all the data written this time will be rolled back.
		defer func() {
			if previous != nil {
				if err := bc.truncateAncient(previous.NumberU64()); err != nil {
					log.Crit("Truncate ancient store failed", "err", err)
				}
			}
		}()
		var deleted []*numberHash
		for i, block := range blockChain {
			// Short circuit insertion if shutting down or processing failed
			if bc.insertStopped() {
				return 0, errInsertionInterrupted
			}
			// Short circuit insertion if it is required (used in testing only)
			if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
				return i, errors.New("insertion is terminated for testing purpose")
			}
			// Short circuit if the owner header is unknown
			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
			}
			if block.NumberU64() == 1 {
				// Make sure to write the genesis into the freezer
				if frozen, _ := bc.db.Ancients(); frozen == 0 {
					h := rawdb.ReadCanonicalHash(bc.db, 0)
					b := rawdb.ReadBlock(bc.db, h, 0)
					size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, 0, bc.chainConfig), rawdb.ReadTd(bc.db, h, 0))
					log.Info("Wrote genesis to ancients")
				}
			}
			// Flush data into ancient database.
			size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))

			// Write tx indices if any condition is satisfied:
			// * If the user requires all tx indices to be reserved (txlookuplimit=0)
			// * If all ancient tx indices are required to be reserved (txlookuplimit is even higher than ancientlimit)
			// * If the block number is large enough to be regarded as a recent block
			// It means blocks below ancientLimit-txlookupLimit won't be indexed.
			//
			// But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
			// an external ancient database, during the setup, blockchain will start
			// a background routine to re-index all indices in the
			// [ancients - txlookupLimit, ancients) range. In this case, all tx indices
			// of newly imported blocks should be generated.
			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
				rawdb.WriteTxLookupEntriesByBlock(batch, block)
			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
				rawdb.WriteTxLookupEntriesByBlock(batch, block)
			}
			stats.processed++
		}
		// Flush all tx-lookup index data.
		size += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
		batch.Reset()

		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
		if err := bc.db.Sync(); err != nil {
			return 0, err
		}
		if !updateHead(blockChain[len(blockChain)-1]) {
			return 0, errors.New("side blocks can't be accepted as the ancient chain data")
		}
		previous = nil // disable rollback explicitly

		// Wipe out canonical block data.
		for _, nh := range deleted {
			rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
			rawdb.DeleteCanonicalHash(batch, nh.number)
		}
		for _, block := range blockChain {
			// Always keep genesis block in active database.
			if block.NumberU64() != 0 {
				rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
				rawdb.DeleteCanonicalHash(batch, block.NumberU64())
			}
		}
		if err := batch.Write(); err != nil {
			return 0, err
		}
		batch.Reset()

		// Wipe out side chain too.
		for _, nh := range deleted {
			for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
				rawdb.DeleteBlock(batch, hash, nh.number)
			}
		}
		for _, block := range blockChain {
			// Always keep genesis block in active database.
			if block.NumberU64() != 0 {
				for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
					rawdb.DeleteBlock(batch, hash, block.NumberU64())
				}
			}
		}
		if err := batch.Write(); err != nil {
			return 0, err
		}
		return 0, nil
	}
	// writeLive writes blockchain and corresponding receipt chain into active store.
	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
		skipPresenceCheck := false
		batch := bc.db.NewBatch()
		for i, block := range blockChain {
			// Short circuit insertion if shutting down or processing failed
			if bc.insertStopped() {
				return 0, errInsertionInterrupted
			}
			// Short circuit if the owner header is unknown
			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
			}
			if !skipPresenceCheck {
				// Ignore if the entire data is already known
				if bc.HasBlock(block.Hash(), block.NumberU64()) {
					stats.ignored++
					continue
				} else {
					// If block N is not present, neither are the later blocks.
					// This should be true, but if we are mistaken, the shortcut
					// here will only cause overwriting of some existing data
					skipPresenceCheck = true
				}
			}
			// Write all the data out into the database
			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
			rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed

			// Write everything that belongs to the blocks into the database, so that
			// we can ensure all components of the body are complete (body, receipts,
			// tx indexes)
			if batch.ValueSize() >= ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return 0, err
				}
				size += batch.ValueSize()
				batch.Reset()
			}
			stats.processed++
		}
		// Write everything that belongs to the blocks into the database, so that
		// we can ensure all components of the body are complete (body, receipts,
		// tx indexes)
		if batch.ValueSize() > 0 {
			size += batch.ValueSize()
			if err := batch.Write(); err != nil {
				return 0, err
			}
		}
		updateHead(blockChain[len(blockChain)-1])
		return 0, nil
	}
	// Write downloaded chain data and corresponding receipt chain data
	if len(ancientBlocks) > 0 {
		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}
	// Write the tx index tail (block number from where we index) before writing any live blocks
	if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
		// The tx index tail can only be one of the following two options:
		// * 0: all ancient blocks have been indexed
		// * ancient-limit: the indices of blocks before ancient-limit are ignored
		if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
				rawdb.WriteTxIndexTail(bc.db, 0)
			} else {
				rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
			}
		}
	}
	if len(liveBlocks) > 0 {
		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}

	head := blockChain[len(blockChain)-1]
	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
		"size", common.StorageSize(size),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}
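
// exampleTxIndexTail is an illustrative sketch (added for exposition, not
// part of the original file) of the tail rule used above when writing the tx
// index: with txLookupLimit == 0 (or a limit covering the whole ancient
// range) every block keeps its index, otherwise only the most recent
// txLookupLimit blocks below ancientLimit do.
func exampleTxIndexTail(txLookupLimit, ancientLimit uint64) uint64 {
	if txLookupLimit == 0 || ancientLimit <= txLookupLimit {
		return 0 // index every block from genesis
	}
	return ancientLimit - txLookupLimit // indices below this number are dropped
}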
1394 func (bc *BlockChain) SetTxLookupLimit(limit uint64) { 1395 bc.txLookupLimit = limit 1396 } 1397 1398 // TxLookupLimit retrieves the txlookup limit used by blockchain to prune 1399 // stale transaction indices. 1400 func (bc *BlockChain) TxLookupLimit() uint64 { 1401 return bc.txLookupLimit 1402 } 1403 1404 var lastWrite uint64 1405 1406 // writeBlockWithoutState writes only the block and its metadata to the database, 1407 // but does not write any state. This is used to construct competing side forks 1408 // up to the point where they exceed the canonical total difficulty. 1409 func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) { 1410 bc.wg.Add(1) 1411 defer bc.wg.Done() 1412 1413 batch := bc.db.NewBatch() 1414 rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td) 1415 rawdb.WriteBlock(batch, block) 1416 if err := batch.Write(); err != nil { 1417 log.Crit("Failed to write block into disk", "err", err) 1418 } 1419 return nil 1420 } 1421 1422 // writeKnownBlock updates the head block flag with a known block 1423 // and introduces a chain reorg if necessary. 1424 func (bc *BlockChain) writeKnownBlock(block *types.Block) error { 1425 bc.wg.Add(1) 1426 defer bc.wg.Done() 1427 1428 current := bc.CurrentBlock() 1429 if block.ParentHash() != current.Hash() { 1430 if err := bc.reorg(current, block); err != nil { 1431 return err 1432 } 1433 } 1434 bc.writeHeadBlock(block) 1435 return nil 1436 } 1437 1438 // WriteBlockWithState writes the block and all associated state to the database. 1439 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1440 bc.chainmu.Lock() 1441 defer bc.chainmu.Unlock() 1442 1443 return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent) 1444 } 1445 1446 // writeBlockWithState writes the block and all associated state to the database, 1447 // but expects the chain mutex to be held. 1448 func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1449 bc.wg.Add(1) 1450 defer bc.wg.Done() 1451 1452 // Calculate the total difficulty of the block 1453 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 1454 if ptd == nil { 1455 return NonStatTy, consensus.ErrUnknownAncestor 1456 } 1457 // Make sure no inconsistent state is leaked during insertion 1458 currentBlock := bc.CurrentBlock() 1459 localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 1460 externTd := new(big.Int).Add(block.Difficulty(), ptd) 1461 1462 // Irrespective of the canonical status, write the block itself to the database. 1463 // 1464 // Note that all the components of the block (td, hash->number map, header, body, receipts) 1465 // should be written atomically. blockBatch is used to contain all components. 1466 blockBatch := bc.db.NewBatch() 1467 rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) 1468 rawdb.WriteBlock(blockBatch, block) 1469 rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) 1470 rawdb.WritePreimages(blockBatch, state.Preimages()) 1471 if err := blockBatch.Write(); err != nil { 1472 log.Crit("Failed to write block into disk", "err", err) 1473 } 1474 // Commit all cached state changes into underlying memory database.
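// Note (added explanation, not in the original file): the boolean passed to
// Commit below is the EIP-158/161 "delete empty objects" switch; past that
// fork, accounts left empty (zero nonce, zero balance, no code) are removed
// from the state trie rather than persisted.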
1475 root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) 1476 if err != nil { 1477 return NonStatTy, err 1478 } 1479 triedb := bc.stateCache.TrieDB() 1480 1481 // If we're running an archive node, always flush 1482 if bc.cacheConfig.TrieDirtyDisabled { 1483 if err := triedb.Commit(root, false, nil); err != nil { 1484 return NonStatTy, err 1485 } 1486 } else { 1487 // Full but not archive node, do proper garbage collection 1488 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 1489 bc.triegc.Push(root, -int64(block.NumberU64())) 1490 1491 if current := block.NumberU64(); current > TriesInMemory { 1492 // If we exceeded our memory allowance, flush matured singleton nodes to disk 1493 var ( 1494 nodes, imgs = triedb.Size() 1495 limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 1496 ) 1497 if nodes > limit || imgs > 4*1024*1024 { 1498 triedb.Cap(limit - ethdb.IdealBatchSize) 1499 } 1500 // Find the next state trie we need to commit 1501 chosen := current - TriesInMemory 1502 1503 // If we exceeded our time allowance, flush an entire trie to disk 1504 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 1505 // If the header is missing (canonical chain behind), we're reorging a low 1506 // diff sidechain. Suspend committing until this operation is completed. 1507 header := bc.GetHeaderByNumber(chosen) 1508 if header == nil { 1509 log.Warn("Reorg in progress, trie commit postponed", "number", chosen) 1510 } else { 1511 // If we're exceeding limits but haven't reached a large enough memory gap, 1512 // warn the user that the system is becoming unstable. 1513 if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 1514 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory) 1515 } 1516 // Flush an entire trie and restart the counters 1517 triedb.Commit(header.Root, true, nil) 1518 lastWrite = chosen 1519 bc.gcproc = 0 1520 } 1521 } 1522 // Garbage collect anything below our required write retention 1523 for !bc.triegc.Empty() { 1524 root, number := bc.triegc.Pop() 1525 if uint64(-number) > chosen { 1526 bc.triegc.Push(root, number) 1527 break 1528 } 1529 triedb.Dereference(root.(common.Hash)) 1530 } 1531 } 1532 } 1533 // If the total difficulty is higher than our known, add it to the canonical chain 1534 // Second clause in the if statement reduces the vulnerability to selfish mining. 1535 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1536 reorg := externTd.Cmp(localTd) > 0 1537 currentBlock = bc.CurrentBlock() 1538 if !reorg && externTd.Cmp(localTd) == 0 { 1539 // Split same-difficulty blocks by number, then preferentially select 1540 // the block generated by the local miner as the canonical block.
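// Illustrative note (assumption: neither block is preferred by a local miner
// via shouldPreserve): on an exact TD tie at the same height, the coin flip
// below (mrand.Float64() < 0.5) adopts the incoming block as head with
// probability 1/2, which is the randomised tie-break that blunts the selfish
// mining strategy referenced above.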
1541 if block.NumberU64() < currentBlock.NumberU64() { 1542 reorg = true 1543 } else if block.NumberU64() == currentBlock.NumberU64() { 1544 var currentPreserve, blockPreserve bool 1545 if bc.shouldPreserve != nil { 1546 currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) 1547 } 1548 reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) 1549 } 1550 } 1551 if reorg { 1552 // Reorganise the chain if the parent is not the head block 1553 if block.ParentHash() != currentBlock.Hash() { 1554 if err := bc.reorg(currentBlock, block); err != nil { 1555 return NonStatTy, err 1556 } 1557 } 1558 status = CanonStatTy 1559 } else { 1560 status = SideStatTy 1561 } 1562 // Set new head. 1563 if status == CanonStatTy { 1564 bc.writeHeadBlock(block) 1565 } 1566 bc.futureBlocks.Remove(block.Hash()) 1567 1568 if status == CanonStatTy { 1569 bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs}) 1570 if len(logs) > 0 { 1571 bc.logsFeed.Send(logs) 1572 } 1573 // In theory we should fire a ChainHeadEvent when we inject 1574 // a canonical block, but sometimes we can insert a batch of 1575 // canonical blocks. To avoid firing too many ChainHeadEvents, 1576 // we will fire an accumulated ChainHeadEvent and disable firing 1577 // the event here. 1578 if emitHeadEvent { 1579 bc.chainHeadFeed.Send(ChainHeadEvent{Block: block}) 1580 } 1581 } else { 1582 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1583 } 1584 return status, nil 1585 } 1586 1587 // addFutureBlock checks if the block is within the max allowed window to get 1588 // accepted for future processing, and returns an error if the block is too far 1589 // ahead and was not added. 1590 func (bc *BlockChain) addFutureBlock(block *types.Block) error { 1591 max := uint64(time.Now().Unix() + maxTimeFutureBlocks) 1592 if block.Time() > max { 1593 return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) 1594 } 1595 bc.futureBlocks.Add(block.Hash(), block) 1596 return nil 1597 } 1598 1599 // InsertChain attempts to insert the given batch of blocks into the canonical 1600 // chain or, otherwise, create a fork. If an error is returned, it will return 1601 // the index number of the failing block as well as an error describing what went 1602 // wrong. 1603 // 1604 // After insertion is done, all accumulated events will be fired. 1605 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1606 // Sanity check that we have something meaningful to import 1607 if len(chain) == 0 { 1608 return 0, nil 1609 } 1610 1611 bc.blockProcFeed.Send(true) 1612 defer bc.blockProcFeed.Send(false) 1613 1614 // Remove already known canon-blocks 1615 var ( 1616 block, prev *types.Block 1617 ) 1618 // Do a sanity check that the provided chain is actually ordered and linked 1619 for i := 1; i < len(chain); i++ { 1620 block = chain[i] 1621 prev = chain[i-1] 1622 if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() { 1623 // Chain broke ancestry, log a message (programming error) and skip insertion 1624 log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(), 1625 "parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash()) 1626 1627 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..]
(parent [%x..])", i-1, prev.NumberU64(), 1628 prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4]) 1629 } 1630 } 1631 // Pre-checks passed, start the full block imports 1632 bc.wg.Add(1) 1633 bc.chainmu.Lock() 1634 n, err := bc.insertChain(chain, true) 1635 bc.chainmu.Unlock() 1636 bc.wg.Done() 1637 1638 return n, err 1639 } 1640 1641 // InsertChainWithoutSealVerification works exactly the same as InsertChain, 1642 // except that seal verification is omitted 1643 func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (int, error) { 1644 bc.blockProcFeed.Send(true) 1645 defer bc.blockProcFeed.Send(false) 1646 1647 // Pre-checks passed, start the full block imports 1648 bc.wg.Add(1) 1649 bc.chainmu.Lock() 1650 n, err := bc.insertChain(types.Blocks([]*types.Block{block}), false) 1651 bc.chainmu.Unlock() 1652 bc.wg.Done() 1653 1654 return n, err 1655 } 1656 1657 // insertChain is the internal implementation of InsertChain, which assumes that 1658 // 1) chains are contiguous, and 2) The chain mutex is held. 1659 // 1660 // This method is split out so that import batches that require re-injecting 1661 // historical blocks can do so without releasing the lock, which could lead to 1662 // racy behaviour. If a sidechain import is in progress, and the historic state 1663 // is imported, but then a new canon-head is added before the actual sidechain 1664 // completes, then the historic state could be pruned again. 1665 func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) { 1666 // If the chain is terminating, don't even bother starting up 1667 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1668 return 0, nil 1669 } 1670 // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) 1671 senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) 1672 1673 var ( 1674 stats = insertStats{startTime: mclock.Now()} 1675 lastCanon *types.Block 1676 ) 1677 // Fire a single chain head event if we've progressed the chain 1678 defer func() { 1679 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1680 bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon}) 1681 } 1682 }() 1683 // Start the parallel header verifier 1684 headers := make([]*types.Header, len(chain)) 1685 seals := make([]bool, len(chain)) 1686 1687 for i, block := range chain { 1688 headers[i] = block.Header() 1689 seals[i] = verifySeals 1690 } 1691 abort, results := bc.engine.VerifyHeaders(bc, headers, seals) 1692 defer close(abort) 1693 1694 // Peek the error for the first block to decide the directing import logic 1695 it := newInsertIterator(chain, results, bc.validator) 1696 1697 block, err := it.next() 1698 1699 // Left-trim all the known blocks 1700 if err == ErrKnownBlock { 1701 // First block (and state) is known 1702 // 1. We did a roll-back, and should now do a re-import 1703 // 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot 1704 // from the canonical chain, which has not been verified.
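// A worked example of the trimming below (numbers assumed): with a local head
// TD of 100, a parent TD of 80 and known blocks of difficulty 15 each, externTd
// grows 80 -> 95 -> 110; the first block is skipped (95 <= 100) and the loop
// breaks at 110 > 100, so import resumes from that block.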
1705 // Skip all known blocks that are behind us 1706 var ( 1707 current = bc.CurrentBlock() 1708 localTd = bc.GetTd(current.Hash(), current.NumberU64()) 1709 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil 1710 ) 1711 for block != nil && err == ErrKnownBlock { 1712 externTd = new(big.Int).Add(externTd, block.Difficulty()) 1713 if localTd.Cmp(externTd) < 0 { 1714 break 1715 } 1716 log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash()) 1717 stats.ignored++ 1718 1719 block, err = it.next() 1720 } 1721 // The remaining blocks are still known blocks, the only scenario here is: 1722 // During the fast sync, the pivot point is already submitted but rollback 1723 // happens. Then the node resets the head full block to a lower height via `rollback` 1724 // and leaves a few known blocks in the database. 1725 // 1726 // When the node runs a fast sync again, it can re-import a batch of known blocks via 1727 // `insertChain` while some of them have a higher total difficulty than the current 1728 // head full block (new pivot point). 1729 for block != nil && err == ErrKnownBlock { 1730 log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) 1731 if err := bc.writeKnownBlock(block); err != nil { 1732 return it.index, err 1733 } 1734 lastCanon = block 1735 1736 block, err = it.next() 1737 } 1738 // Falls through to the block import 1739 } 1740 switch { 1741 // First block is pruned, insert as sidechain and reorg only if TD grows enough 1742 case errors.Is(err, consensus.ErrPrunedAncestor): 1743 log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash()) 1744 return bc.insertSideChain(block, it) 1745 1746 // First block is future, shove it (and all children) to the future queue (unknown ancestor) 1747 case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())): 1748 for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) { 1749 log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash()) 1750 if err := bc.addFutureBlock(block); err != nil { 1751 return it.index, err 1752 } 1753 block, err = it.next() 1754 } 1755 stats.queued += it.processed() 1756 stats.ignored += it.remaining() 1757 1758 // If there are any still remaining, mark as ignored 1759 return it.index, err 1760 1761 // Some other error occurred, abort 1762 case err != nil: 1763 bc.futureBlocks.Remove(block.Hash()) 1764 stats.ignored += len(it.chain) 1765 bc.reportBlock(block, nil, err) 1766 return it.index, err 1767 } 1768 // No validation errors for the first block (or chain prefix skipped) 1769 var activeState *state.StateDB 1770 defer func() { 1771 // The chain importer is starting and stopping trie prefetchers. If a bad 1772 // block or other error is hit however, an early return may not properly 1773 // terminate the background threads. This defer ensures that we clean up 1774 // any dangling prefetcher, without deferring each and holding on to live refs.
1775 if activeState != nil { 1776 activeState.StopPrefetcher() 1777 } 1778 }() 1779 1780 for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() { 1781 // If the chain is terminating, stop processing blocks 1782 if bc.insertStopped() { 1783 log.Debug("Abort during block processing") 1784 break 1785 } 1786 // If the header is a banned one, straight out abort 1787 if BadHashes[block.Hash()] { 1788 bc.reportBlock(block, nil, ErrBlacklistedHash) 1789 return it.index, ErrBlacklistedHash 1790 } 1791 // If the block is known (in the middle of the chain), it's a special case for 1792 // Clique blocks where they can share state among each other, so importing an 1793 // older block might complete the state of the subsequent one. In this case, 1794 // just skip the block (we already validated it once fully (and crashed), since 1795 // its header and body were already in the database). 1796 if err == ErrKnownBlock { 1797 logger := log.Debug 1798 if bc.chainConfig.Clique == nil { 1799 logger = log.Warn 1800 } 1801 logger("Inserted known block", "number", block.Number(), "hash", block.Hash(), 1802 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), 1803 "root", block.Root()) 1804 1805 // Special case. Commit the empty receipt slice if we meet the known 1806 // block in the middle. It can only happen in the clique chain. Whenever 1807 // we insert blocks via `insertSideChain`, we only commit `td`, `header` 1808 // and `body` if they are non-existent. Since we don't have receipts without 1809 // re-execution, there is nothing to commit. But if the sidechain is eventually 1810 // adopted as the canonical chain, it needs to be re-executed for the missing 1811 // state; in this special case here (skipped re-execution) we would lose 1812 // the empty receipt entry. 1813 if len(block.Transactions()) == 0 { 1814 rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil) 1815 } else { 1816 log.Error("Please file an issue, skip known block execution without receipt", 1817 "hash", block.Hash(), "number", block.NumberU64()) 1818 } 1819 if err := bc.writeKnownBlock(block); err != nil { 1820 return it.index, err 1821 } 1822 stats.processed++ 1823 1824 // We can assume that logs are empty here, since the only way for consecutive 1825 // Clique blocks to have the same state is if there are no transactions. 1826 lastCanon = block 1827 continue 1828 } 1829 // Retrieve the parent block and its state to execute on top 1830 start := time.Now() 1831 1832 parent := it.previous() 1833 if parent == nil { 1834 parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) 1835 } 1836 statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) 1837 if err != nil { 1838 return it.index, err 1839 } 1840 // Enable prefetching to pull in trie node paths while processing transactions 1841 statedb.StartPrefetcher("chain") 1842 activeState = statedb 1843 1844 // If we have a followup block, run that against the current state to pre-cache 1845 // transactions and probabilistically some of the account/storage trie nodes.
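// The prefetch below uses a shared interrupt flag (an atomic uint32): the
// importer stores 1 into it once the real block finishes (or fails) and the
// prefetcher polls it, so the speculative execution of the followup block can
// be abandoned early instead of running to completion.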
1846 var followupInterrupt uint32 1847 if !bc.cacheConfig.TrieCleanNoPrefetch { 1848 if followup, err := it.peek(); followup != nil && err == nil { 1849 throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) 1850 1851 go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { 1852 bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) 1853 1854 blockPrefetchExecuteTimer.Update(time.Since(start)) 1855 if atomic.LoadUint32(interrupt) == 1 { 1856 blockPrefetchInterruptMeter.Mark(1) 1857 } 1858 }(time.Now(), followup, throwaway, &followupInterrupt) 1859 } 1860 } 1861 // Process block using the parent state as reference point 1862 substart := time.Now() 1863 receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) 1864 if err != nil { 1865 bc.reportBlock(block, receipts, err) 1866 atomic.StoreUint32(&followupInterrupt, 1) 1867 return it.index, err 1868 } 1869 // Update the metrics touched during block processing 1870 accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them 1871 storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them 1872 accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them 1873 storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them 1874 snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them 1875 snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them 1876 triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation 1877 trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates 1878 trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates 1879 1880 blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash) 1881 1882 // Validate the state using the default validator 1883 substart = time.Now() 1884 if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { 1885 bc.reportBlock(block, receipts, err) 1886 atomic.StoreUint32(&followupInterrupt, 1) 1887 return it.index, err 1888 } 1889 proctime := time.Since(start) 1890 1891 // Update the metrics touched during block validation 1892 accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them 1893 storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them 1894 1895 blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash)) 1896 1897 // Write the block to the chain and get the status. 
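// Metric decomposition note (added explanation): the execution and validation
// timers above, and the write timer below, each subtract the trie read, hash
// and commit portions measured by the statedb from their wall time, so every
// phase metric isolates its own work instead of double-counting trie activity.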
1898 substart = time.Now() 1899 status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false) 1900 atomic.StoreUint32(&followupInterrupt, 1) 1901 if err != nil { 1902 return it.index, err 1903 } 1904 // Update the metrics touched during block commit 1905 accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them 1906 storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them 1907 snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them 1908 1909 blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits) 1910 blockInsertTimer.UpdateSince(start) 1911 1912 switch status { 1913 case CanonStatTy: 1914 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), 1915 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), 1916 "elapsed", common.PrettyDuration(time.Since(start)), 1917 "root", block.Root()) 1918 1919 lastCanon = block 1920 1921 // Only count canonical blocks for GC processing time 1922 bc.gcproc += proctime 1923 1924 case SideStatTy: 1925 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), 1926 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1927 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 1928 "root", block.Root()) 1929 1930 default: 1931 // This in theory is impossible, but let's be nice to our future selves and leave 1932 // a log, instead of trying to track down block imports that don't emit logs. 1933 log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(), 1934 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1935 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 1936 "root", block.Root()) 1937 } 1938 stats.processed++ 1939 stats.usedGas += usedGas 1940 1941 dirty, _ := bc.stateCache.TrieDB().Size() 1942 stats.report(chain, it.index, dirty) 1943 } 1944 // Any blocks remaining here? The only ones we care about are the future ones 1945 if block != nil && errors.Is(err, consensus.ErrFutureBlock) { 1946 if err := bc.addFutureBlock(block); err != nil { 1947 return it.index, err 1948 } 1949 block, err = it.next() 1950 1951 for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() { 1952 if err := bc.addFutureBlock(block); err != nil { 1953 return it.index, err 1954 } 1955 stats.queued++ 1956 } 1957 } 1958 stats.ignored += it.remaining() 1959 1960 return it.index, err 1961 } 1962 1963 // insertSideChain is called when an import batch hits upon a pruned ancestor 1964 // error, which happens when a sidechain with a sufficiently old fork-block is 1965 // found. 1966 // 1967 // The method writes all (header-and-body-valid) blocks to disk, then tries to 1968 // switch over to the new chain if the TD exceeds that of the current chain. 1969 func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) { 1970 var ( 1971 externTd *big.Int 1972 current = bc.CurrentBlock() 1973 ) 1974 // The first sidechain block error is already verified to be ErrPrunedAncestor. 1975 // Since we don't import them here, we expect ErrUnknownAncestor for the remaining 1976 // ones. Any other error means that the block is invalid, and should not be written 1977 // to disk.
1978 err := consensus.ErrPrunedAncestor 1979 for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() { 1980 // Check the canonical state root for that number 1981 if number := block.NumberU64(); current.NumberU64() >= number { 1982 canonical := bc.GetBlockByNumber(number) 1983 if canonical != nil && canonical.Hash() == block.Hash() { 1984 // Not a sidechain block, this is a re-import of a canon block which has its state pruned 1985 1986 // Collect the TD of the block. Since we know it's a canon one, 1987 // we can get it directly, and not (like further below) use 1988 // the parent and then add the block on top 1989 externTd = bc.GetTd(block.Hash(), block.NumberU64()) 1990 continue 1991 } 1992 if canonical != nil && canonical.Root() == block.Root() { 1993 // This is most likely a shadow-state attack. When a fork is imported into the 1994 // database, and it eventually reaches a block height which is not pruned, we 1995 // just found that the state already exists! This means that the sidechain block 1996 // refers to a state which already exists in our canon chain. 1997 // 1998 // If left unchecked, we would now proceed importing the blocks, without actually 1999 // having verified the state of the previous blocks. 2000 log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root()) 2001 2002 // If someone legitimately side-mines blocks, they would still be imported as usual. However, 2003 // we cannot risk writing unverified blocks to disk when they obviously target the pruning 2004 // mechanism. 2005 return it.index, errors.New("sidechain ghost-state attack") 2006 } 2007 } 2008 if externTd == nil { 2009 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) 2010 } 2011 externTd = new(big.Int).Add(externTd, block.Difficulty()) 2012 2013 if !bc.HasBlock(block.Hash(), block.NumberU64()) { 2014 start := time.Now() 2015 if err := bc.writeBlockWithoutState(block, externTd); err != nil { 2016 return it.index, err 2017 } 2018 log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(), 2019 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 2020 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 2021 "root", block.Root()) 2022 } 2023 } 2024 // At this point, we've written all sidechain blocks to database. Loop ended 2025 // either on some other error or all were processed. If there was some other 2026 // error, we can ignore the rest of those blocks.
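// e.g. (numbers assumed): with a local TD of 1000 and an accumulated sidechain
// TD of 1005 the early-return below is skipped (1000 > 1005 is false), so the
// pruned ancestors are walked back to the first block whose state is still
// present and re-imported to regenerate the missing state.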
2027 // 2028 // If the externTd was larger than our local TD, we now need to reimport the previous 2029 // blocks to regenerate the required state 2030 localTd := bc.GetTd(current.Hash(), current.NumberU64()) 2031 if localTd.Cmp(externTd) > 0 { 2032 log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd) 2033 return it.index, err 2034 } 2035 // Gather all the sidechain hashes (full blocks may be memory heavy) 2036 var ( 2037 hashes []common.Hash 2038 numbers []uint64 2039 ) 2040 parent := it.previous() 2041 for parent != nil && !bc.HasState(parent.Root) { 2042 hashes = append(hashes, parent.Hash()) 2043 numbers = append(numbers, parent.Number.Uint64()) 2044 2045 parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1) 2046 } 2047 if parent == nil { 2048 return it.index, errors.New("missing parent") 2049 } 2050 // Import all the pruned blocks to make the state available 2051 var ( 2052 blocks []*types.Block 2053 memory common.StorageSize 2054 ) 2055 for i := len(hashes) - 1; i >= 0; i-- { 2056 // Append the next block to our batch 2057 block := bc.GetBlock(hashes[i], numbers[i]) 2058 2059 blocks = append(blocks, block) 2060 memory += block.Size() 2061 2062 // If memory use grew too large, import and continue. Sadly we need to discard 2063 // all raised events and logs from notifications since we're too heavy on the 2064 // memory here. 2065 if len(blocks) >= 2048 || memory > 64*1024*1024 { 2066 log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64()) 2067 if _, err := bc.insertChain(blocks, false); err != nil { 2068 return 0, err 2069 } 2070 blocks, memory = blocks[:0], 0 2071 2072 // If the chain is terminating, stop processing blocks 2073 if bc.insertStopped() { 2074 log.Debug("Abort during blocks processing") 2075 return 0, nil 2076 } 2077 } 2078 } 2079 if len(blocks) > 0 { 2080 log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64()) 2081 return bc.insertChain(blocks, false) 2082 } 2083 return 0, nil 2084 } 2085 2086 // reorg takes the head blocks of an old chain and a new chain, reconstructs the 2087 // blocks of the new chain, inserts them as part of the new canonical chain, and 2088 // accumulates potentially missing transactions and posts events about them. 2089 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 2090 var ( 2091 newChain types.Blocks 2092 oldChain types.Blocks 2093 commonBlock *types.Block 2094 2095 deletedTxs types.Transactions 2096 addedTxs types.Transactions 2097 2098 deletedLogs [][]*types.Log 2099 rebirthLogs [][]*types.Log 2100 2101 // collectLogs collects the logs that were generated or removed during 2102 // the processing of the block that corresponds with the given hash.
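// Note (added explanation) that collectLogs copies each log (l := *log) before
// setting l.Removed, so the receipts cached in the database are never mutated
// in place; only the copies sent with the removal events carry Removed == true.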
2103 // These logs are later announced as deleted or reborn 2104 collectLogs = func(hash common.Hash, removed bool) { 2105 number := bc.hc.GetBlockNumber(hash) 2106 if number == nil { 2107 return 2108 } 2109 receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) 2110 2111 var logs []*types.Log 2112 for _, receipt := range receipts { 2113 for _, log := range receipt.Logs { 2114 l := *log 2115 if removed { 2116 l.Removed = true 2117 } 2118 logs = append(logs, &l) 2119 } 2120 } 2121 if len(logs) > 0 { 2122 if removed { 2123 deletedLogs = append(deletedLogs, logs) 2124 } else { 2125 rebirthLogs = append(rebirthLogs, logs) 2126 } 2127 } 2128 } 2129 // mergeLogs returns a merged log slice with specified sort order. 2130 mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log { 2131 var ret []*types.Log 2132 if reverse { 2133 for i := len(logs) - 1; i >= 0; i-- { 2134 ret = append(ret, logs[i]...) 2135 } 2136 } else { 2137 for i := 0; i < len(logs); i++ { 2138 ret = append(ret, logs[i]...) 2139 } 2140 } 2141 return ret 2142 } 2143 ) 2144 // Reduce the longer chain to the same number as the shorter one 2145 if oldBlock.NumberU64() > newBlock.NumberU64() { 2146 // Old chain is longer, gather all transactions and logs as deleted ones 2147 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 2148 oldChain = append(oldChain, oldBlock) 2149 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 2150 collectLogs(oldBlock.Hash(), true) 2151 } 2152 } else { 2153 // New chain is longer, stash all blocks away for subsequent insertion 2154 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 2155 newChain = append(newChain, newBlock) 2156 } 2157 } 2158 if oldBlock == nil { 2159 return fmt.Errorf("invalid old chain") 2160 } 2161 if newBlock == nil { 2162 return fmt.Errorf("invalid new chain") 2163 } 2164 // Both sides of the reorg are at the same number, reduce both until the common 2165 // ancestor is found 2166 for { 2167 // If the common ancestor was found, bail out 2168 if oldBlock.Hash() == newBlock.Hash() { 2169 commonBlock = oldBlock 2170 break 2171 } 2172 // Remove an old block as well as stash away a new block 2173 oldChain = append(oldChain, oldBlock) 2174 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
2175 collectLogs(oldBlock.Hash(), true) 2176 2177 newChain = append(newChain, newBlock) 2178 2179 // Step back with both chains 2180 oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) 2181 if oldBlock == nil { 2182 return fmt.Errorf("invalid old chain") 2183 } 2184 newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 2185 if newBlock == nil { 2186 return fmt.Errorf("invalid new chain") 2187 } 2188 } 2189 // Ensure the user sees large reorgs 2190 if len(oldChain) > 0 && len(newChain) > 0 { 2191 logFn := log.Info 2192 msg := "Chain reorg detected" 2193 if len(oldChain) > 63 { 2194 msg = "Large chain reorg detected" 2195 logFn = log.Warn 2196 } 2197 logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), 2198 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 2199 blockReorgAddMeter.Mark(int64(len(newChain))) 2200 blockReorgDropMeter.Mark(int64(len(oldChain))) 2201 blockReorgMeter.Mark(1) 2202 } else { 2203 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 2204 } 2205 // Insert the new chain (except the new head block) in reverse order, taking 2206 // care of the proper incremental order. 2207 for i := len(newChain) - 1; i >= 1; i-- { 2208 // Insert the block in the canonical way, re-writing history 2209 bc.writeHeadBlock(newChain[i]) 2210 2211 // Collect reborn logs due to chain reorg 2212 collectLogs(newChain[i].Hash(), false) 2213 2214 // Collect the newly added transactions. 2215 addedTxs = append(addedTxs, newChain[i].Transactions()...) 2216 } 2217 // Delete useless indexes right now, including the non-canonical 2218 // transaction indexes and the canonical chain indexes above the new head. 2219 indexesBatch := bc.db.NewBatch() 2220 for _, tx := range types.TxDifference(deletedTxs, addedTxs) { 2221 rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash()) 2222 } 2223 // Delete any canonical number assignments above the new head 2224 number := bc.CurrentBlock().NumberU64() 2225 for i := number + 1; ; i++ { 2226 hash := rawdb.ReadCanonicalHash(bc.db, i) 2227 if hash == (common.Hash{}) { 2228 break 2229 } 2230 rawdb.DeleteCanonicalHash(indexesBatch, i) 2231 } 2232 if err := indexesBatch.Write(); err != nil { 2233 log.Crit("Failed to delete useless indexes", "err", err) 2234 } 2235 // If any logs need to be fired, do it now. In theory we could avoid creating 2236 // this goroutine if there are no events to fire, but realistically that only 2237 // ever happens if we're reorging empty blocks, which will only happen on idle 2238 // networks where performance is not an issue either way. 2239 if len(deletedLogs) > 0 { 2240 bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) 2241 } 2242 if len(rebirthLogs) > 0 { 2243 bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) 2244 } 2245 if len(oldChain) > 0 { 2246 for i := len(oldChain) - 1; i >= 0; i-- { 2247 bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) 2248 } 2249 } 2250 return nil 2251 } 2252 2253 func (bc *BlockChain) update() { 2254 futureTimer := time.NewTicker(5 * time.Second) 2255 defer futureTimer.Stop() 2256 for { 2257 select { 2258 case <-futureTimer.C: 2259 bc.procFutureBlocks() 2260 case <-bc.quit: 2261 return 2262 } 2263 } 2264 } 2265 2266 // maintainTxIndex is responsible for the construction and deletion of the 2267 // transaction index.
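// Window math example (values assumed): with head = 1_000_000 and
// txlookuplimit = 100_000, the retained index range is
// [head-txlookuplimit+1, head] = [900_001, 1_000_000]; indexBlocks below
// extends the tail backwards via IndexTransactions or prunes it forwards via
// UnindexTransactions to keep exactly that window as the head moves.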
2268 // 2269 // The user can use the flag `txlookuplimit` to specify a "recentness" threshold, 2270 // below which ancient tx indices get deleted. If `txlookuplimit` is 0, 2271 // all tx indices will be preserved. 2272 // 2273 // The user can adjust the txlookuplimit value on each launch; after fast 2274 // sync, Geth will automatically construct the missing indices and delete 2275 // the extra indices. 2276 func (bc *BlockChain) maintainTxIndex(ancients uint64) { 2277 defer bc.wg.Done() 2278 2279 // Before starting the actual maintenance, we need to handle a special case, 2280 // where the user might init Geth with an external ancient database. If so, we 2281 // need to reindex all necessary transactions before starting to process any 2282 // pruning requests. 2283 if ancients > 0 { 2284 var from = uint64(0) 2285 if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit { 2286 from = ancients - bc.txLookupLimit 2287 } 2288 rawdb.IndexTransactions(bc.db, from, ancients, bc.quit) 2289 } 2290 // indexBlocks reindexes or unindexes transactions depending on user configuration 2291 indexBlocks := func(tail *uint64, head uint64, done chan struct{}) { 2292 defer func() { done <- struct{}{} }() 2293 2294 // If the user just upgraded Geth to a new version which supports transaction 2295 // index pruning, write the new tail and remove anything older. 2296 if tail == nil { 2297 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2298 // Nothing to delete, write the tail and return 2299 rawdb.WriteTxIndexTail(bc.db, 0) 2300 } else { 2301 // Prune all stale tx indices and record the tx index tail 2302 rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1, bc.quit) 2303 } 2304 return 2305 } 2306 // If a previous indexing existed, make sure that we fill in any missing entries 2307 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2308 if *tail > 0 { 2309 rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit) 2310 } 2311 return 2312 } 2313 // Update the transaction index to the new chain state 2314 if head-bc.txLookupLimit+1 < *tail { 2315 // Reindex a part of missing indices and rewind index tail to HEAD-limit 2316 rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit) 2317 } else { 2318 // Unindex a part of stale indices and forward index tail to HEAD-limit 2319 rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit) 2320 } 2321 } 2322 // With any reindexing done, start listening to chain events and moving the index window 2323 var ( 2324 done chan struct{} // Non-nil if background unindexing or reindexing routine is active. 2325 headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed 2326 ) 2327 sub := bc.SubscribeChainHeadEvent(headCh) 2328 if sub == nil { 2329 return 2330 } 2331 defer sub.Unsubscribe() 2332 2333 for { 2334 select { 2335 case head := <-headCh: 2336 if done == nil { 2337 done = make(chan struct{}) 2338 go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done) 2339 } 2340 case <-done: 2341 done = nil 2342 case <-bc.quit: 2343 if done != nil { 2344 log.Info("Waiting for background transaction indexer to exit") 2345 <-done 2346 } 2347 return 2348 } 2349 } 2350 } 2351 2352 // reportBlock logs a bad block error.
2353 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 2354 rawdb.WriteBadBlock(bc.db, block) 2355 2356 var receiptString string 2357 for i, receipt := range receipts { 2358 receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", 2359 i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(), 2360 receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) 2361 } 2362 log.Error(fmt.Sprintf(` 2363 ########## BAD BLOCK ######### 2364 Chain config: %v 2365 2366 Number: %v 2367 Hash: 0x%x 2368 %v 2369 2370 Error: %v 2371 ############################## 2372 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 2373 } 2374 2375 // InsertHeaderChain attempts to insert the given header chain into the local 2376 // chain, possibly creating a reorg. If an error is returned, it will return the 2377 // index number of the failing header as well as an error describing what went wrong. 2378 // 2379 // The checkFreq parameter can be used to fine-tune whether nonce verification 2380 // should be done or not. The reason behind the optional check is because some 2381 // of the header retrieval mechanisms already need to verify nonces, as well as 2382 // because nonces can be verified sparsely, not needing to check each. 2383 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 2384 start := time.Now() 2385 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 2386 return i, err 2387 } 2388 2389 // Make sure only one thread manipulates the chain at once 2390 bc.chainmu.Lock() 2391 defer bc.chainmu.Unlock() 2392 2393 bc.wg.Add(1) 2394 defer bc.wg.Done() 2395 _, err := bc.hc.InsertHeaderChain(chain, start) 2396 return 0, err 2397 } 2398 2399 // CurrentHeader retrieves the current head header of the canonical chain. The 2400 // header is retrieved from the HeaderChain's internal cache. 2401 func (bc *BlockChain) CurrentHeader() *types.Header { 2402 return bc.hc.CurrentHeader() 2403 } 2404 2405 // GetTd retrieves a block's total difficulty in the canonical chain from the 2406 // database by hash and number, caching it if found. 2407 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 2408 return bc.hc.GetTd(hash, number) 2409 } 2410 2411 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 2412 // database by hash, caching it if found. 2413 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 2414 return bc.hc.GetTdByHash(hash) 2415 } 2416 2417 // GetHeader retrieves a block header from the database by hash and number, 2418 // caching it if found. 2419 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 2420 return bc.hc.GetHeader(hash, number) 2421 } 2422 2423 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 2424 // found. 2425 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 2426 return bc.hc.GetHeaderByHash(hash) 2427 } 2428 2429 // HasHeader checks if a block header is present in the database or not, caching 2430 // it if present.
2431 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 2432 return bc.hc.HasHeader(hash, number) 2433 } 2434 2435 // GetCanonicalHash returns the canonical hash for a given block number 2436 func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash { 2437 return bc.hc.GetCanonicalHash(number) 2438 } 2439 2440 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 2441 // hash, fetching towards the genesis block. 2442 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 2443 return bc.hc.GetBlockHashesFromHash(hash, max) 2444 } 2445 2446 // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or 2447 // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the 2448 // number of blocks to be individually checked before we reach the canonical chain. 2449 // 2450 // Note: ancestor == 0 returns the same block, 1 returns its parent and so on. 2451 func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { 2452 return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) 2453 } 2454 2455 // GetHeaderByNumber retrieves a block header from the database by number, 2456 // caching it (associated with its hash) if found. 2457 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 2458 return bc.hc.GetHeaderByNumber(number) 2459 } 2460 2461 // GetTransactionLookup retrieves the lookup associated with the given transaction 2462 // hash from the cache or database. 2463 func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry { 2464 // Short circuit if the txlookup is already in the cache, retrieve it otherwise 2465 if lookup, exist := bc.txLookupCache.Get(hash); exist { 2466 return lookup.(*rawdb.LegacyTxLookupEntry) 2467 } 2468 tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash) 2469 if tx == nil { 2470 return nil 2471 } 2472 lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex} 2473 bc.txLookupCache.Add(hash, lookup) 2474 return lookup 2475 } 2476 2477 // Config retrieves the chain's fork configuration. 2478 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 2479 2480 // Engine retrieves the blockchain's consensus engine. 2481 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 2482 2483 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 2484 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 2485 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 2486 } 2487 2488 // SubscribeChainEvent registers a subscription of ChainEvent. 2489 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 2490 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 2491 } 2492 2493 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 2494 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 2495 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 2496 } 2497 2498 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 2499 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 2500 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 2501 } 2502 2503 // SubscribeLogsEvent registers a subscription of []*types.Log.
2504 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 2505 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 2506 } 2507 2508 // SubscribeBlockProcessingEvent registers a subscription of bool where true means 2509 // block processing has started while false means it has stopped. 2510 func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription { 2511 return bc.scope.Track(bc.blockProcFeed.Subscribe(ch)) 2512 }
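// The subscription helpers above all return an event.Subscription tracked by the
// chain's scope. The sketch below (not part of the original file; the function
// name and channel buffer size are assumptions) shows how a consumer might
// follow the head of the chain until the subscription is torn down.
func exampleFollowHead(bc *BlockChain) {
	// Buffer the channel so a slow consumer doesn't stall the event feed.
	ch := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			// Each event carries the new head block.
			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case err := <-sub.Err():
			// The error channel fires (or closes) when the subscription
			// ends, e.g. on chain shutdown.
			log.Warn("Head subscription ended", "err", err)
			return
		}
	}
}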