github.com/core-coin/go-core/v2@v2.1.9/core/blockchain.go (about) 1 // Copyright 2014 by the Authors 2 // This file is part of the go-core library. 3 // 4 // The go-core library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-core library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-core library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package core implements the Core consensus protocol. 18 package core 19 20 import ( 21 "errors" 22 "fmt" 23 "io" 24 "math/big" 25 mrand "math/rand" 26 "sort" 27 "sync" 28 "sync/atomic" 29 "time" 30 31 lru "github.com/hashicorp/golang-lru" 32 33 "github.com/core-coin/go-core/v2/xcbdb" 34 35 "github.com/core-coin/go-core/v2/common" 36 "github.com/core-coin/go-core/v2/common/mclock" 37 "github.com/core-coin/go-core/v2/common/prque" 38 "github.com/core-coin/go-core/v2/consensus" 39 "github.com/core-coin/go-core/v2/core/rawdb" 40 "github.com/core-coin/go-core/v2/core/state" 41 "github.com/core-coin/go-core/v2/core/state/snapshot" 42 "github.com/core-coin/go-core/v2/core/types" 43 "github.com/core-coin/go-core/v2/core/vm" 44 "github.com/core-coin/go-core/v2/event" 45 "github.com/core-coin/go-core/v2/log" 46 "github.com/core-coin/go-core/v2/metrics" 47 "github.com/core-coin/go-core/v2/params" 48 "github.com/core-coin/go-core/v2/rlp" 49 "github.com/core-coin/go-core/v2/trie" 50 ) 51 52 var ( 53 headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil) 54 headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil) 55 headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil) 56 57 accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil) 58 accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil) 59 accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil) 60 accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil) 61 62 storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil) 63 storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil) 64 storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil) 65 storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil) 66 67 snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil) 68 snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil) 69 snapshotCommitTimer = metrics.NewRegisteredTimer("chain/snapshot/commits", nil) 70 71 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 72 blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) 73 blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) 74 blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) 75 76 blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil) 77 blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) 78 blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) 79 
blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil) 80 81 blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil) 82 blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil) 83 84 errInsertionInterrupted = errors.New("insertion is interrupted") 85 ) 86 87 const ( 88 bodyCacheLimit = 256 89 blockCacheLimit = 256 90 receiptsCacheLimit = 32 91 txLookupCacheLimit = 1024 92 maxFutureBlocks = 256 93 maxTimeFutureBlocks = 30 94 badBlockLimit = 10 95 TriesInMemory = 128 96 97 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 98 // 99 // Changelog: 100 // 101 // - Version 4 102 // The following incompatible database changes were added: 103 // * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted 104 // * the `Bloom` field of receipt is deleted 105 // * the `BlockIndex` and `TxIndex` fields of txlookup are deleted 106 // - Version 5 107 // The following incompatible database changes were added: 108 // * the `TxHash`, `EnergyCost`, and `ContractAddress` fields are no longer stored for a receipt 109 // * the `TxHash`, `EnergyCost`, and `ContractAddress` fields are computed by looking up the 110 // receipts' corresponding block 111 // - Version 6 112 // The following incompatible database changes were added: 113 // * Transaction lookup information stores the corresponding block number instead of block hash 114 // - Version 7 115 // The following incompatible database changes were added: 116 // * Use freezer as the ancient database to maintain all ancient data 117 // - Version 8 118 // The following incompatible database changes were added: 119 // * New scheme for contract code in order to separate the codes and trie nodes 120 BlockChainVersion uint64 = 8 121 ) 122 123 // CacheConfig contains the configuration values for the trie caching/pruning 124 // that's resident in a blockchain. 125 type CacheConfig struct { 126 TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory 127 TrieCleanJournal string // Disk journal for saving clean cache entries. 128 TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically 129 TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks 130 TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk 131 TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node) 132 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 133 SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory 134 Preimages bool // Whether to store preimage of trie key to the disk 135 136 SnapshotWait bool // Wait for snapshot construction on startup. TODO(raisty): This is a dirty hack for testing, nuke it 137 } 138 139 // defaultCacheConfig are the default caching values if none are specified by the 140 // user (also used during testing). 141 var defaultCacheConfig = &CacheConfig{ 142 TrieCleanLimit: 256, 143 TrieDirtyLimit: 256, 144 TrieTimeLimit: 5 * time.Minute, 145 SnapshotLimit: 256, 146 SnapshotWait: true, 147 } 148 149 // BlockChain represents the canonical chain given a database with a genesis 150 // block. The Blockchain manages chain imports, reverts, chain reorganisations. 
151 // 152 // Importing blocks into the block chain happens according to the set of rules 153 // defined by the two-stage Validator. Processing of blocks is done using the 154 // Processor which processes the included transactions. The validation of the state 155 // is done in the second part of the Validator. A failure results in aborting 156 // the import. 157 // 158 // The BlockChain also helps in returning blocks from **any** chain included 159 // in the database as well as blocks that represent the canonical chain. It's 160 // important to note that GetBlock can return any block and does not need to be 161 // included in the canonical one, whereas GetBlockByNumber always represents the 162 // canonical chain. 163 type BlockChain struct { 164 chainConfig *params.ChainConfig // Chain & network configuration 165 cacheConfig *CacheConfig // Cache configuration for pruning 166 167 db xcbdb.Database // Low level persistent database to store final content in 168 snaps *snapshot.Tree // Snapshot tree for fast trie leaf access 169 triegc *prque.Prque // Priority queue mapping block numbers to tries to gc 170 gcproc time.Duration // Accumulates canonical block processing for trie dumping 171 172 // txLookupLimit is the maximum number of blocks from head whose tx indices 173 // are reserved: 174 // * 0: means no limit and regenerate any missing indexes 175 // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes 176 // * nil: disable tx reindexer/deleter, but still index new blocks 177 txLookupLimit uint64 178 179 hc *HeaderChain 180 rmLogsFeed event.Feed 181 chainFeed event.Feed 182 chainSideFeed event.Feed 183 chainHeadFeed event.Feed 184 logsFeed event.Feed 185 blockProcFeed event.Feed 186 scope event.SubscriptionScope 187 genesisBlock *types.Block 188 189 chainmu sync.RWMutex // blockchain insertion lock 190 191 currentBlock atomic.Value // Current head of the block chain 192 currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) 193 194 stateCache state.Database // State database to reuse between imports (contains state cache) 195 bodyCache *lru.Cache // Cache for the most recent block bodies 196 bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format 197 receiptsCache *lru.Cache // Cache for the most recent receipts per block 198 blockCache *lru.Cache // Cache for the most recent entire blocks 199 txLookupCache *lru.Cache // Cache for the most recent transaction lookup data. 200 futureBlocks *lru.Cache // future blocks are blocks added for later processing 201 202 quit chan struct{} // blockchain quit channel 203 wg sync.WaitGroup // chain processing wait group for shutting down 204 running int32 // 0 if chain is running, 1 when stopped 205 procInterrupt int32 // interrupt signaler for block processing 206 207 engine consensus.Engine 208 validator Validator // Block and state validator interface 209 prefetcher Prefetcher // Block state prefetcher interface 210 processor Processor // Block transaction processor interface 211 vmConfig vm.Config 212 213 badBlocks *lru.Cache // Bad block cache 214 shouldPreserve func(*types.Block) bool // Function used to determine whether to preserve the given block. 215 terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion. 216 writeLegacyJournal bool // Testing flag used to flush the snapshot journal in legacy format.
217 } 218 219 // NewBlockChain returns a fully initialised block chain using information 220 // available in the database. It initialises the default Core Validator and 221 // Processor. 222 func NewBlockChain(db xcbdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) { 223 if cacheConfig == nil { 224 cacheConfig = defaultCacheConfig 225 } 226 bodyCache, _ := lru.New(bodyCacheLimit) 227 bodyRLPCache, _ := lru.New(bodyCacheLimit) 228 receiptsCache, _ := lru.New(receiptsCacheLimit) 229 blockCache, _ := lru.New(blockCacheLimit) 230 txLookupCache, _ := lru.New(txLookupCacheLimit) 231 futureBlocks, _ := lru.New(maxFutureBlocks) 232 badBlocks, _ := lru.New(badBlockLimit) 233 234 bc := &BlockChain{ 235 chainConfig: chainConfig, 236 cacheConfig: cacheConfig, 237 db: db, 238 triegc: prque.New(nil), 239 stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ 240 Cache: cacheConfig.TrieCleanLimit, 241 Journal: cacheConfig.TrieCleanJournal, 242 Preimages: cacheConfig.Preimages, 243 }), 244 quit: make(chan struct{}), 245 shouldPreserve: shouldPreserve, 246 bodyCache: bodyCache, 247 bodyRLPCache: bodyRLPCache, 248 receiptsCache: receiptsCache, 249 blockCache: blockCache, 250 txLookupCache: txLookupCache, 251 futureBlocks: futureBlocks, 252 engine: engine, 253 vmConfig: vmConfig, 254 badBlocks: badBlocks, 255 } 256 bc.validator = NewBlockValidator(chainConfig, bc, engine) 257 bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) 258 bc.processor = NewStateProcessor(chainConfig, bc, engine) 259 260 var err error 261 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped) 262 if err != nil { 263 return nil, err 264 } 265 bc.genesisBlock = bc.GetBlockByNumber(0) 266 if bc.genesisBlock == nil { 267 return nil, ErrNoGenesis 268 } 269 270 var nilBlock *types.Block 271 bc.currentBlock.Store(nilBlock) 272 bc.currentFastBlock.Store(nilBlock) 273 274 // Initialize the chain with ancient data if it isn't empty. 275 var txIndexBlock uint64 276 277 if bc.empty() { 278 rawdb.InitDatabaseFromFreezer(bc.db) 279 // If ancient database is not empty, reconstruct all missing 280 // indices in the background. 281 frozen, _ := bc.db.Ancients() 282 if frozen > 0 { 283 txIndexBlock = frozen 284 } 285 } 286 if err := bc.loadLastState(); err != nil { 287 return nil, err 288 } 289 // Make sure the state associated with the block is available 290 head := bc.CurrentBlock() 291 if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { 292 // Head state is missing, before the state recovery, find out the 293 // disk layer point of snapshot(if it's enabled). Make sure the 294 // rewound point is lower than disk layer. 
295 var diskRoot common.Hash 296 if bc.cacheConfig.SnapshotLimit > 0 { 297 diskRoot = rawdb.ReadSnapshotRoot(bc.db) 298 } 299 if diskRoot != (common.Hash{}) { 300 log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot) 301 302 snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot) 303 if err != nil { 304 return nil, err 305 } 306 // Chain rewound, persist old snapshot number to indicate recovery procedure 307 if snapDisk != 0 { 308 rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk) 309 } 310 } else { 311 log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash()) 312 if err := bc.SetHead(head.NumberU64()); err != nil { 313 return nil, err 314 } 315 } 316 } 317 // Ensure that a previous crash in SetHead doesn't leave extra ancients 318 if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 { 319 var ( 320 needRewind bool 321 low uint64 322 ) 323 // The head full block may be rolled back to a very low height due to 324 // blockchain repair. If the head full block is even lower than the ancient 325 // chain, truncate the ancient store. 326 fullBlock := bc.CurrentBlock() 327 if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 { 328 needRewind = true 329 low = fullBlock.NumberU64() 330 } 331 // In fast sync, it may happen that ancient data has been written to the 332 // ancient store, but the LastFastBlock has not been updated, truncate the 333 // extra data here. 334 fastBlock := bc.CurrentFastBlock() 335 if fastBlock != nil && fastBlock.NumberU64() < frozen-1 { 336 needRewind = true 337 if fastBlock.NumberU64() < low || low == 0 { 338 low = fastBlock.NumberU64() 339 } 340 } 341 if needRewind { 342 log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low) 343 if err := bc.SetHead(low); err != nil { 344 return nil, err 345 } 346 } 347 } 348 // The first thing the node will do is reconstruct the verification data for 349 // the head block (cryptore cache or clique voting snapshot). Might as well do 350 // it in advance. 351 bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true) 352 353 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 354 for hash := range BadHashes { 355 if header := bc.GetHeaderByHash(hash); header != nil { 356 // get the canonical block corresponding to the offending header's number 357 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 358 // make sure the headerByNumber (if present) is in our current canonical chain 359 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 360 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 361 if err := bc.SetHead(header.Number.Uint64() - 1); err != nil { 362 return nil, err 363 } 364 log.Error("Chain rewind was successful, resuming normal operation") 365 } 366 } 367 } 368 // Load any existing snapshot, regenerating it if loading failed 369 if bc.cacheConfig.SnapshotLimit > 0 { 370 // If the chain was rewound past the snapshot persistent layer (causing 371 // a recovery block number to be persisted to disk), check if we're still 372 // in recovery mode and in that case, don't invalidate the snapshot on a 373 // head mismatch. 
374 var recover bool 375 376 head := bc.CurrentBlock() 377 if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() { 378 log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer) 379 recover = true 380 } 381 bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, recover) 382 } 383 // Take ownership of this particular state 384 go bc.update() 385 if txLookupLimit != nil { 386 bc.txLookupLimit = *txLookupLimit 387 388 bc.wg.Add(1) 389 go bc.maintainTxIndex(txIndexBlock) 390 } 391 // If periodic cache journal is required, spin it up. 392 if bc.cacheConfig.TrieCleanRejournal > 0 { 393 if bc.cacheConfig.TrieCleanRejournal < time.Minute { 394 log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute) 395 bc.cacheConfig.TrieCleanRejournal = time.Minute 396 } 397 triedb := bc.stateCache.TrieDB() 398 bc.wg.Add(1) 399 go func() { 400 defer bc.wg.Done() 401 triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) 402 }() 403 } 404 return bc, nil 405 } 406 407 // GetVMConfig returns the block chain VM config. 408 func (bc *BlockChain) GetVMConfig() *vm.Config { 409 return &bc.vmConfig 410 } 411 412 // empty returns an indicator whether the blockchain is empty. 413 // Note, it's a special case that we connect a non-empty ancient 414 // database with an empty node, so that we can plugin the ancient 415 // into node seamlessly. 416 func (bc *BlockChain) empty() bool { 417 genesis := bc.genesisBlock.Hash() 418 for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} { 419 if hash != genesis { 420 return false 421 } 422 } 423 return true 424 } 425 426 // loadLastState loads the last known chain state from the database. This method 427 // assumes that the chain manager mutex is held. 
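// A minimal usage sketch, assuming the caller has an opened xcbdb.Database
// `db`, a *params.ChainConfig `chainConfig` and a consensus.Engine `engine`.
// Passing a nil *CacheConfig selects defaultCacheConfig, and a nil
// txLookupLimit disables the background tx-index maintenance:
//
//	chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
//	if err != nil {
//		log.Crit("Failed to initialise chain", "err", err)
//	}
//	defer chain.Stop()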
428 func (bc *BlockChain) loadLastState() error { 429 // Restore the last known head block 430 head := rawdb.ReadHeadBlockHash(bc.db) 431 if head == (common.Hash{}) { 432 // Corrupt or empty database, init from scratch 433 log.Warn("Empty database, resetting chain") 434 return bc.Reset() 435 } 436 // Make sure the entire head block is available 437 currentBlock := bc.GetBlockByHash(head) 438 if currentBlock == nil { 439 // Corrupt or empty database, init from scratch 440 log.Warn("Head block missing, resetting chain", "hash", head) 441 return bc.Reset() 442 } 443 // Everything seems to be fine, set as the head block 444 bc.currentBlock.Store(currentBlock) 445 headBlockGauge.Update(int64(currentBlock.NumberU64())) 446 447 // Restore the last known head header 448 currentHeader := currentBlock.Header() 449 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 450 if header := bc.GetHeaderByHash(head); header != nil { 451 currentHeader = header 452 } 453 } 454 bc.hc.SetCurrentHeader(currentHeader) 455 456 // Restore the last known head fast block 457 bc.currentFastBlock.Store(currentBlock) 458 headFastBlockGauge.Update(int64(currentBlock.NumberU64())) 459 460 if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 461 if block := bc.GetBlockByHash(head); block != nil { 462 bc.currentFastBlock.Store(block) 463 headFastBlockGauge.Update(int64(block.NumberU64())) 464 } 465 } 466 // Issue a status log for the user 467 currentFastBlock := bc.CurrentFastBlock() 468 469 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 470 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 471 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 472 473 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) 474 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) 475 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) 476 if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil { 477 log.Info("Loaded last fast-sync pivot marker", "number", *pivot) 478 } 479 return nil 480 } 481 482 // SetHead rewinds the local chain to a new head. Depending on whether the node 483 // was fast synced or full synced and in which state, the method will try to 484 // delete minimal data from disk whilst retaining chain consistency. 485 func (bc *BlockChain) SetHead(head uint64) error { 486 _, err := bc.SetHeadBeyondRoot(head, common.Hash{}) 487 return err 488 } 489 490 // SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition 491 // that the rewind must pass the specified state root. This method is meant to be 492 // used when rewiding with snapshots enabled to ensure that we go back further than 493 // persistent disk layer. Depending on whether the node was fast synced or full, and 494 // in which state, the method will try to delete minimal data from disk whilst 495 // retaining chain consistency. 496 // 497 // The method returns the block number where the requested root cap was found. 
498 func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) { 499 bc.chainmu.Lock() 500 defer bc.chainmu.Unlock() 501 502 // Track the block number of the requested root hash 503 var rootNumber uint64 // (no root == always 0) 504 505 // Retrieve the last pivot block to short circuit rollbacks beyond it and the 506 // current freezer limit to start nuking id underflown 507 pivot := rawdb.ReadLastPivotNumber(bc.db) 508 frozen, _ := bc.db.Ancients() 509 510 updateFn := func(db xcbdb.KeyValueWriter, header *types.Header) (uint64, bool) { 511 // Rewind the block chain, ensuring we don't end up with a stateless head 512 // block. Note, depth equality is permitted to allow using SetHead as a 513 // chain reparation mechanism without deleting any data! 514 if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() { 515 newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) 516 if newHeadBlock == nil { 517 log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) 518 newHeadBlock = bc.genesisBlock 519 } else { 520 // Block exists, keep rewinding until we find one with state, 521 // keeping rewinding until we exceed the optional threshold 522 // root hash 523 beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true) 524 525 for { 526 // If a root threshold was requested but not yet crossed, check 527 if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root { 528 beyondRoot, rootNumber = true, newHeadBlock.NumberU64() 529 } 530 if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil { 531 log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) 532 if pivot == nil || newHeadBlock.NumberU64() > *pivot { 533 newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) 534 continue 535 } else { 536 log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot) 537 newHeadBlock = bc.genesisBlock 538 } 539 } 540 if beyondRoot || newHeadBlock.NumberU64() == 0 { 541 log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) 542 break 543 } 544 log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) 545 newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding 546 } 547 } 548 rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash()) 549 550 // Degrade the chain markers if they are explicitly reverted. 551 // In theory we should update all in-memory markers in the 552 // last step, however the direction of SetHead is from high 553 // to low, so it's safe the update in-memory markers directly. 
554 bc.currentBlock.Store(newHeadBlock) 555 headBlockGauge.Update(int64(newHeadBlock.NumberU64())) 556 } 557 // Rewind the fast block in a simpleton way to the target head 558 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() { 559 newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) 560 // If either blocks reached nil, reset to the genesis state 561 if newHeadFastBlock == nil { 562 newHeadFastBlock = bc.genesisBlock 563 } 564 rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash()) 565 566 // Degrade the chain markers if they are explicitly reverted. 567 // In theory we should update all in-memory markers in the 568 // last step, however the direction of SetHead is from high 569 // to low, so it's safe the update in-memory markers directly. 570 bc.currentFastBlock.Store(newHeadFastBlock) 571 headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64())) 572 } 573 head := bc.CurrentBlock().NumberU64() 574 575 // If setHead underflown the freezer threshold and the block processing 576 // intent afterwards is full block importing, delete the chain segment 577 // between the stateful-block and the sethead target. 578 var wipe bool 579 if head+1 < frozen { 580 wipe = pivot == nil || head >= *pivot 581 } 582 return head, wipe // Only force wipe if full synced 583 } 584 // Rewind the header chain, deleting all block bodies until then 585 delFn := func(db xcbdb.KeyValueWriter, hash common.Hash, num uint64) { 586 // Ignore the error here since light client won't hit this path 587 frozen, _ := bc.db.Ancients() 588 if num+1 <= frozen { 589 // Truncate all relative data(header, total difficulty, body, receipt 590 // and canonical hash) from ancient store. 591 if err := bc.db.TruncateAncients(num); err != nil { 592 log.Crit("Failed to truncate ancient data", "number", num, "err", err) 593 } 594 // Remove the hash <-> number mapping from the active store. 595 rawdb.DeleteHeaderNumber(db, hash) 596 } else { 597 // Remove relative body and receipts from the active store. 598 // The header, total difficulty and canonical hash will be 599 // removed in the hc.SetHead function. 600 rawdb.DeleteBody(db, hash, num) 601 rawdb.DeleteReceipts(db, hash, num) 602 } 603 // Todo(raisty) txlookup, bloombits, etc 604 } 605 // If SetHead was only called as a chain reparation method, try to skip 606 // touching the header chain altogether, unless the freezer is broken 607 if block := bc.CurrentBlock(); block.NumberU64() == head { 608 if target, force := updateFn(bc.db, block.Header()); force { 609 bc.hc.SetHead(target, updateFn, delFn) 610 } 611 } else { 612 // Rewind the chain to the requested head and keep going backwards until a 613 // block with a state is found or fast sync pivot is passed 614 log.Warn("Rewinding blockchain", "target", head) 615 bc.hc.SetHead(head, updateFn, delFn) 616 } 617 // Clear out any stale content from the caches 618 bc.bodyCache.Purge() 619 bc.bodyRLPCache.Purge() 620 bc.receiptsCache.Purge() 621 bc.blockCache.Purge() 622 bc.txLookupCache.Purge() 623 bc.futureBlocks.Purge() 624 625 return rootNumber, bc.loadLastState() 626 } 627 628 // FastSyncCommitHead sets the current head block to the one defined by the hash 629 // irrelevant what the chain contents were prior. 
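// A minimal rewind sketch, assuming `chain` is a *BlockChain and `targetBlock`
// and `diskRoot` are supplied by the caller. SetHead deletes bodies, receipts
// and any excess ancient data above the target while keeping the chain
// consistent; SetHeadBeyondRoot additionally keeps rewinding until the given
// state root has been passed, as in the snapshot recovery path above:
//
//	if err := chain.SetHead(targetBlock); err != nil {
//		log.Error("Rewind failed", "err", err)
//	}
//	// Snapshot-aware variant: also returns the number of the block whose
//	// root matched diskRoot (0 when no root was requested or found).
//	if rootNumber, err := chain.SetHeadBeyondRoot(targetBlock, diskRoot); err == nil {
//		log.Info("Rewound past root", "number", rootNumber)
//	}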
630 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { 631 // Make sure that both the block as well at its state trie exists 632 block := bc.GetBlockByHash(hash) 633 if block == nil { 634 return fmt.Errorf("non existent block [%x…]", hash[:4]) 635 } 636 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil { 637 return err 638 } 639 // If all checks out, manually set the head block 640 bc.chainmu.Lock() 641 bc.currentBlock.Store(block) 642 headBlockGauge.Update(int64(block.NumberU64())) 643 bc.chainmu.Unlock() 644 645 // Destroy any existing state snapshot and regenerate it in the background 646 if bc.snaps != nil { 647 bc.snaps.Rebuild(block.Root()) 648 } 649 log.Info("Committed new head block", "number", block.Number(), "hash", hash) 650 return nil 651 } 652 653 // EnergyLimit returns the energy limit of the current HEAD block. 654 func (bc *BlockChain) EnergyLimit() uint64 { 655 return bc.CurrentBlock().EnergyLimit() 656 } 657 658 // CurrentBlock retrieves the current head block of the canonical chain. The 659 // block is retrieved from the blockchain's internal cache. 660 func (bc *BlockChain) CurrentBlock() *types.Block { 661 return bc.currentBlock.Load().(*types.Block) 662 } 663 664 // Snapshot returns the blockchain snapshot tree. This method is mainly used for 665 // testing, to make it possible to verify the snapshot after execution. 666 // 667 // Warning: There are no guarantees about the safety of using the returned 'snap' if the 668 // blockchain is simultaneously importing blocks, so take care. 669 func (bc *BlockChain) Snapshot() *snapshot.Tree { 670 return bc.snaps 671 } 672 673 // CurrentFastBlock retrieves the current fast-sync head block of the canonical 674 // chain. The block is retrieved from the blockchain's internal cache. 675 func (bc *BlockChain) CurrentFastBlock() *types.Block { 676 return bc.currentFastBlock.Load().(*types.Block) 677 } 678 679 // Validator returns the current validator. 680 func (bc *BlockChain) Validator() Validator { 681 return bc.validator 682 } 683 684 // Processor returns the current processor. 685 func (bc *BlockChain) Processor() Processor { 686 return bc.processor 687 } 688 689 // State returns a new mutable state based on the current HEAD block. 690 func (bc *BlockChain) State() (*state.StateDB, error) { 691 return bc.StateAt(bc.CurrentBlock().Root()) 692 } 693 694 // StateAt returns a new mutable state based on a particular point in time. 695 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 696 return state.New(root, bc.stateCache, bc.snaps) 697 } 698 699 // StateCache returns the caching database underpinning the blockchain instance. 700 func (bc *BlockChain) StateCache() state.Database { 701 return bc.stateCache 702 } 703 704 // Reset purges the entire blockchain, restoring it to its genesis state. 705 func (bc *BlockChain) Reset() error { 706 return bc.ResetWithGenesisBlock(bc.genesisBlock) 707 } 708 709 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 710 // specified genesis state. 
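// A minimal state-access sketch: obtaining a mutable state wrapper for the
// current head block. StateAt works the same way for any historical root that
// is still present in the state database:
//
//	head := chain.CurrentBlock()
//	statedb, err := chain.StateAt(head.Root())
//	if err != nil {
//		log.Warn("Head state unavailable", "root", head.Root(), "err", err)
//	}
//	// statedb can now be used for queries against the head state.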
711 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 712 // Dump the entire block chain and purge the caches 713 if err := bc.SetHead(0); err != nil { 714 return err 715 } 716 bc.chainmu.Lock() 717 defer bc.chainmu.Unlock() 718 719 // Prepare the genesis block and reinitialise the chain 720 batch := bc.db.NewBatch() 721 rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()) 722 rawdb.WriteBlock(batch, genesis) 723 if err := batch.Write(); err != nil { 724 log.Crit("Failed to write genesis block", "err", err) 725 } 726 bc.writeHeadBlock(genesis) 727 728 // Last update all in-memory chain markers 729 bc.genesisBlock = genesis 730 bc.currentBlock.Store(bc.genesisBlock) 731 headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) 732 bc.hc.SetGenesis(bc.genesisBlock.Header()) 733 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 734 bc.currentFastBlock.Store(bc.genesisBlock) 735 headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) 736 return nil 737 } 738 739 // Export writes the active chain to the given writer. 740 func (bc *BlockChain) Export(w io.Writer) error { 741 return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) 742 } 743 744 // ExportN writes a subset of the active chain to the given writer. 745 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 746 bc.chainmu.RLock() 747 defer bc.chainmu.RUnlock() 748 749 if first > last { 750 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 751 } 752 log.Info("Exporting batch of blocks", "count", last-first+1) 753 754 start, reported := time.Now(), time.Now() 755 for nr := first; nr <= last; nr++ { 756 block := bc.GetBlockByNumber(nr) 757 if block == nil { 758 return fmt.Errorf("export failed on #%d: not found", nr) 759 } 760 if err := block.EncodeRLP(w); err != nil { 761 return err 762 } 763 if time.Since(reported) >= statsReportLimit { 764 log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) 765 reported = time.Now() 766 } 767 } 768 return nil 769 } 770 771 // writeHeadBlock injects a new head block into the current block chain. This method 772 // assumes that the block is indeed a true head. It will also reset the head 773 // header and the head fast sync block to this very same block if they are older 774 // or if they are on a different side chain. 775 // 776 // Note, this function assumes that the `mu` mutex is held! 
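// A minimal export sketch, assuming the "os" package is imported: streaming
// the RLP-encoded canonical chain into a file. Export is equivalent to
// ExportN(w, 0, CurrentBlock().NumberU64()):
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := chain.ExportN(f, 0, chain.CurrentBlock().NumberU64()); err != nil {
//		return err
//	}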
777 func (bc *BlockChain) writeHeadBlock(block *types.Block) { 778 // If the block is on a side chain or an unknown one, force other heads onto it too 779 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 780 781 // Add the block to the canonical chain number scheme and mark as the head 782 batch := bc.db.NewBatch() 783 rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64()) 784 rawdb.WriteTxLookupEntriesByBlock(batch, block) 785 rawdb.WriteHeadBlockHash(batch, block.Hash()) 786 787 // If the block is better than our head or is on a different chain, force update heads 788 if updateHeads { 789 rawdb.WriteHeadHeaderHash(batch, block.Hash()) 790 rawdb.WriteHeadFastBlockHash(batch, block.Hash()) 791 } 792 // Flush the whole batch into the disk, exit the node if failed 793 if err := batch.Write(); err != nil { 794 log.Crit("Failed to update chain indexes and markers", "err", err) 795 } 796 // Update all in-memory chain markers in the last step 797 if updateHeads { 798 bc.hc.SetCurrentHeader(block.Header()) 799 bc.currentFastBlock.Store(block) 800 headFastBlockGauge.Update(int64(block.NumberU64())) 801 } 802 bc.currentBlock.Store(block) 803 headBlockGauge.Update(int64(block.NumberU64())) 804 } 805 806 // Genesis retrieves the chain's genesis block. 807 func (bc *BlockChain) Genesis() *types.Block { 808 return bc.genesisBlock 809 } 810 811 // GetBody retrieves a block body (transactions and uncles) from the database by 812 // hash, caching it if found. 813 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 814 // Short circuit if the body's already in the cache, retrieve otherwise 815 if cached, ok := bc.bodyCache.Get(hash); ok { 816 body := cached.(*types.Body) 817 return body 818 } 819 number := bc.hc.GetBlockNumber(hash) 820 if number == nil { 821 return nil 822 } 823 body := rawdb.ReadBody(bc.db, hash, *number) 824 if body == nil { 825 return nil 826 } 827 // Cache the found body for next time and return 828 bc.bodyCache.Add(hash, body) 829 return body 830 } 831 832 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 833 // caching it if found. 834 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 835 // Short circuit if the body's already in the cache, retrieve otherwise 836 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 837 return cached.(rlp.RawValue) 838 } 839 number := bc.hc.GetBlockNumber(hash) 840 if number == nil { 841 return nil 842 } 843 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 844 if len(body) == 0 { 845 return nil 846 } 847 // Cache the found body for next time and return 848 bc.bodyRLPCache.Add(hash, body) 849 return body 850 } 851 852 // HasBlock checks if a block is fully present in the database or not. 853 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 854 if bc.blockCache.Contains(hash) { 855 return true 856 } 857 return rawdb.HasBody(bc.db, hash, number) 858 } 859 860 // HasFastBlock checks if a fast block is fully present in the database or not. 861 func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { 862 if !bc.HasBlock(hash, number) { 863 return false 864 } 865 if bc.receiptsCache.Contains(hash) { 866 return true 867 } 868 return rawdb.HasReceipts(bc.db, hash, number) 869 } 870 871 // HasState checks if state trie is fully present in the database or not. 
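// A minimal body-lookup sketch, assuming `hash` is a block hash that may or
// may not be known locally and that types.Body carries the usual Transactions
// and Uncles fields. GetBodyRLP behaves the same but returns the raw RLP blob,
// which is cheaper when the body is only being forwarded as-is:
//
//	if body := chain.GetBody(hash); body != nil {
//		log.Debug("Body found", "txs", len(body.Transactions), "uncles", len(body.Uncles))
//	}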
872 func (bc *BlockChain) HasState(hash common.Hash) bool { 873 _, err := bc.stateCache.OpenTrie(hash) 874 return err == nil 875 } 876 877 // HasBlockAndState checks if a block and associated state trie is fully present 878 // in the database or not, caching it if present. 879 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 880 // Check first that the block itself is known 881 block := bc.GetBlock(hash, number) 882 if block == nil { 883 return false 884 } 885 return bc.HasState(block.Root()) 886 } 887 888 // GetBlock retrieves a block from the database by hash and number, 889 // caching it if found. 890 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 891 // Short circuit if the block's already in the cache, retrieve otherwise 892 if block, ok := bc.blockCache.Get(hash); ok { 893 return block.(*types.Block) 894 } 895 block := rawdb.ReadBlock(bc.db, hash, number) 896 if block == nil { 897 return nil 898 } 899 // Cache the found block for next time and return 900 bc.blockCache.Add(block.Hash(), block) 901 return block 902 } 903 904 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 905 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 906 number := bc.hc.GetBlockNumber(hash) 907 if number == nil { 908 return nil 909 } 910 return bc.GetBlock(hash, *number) 911 } 912 913 // GetBlockByNumber retrieves a block from the database by number, caching it 914 // (associated with its hash) if found. 915 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 916 hash := rawdb.ReadCanonicalHash(bc.db, number) 917 if hash == (common.Hash{}) { 918 return nil 919 } 920 return bc.GetBlock(hash, number) 921 } 922 923 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 924 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 925 if receipts, ok := bc.receiptsCache.Get(hash); ok { 926 return receipts.(types.Receipts) 927 } 928 number := rawdb.ReadHeaderNumber(bc.db, hash) 929 if number == nil { 930 return nil 931 } 932 receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) 933 if receipts == nil { 934 return nil 935 } 936 bc.receiptsCache.Add(hash, receipts) 937 return receipts 938 } 939 940 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 941 // [deprecated by xcb/62] 942 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 943 number := bc.hc.GetBlockNumber(hash) 944 if number == nil { 945 return nil 946 } 947 for i := 0; i < n; i++ { 948 block := bc.GetBlock(hash, *number) 949 if block == nil { 950 break 951 } 952 blocks = append(blocks, block) 953 hash = block.ParentHash() 954 *number-- 955 } 956 return 957 } 958 959 // GetUnclesInChain retrieves all the uncles from a given block backwards until 960 // a specific distance is reached. 961 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 962 uncles := []*types.Header{} 963 for i := 0; block != nil && i < length; i++ { 964 uncles = append(uncles, block.Uncles()...) 965 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 966 } 967 return uncles 968 } 969 970 // TrieNode retrieves a blob of data associated with a trie node 971 // either from ephemeral in-memory cache, or from persistent storage. 
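// A minimal canonical-lookup sketch: resolving a block by its canonical number
// and loading the receipts generated by its transactions. GetBlock can return
// blocks from any known chain, whereas GetBlockByNumber only follows the
// canonical number-to-hash mapping:
//
//	if block := chain.GetBlockByNumber(number); block != nil {
//		receipts := chain.GetReceiptsByHash(block.Hash())
//		log.Debug("Loaded block", "number", block.Number(), "receipts", len(receipts))
//	}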
972 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 973 return bc.stateCache.TrieDB().Node(hash) 974 } 975 976 // ContractCode retrieves a blob of data associated with a contract hash 977 // either from ephemeral in-memory cache, or from persistent storage. 978 func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) { 979 return bc.stateCache.ContractCode(common.Hash{}, hash) 980 } 981 982 // ContractCodeWithPrefix retrieves a blob of data associated with a contract 983 // hash either from ephemeral in-memory cache, or from persistent storage. 984 // 985 // If the code doesn't exist in the in-memory cache, check the storage with 986 // new code scheme. 987 func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) { 988 type codeReader interface { 989 ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) 990 } 991 return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash) 992 } 993 994 // Stop stops the blockchain service. If any imports are currently in progress 995 // it will abort them using the procInterrupt. 996 func (bc *BlockChain) Stop() { 997 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 998 return 999 } 1000 // Unsubscribe all subscriptions registered from blockchain 1001 bc.scope.Close() 1002 close(bc.quit) 1003 bc.StopInsert() 1004 bc.wg.Wait() 1005 1006 // Ensure that the entirety of the state snapshot is journalled to disk. 1007 var snapBase common.Hash 1008 if bc.snaps != nil { 1009 var err error 1010 if bc.writeLegacyJournal { 1011 if snapBase, err = bc.snaps.LegacyJournal(bc.CurrentBlock().Root()); err != nil { 1012 log.Error("Failed to journal state snapshot", "err", err) 1013 } 1014 } else { 1015 if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil { 1016 log.Error("Failed to journal state snapshot", "err", err) 1017 } 1018 } 1019 } 1020 // Ensure the state of a recent block is also stored to disk before exiting. 1021 // We're writing three different states to catch different restart scenarios: 1022 // - HEAD: So we don't need to reprocess any blocks in the general case 1023 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 1024 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 1025 if !bc.cacheConfig.TrieDirtyDisabled { 1026 triedb := bc.stateCache.TrieDB() 1027 1028 for _, offset := range []uint64{0, 1, TriesInMemory - 1} { 1029 if number := bc.CurrentBlock().NumberU64(); number > offset { 1030 recent := bc.GetBlockByNumber(number - offset) 1031 1032 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 1033 if err := triedb.Commit(recent.Root(), true, nil); err != nil { 1034 log.Error("Failed to commit recent state trie", "err", err) 1035 } 1036 } 1037 } 1038 if snapBase != (common.Hash{}) { 1039 log.Info("Writing snapshot state to disk", "root", snapBase) 1040 if err := triedb.Commit(snapBase, true, nil); err != nil { 1041 log.Error("Failed to commit recent state trie", "err", err) 1042 } 1043 } 1044 for !bc.triegc.Empty() { 1045 triedb.Dereference(bc.triegc.PopItem().(common.Hash)) 1046 } 1047 if size, _ := triedb.Size(); size != 0 { 1048 log.Error("Dangling trie nodes after full cleanup") 1049 } 1050 } 1051 // Ensure all live cached entries be saved into disk, so that we can skip 1052 // cache warmup when node restarts. 
1053 if bc.cacheConfig.TrieCleanJournal != "" { 1054 triedb := bc.stateCache.TrieDB() 1055 triedb.SaveCache(bc.cacheConfig.TrieCleanJournal) 1056 } 1057 log.Info("Blockchain stopped") 1058 } 1059 1060 // StopInsert interrupts all insertion methods, causing them to return 1061 // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after 1062 // calling this method. 1063 func (bc *BlockChain) StopInsert() { 1064 atomic.StoreInt32(&bc.procInterrupt, 1) 1065 } 1066 1067 // insertStopped returns true after StopInsert has been called. 1068 func (bc *BlockChain) insertStopped() bool { 1069 return atomic.LoadInt32(&bc.procInterrupt) == 1 1070 } 1071 1072 func (bc *BlockChain) procFutureBlocks() { 1073 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 1074 for _, hash := range bc.futureBlocks.Keys() { 1075 if block, exist := bc.futureBlocks.Peek(hash); exist { 1076 blocks = append(blocks, block.(*types.Block)) 1077 } 1078 } 1079 if len(blocks) > 0 { 1080 sort.Slice(blocks, func(i, j int) bool { 1081 return blocks[i].NumberU64() < blocks[j].NumberU64() 1082 }) 1083 // Insert one by one as chain insertion needs contiguous ancestry between blocks 1084 for i := range blocks { 1085 bc.InsertChain(blocks[i : i+1]) 1086 } 1087 } 1088 } 1089 1090 // WriteStatus status of write 1091 type WriteStatus byte 1092 1093 const ( 1094 NonStatTy WriteStatus = iota 1095 CanonStatTy 1096 SideStatTy 1097 ) 1098 1099 // truncateAncient rewinds the blockchain to the specified header and deletes all 1100 // data in the ancient store that exceeds the specified header. 1101 func (bc *BlockChain) truncateAncient(head uint64) error { 1102 frozen, err := bc.db.Ancients() 1103 if err != nil { 1104 return err 1105 } 1106 // Short circuit if there is no data to truncate in ancient store. 1107 if frozen <= head+1 { 1108 return nil 1109 } 1110 // Truncate all the data in the freezer beyond the specified head 1111 if err := bc.db.TruncateAncients(head + 1); err != nil { 1112 return err 1113 } 1114 // Clear out any stale content from the caches 1115 bc.hc.headerCache.Purge() 1116 bc.hc.tdCache.Purge() 1117 bc.hc.numberCache.Purge() 1118 1119 // Clear out any stale content from the caches 1120 bc.bodyCache.Purge() 1121 bc.bodyRLPCache.Purge() 1122 bc.receiptsCache.Purge() 1123 bc.blockCache.Purge() 1124 bc.txLookupCache.Purge() 1125 bc.futureBlocks.Purge() 1126 1127 log.Info("Rewind ancient data", "number", head) 1128 return nil 1129 } 1130 1131 // numberHash is just a container for a number and a hash, to represent a block 1132 type numberHash struct { 1133 number uint64 1134 hash common.Hash 1135 } 1136 1137 // InsertReceiptChain attempts to complete an already existing header chain with 1138 // transaction and receipt data. 1139 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) { 1140 // We don't require the chainMu here since we want to maximize the 1141 // concurrency of header insertion and receipt insertion. 
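// A minimal fast-sync sketch, assuming `blocks`, `receipts` and `ancientLimit`
// are provided by the downloader for an already imported header chain. Blocks
// numbered at or below ancientLimit are written directly into the freezer, the
// rest into the active key-value store; StopInsert called from any goroutine
// aborts the insertion as soon as possible:
//
//	if n, err := chain.InsertReceiptChain(blocks, receipts, ancientLimit); err != nil {
//		log.Error("Receipt chain insertion failed", "index", n, "err", err)
//	}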
1142 bc.wg.Add(1) 1143 defer bc.wg.Done() 1144 1145 var ( 1146 ancientBlocks, liveBlocks types.Blocks 1147 ancientReceipts, liveReceipts []types.Receipts 1148 ) 1149 // Do a sanity check that the provided chain is actually ordered and linked 1150 for i := 0; i < len(blockChain); i++ { 1151 if i != 0 { 1152 if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { 1153 log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), 1154 "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) 1155 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), 1156 blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) 1157 } 1158 } 1159 if blockChain[i].NumberU64() <= ancientLimit { 1160 ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i]) 1161 } else { 1162 liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i]) 1163 } 1164 } 1165 1166 var ( 1167 stats = struct{ processed, ignored int32 }{} 1168 start = time.Now() 1169 size = 0 1170 ) 1171 // updateHead updates the head fast sync block if the inserted blocks are better 1172 // and returns an indicator whether the inserted blocks are canonical. 1173 updateHead := func(head *types.Block) bool { 1174 bc.chainmu.Lock() 1175 1176 // Rewind may have occurred, skip in that case. 1177 if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 { 1178 currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64()) 1179 if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { 1180 rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) 1181 bc.currentFastBlock.Store(head) 1182 headFastBlockGauge.Update(int64(head.NumberU64())) 1183 bc.chainmu.Unlock() 1184 return true 1185 } 1186 } 1187 bc.chainmu.Unlock() 1188 return false 1189 } 1190 // writeAncient writes blockchain and corresponding receipt chain into ancient store. 1191 // 1192 // this function only accepts canonical chain data. All side chain will be reverted 1193 // eventually. 1194 writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 1195 var ( 1196 previous = bc.CurrentFastBlock() 1197 batch = bc.db.NewBatch() 1198 ) 1199 // If any error occurs before updating the head or we are inserting a side chain, 1200 // all the data written this time wll be rolled back. 
1201 defer func() { 1202 if previous != nil { 1203 if err := bc.truncateAncient(previous.NumberU64()); err != nil { 1204 log.Crit("Truncate ancient store failed", "err", err) 1205 } 1206 } 1207 }() 1208 var deleted []*numberHash 1209 for i, block := range blockChain { 1210 // Short circuit insertion if shutting down or processing failed 1211 if bc.insertStopped() { 1212 return 0, errInsertionInterrupted 1213 } 1214 // Short circuit insertion if it is required(used in testing only) 1215 if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) { 1216 return i, errors.New("insertion is terminated for testing purpose") 1217 } 1218 // Short circuit if the owner header is unknown 1219 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 1220 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 1221 } 1222 var ( 1223 start = time.Now() 1224 logged = time.Now() 1225 count int 1226 ) 1227 // Migrate all ancient blocks. This can happen if someone upgrades from Gocore 1228 // 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the 1229 // long term. 1230 for { 1231 // We can ignore the error here since light client won't hit this code path. 1232 frozen, _ := bc.db.Ancients() 1233 if frozen >= block.NumberU64() { 1234 break 1235 } 1236 h := rawdb.ReadCanonicalHash(bc.db, frozen) 1237 b := rawdb.ReadBlock(bc.db, h, frozen) 1238 size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen)) 1239 count += 1 1240 1241 // Always keep genesis block in active database. 1242 if b.NumberU64() != 0 { 1243 deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()}) 1244 } 1245 if time.Since(logged) > 8*time.Second { 1246 log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) 1247 logged = time.Now() 1248 } 1249 // Don't collect too much in-memory, write it out every 100K blocks 1250 if len(deleted) > 100000 { 1251 // Sync the ancient store explicitly to ensure all data has been flushed to disk. 1252 if err := bc.db.Sync(); err != nil { 1253 return 0, err 1254 } 1255 // Wipe out canonical block data. 1256 for _, nh := range deleted { 1257 rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number) 1258 rawdb.DeleteCanonicalHash(batch, nh.number) 1259 } 1260 if err := batch.Write(); err != nil { 1261 return 0, err 1262 } 1263 batch.Reset() 1264 // Wipe out side chain too. 1265 for _, nh := range deleted { 1266 for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) { 1267 rawdb.DeleteBlock(batch, hash, nh.number) 1268 } 1269 } 1270 if err := batch.Write(); err != nil { 1271 return 0, err 1272 } 1273 batch.Reset() 1274 deleted = deleted[0:] 1275 } 1276 } 1277 if count > 0 { 1278 log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) 1279 } 1280 // Flush data into ancient database. 1281 size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64())) 1282 1283 // Write tx indices if any condition is satisfied: 1284 // * If user requires to reserve all tx indices(txlookuplimit=0) 1285 // * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit) 1286 // * If block number is large enough to be regarded as a recent block 1287 // It means blocks below the ancientLimit-txlookupLimit won't be indexed. 1288 // 1289 // But if the `TxIndexTail` is not nil, e.g. 
Gocore is initialized with 1290 // an external ancient database, during the setup, blockchain will start 1291 // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients) 1292 // range. In this case, all tx indices of newly imported blocks should be 1293 // generated. 1294 if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit { 1295 rawdb.WriteTxLookupEntriesByBlock(batch, block) 1296 } else if rawdb.ReadTxIndexTail(bc.db) != nil { 1297 rawdb.WriteTxLookupEntriesByBlock(batch, block) 1298 } 1299 stats.processed++ 1300 } 1301 // Flush all tx-lookup index data. 1302 size += batch.ValueSize() 1303 if err := batch.Write(); err != nil { 1304 return 0, err 1305 } 1306 batch.Reset() 1307 1308 // Sync the ancient store explicitly to ensure all data has been flushed to disk. 1309 if err := bc.db.Sync(); err != nil { 1310 return 0, err 1311 } 1312 if !updateHead(blockChain[len(blockChain)-1]) { 1313 return 0, errors.New("side blocks can't be accepted as the ancient chain data") 1314 } 1315 previous = nil // disable rollback explicitly 1316 1317 // Wipe out canonical block data. 1318 for _, nh := range deleted { 1319 rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number) 1320 rawdb.DeleteCanonicalHash(batch, nh.number) 1321 } 1322 for _, block := range blockChain { 1323 // Always keep genesis block in active database. 1324 if block.NumberU64() != 0 { 1325 rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64()) 1326 rawdb.DeleteCanonicalHash(batch, block.NumberU64()) 1327 } 1328 } 1329 if err := batch.Write(); err != nil { 1330 return 0, err 1331 } 1332 batch.Reset() 1333 1334 // Wipe out side chain too. 1335 for _, nh := range deleted { 1336 for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) { 1337 rawdb.DeleteBlock(batch, hash, nh.number) 1338 } 1339 } 1340 for _, block := range blockChain { 1341 // Always keep genesis block in active database. 1342 if block.NumberU64() != 0 { 1343 for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) { 1344 rawdb.DeleteBlock(batch, hash, block.NumberU64()) 1345 } 1346 } 1347 } 1348 if err := batch.Write(); err != nil { 1349 return 0, err 1350 } 1351 return 0, nil 1352 } 1353 // writeLive writes blockchain and corresponding receipt chain into active store. 1354 writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 1355 skipPresenceCheck := false 1356 batch := bc.db.NewBatch() 1357 for i, block := range blockChain { 1358 // Short circuit insertion if shutting down or processing failed 1359 if bc.insertStopped() { 1360 return 0, errInsertionInterrupted 1361 } 1362 // Short circuit if the owner header is unknown 1363 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 1364 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 1365 } 1366 if !skipPresenceCheck { 1367 // Ignore if the entire data is already known 1368 if bc.HasBlock(block.Hash(), block.NumberU64()) { 1369 stats.ignored++ 1370 continue 1371 } else { 1372 // If block N is not present, neither are the later blocks. 
1373 // This should be true, but if we are mistaken, the shortcut 1374 // here will only cause overwriting of some existing data 1375 skipPresenceCheck = true 1376 } 1377 } 1378 // Write all the data out into the database 1379 rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) 1380 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i]) 1381 rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed 1382 1383 // Write everything belongs to the blocks into the database. So that 1384 // we can ensure all components of body is completed(body, receipts, 1385 // tx indexes) 1386 if batch.ValueSize() >= xcbdb.IdealBatchSize { 1387 if err := batch.Write(); err != nil { 1388 return 0, err 1389 } 1390 size += batch.ValueSize() 1391 batch.Reset() 1392 } 1393 stats.processed++ 1394 } 1395 // Write everything belongs to the blocks into the database. So that 1396 // we can ensure all components of body is completed(body, receipts, 1397 // tx indexes) 1398 if batch.ValueSize() > 0 { 1399 size += batch.ValueSize() 1400 if err := batch.Write(); err != nil { 1401 return 0, err 1402 } 1403 } 1404 updateHead(blockChain[len(blockChain)-1]) 1405 return 0, nil 1406 } 1407 // Write downloaded chain data and corresponding receipt chain data 1408 if len(ancientBlocks) > 0 { 1409 if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil { 1410 if err == errInsertionInterrupted { 1411 return 0, nil 1412 } 1413 return n, err 1414 } 1415 } 1416 // Write the tx index tail (block number from where we index) before write any live blocks 1417 if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 { 1418 // The tx index tail can only be one of the following two options: 1419 // * 0: all ancient blocks have been indexed 1420 // * ancient-limit: the indices of blocks before ancient-limit are ignored 1421 if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil { 1422 if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit { 1423 rawdb.WriteTxIndexTail(bc.db, 0) 1424 } else { 1425 rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit) 1426 } 1427 } 1428 } 1429 if len(liveBlocks) > 0 { 1430 if n, err := writeLive(liveBlocks, liveReceipts); err != nil { 1431 if err == errInsertionInterrupted { 1432 return 0, nil 1433 } 1434 return n, err 1435 } 1436 } 1437 1438 head := blockChain[len(blockChain)-1] 1439 context := []interface{}{ 1440 "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), 1441 "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)), 1442 "size", common.StorageSize(size), 1443 } 1444 if stats.ignored > 0 { 1445 context = append(context, []interface{}{"ignored", stats.ignored}...) 1446 } 1447 log.Info("Imported new block receipts", context...) 1448 1449 return 0, nil 1450 } 1451 1452 // SetTxLookupLimit is responsible for updating the txlookup limit to the 1453 // original one stored in db if the new mismatches with the old one. 1454 func (bc *BlockChain) SetTxLookupLimit(limit uint64) { 1455 bc.txLookupLimit = limit 1456 } 1457 1458 // TxLookupLimit retrieves the txlookup limit used by blockchain to prune 1459 // stale transaction indices. 1460 func (bc *BlockChain) TxLookupLimit() uint64 { 1461 return bc.txLookupLimit 1462 } 1463 1464 var lastWrite uint64 1465 1466 // writeBlockWithoutState writes only the block and its metadata to the database, 1467 // but does not write any state. 
This is used to construct competing side forks 1468 // up to the point where they exceed the canonical total difficulty. 1469 func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) { 1470 bc.wg.Add(1) 1471 defer bc.wg.Done() 1472 1473 batch := bc.db.NewBatch() 1474 rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td) 1475 rawdb.WriteBlock(batch, block) 1476 if err := batch.Write(); err != nil { 1477 log.Crit("Failed to write block into disk", "err", err) 1478 } 1479 return nil 1480 } 1481 1482 // writeKnownBlock updates the head block flag with a known block 1483 // and introduces a chain reorg if necessary. 1484 func (bc *BlockChain) writeKnownBlock(block *types.Block) error { 1485 bc.wg.Add(1) 1486 defer bc.wg.Done() 1487 1488 current := bc.CurrentBlock() 1489 if block.ParentHash() != current.Hash() { 1490 if err := bc.reorg(current, block); err != nil { 1491 return err 1492 } 1493 } 1494 bc.writeHeadBlock(block) 1495 return nil 1496 } 1497 1498 // WriteBlockWithState writes the block and all associated state to the database. 1499 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1500 bc.chainmu.Lock() 1501 defer bc.chainmu.Unlock() 1502 1503 return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent) 1504 } 1505 1506 // writeBlockWithState writes the block and all associated state to the database, 1507 // but expects the chain mutex to be held. 1508 func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1509 bc.wg.Add(1) 1510 defer bc.wg.Done() 1511 1512 // Calculate the total difficulty of the block 1513 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 1514 if ptd == nil { 1515 return NonStatTy, consensus.ErrUnknownAncestor 1516 } 1517 // Make sure no inconsistent state is leaked during insertion 1518 currentBlock := bc.CurrentBlock() 1519 localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 1520 externTd := new(big.Int).Add(block.Difficulty(), ptd) 1521 1522 // Regardless of the canonical status, write the block itself to the database. 1523 // 1524 // Note that all the components of the block (td, hash->number map, header, body, receipts) 1525 // should be written atomically. The block batch is used to contain all the components. 1526 blockBatch := bc.db.NewBatch() 1527 rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) 1528 rawdb.WriteBlock(blockBatch, block) 1529 rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) 1530 rawdb.WritePreimages(blockBatch, state.Preimages()) 1531 if err := blockBatch.Write(); err != nil { 1532 log.Crit("Failed to write block into disk", "err", err) 1533 } 1534 // Commit all cached state changes into underlying memory database.
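// (A summary of what follows, for orientation: the state is first committed
// into the in-memory trie database; an archive node then flushes it straight
// to disk, while a full node keeps it in memory and only flushes or garbage
// collects once the TrieDirtyLimit, TriesInMemory or TrieTimeLimit thresholds
// are crossed.)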
1535 root, err := state.Commit(true) 1536 if err != nil { 1537 return NonStatTy, err 1538 } 1539 triedb := bc.stateCache.TrieDB() 1540 1541 // If we're running an archive node, always flush 1542 if bc.cacheConfig.TrieDirtyDisabled { 1543 if err := triedb.Commit(root, false, nil); err != nil { 1544 return NonStatTy, err 1545 } 1546 } else { 1547 // Full but not archive node, do proper garbage collection 1548 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 1549 bc.triegc.Push(root, -int64(block.NumberU64())) 1550 1551 if current := block.NumberU64(); current > TriesInMemory { 1552 // If we exceeded our memory allowance, flush matured singleton nodes to disk 1553 var ( 1554 nodes, imgs = triedb.Size() 1555 limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 1556 ) 1557 if nodes > limit || imgs > 4*1024*1024 { 1558 triedb.Cap(limit - xcbdb.IdealBatchSize) 1559 } 1560 // Find the next state trie we need to commit 1561 chosen := current - TriesInMemory 1562 1563 // If we exceeded out time allowance, flush an entire trie to disk 1564 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 1565 // If the header is missing (canonical chain behind), we're reorging a low 1566 // diff sidechain. Suspend committing until this operation is completed. 1567 header := bc.GetHeaderByNumber(chosen) 1568 if header == nil { 1569 log.Warn("Reorg in progress, trie commit postponed", "number", chosen) 1570 } else { 1571 // If we're exceeding limits but haven't reached a large enough memory gap, 1572 // warn the user that the system is becoming unstable. 1573 if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 1574 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory) 1575 } 1576 // Flush an entire trie and restart the counters 1577 triedb.Commit(header.Root, true, nil) 1578 lastWrite = chosen 1579 bc.gcproc = 0 1580 } 1581 } 1582 // Garbage collect anything below our required write retention 1583 for !bc.triegc.Empty() { 1584 root, number := bc.triegc.Pop() 1585 if uint64(-number) > chosen { 1586 bc.triegc.Push(root, number) 1587 break 1588 } 1589 triedb.Dereference(root.(common.Hash)) 1590 } 1591 } 1592 } 1593 // If the total difficulty is higher than our known, add it to the canonical chain 1594 // Second clause in the if statement reduces the vulnerability to selfish mining. 1595 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1596 reorg := externTd.Cmp(localTd) > 0 1597 currentBlock = bc.CurrentBlock() 1598 if !reorg && externTd.Cmp(localTd) == 0 { 1599 // Split same-difficulty blocks by number, then preferentially select 1600 // the block generated by the local miner as the canonical block. 
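// A sketch of the tie-break below, with preferLocal standing in for the
// shouldPreserve/coin-flip choice (it is not a real helper in this file):
//
//	reorg = externTd > localTd ||
//	        (externTd == localTd &&
//	         (block.Number < current.Number ||
//	          (block.Number == current.Number && preferLocal())))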
1601 if block.NumberU64() < currentBlock.NumberU64() { 1602 reorg = true 1603 } else if block.NumberU64() == currentBlock.NumberU64() { 1604 var currentPreserve, blockPreserve bool 1605 if bc.shouldPreserve != nil { 1606 currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) 1607 } 1608 reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) 1609 } 1610 } 1611 if reorg { 1612 // Reorganise the chain if the parent is not the head block 1613 if block.ParentHash() != currentBlock.Hash() { 1614 if err := bc.reorg(currentBlock, block); err != nil { 1615 return NonStatTy, err 1616 } 1617 } 1618 status = CanonStatTy 1619 } else { 1620 status = SideStatTy 1621 } 1622 // Set new head. 1623 if status == CanonStatTy { 1624 bc.writeHeadBlock(block) 1625 } 1626 bc.futureBlocks.Remove(block.Hash()) 1627 1628 if status == CanonStatTy { 1629 bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs}) 1630 if len(logs) > 0 { 1631 bc.logsFeed.Send(logs) 1632 } 1633 // In theory we should fire a ChainHeadEvent when we inject 1634 // a canonical block, but sometimes we can insert a batch of 1635 // canonical blocks. To avoid firing too many ChainHeadEvents, 1636 // we fire an accumulated ChainHeadEvent and disable firing the 1637 // event here. 1638 if emitHeadEvent { 1639 bc.chainHeadFeed.Send(ChainHeadEvent{Block: block}) 1640 } 1641 } else { 1642 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1643 } 1644 return status, nil 1645 } 1646 1647 // addFutureBlock checks if the block is within the max allowed window to get 1648 // accepted for future processing, and returns an error if the block is too far 1649 // ahead and was not added. 1650 func (bc *BlockChain) addFutureBlock(block *types.Block) error { 1651 max := uint64(time.Now().Unix() + maxTimeFutureBlocks) 1652 if block.Time() > max { 1653 return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) 1654 } 1655 bc.futureBlocks.Add(block.Hash(), block) 1656 return nil 1657 } 1658 1659 // InsertChain attempts to insert the given batch of blocks into the canonical 1660 // chain or, otherwise, create a fork. If an error is returned it will return 1661 // the index number of the failing block as well as an error describing what went 1662 // wrong. 1663 // 1664 // After insertion is done, all accumulated events will be fired.
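//
// A minimal, hypothetical usage sketch (the blocks slice and the error
// handling are illustrative, not part of this file):
//
//	if idx, err := bc.InsertChain(blocks); err != nil {
//		log.Error("Chain import failed", "index", idx, "err", err)
//	}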
1665 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1666 // Sanity check that we have something meaningful to import 1667 if len(chain) == 0 { 1668 return 0, nil 1669 } 1670 1671 bc.blockProcFeed.Send(true) 1672 defer bc.blockProcFeed.Send(false) 1673 1674 // Remove already known canon-blocks 1675 var ( 1676 block, prev *types.Block 1677 ) 1678 // Do a sanity check that the provided chain is actually ordered and linked 1679 for i := 1; i < len(chain); i++ { 1680 block = chain[i] 1681 prev = chain[i-1] 1682 if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() { 1683 // Chain broke ancestry, log a message (programming error) and skip insertion 1684 log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(), 1685 "parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash()) 1686 1687 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(), 1688 prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4]) 1689 } 1690 } 1691 // Pre-checks passed, start the full block imports 1692 bc.wg.Add(1) 1693 bc.chainmu.Lock() 1694 n, err := bc.insertChain(chain, true) 1695 bc.chainmu.Unlock() 1696 bc.wg.Done() 1697 1698 return n, err 1699 } 1700 1701 // insertChain is the internal implementation of InsertChain, which assumes that 1702 // 1) chains are contiguous, and 2) The chain mutex is held. 1703 // 1704 // This method is split out so that import batches that require re-injecting 1705 // historical blocks can do so without releasing the lock, which could lead to 1706 // racey behaviour. If a sidechain import is in progress, and the historic state 1707 // is imported, but then new canon-head is added before the actual sidechain 1708 // completes, then the historic state could be pruned again 1709 func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) { 1710 // If the chain is terminating, don't even bother starting up 1711 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1712 return 0, nil 1713 } 1714 // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) 1715 senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig.NetworkID), chain) 1716 1717 var ( 1718 stats = insertStats{startTime: mclock.Now()} 1719 lastCanon *types.Block 1720 ) 1721 // Fire a single chain head event if we've progressed the chain 1722 defer func() { 1723 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1724 bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon}) 1725 } 1726 }() 1727 // Start the parallel header verifier 1728 headers := make([]*types.Header, len(chain)) 1729 seals := make([]bool, len(chain)) 1730 1731 for i, block := range chain { 1732 headers[i] = block.Header() 1733 seals[i] = verifySeals 1734 } 1735 abort, results := bc.engine.VerifyHeaders(bc, headers, seals) 1736 defer close(abort) 1737 1738 // Peek the error for the first block to decide the directing import logic 1739 it := newInsertIterator(chain, results, bc.validator) 1740 1741 block, err := it.next() 1742 1743 // Left-trim all the known blocks 1744 if err == ErrKnownBlock { 1745 // First block (and state) is known 1746 // 1. We did a roll-back, and should now do a re-import 1747 // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot 1748 // from the canonical chain, which has not been verified. 
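// (In both scenarios the loop below only skips ahead while the local head
// still has at least as much total difficulty as the skipped prefix; once the
// known blocks overtake it, they are re-written as the canonical head instead.)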
1749 // Skip all known blocks that are behind us 1750 var ( 1751 current = bc.CurrentBlock() 1752 localTd = bc.GetTd(current.Hash(), current.NumberU64()) 1753 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil 1754 ) 1755 for block != nil && err == ErrKnownBlock { 1756 externTd = new(big.Int).Add(externTd, block.Difficulty()) 1757 if localTd.Cmp(externTd) < 0 { 1758 break 1759 } 1760 log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash()) 1761 stats.ignored++ 1762 1763 block, err = it.next() 1764 } 1765 // The remaining blocks are still known blocks, the only scenario here is: 1766 // During the fast sync, the pivot point is already submitted but rollback 1767 // happens. Then node resets the head full block to a lower height via `rollback` 1768 // and leaves a few known blocks in the database. 1769 // 1770 // When node runs a fast sync again, it can re-import a batch of known blocks via 1771 // `insertChain` while a part of them have higher total difficulty than current 1772 // head full block(new pivot point). 1773 for block != nil && err == ErrKnownBlock { 1774 log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) 1775 if err := bc.writeKnownBlock(block); err != nil { 1776 return it.index, err 1777 } 1778 lastCanon = block 1779 1780 block, err = it.next() 1781 } 1782 // Falls through to the block import 1783 } 1784 switch { 1785 // First block is pruned, insert as sidechain and reorg only if TD grows enough 1786 case errors.Is(err, consensus.ErrPrunedAncestor): 1787 log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash()) 1788 return bc.insertSideChain(block, it) 1789 1790 // First block is future, shove it (and all children) to the future queue (unknown ancestor) 1791 case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())): 1792 for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) { 1793 log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash()) 1794 if err := bc.addFutureBlock(block); err != nil { 1795 return it.index, err 1796 } 1797 block, err = it.next() 1798 } 1799 stats.queued += it.processed() 1800 stats.ignored += it.remaining() 1801 1802 // If there are any still remaining, mark as ignored 1803 return it.index, err 1804 1805 // Some other error occurred, abort 1806 case err != nil: 1807 bc.futureBlocks.Remove(block.Hash()) 1808 stats.ignored += len(it.chain) 1809 bc.reportBlock(block, nil, err) 1810 return it.index, err 1811 } 1812 // No validation errors for the first block (or chain prefix skipped) 1813 for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() { 1814 // If the chain is terminating, stop processing blocks 1815 if bc.insertStopped() { 1816 log.Debug("Abort during block processing") 1817 break 1818 } 1819 // If the header is a banned one, straight out abort 1820 if BadHashes[block.Hash()] { 1821 bc.reportBlock(block, nil, ErrBlacklistedHash) 1822 return it.index, ErrBlacklistedHash 1823 } 1824 // If the block is known (in the middle of the chain), it's a special case for 1825 // Clique blocks where they can share state among each other, so importing an 1826 // older block might complete the state of the subsequent one. 
In this case, 1827 // just skip the block (we already validated it once fully (and crashed), since 1828 // its header and body were already in the database). 1829 if err == ErrKnownBlock { 1830 logger := log.Debug 1831 if bc.chainConfig.Clique == nil { 1832 logger = log.Warn 1833 } 1834 logger("Inserted known block", "number", block.Number(), "hash", block.Hash(), 1835 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "energy", block.EnergyUsed(), 1836 "root", block.Root()) 1837 1838 // Special case. Commit the empty receipt slice if we meet the known 1839 // block in the middle. It can only happen in the clique chain. Whenever 1840 // we insert blocks via `insertSideChain`, we only commit `td`, `header` 1841 // and `body` if it's non-existent. Since we don't have receipts without 1842 // re-execution, there is nothing to commit. But if the sidechain is eventually 1843 // adopted as the canonical chain, it needs to be re-executed for the missing 1844 // state; in this special case here (skipped re-execution) we would lose 1845 // the empty receipt entry. 1846 if len(block.Transactions()) == 0 { 1847 rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil) 1848 } else { 1849 log.Error("Please file an issue, skip known block execution without receipt", 1850 "hash", block.Hash(), "number", block.NumberU64()) 1851 } 1852 if err := bc.writeKnownBlock(block); err != nil { 1853 return it.index, err 1854 } 1855 stats.processed++ 1856 1857 // We can assume that logs are empty here, since the only way for consecutive 1858 // Clique blocks to have the same state is if there are no transactions. 1859 lastCanon = block 1860 continue 1861 } 1862 // Retrieve the parent block and its state to execute on top 1863 start := time.Now() 1864 1865 parent := it.previous() 1866 if parent == nil { 1867 parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) 1868 } 1869 statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) 1870 if err != nil { 1871 return it.index, err 1872 } 1873 // If we have a followup block, run that against the current state to pre-cache 1874 // transactions and probabilistically some of the account/storage trie nodes.
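// (followupInterrupt below is shared with the prefetcher goroutine: the main
// import path sets it to 1 via atomic.StoreUint32 once this block has been
// written or has failed processing/validation, which asks the prefetcher to
// stop early.)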
1875 var followupInterrupt uint32 1876 if !bc.cacheConfig.TrieCleanNoPrefetch { 1877 if followup, err := it.peek(); followup != nil && err == nil { 1878 throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) 1879 go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { 1880 bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) 1881 1882 blockPrefetchExecuteTimer.Update(time.Since(start)) 1883 if atomic.LoadUint32(interrupt) == 1 { 1884 blockPrefetchInterruptMeter.Mark(1) 1885 } 1886 }(time.Now(), followup, throwaway, &followupInterrupt) 1887 } 1888 } 1889 // Process block using the parent state as reference point 1890 substart := time.Now() 1891 receipts, logs, usedEnergy, err := bc.processor.Process(block, statedb, bc.vmConfig) 1892 if err != nil { 1893 bc.reportBlock(block, receipts, err) 1894 atomic.StoreUint32(&followupInterrupt, 1) 1895 return it.index, err 1896 } 1897 // Update the metrics touched during block processing 1898 accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them 1899 storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them 1900 accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them 1901 storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them 1902 snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them 1903 snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them 1904 1905 triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation 1906 trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates 1907 trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates 1908 1909 blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash) 1910 1911 // Validate the state using the default validator 1912 substart = time.Now() 1913 if err := bc.validator.ValidateState(block, statedb, receipts, usedEnergy); err != nil { 1914 bc.reportBlock(block, receipts, err) 1915 atomic.StoreUint32(&followupInterrupt, 1) 1916 return it.index, err 1917 } 1918 proctime := time.Since(start) 1919 1920 // Update the metrics touched during block validation 1921 accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them 1922 storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them 1923 1924 blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash)) 1925 1926 // Write the block to the chain and get the status. 
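// (CanonStatTy means the block became the new canonical head, possibly after a
// reorg; SideStatTy means it was stored as a side fork. The switch further
// below additionally records lastCanon and the GC processing time for
// canonical blocks.)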
1927 substart = time.Now() 1928 status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false) 1929 atomic.StoreUint32(&followupInterrupt, 1) 1930 if err != nil { 1931 return it.index, err 1932 } 1933 1934 // Update the metrics touched during block commit 1935 accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them 1936 storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them 1937 snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them 1938 1939 blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits) 1940 blockInsertTimer.UpdateSince(start) 1941 1942 switch status { 1943 case CanonStatTy: 1944 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), 1945 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "energy", block.EnergyUsed(), 1946 "elapsed", common.PrettyDuration(time.Since(start)), 1947 "root", block.Root()) 1948 1949 lastCanon = block 1950 1951 // Only count canonical blocks for GC processing time 1952 bc.gcproc += proctime 1953 1954 case SideStatTy: 1955 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), 1956 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1957 "txs", len(block.Transactions()), "energy", block.EnergyUsed(), "uncles", len(block.Uncles()), 1958 "root", block.Root()) 1959 1960 default: 1961 // This in theory is impossible, but lets be nice to our future selves and leave 1962 // a log, instead of trying to track down blocks imports that don't emit logs. 1963 log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(), 1964 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1965 "txs", len(block.Transactions()), "energy", block.EnergyUsed(), "uncles", len(block.Uncles()), 1966 "root", block.Root()) 1967 } 1968 stats.processed++ 1969 stats.usedEnergy += usedEnergy 1970 1971 dirty, _ := bc.stateCache.TrieDB().Size() 1972 stats.report(chain, it.index, dirty) 1973 } 1974 // Any blocks remaining here? The only ones we care about are the future ones 1975 if block != nil && errors.Is(err, consensus.ErrFutureBlock) { 1976 if err := bc.addFutureBlock(block); err != nil { 1977 return it.index, err 1978 } 1979 block, err = it.next() 1980 1981 for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() { 1982 if err := bc.addFutureBlock(block); err != nil { 1983 return it.index, err 1984 } 1985 stats.queued++ 1986 } 1987 } 1988 stats.ignored += it.remaining() 1989 1990 return it.index, err 1991 } 1992 1993 // insertSideChain is called when an import batch hits upon a pruned ancestor 1994 // error, which happens when a sidechain with a sufficiently old fork-block is 1995 // found. 1996 // 1997 // The method writes all (header-and-body-valid) blocks to disk, then tries to 1998 // switch over to the new chain if the TD exceeded the current chain. 1999 func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) { 2000 var ( 2001 externTd *big.Int 2002 current = bc.CurrentBlock() 2003 ) 2004 // The first sidechain block error is already verified to be ErrPrunedAncestor. 2005 // Since we don't import them here, we expect ErrUnknownAncestor for the remaining 2006 // ones. 
Any other error means that the block is invalid and should not be written 2007 // to disk. 2008 err := consensus.ErrPrunedAncestor 2009 for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() { 2010 // Check the canonical state root for that number 2011 if number := block.NumberU64(); current.NumberU64() >= number { 2012 canonical := bc.GetBlockByNumber(number) 2013 if canonical != nil && canonical.Hash() == block.Hash() { 2014 // Not a sidechain block, this is a re-import of a canon block which has its state pruned 2015 2016 // Collect the TD of the block. Since we know it's a canon one, 2017 // we can get it directly, and not (like further below) use 2018 // the parent and then add the block on top 2019 externTd = bc.GetTd(block.Hash(), block.NumberU64()) 2020 continue 2021 } 2022 if canonical != nil && canonical.Root() == block.Root() { 2023 // This is most likely a shadow-state attack. When a fork is imported into the 2024 // database, and it eventually reaches a block height which is not pruned, we 2025 // just found that the state already exists! This means that the sidechain block 2026 // refers to a state which already exists in our canon chain. 2027 // 2028 // If left unchecked, we would now proceed importing the blocks, without actually 2029 // having verified the state of the previous blocks. 2030 log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root()) 2031 2032 // If someone legitimately side-mines blocks, they would still be imported as usual. However, 2033 // we cannot risk writing unverified blocks to disk when they obviously target the pruning 2034 // mechanism. 2035 return it.index, errors.New("sidechain ghost-state attack") 2036 } 2037 } 2038 if externTd == nil { 2039 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) 2040 } 2041 externTd = new(big.Int).Add(externTd, block.Difficulty()) 2042 2043 if !bc.HasBlock(block.Hash(), block.NumberU64()) { 2044 start := time.Now() 2045 if err := bc.writeBlockWithoutState(block, externTd); err != nil { 2046 return it.index, err 2047 } 2048 log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(), 2049 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 2050 "txs", len(block.Transactions()), "energy", block.EnergyUsed(), "uncles", len(block.Uncles()), 2051 "root", block.Root()) 2052 } 2053 } 2054 // At this point, we've written all sidechain blocks to the database. Loop ended 2055 // either on some other error or all were processed. If there was some other 2056 // error, we can ignore the rest of those blocks.
2057 // 2058 // If the externTd was larger than our local TD, we now need to reimport the previous 2059 // blocks to regenerate the required state 2060 localTd := bc.GetTd(current.Hash(), current.NumberU64()) 2061 if localTd.Cmp(externTd) > 0 { 2062 log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd) 2063 return it.index, err 2064 } 2065 // Gather all the sidechain hashes (full blocks may be memory heavy) 2066 var ( 2067 hashes []common.Hash 2068 numbers []uint64 2069 ) 2070 parent := it.previous() 2071 for parent != nil && !bc.HasState(parent.Root) { 2072 hashes = append(hashes, parent.Hash()) 2073 numbers = append(numbers, parent.Number.Uint64()) 2074 2075 parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1) 2076 } 2077 if parent == nil { 2078 return it.index, errors.New("missing parent") 2079 } 2080 // Import all the pruned blocks to make the state available 2081 var ( 2082 blocks []*types.Block 2083 memory common.StorageSize 2084 ) 2085 for i := len(hashes) - 1; i >= 0; i-- { 2086 // Append the next block to our batch 2087 block := bc.GetBlock(hashes[i], numbers[i]) 2088 2089 blocks = append(blocks, block) 2090 memory += block.Size() 2091 2092 // If memory use grew too large, import and continue. Sadly we need to discard 2093 // all raised events and logs from notifications since we're too heavy on the 2094 // memory here. 2095 if len(blocks) >= 2048 || memory > 64*1024*1024 { 2096 log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64()) 2097 if _, err := bc.insertChain(blocks, false); err != nil { 2098 return 0, err 2099 } 2100 blocks, memory = blocks[:0], 0 2101 2102 // If the chain is terminating, stop processing blocks 2103 if bc.insertStopped() { 2104 log.Debug("Abort during blocks processing") 2105 return 0, nil 2106 } 2107 } 2108 } 2109 if len(blocks) > 0 { 2110 log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64()) 2111 return bc.insertChain(blocks, false) 2112 } 2113 return 0, nil 2114 } 2115 2116 // reorg takes two blocks, an old chain and a new chain and will reconstruct the 2117 // blocks and inserts them to be part of the new canonical chain and accumulates 2118 // potential missing transactions and post an event about them. 2119 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 2120 var ( 2121 newChain types.Blocks 2122 oldChain types.Blocks 2123 commonBlock *types.Block 2124 2125 deletedTxs types.Transactions 2126 addedTxs types.Transactions 2127 2128 deletedLogs [][]*types.Log 2129 rebirthLogs [][]*types.Log 2130 2131 // collectLogs collects the logs that were generated or removed during 2132 // the processing of the block that corresponds with the given hash. 
2133 // These logs are later announced as deleted or reborn 2134 collectLogs = func(hash common.Hash, removed bool) { 2135 number := bc.hc.GetBlockNumber(hash) 2136 if number == nil { 2137 return 2138 } 2139 receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) 2140 2141 var logs []*types.Log 2142 for _, receipt := range receipts { 2143 for _, log := range receipt.Logs { 2144 l := *log 2145 if removed { 2146 l.Removed = true 2147 } else { 2148 } 2149 logs = append(logs, &l) 2150 } 2151 } 2152 if len(logs) > 0 { 2153 if removed { 2154 deletedLogs = append(deletedLogs, logs) 2155 } else { 2156 rebirthLogs = append(rebirthLogs, logs) 2157 } 2158 } 2159 } 2160 // mergeLogs returns a merged log slice with specified sort order. 2161 mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log { 2162 var ret []*types.Log 2163 if reverse { 2164 for i := len(logs) - 1; i >= 0; i-- { 2165 ret = append(ret, logs[i]...) 2166 } 2167 } else { 2168 for i := 0; i < len(logs); i++ { 2169 ret = append(ret, logs[i]...) 2170 } 2171 } 2172 return ret 2173 } 2174 ) 2175 // Reduce the longer chain to the same number as the shorter one 2176 if oldBlock.NumberU64() > newBlock.NumberU64() { 2177 // Old chain is longer, gather all transactions and logs as deleted ones 2178 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 2179 oldChain = append(oldChain, oldBlock) 2180 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 2181 collectLogs(oldBlock.Hash(), true) 2182 } 2183 } else { 2184 // New chain is longer, stash all blocks away for subsequent insertion 2185 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 2186 newChain = append(newChain, newBlock) 2187 } 2188 } 2189 if oldBlock == nil { 2190 return fmt.Errorf("invalid old chain") 2191 } 2192 if newBlock == nil { 2193 return fmt.Errorf("invalid new chain") 2194 } 2195 // Both sides of the reorg are at the same number, reduce both until the common 2196 // ancestor is found 2197 for { 2198 // If the common ancestor was found, bail out 2199 if oldBlock.Hash() == newBlock.Hash() { 2200 commonBlock = oldBlock 2201 break 2202 } 2203 // Remove an old block as well as stash away a new block 2204 oldChain = append(oldChain, oldBlock) 2205 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
2206 collectLogs(oldBlock.Hash(), true) 2207 2208 newChain = append(newChain, newBlock) 2209 2210 // Step back with both chains 2211 oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) 2212 if oldBlock == nil { 2213 return fmt.Errorf("invalid old chain") 2214 } 2215 newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 2216 if newBlock == nil { 2217 return fmt.Errorf("invalid new chain") 2218 } 2219 } 2220 // Ensure the user sees large reorgs 2221 if len(oldChain) > 0 && len(newChain) > 0 { 2222 logFn := log.Info 2223 msg := "Chain reorg detected" 2224 if len(oldChain) > 63 { 2225 msg = "Large chain reorg detected" 2226 logFn = log.Warn 2227 } 2228 logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), 2229 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 2230 blockReorgAddMeter.Mark(int64(len(newChain))) 2231 blockReorgDropMeter.Mark(int64(len(oldChain))) 2232 blockReorgMeter.Mark(1) 2233 } else { 2234 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 2235 } 2236 // Insert the new chain(except the head block(reverse order)), 2237 // taking care of the proper incremental order. 2238 for i := len(newChain) - 1; i >= 1; i-- { 2239 // Insert the block in the canonical way, re-writing history 2240 bc.writeHeadBlock(newChain[i]) 2241 2242 // Collect reborn logs due to chain reorg 2243 collectLogs(newChain[i].Hash(), false) 2244 2245 // Collect the new added transactions. 2246 addedTxs = append(addedTxs, newChain[i].Transactions()...) 2247 } 2248 // Delete useless indexes right now which includes the non-canonical 2249 // transaction indexes, canonical chain indexes which above the head. 2250 indexesBatch := bc.db.NewBatch() 2251 for _, tx := range types.TxDifference(deletedTxs, addedTxs) { 2252 rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash()) 2253 } 2254 // Delete any canonical number assignments above the new head 2255 number := bc.CurrentBlock().NumberU64() 2256 for i := number + 1; ; i++ { 2257 hash := rawdb.ReadCanonicalHash(bc.db, i) 2258 if hash == (common.Hash{}) { 2259 break 2260 } 2261 rawdb.DeleteCanonicalHash(indexesBatch, i) 2262 } 2263 if err := indexesBatch.Write(); err != nil { 2264 log.Crit("Failed to delete useless indexes", "err", err) 2265 } 2266 // If any logs need to be fired, do it now. In theory we could avoid creating 2267 // this goroutine if there are no events to fire, but realistcally that only 2268 // ever happens if we're reorging empty blocks, which will only happen on idle 2269 // networks where performance is not an issue either way. 2270 if len(deletedLogs) > 0 { 2271 bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) 2272 } 2273 if len(rebirthLogs) > 0 { 2274 bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) 2275 } 2276 if len(oldChain) > 0 { 2277 for i := len(oldChain) - 1; i >= 0; i-- { 2278 bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) 2279 } 2280 } 2281 return nil 2282 } 2283 2284 func (bc *BlockChain) update() { 2285 futureTimer := time.NewTicker(5 * time.Second) 2286 defer futureTimer.Stop() 2287 for { 2288 select { 2289 case <-futureTimer.C: 2290 bc.procFutureBlocks() 2291 case <-bc.quit: 2292 return 2293 } 2294 } 2295 } 2296 2297 // maintainTxIndex is responsible for the construction and deletion of the 2298 // transaction index. 
2299 // 2300 // User can use flag `txlookuplimit` to specify a "recentness" block, below 2301 // which ancient tx indices get deleted. If `txlookuplimit` is 0, it means 2302 // all tx indices will be reserved. 2303 // 2304 // The user can adjust the txlookuplimit value for each launch after fast 2305 // sync, Gocore will automatically construct the missing indices and delete 2306 // the extra indices. 2307 func (bc *BlockChain) maintainTxIndex(ancients uint64) { 2308 defer bc.wg.Done() 2309 2310 // Before starting the actual maintenance, we need to handle a special case, 2311 // where user might init Gocore with an external ancient database. If so, we 2312 // need to reindex all necessary transactions before starting to process any 2313 // pruning requests. 2314 if ancients > 0 { 2315 var from = uint64(0) 2316 if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit { 2317 from = ancients - bc.txLookupLimit 2318 } 2319 rawdb.IndexTransactions(bc.db, from, ancients, bc.quit) 2320 } 2321 // indexBlocks reindexes or unindexes transactions depending on user configuration 2322 indexBlocks := func(tail *uint64, head uint64, done chan struct{}) { 2323 defer func() { done <- struct{}{} }() 2324 2325 // If the user just upgraded Gocore to a new version which supports transaction 2326 // index pruning, write the new tail and remove anything older. 2327 if tail == nil { 2328 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2329 // Nothing to delete, write the tail and return 2330 rawdb.WriteTxIndexTail(bc.db, 0) 2331 } else { 2332 // Prune all stale tx indices and record the tx index tail 2333 rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1, bc.quit) 2334 } 2335 return 2336 } 2337 // If a previous indexing existed, make sure that we fill in any missing entries 2338 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2339 if *tail > 0 { 2340 rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit) 2341 } 2342 return 2343 } 2344 // Update the transaction index to the new chain state 2345 if head-bc.txLookupLimit+1 < *tail { 2346 // Reindex a part of missing indices and rewind index tail to HEAD-limit 2347 rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit) 2348 } else { 2349 // Unindex a part of stale indices and forward index tail to HEAD-limit 2350 rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit) 2351 } 2352 } 2353 // Any reindexing done, start listening to chain events and moving the index window 2354 var ( 2355 done chan struct{} // Non-nil if background unindexing or reindexing routine is active. 
2356 headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed 2357 ) 2358 sub := bc.SubscribeChainHeadEvent(headCh) 2359 if sub == nil { 2360 return 2361 } 2362 defer sub.Unsubscribe() 2363 2364 for { 2365 select { 2366 case head := <-headCh: 2367 if done == nil { 2368 done = make(chan struct{}) 2369 go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done) 2370 } 2371 case <-done: 2372 done = nil 2373 case <-bc.quit: 2374 if done != nil { 2375 log.Info("Waiting background transaction indexer to exit") 2376 <-done 2377 } 2378 return 2379 } 2380 } 2381 } 2382 2383 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 2384 func (bc *BlockChain) BadBlocks() []*types.Block { 2385 blocks := make([]*types.Block, 0, bc.badBlocks.Len()) 2386 for _, hash := range bc.badBlocks.Keys() { 2387 if blk, exist := bc.badBlocks.Peek(hash); exist { 2388 block := blk.(*types.Block) 2389 blocks = append(blocks, block) 2390 } 2391 } 2392 return blocks 2393 } 2394 2395 // addBadBlock adds a bad block to the bad-block LRU cache 2396 func (bc *BlockChain) addBadBlock(block *types.Block) { 2397 bc.badBlocks.Add(block.Hash(), block) 2398 } 2399 2400 // reportBlock logs a bad block error. 2401 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 2402 bc.addBadBlock(block) 2403 2404 var receiptString string 2405 for i, receipt := range receipts { 2406 receiptString += fmt.Sprintf("\t %d: cumulative: %v energy: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", 2407 i, receipt.CumulativeEnergyUsed, receipt.EnergyUsed, receipt.ContractAddress.Hex(), 2408 receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) 2409 } 2410 log.Error(fmt.Sprintf(` 2411 ########## BAD BLOCK ######### 2412 Chain config: %v 2413 2414 Number: %v 2415 Hash: 0x%x 2416 %v 2417 2418 Error: %v 2419 ############################## 2420 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 2421 } 2422 2423 // InsertHeaderChain attempts to insert the given header chain in to the local 2424 // chain, possibly creating a reorg. If an error is returned, it will return the 2425 // index number of the failing header as well an error describing what went wrong. 2426 // 2427 // The verify parameter can be used to fine tune whether nonce verification 2428 // should be done or not. The reason behind the optional check is because some 2429 // of the header retrieval mechanisms already need to verify nonces, as well as 2430 // because nonces can be verified sparsely, not needing to check each. 2431 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 2432 start := time.Now() 2433 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 2434 return i, err 2435 } 2436 2437 // Make sure only one thread manipulates the chain at once 2438 bc.chainmu.Lock() 2439 defer bc.chainmu.Unlock() 2440 2441 bc.wg.Add(1) 2442 defer bc.wg.Done() 2443 _, err := bc.hc.InsertHeaderChain(chain, start) 2444 return 0, err 2445 } 2446 2447 // CurrentHeader retrieves the current head header of the canonical chain. The 2448 // header is retrieved from the HeaderChain's internal cache. 2449 func (bc *BlockChain) CurrentHeader() *types.Header { 2450 return bc.hc.CurrentHeader() 2451 } 2452 2453 // GetTd retrieves a block's total difficulty in the canonical chain from the 2454 // database by hash and number, caching it if found. 
2455 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 2456 return bc.hc.GetTd(hash, number) 2457 } 2458 2459 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 2460 // database by hash, caching it if found. 2461 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 2462 return bc.hc.GetTdByHash(hash) 2463 } 2464 2465 // GetHeader retrieves a block header from the database by hash and number, 2466 // caching it if found. 2467 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 2468 return bc.hc.GetHeader(hash, number) 2469 } 2470 2471 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 2472 // found. 2473 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 2474 return bc.hc.GetHeaderByHash(hash) 2475 } 2476 2477 // HasHeader checks if a block header is present in the database or not, caching 2478 // it if present. 2479 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 2480 return bc.hc.HasHeader(hash, number) 2481 } 2482 2483 // GetCanonicalHash returns the canonical hash for a given block number 2484 func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash { 2485 return bc.hc.GetCanonicalHash(number) 2486 } 2487 2488 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 2489 // hash, fetching towards the genesis block. 2490 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 2491 return bc.hc.GetBlockHashesFromHash(hash, max) 2492 } 2493 2494 // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or 2495 // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the 2496 // number of blocks to be individually checked before we reach the canonical chain. 2497 // 2498 // Note: ancestor == 0 returns the same block, 1 returns its parent and so on. 2499 func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { 2500 return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) 2501 } 2502 2503 // GetHeaderByNumber retrieves a block header from the database by number, 2504 // caching it (associated with its hash) if found. 2505 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 2506 return bc.hc.GetHeaderByNumber(number) 2507 } 2508 2509 // GetTransactionLookup retrieves the lookup associate with the given transaction 2510 // hash from the cache or database. 2511 func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry { 2512 // Short circuit if the txlookup already in the cache, retrieve otherwise 2513 if lookup, exist := bc.txLookupCache.Get(hash); exist { 2514 return lookup.(*rawdb.LegacyTxLookupEntry) 2515 } 2516 tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash) 2517 if tx == nil { 2518 return nil 2519 } 2520 lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex} 2521 bc.txLookupCache.Add(hash, lookup) 2522 return lookup 2523 } 2524 2525 // Config retrieves the chain's fork configuration. 2526 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 2527 2528 // Engine retrieves the blockchain's consensus engine. 
2529 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 2530 2531 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 2532 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 2533 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 2534 } 2535 2536 // SubscribeChainEvent registers a subscription of ChainEvent. 2537 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 2538 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 2539 } 2540 2541 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 2542 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 2543 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 2544 } 2545 2546 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 2547 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 2548 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 2549 } 2550 2551 // SubscribeLogsEvent registers a subscription of []*types.Log. 2552 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 2553 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 2554 } 2555 2556 // SubscribeBlockProcessingEvent registers a subscription of bool where true means 2557 // block processing has started while false means it has stopped. 2558 func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription { 2559 return bc.scope.Track(bc.blockProcFeed.Subscribe(ch)) 2560 }
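// chainHeadLoop is an illustrative consumer-side sketch rather than part of
// the exported API: it shows one way the subscription helpers above could be
// wired together. The *BlockChain value, the quit channel and the function
// name itself are assumed for the example.
func chainHeadLoop(bc *BlockChain, quit <-chan struct{}) {
	// Buffer the channel so a slow consumer does not stall the event feed.
	headCh := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(headCh)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-headCh:
			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case err := <-sub.Err():
			// The subscription is torn down when the blockchain scope closes.
			log.Warn("Chain head subscription ended", "err", err)
			return
		case <-quit:
			return
		}
	}
}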