github.com/fff-chain/go-fff@v0.0.0-20220726032732-1c84420b8a99/core/blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	lru "github.com/hashicorp/golang-lru"

	"github.com/fff-chain/go-fff/common"
	"github.com/fff-chain/go-fff/common/mclock"
	"github.com/fff-chain/go-fff/common/prque"
	"github.com/fff-chain/go-fff/consensus"
	"github.com/fff-chain/go-fff/core/rawdb"
	"github.com/fff-chain/go-fff/core/state"
	"github.com/fff-chain/go-fff/core/state/snapshot"
	"github.com/fff-chain/go-fff/core/types"
	"github.com/fff-chain/go-fff/core/vm"
	"github.com/fff-chain/go-fff/ethdb"
	"github.com/fff-chain/go-fff/event"
	"github.com/fff-chain/go-fff/log"
	"github.com/fff-chain/go-fff/metrics"
	"github.com/fff-chain/go-fff/params"
	"github.com/fff-chain/go-fff/rlp"
	"github.com/fff-chain/go-fff/trie"
)

var (
	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)

	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)

	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	blockReorgMeter     = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
	blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
	blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)

	errInsertionInterrupted        = errors.New("insertion is interrupted")
	errStateRootVerificationFailed = errors.New("state root verification failed")
)

const (
	bodyCacheLimit         = 256
	blockCacheLimit        = 256
	diffLayerCacheLimit    = 1024
	diffLayerRLPCacheLimit = 256
	receiptsCacheLimit     = 10000
	txLookupCacheLimit     = 1024
	maxBadBlockLimit       = 16
	maxFutureBlocks        = 256
	maxTimeFutureBlocks    = 30
	maxBeyondBlocks        = 2048
	prefetchTxNumber       = 100

	diffLayerFreezerRecheckInterval = 3 * time.Second
	diffLayerPruneRecheckInterval   = 1 * time.Second // The interval to prune unverified diff layers
	maxDiffQueueDist                = 2048            // Maximum allowed distance from the chain head to queue diffLayers
	maxDiffLimit                    = 2048            // Maximum number of unique diff layers a peer may have responded with
	maxDiffForkDist                 = 11              // Maximum allowed backward distance from the chain head
	maxDiffLimitForBroadcast        = 128             // Maximum number of unique diff layers a peer may have broadcasted

	rewindBadBlockInterval = 1 * time.Second

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//   * the `Bloom` field of receipt is deleted
	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//   The following incompatible database changes were added:
	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//     receipts' corresponding block
	// - Version 6
	//   The following incompatible database changes were added:
	//   * Transaction lookup information stores the corresponding block number instead of block hash
	// - Version 7
	//   The following incompatible database changes were added:
	//   * Use freezer as the ancient database to maintain all ancient data
	// - Version 8
	//   The following incompatible database changes were added:
	//   * New scheme for contract code in order to separate the codes and trie nodes
	BlockChainVersion uint64 = 8
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	TrieCleanLimit     int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanJournal   string        // Disk journal for saving clean cache entries.
	TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically
	TrieDirtyLimit     int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled  bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit      time.Duration // Time limit after which to flush the current in-memory trie to disk
	SnapshotLimit      int           // Memory allowance (MB) to use for caching snapshot entries in memory
	Preimages          bool          // Whether to store preimage of trie key to the disk
	TriesInMemory      uint64        // How many tries to keep in memory

	SnapshotWait bool // Wait for snapshot construction on startup.
	// TODO(karalabe): This is a dirty hack for testing, nuke it
}

// PeerIDer is declared here to avoid an import cycle.
type PeerIDer interface {
	ID() string
}

// defaultCacheConfig are the default caching values if none are specified by the
// user (also used during testing).
var defaultCacheConfig = &CacheConfig{
	TrieCleanLimit: 256,
	TrieDirtyLimit: 256,
	TrieTimeLimit:  5 * time.Minute,
	SnapshotLimit:  256,
	TriesInMemory:  128,
	SnapshotWait:   true,
}

type BlockChainOption func(*BlockChain) *BlockChain

// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two-stage Validator. Processing of blocks is done using the
// Processor, which processes the included transactions. Validation of the state
// is done in the second part of the Validator. Failing either stage aborts the
// import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block, which need not be
// included in the canonical chain, whereas GetBlockByNumber always refers to
// the canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db         ethdb.Database // Low level persistent database to store final content in
	snaps      *snapshot.Tree // Snapshot tree for fast trie leaf access
	triegc     *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc     time.Duration  // Accumulates canonical block processing for trie dumping
	commitLock sync.Mutex     // CommitLock is used to protect the above fields from being modified concurrently

	// txLookupLimit is the maximum number of blocks from head whose tx indices
	// are reserved:
	//  * 0:   means no limit and regenerate any missing indexes
	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
	//  * nil: disable tx reindexer/deleter, but still index new blocks
	txLookupLimit uint64
	triesInMemory uint64

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock

	currentBlock          atomic.Value // Current head of the block chain
	currentFastBlock      atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
	highestVerifiedHeader atomic.Value

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
	futureBlocks  *lru.Cache // future blocks are blocks added for later processing
	badBlockCache *lru.Cache // Cache for the blocks that failed to pass MPT root verification

	// trusted diff layers
	diffLayerCache             *lru.Cache   // Cache for the diffLayers
	diffLayerRLPCache          *lru.Cache   // Cache for the rlp encoded diffLayers
	diffQueue                  *prque.Prque // A priority queue to store recent diff layers
	diffQueueBuffer            chan *types.DiffLayer
	diffLayerFreezerBlockLimit uint64

	// untrusted diff layers
	diffMux               sync.RWMutex
	blockHashToDiffLayers map[common.Hash]map[common.Hash]*types.DiffLayer // map[blockHash]map[diffHash]Diff
	diffHashToBlockHash   map[common.Hash]common.Hash                      // map[diffHash]blockHash
	diffHashToPeers       map[common.Hash]map[string]struct{}              // map[diffHash]map[pid]
	diffNumToBlockHashes  map[uint64]map[common.Hash]struct{}              // map[number]map[blockHash]
	diffPeersToDiffHashes map[string]map[common.Hash]struct{}              // map[pid]map[diffHash]

	quit          chan struct{}  // blockchain quit channel
	wg            sync.WaitGroup // chain processing wait group for shutting down
	running       int32          // 0 if chain is running, 1 when stopped
	procInterrupt int32          // interrupt signaler for block processing

	engine     consensus.Engine
	prefetcher Prefetcher
	validator  Validator // Block and state validator interface
	processor  Processor // Block transaction processor interface
	vmConfig   vm.Config
	pipeCommit bool

	shouldPreserve  func(*types.Block) bool        // Function used to determine whether to preserve the given block.
	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
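//
// A minimal construction sketch (db, chainConfig and engine are placeholders,
// not values defined in this package): a nil cacheConfig selects
// defaultCacheConfig, and a nil txLookupLimit skips starting the tx-index
// maintenance routine.
//
//	chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
//	if err != nil {
//		// e.g. ErrNoGenesis when the database holds no genesis block
//	}
//	defer chain.Stop()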
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine,
	vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64,
	options ...BlockChainOption) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = defaultCacheConfig
	}
	if cacheConfig.TriesInMemory != 128 {
		log.Warn("TriesInMemory isn't the default value (128); you need to specify the exact same TriesInMemory when pruning data",
			"triesInMemory", cacheConfig.TriesInMemory)
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	txLookupCache, _ := lru.New(txLookupCacheLimit)
	badBlockCache, _ := lru.New(maxBadBlockLimit)

	futureBlocks, _ := lru.New(maxFutureBlocks)
	diffLayerCache, _ := lru.New(diffLayerCacheLimit)
	diffLayerRLPCache, _ := lru.New(diffLayerRLPCacheLimit)

	bc := &BlockChain{
		chainConfig: chainConfig,
		cacheConfig: cacheConfig,
		db:          db,
		triegc:      prque.New(nil),
		stateCache: state.NewDatabaseWithConfigAndCache(db, &trie.Config{
			Cache:     cacheConfig.TrieCleanLimit,
			Journal:   cacheConfig.TrieCleanJournal,
			Preimages: cacheConfig.Preimages,
		}),
		triesInMemory:         cacheConfig.TriesInMemory,
		quit:                  make(chan struct{}),
		shouldPreserve:        shouldPreserve,
		bodyCache:             bodyCache,
		bodyRLPCache:          bodyRLPCache,
		receiptsCache:         receiptsCache,
		blockCache:            blockCache,
		badBlockCache:         badBlockCache,
		diffLayerCache:        diffLayerCache,
		diffLayerRLPCache:     diffLayerRLPCache,
		txLookupCache:         txLookupCache,
		futureBlocks:          futureBlocks,
		engine:                engine,
		vmConfig:              vmConfig,
		diffQueue:             prque.New(nil),
		diffQueueBuffer:       make(chan *types.DiffLayer),
		blockHashToDiffLayers: make(map[common.Hash]map[common.Hash]*types.DiffLayer),
		diffHashToBlockHash:   make(map[common.Hash]common.Hash),
		diffHashToPeers:       make(map[common.Hash]map[string]struct{}),
		diffNumToBlockHashes:  make(map[uint64]map[common.Hash]struct{}),
		diffPeersToDiffHashes: make(map[string]map[common.Hash]struct{}),
	}
	bc.prefetcher = NewStatePrefetcher(chainConfig, bc, engine)
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}

	var nilBlock *types.Block
	bc.currentBlock.Store(nilBlock)
	bc.currentFastBlock.Store(nilBlock)

	var nilHeader *types.Header
	bc.highestVerifiedHeader.Store(nilHeader)

	// Initialize the chain with ancient data if it isn't empty.
	var txIndexBlock uint64

	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
		// If the ancient database is not empty, reconstruct all missing
		// indices in the background.
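		// The freezer item count below decides whether that repair is needed;
		// txIndexBlock records where maintainTxIndex should start scanning.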
		frozen, _ := bc.db.ItemAmountInAncient()
		if frozen > 0 {
			txIndexBlock, _ = bc.db.Ancients()
		}
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Make sure the state associated with the block is available
	head := bc.CurrentBlock()
	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
		// Head state is missing. Before the state recovery, find out the
		// disk layer point of the snapshot (if it's enabled) and make sure
		// the rewound point is lower than the disk layer.
		var diskRoot common.Hash
		if bc.cacheConfig.SnapshotLimit > 0 {
			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
		}
		if diskRoot != (common.Hash{}) {
			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)

			snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
			if err != nil {
				return nil, err
			}
			// Chain rewound, persist old snapshot number to indicate recovery procedure
			if snapDisk != 0 {
				rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
			}
		} else {
			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
			if err := bc.SetHead(head.NumberU64()); err != nil {
				return nil, err
			}
		}
	}
	// Ensure that a previous crash in SetHead doesn't leave extra ancients
	if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 {
		frozen, err = bc.db.Ancients()
		if err != nil {
			return nil, err
		}
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
			needRewind = true
			low = fullBlock.NumberU64()
		}
		// In fast sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated; truncate the
		// extra data here.
		fastBlock := bc.CurrentFastBlock()
		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
			needRewind = true
			if fastBlock.NumberU64() < low || low == 0 {
				low = fastBlock.NumberU64()
			}
		}
		if needRewind {
			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
			if err := bc.SetHead(low); err != nil {
				return nil, err
			}
		}
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (ethash cache or clique voting snapshot). Might as well do
	// it in advance.
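	// (The return value is deliberately ignored: per the comment above, this
	// call exists only to warm the engine's verification caches.)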
	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
					return nil, err
				}
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Load any existing snapshot, regenerating it if loading failed
	if bc.cacheConfig.SnapshotLimit > 0 {
		// If the chain was rewound past the snapshot persistent layer (causing
		// a recovery block number to be persisted to disk), check if we're still
		// in recovery mode and in that case, don't invalidate the snapshot on a
		// head mismatch.
		var recover bool

		head := bc.CurrentBlock()
		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
			log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
			recover = true
		}
		bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, int(bc.cacheConfig.TriesInMemory), head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
	}
	// Apply the options before starting any routines
	for _, option := range options {
		bc = option(bc)
	}
	// Take ownership of this particular state
	go bc.update()
	if txLookupLimit != nil {
		bc.txLookupLimit = *txLookupLimit

		bc.wg.Add(1)
		go bc.maintainTxIndex(txIndexBlock)
	}
	// If periodic cache journal is required, spin it up.
	if bc.cacheConfig.TrieCleanRejournal > 0 {
		if bc.cacheConfig.TrieCleanRejournal < time.Minute {
			log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
			bc.cacheConfig.TrieCleanRejournal = time.Minute
		}
		triedb := bc.stateCache.TrieDB()
		bc.wg.Add(1)
		go func() {
			defer bc.wg.Done()
			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
		}()
	}
	// Persist and prune diff layers if a diff store is configured
	if bc.db.DiffStore() != nil {
		go bc.trustedDiffLayerLoop()
	}
	go bc.untrustedDiffLayerPruneLoop()
	if bc.pipeCommit {
		// check current block and rewind invalid one
		go bc.rewindInvalidHeaderBlockLoop()
	}
	return bc, nil
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
	return &bc.vmConfig
}

func (bc *BlockChain) cacheReceipts(hash common.Hash, receipts types.Receipts) {
	// TODO: this is a hot fix for the block hash of logs being
	// `0x0000000000000000000000000000000000000000000000000000000000000000` for system txs.
	// Please check details in https://github.com/binance-chain/bsc/issues/443
	// This is a temporary fix; the official fix should be a hard fork.
	const possibleSystemReceipts = 3 // One slash tx, two reward distribute txs.
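	// Walk the trailing receipts (at most possibleSystemReceipts of them) and
	// stamp the containing block hash onto each of their logs.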
	numOfReceipts := len(receipts)
	for i := numOfReceipts - 1; i >= 0 && i >= numOfReceipts-possibleSystemReceipts; i-- {
		for j := 0; j < len(receipts[i].Logs); j++ {
			receipts[i].Logs[j].BlockHash = hash
		}
	}
	bc.receiptsCache.Add(hash, receipts)
}

func (bc *BlockChain) cacheDiffLayer(diffLayer *types.DiffLayer) {
	if bc.diffLayerCache.Len() >= diffLayerCacheLimit {
		bc.diffLayerCache.RemoveOldest()
	}
	bc.diffLayerCache.Add(diffLayer.BlockHash, diffLayer)
	if bc.db.DiffStore() != nil {
		// push to priority queue before persisting
		bc.diffQueueBuffer <- diffLayer
	}
}

func (bc *BlockChain) cacheBlock(hash common.Hash, block *types.Block) {
	bc.blockCache.Add(hash, block)
}

// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plug the ancient
// store into the node seamlessly.
func (bc *BlockChain) empty() bool {
	genesis := bc.genesisBlock.Hash()
	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
		if hash != genesis {
			return false
		}
	}
	return true
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)
	headBlockGauge.Update(int64(currentBlock.NumberU64()))

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))

	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
			headFastBlockGauge.Update(int64(block.NumberU64()))
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age",
		common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
		log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
	}
	return nil
}

// SetHead rewinds the local chain to a new head. Depending on whether the node
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
	_, err := bc.SetHeadBeyondRoot(head, common.Hash{})
	return err
}

func (bc *BlockChain) tryRewindBadBlocks() {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()
	block := bc.CurrentBlock()
	snaps := bc.snaps
	// Verified and the verification result is false
	if snaps != nil && snaps.Snapshot(block.Root()) != nil &&
		snaps.Snapshot(block.Root()).Verified() && !snaps.Snapshot(block.Root()).WaitAndGetVerifyRes() {
		// Rewind by one block
		log.Warn("current block failed verification, rewinding to its parent", "height", block.NumberU64(), "hash", block.Hash())
		bc.futureBlocks.Remove(block.Hash())
		bc.badBlockCache.Add(block.Hash(), time.Now())
		bc.diffLayerCache.Remove(block.Hash())
		bc.diffLayerRLPCache.Remove(block.Hash())
		bc.reportBlock(block, nil, errStateRootVerificationFailed)
		bc.setHeadBeyondRoot(block.NumberU64()-1, common.Hash{})
	}
}

// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
// the persistent disk layer. Depending on whether the node was fast synced or full,
// and in which state, the method will try to delete minimal data from disk whilst
// retaining chain consistency.
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()
	return bc.setHeadBeyondRoot(head, root)
}

func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
	// Track the block number of the requested root hash
	var rootNumber uint64 // (no root == always 0)

	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
	// current freezer limit to start nuking if underflown
	pivot := rawdb.ReadLastPivotNumber(bc.db)
	frozen, _ := bc.db.Ancients()

	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
		// Rewind the block chain, ensuring we don't end up with a stateless head
		// block. Note, depth equality is permitted to allow using SetHead as a
		// chain reparation mechanism without deleting any data!
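		// (updateFn is handed to hc.SetHead below; for each candidate header it
		// moves the head markers and returns the new head number plus whether
		// the freezer segment beyond it must be wiped.)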
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			lastBlockNum := header.Number.Uint64()
			if newHeadBlock == nil {
				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
				newHeadBlock = bc.genesisBlock
			} else {
				// Block exists, keep rewinding until we find one with state,
				// and until we exceed the optional threshold root hash
				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
				enoughBeyondCount := false
				beyondCount := 0
				for {
					beyondCount++
					// If a root threshold was requested but not yet crossed, check
					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
					}

					enoughBeyondCount = beyondCount > maxBeyondBlocks

					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
							parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
							if parent != nil {
								newHeadBlock = parent
								continue
							}
							log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
							newHeadBlock = bc.genesisBlock
						} else {
							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
							newHeadBlock = bc.genesisBlock
						}
					}
					if beyondRoot || (enoughBeyondCount && root != common.Hash{}) || newHeadBlock.NumberU64() == 0 {
						if enoughBeyondCount && (root != common.Hash{}) && rootNumber == 0 {
							for {
								lastBlockNum++
								block := bc.GetBlockByNumber(lastBlockNum)
								if block == nil {
									break
								}
								if block.Root() == root {
									rootNumber = block.NumberU64()
									break
								}
							}
						}
						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
						break
					}
					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
				}
			}
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update in-memory markers directly.
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
		}
		// Rewind the fast block in a simpleton way to the target head
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If either block reached nil, reset to the genesis state
			if newHeadFastBlock == nil {
				newHeadFastBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update in-memory markers directly.
			bc.currentFastBlock.Store(newHeadFastBlock)
			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
		}
		head := bc.CurrentBlock().NumberU64()

		// If setHead underflowed the freezer threshold and the block processing
		// intent afterwards is full block importing, delete the chain segment
		// between the stateful-block and the sethead target.
		var wipe bool
		if head+1 < frozen {
			wipe = pivot == nil || head >= *pivot
		}
		return head, wipe // Only force wipe if full synced
	}
	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// Truncate all relative data (header, total difficulty, body, receipt
			// and canonical hash) from the ancient store.
			if err := bc.db.TruncateAncients(num); err != nil {
				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
			}
			// Remove the hash <-> number mapping from the active store.
			rawdb.DeleteHeaderNumber(db, hash)
		} else {
			// Remove relative body and receipts from the active store.
			// The header, total difficulty and canonical hash will be
			// removed in the hc.SetHead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, bloombits, etc
	}
	// If SetHead was only called as a chain reparation method, try to skip
	// touching the header chain altogether, unless the freezer is broken
	if block := bc.CurrentBlock(); block.NumberU64() == head {
		if target, force := updateFn(bc.db, block.Header()); force {
			bc.hc.SetHead(target, updateFn, delFn)
		}
	} else {
		// Rewind the chain to the requested head and keep going backwards until a
		// block with a state is found or the fast sync pivot is passed
		log.Warn("Rewinding blockchain", "target", head)
		bc.hc.SetHead(head, updateFn, delFn)
	}
	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	return rootNumber, bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrespective of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x..]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.chainmu.Lock()
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
	bc.chainmu.Unlock()

	// Destroy any existing state snapshot and regenerate it in the background,
	// also resuming the normal maintenance of any previously paused snapshot.
	if bc.snaps != nil {
		bc.snaps.Rebuild(block.Root())
	}
	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// Snapshots returns the blockchain snapshot tree.
func (bc *BlockChain) Snapshots() *snapshot.Tree {
	return bc.snaps
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache, bc.snaps)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
	return bc.stateCache
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	batch := bc.db.NewBatch()
	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
	rawdb.WriteBlock(batch, genesis)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.writeHeadBlock(genesis)

	// Last, update all in-memory chain markers
	bc.genesisBlock = genesis
	bc.currentBlock.Store(bc.genesisBlock)
	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)
	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	return nil
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
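//
// Illustrative usage (error handling elided; the file name is a placeholder):
//
//	f, _ := os.Create("chain-segment.rlp")
//	defer f.Close()
//	if err := bc.ExportN(f, 0, bc.CurrentBlock().NumberU64()); err != nil {
//		log.Error("Export failed", "err", err)
//	}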
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}

// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	batch := bc.db.NewBatch()
	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
	rawdb.WriteTxLookupEntriesByBlock(batch, block)
	rawdb.WriteHeadBlockHash(batch, block.Hash())

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		rawdb.WriteHeadHeaderHash(batch, block.Hash())
		rawdb.WriteHeadFastBlockHash(batch, block.Hash())
	}
	// Flush the whole batch into the disk, exit the node if failed
	if err := batch.Write(); err != nil {
		log.Crit("Failed to update chain indexes and markers", "err", err)
	}
	// Update all in-memory chain markers in the last step
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		bc.currentFastBlock.Store(block)
		headFastBlockGauge.Update(int64(block.NumberU64()))
	}
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBody(bc.db, hash, *number)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// GetDiffLayerRLP retrieves a diff layer in RLP encoding from the cache or database by blockHash
func (bc *BlockChain) GetDiffLayerRLP(blockHash common.Hash) rlp.RawValue {
	// Short circuit if the diffLayer's already in the cache, retrieve otherwise
	if cached, ok := bc.diffLayerRLPCache.Get(blockHash); ok {
		return cached.(rlp.RawValue)
	}
	if cached, ok := bc.diffLayerCache.Get(blockHash); ok {
		diff := cached.(*types.DiffLayer)
		bz, err := rlp.EncodeToBytes(diff)
		if err != nil {
			return nil
		}
		bc.diffLayerRLPCache.Add(blockHash, rlp.RawValue(bz))
		return bz
	}

	// fallback to untrusted sources.
	diff := bc.GetUnTrustedDiffLayer(blockHash, "")
	if diff != nil {
		bz, err := rlp.EncodeToBytes(diff)
		if err != nil {
			return nil
		}
		// No need to cache untrusted data
		return bz
	}

	// fallback to disk
	diffStore := bc.db.DiffStore()
	if diffStore == nil {
		return nil
	}
	rawData := rawdb.ReadDiffLayerRLP(diffStore, blockHash)
	if len(rawData) != 0 {
		bc.diffLayerRLPCache.Add(blockHash, rawData)
	}
	return rawData
}

func (bc *BlockChain) GetDiffAccounts(blockHash common.Hash) ([]common.Address, error) {
	var (
		accounts  []common.Address
		diffLayer *types.DiffLayer
	)

	header := bc.GetHeaderByHash(blockHash)
	if header == nil {
		return nil, fmt.Errorf("no block found")
	}

	if cached, ok := bc.diffLayerCache.Get(blockHash); ok {
		diffLayer = cached.(*types.DiffLayer)
	} else if diffStore := bc.db.DiffStore(); diffStore != nil {
		diffLayer = rawdb.ReadDiffLayer(diffStore, blockHash)
	}

	if diffLayer == nil {
		if header.TxHash != types.EmptyRootHash {
			return nil, ErrDiffLayerNotFound
		}

		return nil, nil
	}

	for _, diffAccounts := range diffLayer.Accounts {
		accounts = append(accounts, diffAccounts.Account)
	}

	if header.TxHash != types.EmptyRootHash && len(accounts) == 0 {
		return nil, fmt.Errorf("no diff account in block, maybe bad diff layer")
	}

	return accounts, nil
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	return rawdb.HasBody(bc.db, hash, number)
}

// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
	if !bc.HasBlock(hash, number) {
		return false
	}
	if bc.receiptsCache.Contains(hash) {
		return true
	}
	return rawdb.HasReceipts(bc.db, hash, number)
}

// HasState checks if state trie is fully present in the database or not.
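// When pipelined commit is enabled, a state whose snapshot is still pending
// verification is also reported as present (see the pipeCommit check below).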
func (bc *BlockChain) HasState(hash common.Hash) bool {
	if bc.pipeCommit && bc.snaps != nil {
		// If the parent snap is pending verification, treat the state as existing
		if s := bc.snaps.Snapshot(hash); s != nil && !s.Verified() {
			return true
		}
	}
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
	if receipts == nil {
		return nil
	}
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.ContractCode(common.Hash{}, hash)
}

// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// the new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
	type codeReader interface {
		ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
	}
	return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	bc.StopInsert()
	bc.wg.Wait()

	// Ensure that the entirety of the state snapshot is journalled to disk.
	var snapBase common.Hash
	if bc.snaps != nil {
		var err error
		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
			log.Error("Failed to journal state snapshot", "err", err)
		}
	}
	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, bc.triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true, nil); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		if snapBase != (common.Hash{}) {
			log.Info("Writing snapshot state to disk", "root", snapBase)
			if err := triedb.Commit(snapBase, true, nil); err != nil {
				log.Error("Failed to commit recent state trie", "err", err)
			}
		}
		for !bc.triegc.Empty() {
			go triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	// Ensure all live cached entries are saved to disk, so that we can skip
	// cache warmup when the node restarts.
	if bc.cacheConfig.TrieCleanJournal != "" {
		triedb := bc.stateCache.TrieDB()
		triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
	}
	log.Info("Blockchain stopped")
}

// StopInsert interrupts all insertion methods, causing them to return
// errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
// calling this method.
func (bc *BlockChain) StopInsert() {
	atomic.StoreInt32(&bc.procInterrupt, 1)
}

// insertStopped returns true after StopInsert has been called.
func (bc *BlockChain) insertStopped() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		sort.Slice(blocks, func(i, j int) bool {
			return blocks[i].NumberU64() < blocks[j].NumberU64()
		})
		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status of a write operation.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// truncateAncient rewinds the blockchain to the specified header and deletes all
// data in the ancient store that exceeds the specified header.
func (bc *BlockChain) truncateAncient(head uint64) error {
	frozen, err := bc.db.Ancients()
	if err != nil {
		return err
	}
	// Short circuit if there is no data to truncate in the ancient store.
	if frozen <= head+1 {
		return nil
	}
	// Truncate all the data in the freezer beyond the specified head
	if err := bc.db.TruncateAncients(head + 1); err != nil {
		return err
	}
	// Clear out any stale content from the header chain caches
	bc.hc.headerCache.Purge()
	bc.hc.tdCache.Purge()
	bc.hc.numberCache.Purge()

	// Clear out any stale content from the block caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	log.Info("Rewind ancient data", "number", head)
	return nil
}

// numberHash is just a container for a number and a hash, to represent a block
type numberHash struct {
	number uint64
	hash   common.Hash
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
	// We don't require the chainMu here since we want to maximize the
	// concurrency of header insertion and receipt insertion.
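	// The wait group below keeps Stop from returning while this insert is in
	// flight, while the insertStopped checks inside the write closures let the
	// insert bail out early once StopInsert has been called.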
	bc.wg.Add(1)
	defer bc.wg.Done()

	var (
		ancientBlocks, liveBlocks     types.Blocks
		ancientReceipts, liveReceipts []types.Receipts
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 0; i < len(blockChain); i++ {
		if i != 0 {
			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(),
					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
			}
		}
		if blockChain[i].NumberU64() <= ancientLimit {
			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
		} else {
			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		size  = 0
	)
	// updateHead updates the head fast sync block if the inserted blocks are better
	// and returns an indicator whether the inserted blocks are canonical.
	updateHead := func(head *types.Block) bool {
		bc.chainmu.Lock()

		// Rewind may have occurred, skip in that case.
		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
			currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
			if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
				rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
				bc.currentFastBlock.Store(head)
				headFastBlockGauge.Update(int64(head.NumberU64()))
				bc.chainmu.Unlock()
				return true
			}
		}
		bc.chainmu.Unlock()
		return false
	}
	// writeAncient writes the blockchain and corresponding receipt chain into the ancient store.
	//
	// This function only accepts canonical chain data. All side chain data will be
	// reverted eventually.
	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
		var (
			previous = bc.CurrentFastBlock()
			batch    = bc.db.NewBatch()
		)
		// If any error occurs before updating the head or we are inserting a side chain,
		// all the data written this time will be rolled back.
		defer func() {
			if previous != nil {
				if err := bc.truncateAncient(previous.NumberU64()); err != nil {
					log.Crit("Truncate ancient store failed", "err", err)
				}
			}
		}()
		var deleted []*numberHash
		for i, block := range blockChain {
			// Short circuit insertion if shutting down or processing failed
			if bc.insertStopped() {
				return 0, errInsertionInterrupted
			}
			// Short circuit insertion if it is required (used in testing only)
			if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
				return i, errors.New("insertion is terminated for testing purpose")
			}
			// Short circuit if the owner header is unknown
			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
unknown", block.Number(), block.Hash().Bytes()[:4]) 1442 } 1443 if block.NumberU64() == 1 { 1444 // Make sure to write the genesis into the freezer 1445 if frozen, _ := bc.db.Ancients(); frozen == 0 { 1446 h := rawdb.ReadCanonicalHash(bc.db, 0) 1447 b := rawdb.ReadBlock(bc.db, h, 0) 1448 size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, 0, bc.chainConfig), rawdb.ReadTd(bc.db, h, 0)) 1449 log.Info("Wrote genesis to ancients") 1450 } 1451 } 1452 // Flush data into ancient database. 1453 size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64())) 1454 1455 // Write tx indices if any condition is satisfied: 1456 // * If user requires to reserve all tx indices(txlookuplimit=0) 1457 // * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit) 1458 // * If block number is large enough to be regarded as a recent block 1459 // It means blocks below the ancientLimit-txlookupLimit won't be indexed. 1460 // 1461 // But if the `TxIndexTail` is not nil, e.g. Geth is initialized with 1462 // an external ancient database, during the setup, blockchain will start 1463 // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients) 1464 // range. In this case, all tx indices of newly imported blocks should be 1465 // generated. 1466 if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit { 1467 rawdb.WriteTxLookupEntriesByBlock(batch, block) 1468 } else if rawdb.ReadTxIndexTail(bc.db) != nil { 1469 rawdb.WriteTxLookupEntriesByBlock(batch, block) 1470 } 1471 stats.processed++ 1472 } 1473 // Flush all tx-lookup index data. 1474 size += batch.ValueSize() 1475 if err := batch.Write(); err != nil { 1476 return 0, err 1477 } 1478 batch.Reset() 1479 1480 // Sync the ancient store explicitly to ensure all data has been flushed to disk. 1481 if err := bc.db.Sync(); err != nil { 1482 return 0, err 1483 } 1484 if !updateHead(blockChain[len(blockChain)-1]) { 1485 return 0, errors.New("side blocks can't be accepted as the ancient chain data") 1486 } 1487 previous = nil // disable rollback explicitly 1488 1489 // Wipe out canonical block data. 1490 for _, nh := range deleted { 1491 rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number) 1492 rawdb.DeleteCanonicalHash(batch, nh.number) 1493 } 1494 for _, block := range blockChain { 1495 // Always keep genesis block in active database. 1496 if block.NumberU64() != 0 { 1497 rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64()) 1498 rawdb.DeleteCanonicalHash(batch, block.NumberU64()) 1499 } 1500 } 1501 if err := batch.Write(); err != nil { 1502 return 0, err 1503 } 1504 batch.Reset() 1505 1506 // Wipe out side chain too. 1507 for _, nh := range deleted { 1508 for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) { 1509 rawdb.DeleteBlock(batch, hash, nh.number) 1510 } 1511 } 1512 for _, block := range blockChain { 1513 // Always keep genesis block in active database. 1514 if block.NumberU64() != 0 { 1515 for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) { 1516 rawdb.DeleteBlock(batch, hash, block.NumberU64()) 1517 } 1518 } 1519 } 1520 if err := batch.Write(); err != nil { 1521 return 0, err 1522 } 1523 return 0, nil 1524 } 1525 // writeLive writes blockchain and corresponding receipt chain into active store. 
1525 // writeLive writes the blockchain and corresponding receipt chain into the active store. 1526 writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 1527 skipPresenceCheck := false 1528 batch := bc.db.NewBatch() 1529 for i, block := range blockChain { 1530 // Short circuit insertion if shutting down or processing failed 1531 if bc.insertStopped() { 1532 return 0, errInsertionInterrupted 1533 } 1534 // Short circuit if the owner header is unknown 1535 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 1536 return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4]) 1537 } 1538 if !skipPresenceCheck { 1539 // Ignore if the entire data is already known 1540 if bc.HasBlock(block.Hash(), block.NumberU64()) { 1541 stats.ignored++ 1542 continue 1543 } else { 1544 // If block N is not present, neither are the later blocks. 1545 // This should be true, but if we are mistaken, the shortcut 1546 // here will only cause overwriting of some existing data 1547 skipPresenceCheck = true 1548 } 1549 } 1550 // Write all the data out into the database 1551 rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) 1552 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i]) 1553 rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed 1554 1555 // Write everything that belongs to the blocks into the database, so that 1556 // we can ensure all components of the body are complete (body, receipts, 1557 // tx indexes) 1558 if batch.ValueSize() >= ethdb.IdealBatchSize { 1559 if err := batch.Write(); err != nil { 1560 return 0, err 1561 } 1562 size += batch.ValueSize() 1563 batch.Reset() 1564 } 1565 stats.processed++ 1566 } 1567 // Write everything that belongs to the blocks into the database, so that 1568 // we can ensure all components of the body are complete (body, receipts, 1569 // tx indexes) 1570 if batch.ValueSize() > 0 { 1571 size += batch.ValueSize() 1572 if err := batch.Write(); err != nil { 1573 return 0, err 1574 } 1575 } 1576 updateHead(blockChain[len(blockChain)-1]) 1577 return 0, nil 1578 } 1579 // Write downloaded chain data and corresponding receipt chain data 1580 if len(ancientBlocks) > 0 { 1581 if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil { 1582 if err == errInsertionInterrupted { 1583 return 0, nil 1584 } 1585 return n, err 1586 } 1587 } 1588 // Write the tx index tail (block number from where we index) before writing any live blocks 1589 if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 { 1590 // The tx index tail can only be one of the following two options: 1591 // * 0: all ancient blocks have been indexed 1592 // * ancient-limit: the indices of blocks before ancient-limit are ignored 1593 if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil { 1594 if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit { 1595 rawdb.WriteTxIndexTail(bc.db, 0) 1596 } else { 1597 rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit) 1598 } 1599 } 1600 } 1601 if len(liveBlocks) > 0 { 1602 if n, err := writeLive(liveBlocks, liveReceipts); err != nil { 1603 if err == errInsertionInterrupted { 1604 return 0, nil 1605 } 1606 return n, err 1607 } 1608 } 1609 1610 head := blockChain[len(blockChain)-1] 1611 context := []interface{}{ 1612 "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), 1613 "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)), 1614 "size", common.StorageSize(size), 1615 } 1616 if stats.ignored > 0 { 1617
context = append(context, []interface{}{"ignored", stats.ignored}...) 1618 } 1619 log.Info("Imported new block receipts", context...) 1620 1621 return 0, nil 1622 } 1623 1624 // SetTxLookupLimit is responsible for updating the txlookup limit to the 1625 // original one stored in the db if the new one mismatches the old one. 1626 func (bc *BlockChain) SetTxLookupLimit(limit uint64) { 1627 bc.txLookupLimit = limit 1628 } 1629 1630 // TxLookupLimit retrieves the txlookup limit used by blockchain to prune 1631 // stale transaction indices. 1632 func (bc *BlockChain) TxLookupLimit() uint64 { 1633 return bc.txLookupLimit 1634 } 1635 1636 var lastWrite uint64 1637 1638 // writeBlockWithoutState writes only the block and its metadata to the database, 1639 // but does not write any state. This is used to construct competing side forks 1640 // up to the point where they exceed the canonical total difficulty. 1641 func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) { 1642 bc.wg.Add(1) 1643 defer bc.wg.Done() 1644 1645 batch := bc.db.NewBatch() 1646 rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td) 1647 rawdb.WriteBlock(batch, block) 1648 if err := batch.Write(); err != nil { 1649 log.Crit("Failed to write block into disk", "err", err) 1650 } 1651 return nil 1652 } 1653 1654 // writeKnownBlock updates the head block flag with a known block 1655 // and introduces a chain reorg if necessary. 1656 func (bc *BlockChain) writeKnownBlock(block *types.Block) error { 1657 bc.wg.Add(1) 1658 defer bc.wg.Done() 1659 1660 current := bc.CurrentBlock() 1661 if block.ParentHash() != current.Hash() { 1662 if err := bc.reorg(current, block); err != nil { 1663 return err 1664 } 1665 } 1666 bc.writeHeadBlock(block) 1667 return nil 1668 } 1669 1670 // WriteBlockWithState writes the block and all associated state to the database. 1671 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1672 bc.chainmu.Lock() 1673 defer bc.chainmu.Unlock() 1674 1675 return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent) 1676 } 1677 1678 // writeBlockWithState writes the block and all associated state to the database, 1679 // but it expects the chain mutex to be held. 1680 func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { 1681 bc.wg.Add(1) 1682 defer bc.wg.Done() 1683 1684 // Calculate the total difficulty of the block 1685 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 1686 if ptd == nil { 1687 return NonStatTy, consensus.ErrUnknownAncestor 1688 } 1689 // Make sure no inconsistent state is leaked during insertion 1690 currentBlock := bc.CurrentBlock() 1691 localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 1692 externTd := new(big.Int).Add(block.Difficulty(), ptd) 1693 1694 // Irrelevant of the canonical status, write the block itself to the database. 1695 // 1696 // Note all the components of a block (td, hash->number map, header, body, receipts) 1697 // should be written atomically. BlockBatch is used for containing all components.
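// A sketch of the ordering enforced below: the block payload is flushed on its
// own goroutine while the state trie is committed, and the wg.Wait() issued
// after state.Commit guarantees both are durable before the head is moved:
//
//	wg.Add(1)
//	go func() { defer wg.Done() /* batched block write */ }()
//	// ... state.Commit(...) ...
//	wg.Wait() // only now may writeHeadBlock/reorg run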
1698 wg := sync.WaitGroup{} 1699 wg.Add(1) 1700 go func() { 1701 blockBatch := bc.db.NewBatch() 1702 rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) 1703 rawdb.WriteBlock(blockBatch, block) 1704 rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) 1705 rawdb.WritePreimages(blockBatch, state.Preimages()) 1706 if err := blockBatch.Write(); err != nil { 1707 log.Crit("Failed to write block into disk", "err", err) 1708 } 1709 wg.Done() 1710 }() 1711 1712 tryCommitTrieDB := func() error { 1713 bc.commitLock.Lock() 1714 defer bc.commitLock.Unlock() 1715 1716 triedb := bc.stateCache.TrieDB() 1717 // If we're running an archive node, always flush 1718 if bc.cacheConfig.TrieDirtyDisabled { 1719 err := triedb.Commit(block.Root(), false, nil) 1720 if err != nil { 1721 return err 1722 } 1723 } else { 1724 // Full but not archive node, do proper garbage collection 1725 triedb.Reference(block.Root(), common.Hash{}) // metadata reference to keep trie alive 1726 bc.triegc.Push(block.Root(), -int64(block.NumberU64())) 1727 1728 if current := block.NumberU64(); current > bc.triesInMemory { 1729 // If we exceeded our memory allowance, flush matured singleton nodes to disk 1730 var ( 1731 nodes, imgs = triedb.Size() 1732 limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 1733 ) 1734 if nodes > limit || imgs > 4*1024*1024 { 1735 triedb.Cap(limit - ethdb.IdealBatchSize) 1736 } 1737 // Find the next state trie we need to commit 1738 chosen := current - bc.triesInMemory 1739 1740 // If we exceeded our time allowance, flush an entire trie to disk 1741 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 1742 canWrite := true 1743 if posa, ok := bc.engine.(consensus.PoSA); ok { 1744 if !posa.EnoughDistance(bc, block.Header()) { 1745 canWrite = false 1746 } 1747 } 1748 if canWrite { 1749 // If the header is missing (canonical chain behind), we're reorging a low 1750 // diff sidechain. Suspend committing until this operation is completed. 1751 header := bc.GetHeaderByNumber(chosen) 1752 if header == nil { 1753 log.Warn("Reorg in progress, trie commit postponed", "number", chosen) 1754 } else { 1755 // If we're exceeding limits but haven't reached a large enough memory gap, 1756 // warn the user that the system is becoming unstable. 1757 if chosen < lastWrite+bc.triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 1758 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/float64(bc.triesInMemory)) 1759 } 1760 // Flush an entire trie and restart the counters 1761 triedb.Commit(header.Root, true, nil) 1762 lastWrite = chosen 1763 bc.gcproc = 0 1764 } 1765 } 1766 } 1767 // Garbage collect anything below our required write retention 1768 for !bc.triegc.Empty() { 1769 root, number := bc.triegc.Pop() 1770 if uint64(-number) > chosen { 1771 bc.triegc.Push(root, number) 1772 break 1773 } 1774 go triedb.Dereference(root.(common.Hash)) 1775 } 1776 } 1777 } 1778 return nil 1779 } 1780
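// A note on the priority queue used above: block numbers are pushed negated,
// so the oldest root (smallest number, hence largest priority) pops first and
// uint64(-prio) recovers the height while draining. Sketch:
//
//	bc.triegc.Push(root, -int64(blockNum))
//	root, prio := bc.triegc.Pop() // oldest root first
//	height := uint64(-prio)
//
// Roots at heights <= chosen are dereferenced; newer ones are pushed back.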
1781 // Commit all cached state changes into the underlying memory database. 1782 _, diffLayer, err := state.Commit(bc.tryRewindBadBlocks, tryCommitTrieDB) 1783 if err != nil { 1784 return NonStatTy, err 1785 } 1786 1787 // Only cache the diff layer if the block body is not empty 1788 if diffLayer != nil && block.Header().TxHash != types.EmptyRootHash { 1789 // Fill in the necessary fields 1790 diffLayer.Receipts = receipts 1791 diffLayer.BlockHash = block.Hash() 1792 diffLayer.Number = block.NumberU64() 1793 bc.cacheDiffLayer(diffLayer) 1794 } 1795 1796 wg.Wait() 1797 1798 // If the total difficulty is higher than our known, add it to the canonical chain. 1799 // The second clause in the if statement reduces the vulnerability to selfish mining. 1800 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1801 reorg := externTd.Cmp(localTd) > 0 1802 currentBlock = bc.CurrentBlock() 1803 if !reorg && externTd.Cmp(localTd) == 0 { 1804 // Split same-difficulty blocks by number, then preferentially select 1805 // the block generated by the local miner as the canonical block. 1806 if block.NumberU64() < currentBlock.NumberU64() || block.Time() < currentBlock.Time() { 1807 reorg = true 1808 } else if p, ok := bc.engine.(consensus.PoSA); ok && p.IsLocalBlock(currentBlock.Header()) { 1809 reorg = true 1810 } else if block.NumberU64() == currentBlock.NumberU64() { 1811 var currentPreserve, blockPreserve bool 1812 if bc.shouldPreserve != nil { 1813 currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) 1814 } 1815 reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) 1816 } 1817 } 1818 if reorg { 1819 // Reorganise the chain if the parent is not the head block 1820 if block.ParentHash() != currentBlock.Hash() { 1821 if err := bc.reorg(currentBlock, block); err != nil { 1822 return NonStatTy, err 1823 } 1824 } 1825 status = CanonStatTy 1826 } else { 1827 status = SideStatTy 1828 } 1829 // Set new head. 1830 if status == CanonStatTy { 1831 bc.writeHeadBlock(block) 1832 } 1833 bc.futureBlocks.Remove(block.Hash()) 1834 1835 if status == CanonStatTy { 1836 bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs}) 1837 if len(logs) > 0 { 1838 bc.logsFeed.Send(logs) 1839 } 1840 // In theory we should fire a ChainHeadEvent when we inject 1841 // a canonical block, but sometimes we can insert a batch of 1842 // canonical blocks. To avoid firing too many ChainHeadEvents, 1843 // we fire an accumulated ChainHeadEvent instead and disable 1844 // firing the event here. 1845 if emitHeadEvent { 1846 bc.chainHeadFeed.Send(ChainHeadEvent{Block: block}) 1847 } 1848 } else { 1849 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1850 } 1851 return status, nil 1852 } 1853 1854 // addFutureBlock checks if the block is within the max allowed window to get 1855 // accepted for future processing, and returns an error if the block is too far 1856 // ahead and was not added. 1857 func (bc *BlockChain) addFutureBlock(block *types.Block) error { 1858 max := uint64(time.Now().Unix() + maxTimeFutureBlocks) 1859 if block.Time() > max { 1860 return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) 1861 } 1862 bc.futureBlocks.Add(block.Hash(), block) 1863 return nil 1864 } 1865 1866 // InsertChain attempts to insert the given batch of blocks into the canonical 1867 // chain or, otherwise, create a fork. If an error is returned, it will return 1868 // the index number of the failing block as well as an error describing what went 1869 // wrong. 1870 // 1871 // After insertion is done, all accumulated events will be fired.
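// A typical call site, as a sketch (the returned index points at the offending
// block whenever err is non-nil):
//
//	if n, err := bc.InsertChain(blocks); err != nil {
//		log.Error("Chain import failed", "index", n, "err", err)
//	}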
1872 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1873 // Sanity check that we have something meaningful to import 1874 if len(chain) == 0 { 1875 return 0, nil 1876 } 1877 1878 bc.blockProcFeed.Send(true) 1879 defer bc.blockProcFeed.Send(false) 1880 1881 // Remove already known canon-blocks 1882 var ( 1883 block, prev *types.Block 1884 ) 1885 // Do a sanity check that the provided chain is actually ordered and linked 1886 for i := 1; i < len(chain); i++ { 1887 block = chain[i] 1888 prev = chain[i-1] 1889 if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() { 1890 // Chain broke ancestry, log a message (programming error) and skip insertion 1891 log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(), 1892 "parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash()) 1893 1894 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(), 1895 prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4]) 1896 } 1897 } 1898 // Pre-checks passed, start the full block imports 1899 bc.wg.Add(1) 1900 bc.chainmu.Lock() 1901 n, err := bc.insertChain(chain, true) 1902 bc.chainmu.Unlock() 1903 bc.wg.Done() 1904 1905 return n, err 1906 } 1907 1908 // InsertChainWithoutSealVerification works exactly the same as InsertChain, 1909 // except that seal verification is omitted. 1910 func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (int, error) { 1911 bc.blockProcFeed.Send(true) 1912 defer bc.blockProcFeed.Send(false) 1913 1914 // Pre-checks passed, start the full block imports 1915 bc.wg.Add(1) 1916 bc.chainmu.Lock() 1917 n, err := bc.insertChain(types.Blocks([]*types.Block{block}), false) 1918 bc.chainmu.Unlock() 1919 bc.wg.Done() 1920 1921 return n, err 1922 } 1923 1924 // insertChain is the internal implementation of InsertChain, which assumes that 1925 // 1) chains are contiguous, and 2) the chain mutex is held. 1926 // 1927 // This method is split out so that import batches that require re-injecting 1928 // historical blocks can do so without releasing the lock, which could lead to 1929 // racey behaviour.
If a sidechain import is in progress, and the historic state 1930 // is imported, but then a new canon-head is added before the actual sidechain 1931 // completes, then the historic state could be pruned again. 1932 func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) { 1933 // If the chain is terminating, don't even bother starting up 1934 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1935 return 0, nil 1936 } 1937 // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) 1938 signer := types.MakeSigner(bc.chainConfig, chain[0].Number()) 1939 go senderCacher.recoverFromBlocks(signer, chain) 1940 1941 var ( 1942 stats = insertStats{startTime: mclock.Now()} 1943 lastCanon *types.Block 1944 ) 1945 // Fire a single chain head event if we've progressed the chain 1946 defer func() { 1947 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1948 bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon}) 1949 } 1950 }() 1951 // Start the parallel header verifier 1952 headers := make([]*types.Header, len(chain)) 1953 seals := make([]bool, len(chain)) 1954 1955 for i, block := range chain { 1956 headers[i] = block.Header() 1957 seals[i] = verifySeals 1958 } 1959 abort, results := bc.engine.VerifyHeaders(bc, headers, seals) 1960 defer close(abort) 1961 1962 // Peek the error for the first block to decide the directing import logic 1963 it := newInsertIterator(chain, results, bc.validator) 1964 1965 block, err := it.next() 1966 1967 // Left-trim all the known blocks 1968 if err == ErrKnownBlock { 1969 // First block (and state) is known 1970 // 1. We did a roll-back, and should now do a re-import 1971 // 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot 1972 // from the canonical chain, which has not been verified. 1973 // Skip all known blocks that are behind us 1974 var ( 1975 current = bc.CurrentBlock() 1976 localTd = bc.GetTd(current.Hash(), current.NumberU64()) 1977 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil 1978 ) 1979 for block != nil && err == ErrKnownBlock { 1980 externTd = new(big.Int).Add(externTd, block.Difficulty()) 1981 if localTd.Cmp(externTd) < 0 { 1982 break 1983 } 1984 log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash()) 1985 stats.ignored++ 1986 1987 block, err = it.next() 1988 } 1989 // The remaining blocks are still known blocks, the only scenario here is: 1990 // During the fast sync, the pivot point is already submitted but a rollback 1991 // happens. Then the node resets the head full block to a lower height via `rollback` 1992 // and leaves a few known blocks in the database. 1993 // 1994 // When the node runs a fast sync again, it can re-import a batch of known blocks via 1995 // `insertChain` while a part of them have a higher total difficulty than the current 1996 // head full block (new pivot point).
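// A worked example of the left-trim above, with illustrative numbers: if the
// local head TD is 1_000 and the first known block's parent TD is 990 with a
// per-block difficulty of 2, externTd walks 992, 994, ..., 1_000 and those
// blocks are skipped; at 1_002 the local TD is exceeded, the trim stops, and
// the remaining known blocks are written out by the loop below.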
1997 for block != nil && err == ErrKnownBlock { 1998 log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) 1999 if err := bc.writeKnownBlock(block); err != nil { 2000 return it.index, err 2001 } 2002 lastCanon = block 2003 2004 block, err = it.next() 2005 } 2006 // Falls through to the block import 2007 } 2008 switch { 2009 // First block is pruned, insert as sidechain and reorg only if TD grows enough 2010 case errors.Is(err, consensus.ErrPrunedAncestor): 2011 log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash()) 2012 return bc.insertSideChain(block, it) 2013 2014 // First block is future, shove it (and all children) to the future queue (unknown ancestor) 2015 case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())): 2016 for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) { 2017 log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash()) 2018 if err := bc.addFutureBlock(block); err != nil { 2019 return it.index, err 2020 } 2021 block, err = it.next() 2022 } 2023 stats.queued += it.processed() 2024 stats.ignored += it.remaining() 2025 2026 // If there are any still remaining, mark as ignored 2027 return it.index, err 2028 2029 // Some other error occurred, abort 2030 case err != nil: 2031 bc.futureBlocks.Remove(block.Hash()) 2032 stats.ignored += len(it.chain) 2033 bc.reportBlock(block, nil, err) 2034 return it.index, err 2035 } 2036 // No validation errors for the first block (or chain prefix skipped) 2037 var activeState *state.StateDB 2038 defer func() { 2039 // The chain importer is starting and stopping trie prefetchers. If a bad 2040 // block or other error is hit however, an early return may not properly 2041 // terminate the background threads. This defer ensures that we clean up 2042 // any dangling prefetcher, without deferring each and holding on to live refs. 2043 if activeState != nil { 2044 activeState.StopPrefetcher() 2045 } 2046 }() 2047 2048 for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() { 2049 // If the chain is terminating, stop processing blocks 2050 if bc.insertStopped() { 2051 log.Debug("Abort during block processing") 2052 break 2053 } 2054 // If the header is a banned one, straight out abort 2055 if BadHashes[block.Hash()] { 2056 bc.reportBlock(block, nil, ErrBlacklistedHash) 2057 return it.index, ErrBlacklistedHash 2058 } 2059 // If the block is known (in the middle of the chain), it's a special case for 2060 // Clique blocks where they can share state among each other, so importing an 2061 // older block might complete the state of the subsequent one. In this case, 2062 // just skip the block (we already validated it once fully (and crashed), since 2063 // its header and body were already in the database). 2064 if err == ErrKnownBlock { 2065 logger := log.Debug 2066 if bc.chainConfig.Clique == nil { 2067 logger = log.Warn 2068 } 2069 logger("Inserted known block", "number", block.Number(), "hash", block.Hash(), 2070 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), 2071 "root", block.Root()) 2072 2073 // Special case. Commit the empty receipt slice if we meet the known 2074 // block in the middle. It can only happen in the clique chain.
Whenever 2075 // we insert blocks via `insertSideChain`, we only commit `td`, `header` 2076 // and `body` if they are non-existent. Since we don't have receipts without 2077 // re-execution, there is nothing to commit. But if the sidechain is eventually adopted 2078 // as the canonical chain, it needs to be re-executed for the missing 2079 // state; in this special case here (skipping re-execution) we would lose 2080 // the empty receipt entry. 2081 if len(block.Transactions()) == 0 { 2082 rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil) 2083 } else { 2084 log.Error("Please file an issue, skip known block execution without receipt", 2085 "hash", block.Hash(), "number", block.NumberU64()) 2086 } 2087 if err := bc.writeKnownBlock(block); err != nil { 2088 return it.index, err 2089 } 2090 stats.processed++ 2091 2092 // We can assume that logs are empty here, since the only way for consecutive 2093 // Clique blocks to have the same state is if there are no transactions. 2094 lastCanon = block 2095 continue 2096 } 2097 // Retrieve the parent block and its state to execute on top 2098 start := time.Now() 2099 2100 parent := it.previous() 2101 if parent == nil { 2102 parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) 2103 } 2104 statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) 2105 if err != nil { 2106 return it.index, err 2107 } 2108 bc.updateHighestVerifiedHeader(block.Header()) 2109 2110 // Enable prefetching to pull in trie node paths while processing transactions 2111 statedb.StartPrefetcher("chain") 2112 var followupInterrupt uint32 2113 // For diff sync, it may fall back to full sync, so we still do the prefetch 2114 if len(block.Transactions()) >= prefetchTxNumber { 2115 throwaway := statedb.Copy() 2116 go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { 2117 bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) 2118 }(time.Now(), block, throwaway, &followupInterrupt) 2119 } 2120 // Process the block using the parent state as the reference point 2121 substart := time.Now() 2122 if bc.pipeCommit { 2123 statedb.EnablePipeCommit() 2124 } 2125 statedb.SetExpectedStateRoot(block.Root()) 2126 statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) 2127 atomic.StoreUint32(&followupInterrupt, 1) 2128 activeState = statedb 2129 if err != nil { 2130 bc.reportBlock(block, receipts, err) 2131 return it.index, err 2132 } 2133 // Update the metrics touched during block processing 2134 accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them 2135 storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them 2136 accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them 2137 storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them 2138 snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them 2139 snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them 2140 2141 blockExecutionTimer.Update(time.Since(substart)) 2142 2143 // Validate the state using the default validator 2144 substart = time.Now() 2145 if !statedb.IsLightProcessed() { 2146 if err := bc.validator.ValidateState(block, statedb, receipts, usedGas, bc.pipeCommit); err != nil { 2147 log.Error("validate state failed", "error", err) 2148 bc.reportBlock(block, receipts,
err) 2149 return it.index, err 2150 } 2151 } 2152 bc.cacheReceipts(block.Hash(), receipts) 2153 bc.cacheBlock(block.Hash(), block) 2154 proctime := time.Since(start) 2155 2156 // Update the metrics touched during block validation 2157 accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them 2158 storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them 2159 2160 blockValidationTimer.Update(time.Since(substart)) 2161 2162 // Write the block to the chain and get the status. 2163 substart = time.Now() 2164 status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false) 2165 if err != nil { 2166 return it.index, err 2167 } 2168 // Update the metrics touched during block commit 2169 accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them 2170 storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them 2171 snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them 2172 2173 blockWriteTimer.Update(time.Since(substart)) 2174 blockInsertTimer.UpdateSince(start) 2175 2176 switch status { 2177 case CanonStatTy: 2178 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), 2179 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), 2180 "elapsed", common.PrettyDuration(time.Since(start)), 2181 "root", block.Root()) 2182 2183 lastCanon = block 2184 2185 // Only count canonical blocks for GC processing time 2186 bc.gcproc += proctime 2187 2188 case SideStatTy: 2189 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), 2190 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 2191 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 2192 "root", block.Root()) 2193 2194 default: 2195 // This in theory is impossible, but let's be nice to our future selves and leave 2196 // a log, instead of trying to track down block imports that don't emit logs. 2197 log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(), 2198 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 2199 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 2200 "root", block.Root()) 2201 } 2202 stats.processed++ 2203 stats.usedGas += usedGas 2204 2205 dirty, _ := bc.stateCache.TrieDB().Size() 2206 stats.report(chain, it.index, dirty) 2207 } 2208 // Any blocks remaining here?
The only ones we care about are the future ones 2209 if block != nil && errors.Is(err, consensus.ErrFutureBlock) { 2210 if err := bc.addFutureBlock(block); err != nil { 2211 return it.index, err 2212 } 2213 block, err = it.next() 2214 2215 for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() { 2216 if err := bc.addFutureBlock(block); err != nil { 2217 return it.index, err 2218 } 2219 stats.queued++ 2220 } 2221 } 2222 stats.ignored += it.remaining() 2223 2224 return it.index, err 2225 } 2226 2227 func (bc *BlockChain) updateHighestVerifiedHeader(header *types.Header) { 2228 if header == nil || header.Number == nil { 2229 return 2230 } 2231 currentHeader := bc.highestVerifiedHeader.Load().(*types.Header) 2232 if currentHeader == nil { 2233 bc.highestVerifiedHeader.Store(types.CopyHeader(header)) 2234 return 2235 } 2236 2237 newParentTD := bc.GetTdByHash(header.ParentHash) 2238 if newParentTD == nil { 2239 newParentTD = big.NewInt(0) 2240 } 2241 oldParentTD := bc.GetTdByHash(currentHeader.ParentHash) 2242 if oldParentTD == nil { 2243 oldParentTD = big.NewInt(0) 2244 } 2245 newTD := big.NewInt(0).Add(newParentTD, header.Difficulty) 2246 oldTD := big.NewInt(0).Add(oldParentTD, currentHeader.Difficulty) 2247 2248 if newTD.Cmp(oldTD) > 0 { 2249 bc.highestVerifiedHeader.Store(types.CopyHeader(header)) 2250 return 2251 } 2252 } 2253 2254 func (bc *BlockChain) GetHighestVerifiedHeader() *types.Header { 2255 return bc.highestVerifiedHeader.Load().(*types.Header) 2256 } 2257 2258 // insertSideChain is called when an import batch hits upon a pruned ancestor 2259 // error, which happens when a sidechain with a sufficiently old fork-block is 2260 // found. 2261 // 2262 // The method writes all (header-and-body-valid) blocks to disk, then tries to 2263 // switch over to the new chain if the TD exceeded the current chain. 2264 func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) { 2265 var ( 2266 externTd *big.Int 2267 current = bc.CurrentBlock() 2268 ) 2269 // The first sidechain block error is already verified to be ErrPrunedAncestor. 2270 // Since we don't import them here, we expect ErrUnknownAncestor for the remaining 2271 // ones. Any other error means that the block is invalid, and should not be written 2272 // to disk. 2273 err := consensus.ErrPrunedAncestor 2274 for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() { 2275 // Check the canonical state root for that number 2276 if number := block.NumberU64(); current.NumberU64() >= number { 2277 canonical := bc.GetBlockByNumber(number) 2278 if canonical != nil && canonical.Hash() == block.Hash() { 2279 // Not a sidechain block, this is a re-import of a canon block which has its state pruned 2280 2281 // Collect the TD of the block. Since we know it's a canon one, 2282 // we can get it directly, and not (like further below) use 2283 // the parent and then add the block on top 2284 externTd = bc.GetTd(block.Hash(), block.NumberU64()) 2285 continue 2286 } 2287 if canonical != nil && canonical.Root() == block.Root() { 2288 // This is most likely a shadow-state attack. When a fork is imported into the 2289 // database, and it eventually reaches a block height which is not pruned, we 2290 // just found that the state already exists! This means that the sidechain block 2291 // refers to a state which already exists in our canon chain.
2292 // 2293 // If left unchecked, we would now proceed importing the blocks, without actually 2294 // having verified the state of the previous blocks. 2295 log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root()) 2296 2297 // If someone legitimately side-mines blocks, they would still be imported as usual. However, 2298 // we cannot risk writing unverified blocks to disk when they obviously target the pruning 2299 // mechanism. 2300 return it.index, errors.New("sidechain ghost-state attack") 2301 } 2302 } 2303 if externTd == nil { 2304 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) 2305 } 2306 externTd = new(big.Int).Add(externTd, block.Difficulty()) 2307 2308 if !bc.HasBlock(block.Hash(), block.NumberU64()) { 2309 start := time.Now() 2310 if err := bc.writeBlockWithoutState(block, externTd); err != nil { 2311 return it.index, err 2312 } 2313 log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(), 2314 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 2315 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 2316 "root", block.Root()) 2317 } 2318 } 2319 // At this point, we've written all sidechain blocks to database. Loop ended 2320 // either on some other error or all were processed. If there was some other 2321 // error, we can ignore the rest of those blocks. 2322 // 2323 // If the externTd was larger than our local TD, we now need to reimport the previous 2324 // blocks to regenerate the required state 2325 localTd := bc.GetTd(current.Hash(), current.NumberU64()) 2326 if localTd.Cmp(externTd) > 0 { 2327 log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd) 2328 return it.index, err 2329 } 2330 // Gather all the sidechain hashes (full blocks may be memory heavy) 2331 var ( 2332 hashes []common.Hash 2333 numbers []uint64 2334 ) 2335 parent := it.previous() 2336 for parent != nil && !bc.HasState(parent.Root) { 2337 hashes = append(hashes, parent.Hash()) 2338 numbers = append(numbers, parent.Number.Uint64()) 2339 2340 parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1) 2341 } 2342 if parent == nil { 2343 return it.index, errors.New("missing parent") 2344 } 2345 // Import all the pruned blocks to make the state available 2346 var ( 2347 blocks []*types.Block 2348 memory common.StorageSize 2349 ) 2350 for i := len(hashes) - 1; i >= 0; i-- { 2351 // Append the next block to our batch 2352 block := bc.GetBlock(hashes[i], numbers[i]) 2353 2354 blocks = append(blocks, block) 2355 memory += block.Size() 2356 2357 // If memory use grew too large, import and continue. Sadly we need to discard 2358 // all raised events and logs from notifications since we're too heavy on the 2359 // memory here. 
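// A sketch of the batching policy enforced below: pending segments are
// flushed once either cap is hit, so the import holds at most ~2048 blocks or
// ~64 MiB of block data in memory at a time:
//
//	if len(blocks) >= 2048 || memory > 64*1024*1024 { /* flush via insertChain */ }
//
// Blocks were gathered walking parent links head-first, hence the reverse loop
// above replays them oldest-first to rebuild state bottom-up.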
2360 if len(blocks) >= 2048 || memory > 64*1024*1024 { 2361 log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64()) 2362 if _, err := bc.insertChain(blocks, false); err != nil { 2363 return 0, err 2364 } 2365 blocks, memory = blocks[:0], 0 2366 2367 // If the chain is terminating, stop processing blocks 2368 if bc.insertStopped() { 2369 log.Debug("Abort during blocks processing") 2370 return 0, nil 2371 } 2372 } 2373 } 2374 if len(blocks) > 0 { 2375 log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64()) 2376 return bc.insertChain(blocks, false) 2377 } 2378 return 0, nil 2379 } 2380 2381 // reorg takes two blocks, an old chain and a new chain, reconstructs the 2382 // blocks, inserts them to be part of the new canonical chain, and accumulates 2383 // potentially missing transactions, posting an event about them. 2384 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 2385 var ( 2386 newChain types.Blocks 2387 oldChain types.Blocks 2388 commonBlock *types.Block 2389 2390 deletedTxs types.Transactions 2391 addedTxs types.Transactions 2392 2393 deletedLogs [][]*types.Log 2394 rebirthLogs [][]*types.Log 2395 2396 // collectLogs collects the logs that were generated or removed during 2397 // the processing of the block that corresponds with the given hash. 2398 // These logs are later announced as deleted or reborn. 2399 collectLogs = func(hash common.Hash, removed bool) { 2400 number := bc.hc.GetBlockNumber(hash) 2401 if number == nil { 2402 return 2403 } 2404 receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) 2405 2406 var logs []*types.Log 2407 for _, receipt := range receipts { 2408 for _, log := range receipt.Logs { 2409 l := *log 2410 if removed { 2411 l.Removed = true 2412 } 2413 logs = append(logs, &l) 2414 } 2415 } 2416 if len(logs) > 0 { 2417 if removed { 2418 deletedLogs = append(deletedLogs, logs) 2419 } else { 2420 rebirthLogs = append(rebirthLogs, logs) 2421 } 2422 } 2423 } 2424 // mergeLogs returns a merged log slice with the specified sort order. 2425 mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log { 2426 var ret []*types.Log 2427 if reverse { 2428 for i := len(logs) - 1; i >= 0; i-- { 2429 ret = append(ret, logs[i]...) 2430 } 2431 } else { 2432 for i := 0; i < len(logs); i++ { 2433 ret = append(ret, logs[i]...) 2434 } 2435 } 2436 return ret 2437 } 2438 ) 2439 // Reduce the longer chain to the same number as the shorter one 2440 if oldBlock.NumberU64() > newBlock.NumberU64() { 2441 // Old chain is longer, gather all transactions and logs as deleted ones 2442 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 2443 oldChain = append(oldChain, oldBlock) 2444 deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
2445 collectLogs(oldBlock.Hash(), true) 2446 } 2447 } else { 2448 // New chain is longer, stash all blocks away for subsequent insertion 2449 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 2450 newChain = append(newChain, newBlock) 2451 } 2452 } 2453 if oldBlock == nil { 2454 return fmt.Errorf("invalid old chain") 2455 } 2456 if newBlock == nil { 2457 return fmt.Errorf("invalid new chain") 2458 } 2459 // Both sides of the reorg are at the same number, reduce both until the common 2460 // ancestor is found 2461 for { 2462 // If the common ancestor was found, bail out 2463 if oldBlock.Hash() == newBlock.Hash() { 2464 commonBlock = oldBlock 2465 break 2466 } 2467 // Remove an old block as well as stash away a new block 2468 oldChain = append(oldChain, oldBlock) 2469 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 2470 collectLogs(oldBlock.Hash(), true) 2471 2472 newChain = append(newChain, newBlock) 2473 2474 // Step back with both chains 2475 oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) 2476 if oldBlock == nil { 2477 return fmt.Errorf("invalid old chain") 2478 } 2479 newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 2480 if newBlock == nil { 2481 return fmt.Errorf("invalid new chain") 2482 } 2483 } 2484 // Ensure the user sees large reorgs 2485 if len(oldChain) > 0 && len(newChain) > 0 { 2486 logFn := log.Info 2487 msg := "Chain reorg detected" 2488 if len(oldChain) > 63 { 2489 msg = "Large chain reorg detected" 2490 logFn = log.Warn 2491 } 2492 logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), 2493 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 2494 blockReorgAddMeter.Mark(int64(len(newChain))) 2495 blockReorgDropMeter.Mark(int64(len(oldChain))) 2496 blockReorgMeter.Mark(1) 2497 } else { 2498 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 2499 } 2500 // Insert the new chain (except the head block, in reverse order), 2501 // taking care of the proper incremental order. 2502 for i := len(newChain) - 1; i >= 1; i-- { 2503 // Insert the block in the canonical way, re-writing history 2504 bc.writeHeadBlock(newChain[i]) 2505 2506 // Collect reborn logs due to chain reorg 2507 collectLogs(newChain[i].Hash(), false) 2508 2509 // Collect the newly added transactions. 2510 addedTxs = append(addedTxs, newChain[i].Transactions()...) 2511 } 2512 // Delete useless indexes right now, which include the non-canonical 2513 // transaction indexes and canonical chain indexes above the new head. 2514 indexesBatch := bc.db.NewBatch() 2515 for _, tx := range types.TxDifference(deletedTxs, addedTxs) { 2516 rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash()) 2517 } 2518 // Delete any canonical number assignments above the new head 2519 number := bc.CurrentBlock().NumberU64() 2520 for i := number + 1; ; i++ { 2521 hash := rawdb.ReadCanonicalHash(bc.db, i) 2522 if hash == (common.Hash{}) { 2523 break 2524 } 2525 rawdb.DeleteCanonicalHash(indexesBatch, i) 2526 } 2527 if err := indexesBatch.Write(); err != nil { 2528 log.Crit("Failed to delete useless indexes", "err", err) 2529 } 2530 // If any logs need to be fired, do it now.
In theory we could avoid creating 2531 // this goroutine if there are no events to fire, but realistically that only 2532 // ever happens if we're reorging empty blocks, which will only happen on idle 2533 // networks where performance is not an issue either way. 2534 if len(deletedLogs) > 0 { 2535 bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) 2536 } 2537 if len(rebirthLogs) > 0 { 2538 bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) 2539 } 2540 if len(oldChain) > 0 { 2541 for i := len(oldChain) - 1; i >= 0; i-- { 2542 bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) 2543 } 2544 } 2545 return nil 2546 } 2547 2548 func (bc *BlockChain) update() { 2549 futureTimer := time.NewTicker(5 * time.Second) 2550 defer futureTimer.Stop() 2551 for { 2552 select { 2553 case <-futureTimer.C: 2554 bc.procFutureBlocks() 2555 case <-bc.quit: 2556 return 2557 } 2558 } 2559 } 2560 2561 func (bc *BlockChain) rewindInvalidHeaderBlockLoop() { 2562 recheck := time.NewTicker(rewindBadBlockInterval) 2563 defer recheck.Stop() 2564 for { 2565 select { 2566 case <-recheck.C: 2567 bc.tryRewindBadBlocks() 2568 case <-bc.quit: 2569 return 2570 } 2571 } 2572 } 2573 2574 func (bc *BlockChain) trustedDiffLayerLoop() { 2575 recheck := time.NewTicker(diffLayerFreezerRecheckInterval) 2576 bc.wg.Add(1) 2577 defer func() { 2578 bc.wg.Done() 2579 recheck.Stop() 2580 }() 2581 for { 2582 select { 2583 case diff := <-bc.diffQueueBuffer: 2584 bc.diffQueue.Push(diff, -(int64(diff.Number))) 2585 case <-bc.quit: 2586 // Persist all diffLayers on shutdown; it will introduce redundant storage, but that is acceptable. 2587 // If the client is shut down ungracefully, it will lose all cached diff layers, which is acceptable as well. 2588 var batch ethdb.Batch 2589 for !bc.diffQueue.Empty() { 2590 diff, _ := bc.diffQueue.Pop() 2591 diffLayer := diff.(*types.DiffLayer) 2592 if batch == nil { 2593 batch = bc.db.DiffStore().NewBatch() 2594 } 2595 rawdb.WriteDiffLayer(batch, diffLayer.BlockHash, diffLayer) 2596 if batch.ValueSize() > ethdb.IdealBatchSize { 2597 if err := batch.Write(); err != nil { 2598 log.Error("Failed to write diff layer", "err", err) 2599 return 2600 } 2601 batch.Reset() 2602 } 2603 } 2604 if batch != nil { 2605 // flush data 2606 if err := batch.Write(); err != nil { 2607 log.Error("Failed to write diff layer", "err", err) 2608 return 2609 } 2610 batch.Reset() 2611 } 2612 return 2613 case <-recheck.C: 2614 currentHeight := bc.CurrentBlock().NumberU64() 2615 var batch ethdb.Batch 2616 for !bc.diffQueue.Empty() { 2617 diff, prio := bc.diffQueue.Pop() 2618 diffLayer := diff.(*types.DiffLayer) 2619 2620 // if the block is not old enough 2621 if int64(currentHeight)+prio < int64(bc.triesInMemory) { 2622 bc.diffQueue.Push(diffLayer, prio) 2623 break 2624 } 2625 canonicalHash := bc.GetCanonicalHash(uint64(-prio)) 2626 // on the canonical chain 2627 if canonicalHash == diffLayer.BlockHash { 2628 if batch == nil { 2629 batch = bc.db.DiffStore().NewBatch() 2630 } 2631 rawdb.WriteDiffLayer(batch, diffLayer.BlockHash, diffLayer) 2632 staleHash := bc.GetCanonicalHash(uint64(-prio) - bc.diffLayerFreezerBlockLimit) 2633 rawdb.DeleteDiffLayer(batch, staleHash) 2634 } 2635 if batch != nil && batch.ValueSize() > ethdb.IdealBatchSize { 2636 if err := batch.Write(); err != nil { 2637 panic(fmt.Sprintf("Failed to write diff layer, error %v", err)) 2638 } 2639 batch.Reset() 2640 } 2641 } 2642 if batch != nil { 2643 if err := batch.Write(); err != nil { 2644 panic(fmt.Sprintf("Failed to write diff layer, error %v", err))
2645 } 2646 batch.Reset() 2647 } 2648 } 2649 } 2650 } 2651 2652 func (bc *BlockChain) GetUnTrustedDiffLayer(blockHash common.Hash, pid string) *types.DiffLayer { 2653 bc.diffMux.RLock() 2654 defer bc.diffMux.RUnlock() 2655 if diffs, exist := bc.blockHashToDiffLayers[blockHash]; exist && len(diffs) != 0 { 2656 if len(diffs) == 1 { 2657 // return the only diff layer 2658 for _, diff := range diffs { 2659 return diff 2660 } 2661 } 2662 // pick the one from the exact same peer if we know where the block comes from 2663 if pid != "" { 2664 if diffHashes, exist := bc.diffPeersToDiffHashes[pid]; exist { 2665 for diff := range diffs { 2666 if _, overlap := diffHashes[diff]; overlap { 2667 return bc.blockHashToDiffLayers[blockHash][diff] 2668 } 2669 } 2670 } 2671 } 2672 // No overlap found, pick one at random 2673 for _, diff := range diffs { 2674 return diff 2675 } 2676 } 2677 return nil 2678 } 2679 2680 func (bc *BlockChain) removeDiffLayers(diffHash common.Hash) { 2681 bc.diffMux.Lock() 2682 defer bc.diffMux.Unlock() 2683 2684 // Untrusted peers 2685 pids := bc.diffHashToPeers[diffHash] 2686 invalidDiffHashes := make(map[common.Hash]struct{}) 2687 for pid := range pids { 2688 invalidDiffHashesPeer := bc.diffPeersToDiffHashes[pid] 2689 for invalidDiffHash := range invalidDiffHashesPeer { 2690 invalidDiffHashes[invalidDiffHash] = struct{}{} 2691 } 2692 delete(bc.diffPeersToDiffHashes, pid) 2693 } 2694 for invalidDiffHash := range invalidDiffHashes { 2695 delete(bc.diffHashToPeers, invalidDiffHash) 2696 affectedBlockHash := bc.diffHashToBlockHash[invalidDiffHash] 2697 if diffs, exist := bc.blockHashToDiffLayers[affectedBlockHash]; exist { 2698 delete(diffs, invalidDiffHash) 2699 if len(diffs) == 0 { 2700 delete(bc.blockHashToDiffLayers, affectedBlockHash) 2701 } 2702 } 2703 delete(bc.diffHashToBlockHash, invalidDiffHash) 2704 } 2705 } 2706 2707 func (bc *BlockChain) untrustedDiffLayerPruneLoop() { 2708 recheck := time.NewTicker(diffLayerPruneRecheckInterval) 2709 bc.wg.Add(1) 2710 defer func() { 2711 bc.wg.Done() 2712 recheck.Stop() 2713 }() 2714 for { 2715 select { 2716 case <-bc.quit: 2717 return 2718 case <-recheck.C: 2719 bc.pruneDiffLayer() 2720 } 2721 } 2722 } 2723 2724 func (bc *BlockChain) pruneDiffLayer() { 2725 currentHeight := bc.CurrentBlock().NumberU64() 2726 bc.diffMux.Lock() 2727 defer bc.diffMux.Unlock() 2728 sortNumbers := make([]uint64, 0, len(bc.diffNumToBlockHashes)) 2729 for number := range bc.diffNumToBlockHashes { 2730 sortNumbers = append(sortNumbers, number) 2731 } 2732 sort.Slice(sortNumbers, func(i, j int) bool { 2733 return sortNumbers[i] < sortNumbers[j] // strict "less", as sort.Slice expects 2734 }) 2735 staleBlockHashes := make(map[common.Hash]struct{}) 2736 for _, number := range sortNumbers { 2737 if number >= currentHeight-maxDiffForkDist { 2738 break 2739 } 2740 affectedHashes := bc.diffNumToBlockHashes[number] 2741 if affectedHashes != nil { 2742 for affectedHash := range affectedHashes { 2743 staleBlockHashes[affectedHash] = struct{}{} 2744 } 2745 delete(bc.diffNumToBlockHashes, number) 2746 } 2747 } 2748 staleDiffHashes := make(map[common.Hash]struct{}) 2749 for blockHash := range staleBlockHashes { 2750 if diffHashes, exist := bc.blockHashToDiffLayers[blockHash]; exist { 2751 for diffHash := range diffHashes { 2752 staleDiffHashes[diffHash] = struct{}{} 2753 delete(bc.diffHashToBlockHash, diffHash) 2754 delete(bc.diffHashToPeers, diffHash) 2755 } 2756 } 2757 delete(bc.blockHashToDiffLayers, blockHash) 2758 } 2759 for diffHash := range staleDiffHashes { 2760 for p, diffHashes := range
bc.diffPeersToDiffHashes { 2761 delete(diffHashes, diffHash) 2762 if len(diffHashes) == 0 { 2763 delete(bc.diffPeersToDiffHashes, p) 2764 } 2765 } 2766 } 2767 } 2768 2769 // HandleDiffLayer processes received diff layers. 2770 func (bc *BlockChain) HandleDiffLayer(diffLayer *types.DiffLayer, pid string, fulfilled bool) error { 2771 // Basic check 2772 currentHeight := bc.CurrentBlock().NumberU64() 2773 if diffLayer.Number > currentHeight && diffLayer.Number-currentHeight > maxDiffQueueDist { 2774 log.Debug("diff layers too new from current", "pid", pid) 2775 return nil 2776 } 2777 if diffLayer.Number < currentHeight && currentHeight-diffLayer.Number > maxDiffForkDist { 2778 log.Debug("diff layers too old from current", "pid", pid) 2779 return nil 2780 } 2781 2782 bc.diffMux.Lock() 2783 defer bc.diffMux.Unlock() 2784 if blockHash, exist := bc.diffHashToBlockHash[diffLayer.DiffHash]; exist && blockHash == diffLayer.BlockHash { 2785 return nil 2786 } 2787 2788 if !fulfilled && len(bc.diffPeersToDiffHashes[pid]) > maxDiffLimitForBroadcast { 2789 log.Debug("too many accumulated diffLayers", "pid", pid) 2790 return nil 2791 } 2792 2793 if len(bc.diffPeersToDiffHashes[pid]) > maxDiffLimit { 2794 log.Debug("too many accumulated diffLayers", "pid", pid) 2795 return nil 2796 } 2797 if _, exist := bc.diffPeersToDiffHashes[pid]; exist { 2798 if _, alreadyHas := bc.diffPeersToDiffHashes[pid][diffLayer.DiffHash]; alreadyHas { 2799 return nil 2800 } 2801 } else { 2802 bc.diffPeersToDiffHashes[pid] = make(map[common.Hash]struct{}) 2803 } 2804 bc.diffPeersToDiffHashes[pid][diffLayer.DiffHash] = struct{}{} 2805 if _, exist := bc.diffNumToBlockHashes[diffLayer.Number]; !exist { 2806 bc.diffNumToBlockHashes[diffLayer.Number] = make(map[common.Hash]struct{}) 2807 } 2808 bc.diffNumToBlockHashes[diffLayer.Number][diffLayer.BlockHash] = struct{}{} 2809 2810 if _, exist := bc.diffHashToPeers[diffLayer.DiffHash]; !exist { 2811 bc.diffHashToPeers[diffLayer.DiffHash] = make(map[string]struct{}) 2812 } 2813 bc.diffHashToPeers[diffLayer.DiffHash][pid] = struct{}{} 2814 2815 if _, exist := bc.blockHashToDiffLayers[diffLayer.BlockHash]; !exist { 2816 bc.blockHashToDiffLayers[diffLayer.BlockHash] = make(map[common.Hash]*types.DiffLayer) 2817 } 2818 bc.blockHashToDiffLayers[diffLayer.BlockHash][diffLayer.DiffHash] = diffLayer 2819 bc.diffHashToBlockHash[diffLayer.DiffHash] = diffLayer.BlockHash 2820 2821 return nil 2822 } 2823 2824 // maintainTxIndex is responsible for the construction and deletion of the 2825 // transaction index. 2826 // 2827 // The user can use the flag `txlookuplimit` to specify a "recentness" block, below 2828 // which ancient tx indices get deleted. If `txlookuplimit` is 0, it means 2829 // all tx indices will be reserved. 2830 // 2831 // The user can adjust the txlookuplimit value for each launch after fast 2832 // sync; Geth will automatically construct the missing indices and delete 2833 // the extra indices. 2834 func (bc *BlockChain) maintainTxIndex(ancients uint64) { 2835 defer bc.wg.Done() 2836 2837 // Before starting the actual maintenance, we need to handle a special case, 2838 // where the user might init Geth with an external ancient database. If so, we 2839 // need to reindex all necessary transactions before starting to process any 2840 // pruning requests.
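// The window arithmetic below, as a sketch: with an external freezer holding
// `ancients` blocks and a non-zero limit L, only the most recent L ancient
// blocks are indexed:
//
//	from := uint64(0)
//	if limit != 0 && ancients > limit {
//		from = ancients - limit // e.g. ancients=1_000_000, L=128 -> from=999_872
//	}
//
// With L == 0 everything from genesis gets indexed.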
2841 if ancients > 0 { 2842 var from = uint64(0) 2843 if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit { 2844 from = ancients - bc.txLookupLimit 2845 } 2846 rawdb.IndexTransactions(bc.db, from, ancients, bc.quit) 2847 } 2848 // indexBlocks reindexes or unindexes transactions depending on user configuration 2849 indexBlocks := func(tail *uint64, head uint64, done chan struct{}) { 2850 defer func() { done <- struct{}{} }() 2851 2852 // If the user just upgraded Geth to a new version which supports transaction 2853 // index pruning, write the new tail and remove anything older. 2854 if tail == nil { 2855 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2856 // Nothing to delete, write the tail and return 2857 rawdb.WriteTxIndexTail(bc.db, 0) 2858 } else { 2859 // Prune all stale tx indices and record the tx index tail 2860 rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1, bc.quit) 2861 } 2862 return 2863 } 2864 // If a previous indexing existed, make sure that we fill in any missing entries 2865 if bc.txLookupLimit == 0 || head < bc.txLookupLimit { 2866 if *tail > 0 { 2867 rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit) 2868 } 2869 return 2870 } 2871 // Update the transaction index to the new chain state 2872 if head-bc.txLookupLimit+1 < *tail { 2873 // Reindex a part of missing indices and rewind index tail to HEAD-limit 2874 rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit) 2875 } else { 2876 // Unindex a part of stale indices and forward index tail to HEAD-limit 2877 rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit) 2878 } 2879 } 2880 // Any reindexing done, start listening to chain events and moving the index window 2881 var ( 2882 done chan struct{} // Non-nil if background unindexing or reindexing routine is active. 2883 headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed 2884 ) 2885 sub := bc.SubscribeChainHeadEvent(headCh) 2886 if sub == nil { 2887 return 2888 } 2889 defer sub.Unsubscribe() 2890 2891 for { 2892 select { 2893 case head := <-headCh: 2894 if done == nil { 2895 done = make(chan struct{}) 2896 go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done) 2897 } 2898 case <-done: 2899 done = nil 2900 case <-bc.quit: 2901 if done != nil { 2902 log.Info("Waiting background transaction indexer to exit") 2903 <-done 2904 } 2905 return 2906 } 2907 } 2908 } 2909 2910 func (bc *BlockChain) isCachedBadBlock(block *types.Block) bool { 2911 if timeAt, exist := bc.badBlockCache.Get(block.Hash()); exist { 2912 putAt := timeAt.(time.Time) 2913 if time.Since(putAt) >= badBlockCacheExpire { 2914 bc.badBlockCache.Remove(block.Hash()) 2915 return false 2916 } 2917 return true 2918 } 2919 return false 2920 } 2921 2922 // reportBlock logs a bad block error. 
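// The cache check in isCachedBadBlock above implements a simple TTL: entries
// older than badBlockCacheExpire (a constant defined elsewhere in this
// package) are evicted on read. An equivalent sketch:
//
//	if t, ok := bc.badBlockCache.Get(hash); ok {
//		if time.Since(t.(time.Time)) < badBlockCacheExpire {
//			return true // still quarantined
//		}
//		bc.badBlockCache.Remove(hash) // expired, forget it
//	}
//	return false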
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	rawdb.WriteBadBlock(bc.db, block)

	var receiptString string
	for i, receipt := range receipts {
		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v

Number: %v
Hash: 0x%x
%v

Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return
// the index number of the failing header as well as an error describing what
// went wrong.
//
// The checkFreq parameter can be used to fine-tune whether nonce verification
// should be done or not. The reason behind the optional check is that some
// of the header retrieval mechanisms already need to verify nonces, and
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()
	_, err := bc.hc.InsertHeaderChain(chain, start)
	return 0, err
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
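// Note that header presence does not imply that the corresponding block body
// or receipts are also available locally.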
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetCanonicalHash returns the canonical hash for a given block number.
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
	return bc.hc.GetCanonicalHash(number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// GetTransactionLookup retrieves the lookup entry associated with the given
// transaction hash from the cache or database.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
	// Short circuit if the txlookup is already in the cache, retrieve it otherwise
	if lookup, exist := bc.txLookupCache.Get(hash); exist {
		return lookup.(*rawdb.LegacyTxLookupEntry)
	}
	tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
	if tx == nil {
		return nil
	}
	lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
	bc.txLookupCache.Add(hash, lookup)
	return lookup
}

// TriesInMemory returns the number of tries kept in memory before old state
// is garbage-collected.
func (bc *BlockChain) TriesInMemory() uint64 { return bc.triesInMemory }

// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
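// A typical consumer (sketch; handleSideBlock is a hypothetical handler)
// drains the channel and watches the subscription's error channel:
//
//	ch := make(chan ChainSideEvent, 16)
//	sub := bc.SubscribeChainSideEvent(ch)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case ev := <-ch:
//			handleSideBlock(ev.Block)
//		case <-sub.Err():
//			return
//		}
//	}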
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}

// Options are functional hooks that tweak a BlockChain at construction time.

// EnableLightProcessor switches the chain over to the light state processor,
// which can replay blocks from received diff layers instead of re-executing
// every transaction.
func EnableLightProcessor(bc *BlockChain) *BlockChain {
	bc.processor = NewLightStateProcessor(bc.Config(), bc, bc.engine)
	return bc
}

// EnablePipelineCommit enables pipelined state verification and commit during
// block import.
func EnablePipelineCommit(bc *BlockChain) *BlockChain {
	bc.pipeCommit = true
	return bc
}

// EnablePersistDiff returns an option that persists the diff layers of the
// most recent `limit` blocks into the diff freezer.
func EnablePersistDiff(limit uint64) BlockChainOption {
	return func(chain *BlockChain) *BlockChain {
		chain.diffLayerFreezerBlockLimit = limit
		return chain
	}
}
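// Usage sketch (illustrative): the options above are meant to be passed to
// NewBlockChain, which applies each BlockChainOption to the freshly
// constructed chain. The surrounding arguments here are placeholders.
//
//	chain, err := NewBlockChain(db, cacheConfig, chainConfig, engine, vmConfig,
//		shouldPreserve, &txLookupLimit,
//		EnableLightProcessor, EnablePipelineCommit, EnablePersistDiff(864000))
//	if err != nil {
//		log.Crit("Failed to create blockchain", "err", err)
//	}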