github.com/Goplush/go-ethereum@v0.0.0-20191031044858-21506be82b68/core/blockchain.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package core implements the Ethereum consensus protocol. 18 package core 19 20 import ( 21 "errors" 22 "fmt" 23 "io" 24 "math/big" 25 mrand "math/rand" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/Fantom-foundation/go-ethereum/common" 31 "github.com/Fantom-foundation/go-ethereum/common/mclock" 32 "github.com/Fantom-foundation/go-ethereum/common/prque" 33 "github.com/Fantom-foundation/go-ethereum/consensus" 34 "github.com/Fantom-foundation/go-ethereum/core/rawdb" 35 "github.com/Fantom-foundation/go-ethereum/core/state" 36 "github.com/Fantom-foundation/go-ethereum/core/types" 37 "github.com/Fantom-foundation/go-ethereum/core/vm" 38 "github.com/Fantom-foundation/go-ethereum/ethdb" 39 "github.com/Fantom-foundation/go-ethereum/event" 40 "github.com/Fantom-foundation/go-ethereum/log" 41 "github.com/Fantom-foundation/go-ethereum/metrics" 42 "github.com/Fantom-foundation/go-ethereum/params" 43 "github.com/Fantom-foundation/go-ethereum/rlp" 44 "github.com/Fantom-foundation/go-ethereum/trie" 45 "github.com/hashicorp/golang-lru" 46 ) 47 48 var ( 49 
headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil) 50 headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil) 51 headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil) 52 53 accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil) 54 accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil) 55 accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil) 56 accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil) 57 58 storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil) 59 storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil) 60 storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil) 61 storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil) 62 63 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 64 blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) 65 blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) 66 blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) 67 blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) 68 blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) 69 70 blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil) 71 blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil) 72 73 errInsertionInterrupted = errors.New("insertion is interrupted") 74 ) 75 76 const ( 77 bodyCacheLimit = 256 78 blockCacheLimit = 256 79 receiptsCacheLimit = 32 80 txLookupCacheLimit = 1024 81 maxFutureBlocks = 256 82 maxTimeFutureBlocks = 30 83 badBlockLimit = 10 84 TriesInMemory = 128 85 86 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 
87 // 88 // Changelog: 89 // 90 // - Version 4 91 // The following incompatible database changes were added: 92 // * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted 93 // * the `Bloom` field of receipt is deleted 94 // * the `BlockIndex` and `TxIndex` fields of txlookup are deleted 95 // - Version 5 96 // The following incompatible database changes were added: 97 // * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt 98 // * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the 99 // receipts' corresponding block 100 // - Version 6 101 // The following incompatible database changes were added: 102 // * Transaction lookup information stores the corresponding block number instead of block hash 103 // - Version 7 104 // The following incompatible database changes were added: 105 // * Use freezer as the ancient database to maintain all ancient data 106 BlockChainVersion uint64 = 7 107 ) 108 109 // CacheConfig contains the configuration values for the trie caching/pruning 110 // that's resident in a blockchain. 111 type CacheConfig struct { 112 TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory 113 TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks 114 TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk 115 TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node) 116 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 117 } 118 119 // BlockChain represents the canonical chain given a database with a genesis 120 // block. The Blockchain manages chain imports, reverts, chain reorganisations. 121 // 122 // Importing blocks in to the block chain happens according to the set of rules 123 // defined by the two stage Validator. 
Processing of blocks is done using the 124 // Processor which processes the included transaction. The validation of the state 125 // is done in the second part of the Validator. Failing results in aborting of 126 // the import. 127 // 128 // The BlockChain also helps in returning blocks from **any** chain included 129 // in the database as well as blocks that represents the canonical chain. It's 130 // important to note that GetBlock can return any block and does not need to be 131 // included in the canonical one where as GetBlockByNumber always represents the 132 // canonical chain. 133 type BlockChain struct { 134 chainConfig *params.ChainConfig // Chain & network configuration 135 cacheConfig *CacheConfig // Cache configuration for pruning 136 137 db ethdb.Database // Low level persistent database to store final content in 138 triegc *prque.Prque // Priority queue mapping block numbers to tries to gc 139 gcproc time.Duration // Accumulates canonical block processing for trie dumping 140 141 hc *HeaderChain 142 rmLogsFeed event.Feed 143 chainFeed event.Feed 144 chainSideFeed event.Feed 145 chainHeadFeed event.Feed 146 logsFeed event.Feed 147 blockProcFeed event.Feed 148 scope event.SubscriptionScope 149 genesisBlock *types.Block 150 151 chainmu sync.RWMutex // blockchain insertion lock 152 153 currentBlock atomic.Value // Current head of the block chain 154 currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) 155 156 stateCache state.Database // State database to reuse between imports (contains state cache) 157 bodyCache *lru.Cache // Cache for the most recent block bodies 158 bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format 159 receiptsCache *lru.Cache // Cache for the most recent receipts per block 160 blockCache *lru.Cache // Cache for the most recent entire blocks 161 txLookupCache *lru.Cache // Cache for the most recent transaction lookup data. 
162 futureBlocks *lru.Cache // future blocks are blocks added for later processing 163 164 quit chan struct{} // blockchain quit channel 165 running int32 // running must be called atomically 166 // procInterrupt must be atomically called 167 procInterrupt int32 // interrupt signaler for block processing 168 wg sync.WaitGroup // chain processing wait group for shutting down 169 170 engine consensus.Engine 171 validator Validator // Block and state validator interface 172 prefetcher Prefetcher // Block state prefetcher interface 173 processor Processor // Block transaction processor interface 174 vmConfig vm.Config 175 176 badBlocks *lru.Cache // Bad block cache 177 shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. 178 terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion. 179 } 180 181 // NewBlockChain returns a fully initialised block chain using information 182 // available in the database. It initialises the default Ethereum Validator and 183 // Processor. 
184 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) { 185 if cacheConfig == nil { 186 cacheConfig = &CacheConfig{ 187 TrieCleanLimit: 256, 188 TrieDirtyLimit: 256, 189 TrieTimeLimit: 5 * time.Minute, 190 } 191 } 192 bodyCache, _ := lru.New(bodyCacheLimit) 193 bodyRLPCache, _ := lru.New(bodyCacheLimit) 194 receiptsCache, _ := lru.New(receiptsCacheLimit) 195 blockCache, _ := lru.New(blockCacheLimit) 196 txLookupCache, _ := lru.New(txLookupCacheLimit) 197 futureBlocks, _ := lru.New(maxFutureBlocks) 198 badBlocks, _ := lru.New(badBlockLimit) 199 200 bc := &BlockChain{ 201 chainConfig: chainConfig, 202 cacheConfig: cacheConfig, 203 db: db, 204 triegc: prque.New(nil), 205 stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit), 206 quit: make(chan struct{}), 207 shouldPreserve: shouldPreserve, 208 bodyCache: bodyCache, 209 bodyRLPCache: bodyRLPCache, 210 receiptsCache: receiptsCache, 211 blockCache: blockCache, 212 txLookupCache: txLookupCache, 213 futureBlocks: futureBlocks, 214 engine: engine, 215 vmConfig: vmConfig, 216 badBlocks: badBlocks, 217 } 218 bc.validator = NewBlockValidator(chainConfig, bc, engine) 219 bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) 220 bc.processor = NewStateProcessor(chainConfig, bc, engine) 221 222 var err error 223 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 224 if err != nil { 225 return nil, err 226 } 227 bc.genesisBlock = bc.GetBlockByNumber(0) 228 if bc.genesisBlock == nil { 229 return nil, ErrNoGenesis 230 } 231 232 var nilBlock *types.Block 233 bc.currentBlock.Store(nilBlock) 234 bc.currentFastBlock.Store(nilBlock) 235 236 // Initialize the chain with ancient data if it isn't empty. 
237 if bc.empty() { 238 rawdb.InitDatabaseFromFreezer(bc.db) 239 } 240 241 if err := bc.loadLastState(); err != nil { 242 return nil, err 243 } 244 // The first thing the node will do is reconstruct the verification data for 245 // the head block (ethash cache or clique voting snapshot). Might as well do 246 // it in advance. 247 bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true) 248 249 if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 { 250 var ( 251 needRewind bool 252 low uint64 253 ) 254 // The head full block may be rolled back to a very low height due to 255 // blockchain repair. If the head full block is even lower than the ancient 256 // chain, truncate the ancient store. 257 fullBlock := bc.CurrentBlock() 258 if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 { 259 needRewind = true 260 low = fullBlock.NumberU64() 261 } 262 // In fast sync, it may happen that ancient data has been written to the 263 // ancient store, but the LastFastBlock has not been updated, truncate the 264 // extra data here. 
265 fastBlock := bc.CurrentFastBlock() 266 if fastBlock != nil && fastBlock.NumberU64() < frozen-1 { 267 needRewind = true 268 if fastBlock.NumberU64() < low || low == 0 { 269 low = fastBlock.NumberU64() 270 } 271 } 272 if needRewind { 273 var hashes []common.Hash 274 previous := bc.CurrentHeader().Number.Uint64() 275 for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ { 276 hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i)) 277 } 278 bc.Rollback(hashes) 279 log.Warn("Truncate ancient chain", "from", previous, "to", low) 280 } 281 } 282 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 283 for hash := range BadHashes { 284 if header := bc.GetHeaderByHash(hash); header != nil { 285 // get the canonical block corresponding to the offending header's number 286 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 287 // make sure the headerByNumber (if present) is in our current canonical chain 288 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 289 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 290 bc.SetHead(header.Number.Uint64() - 1) 291 log.Error("Chain rewind was successful, resuming normal operation") 292 } 293 } 294 } 295 // Take ownership of this particular state 296 go bc.update() 297 return bc, nil 298 } 299 300 func (bc *BlockChain) getProcInterrupt() bool { 301 return atomic.LoadInt32(&bc.procInterrupt) == 1 302 } 303 304 // GetVMConfig returns the block chain VM config. 305 func (bc *BlockChain) GetVMConfig() *vm.Config { 306 return &bc.vmConfig 307 } 308 309 // empty returns an indicator whether the blockchain is empty. 310 // Note, it's a special case that we connect a non-empty ancient 311 // database with an empty node, so that we can plugin the ancient 312 // into node seamlessly. 
313 func (bc *BlockChain) empty() bool { 314 genesis := bc.genesisBlock.Hash() 315 for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} { 316 if hash != genesis { 317 return false 318 } 319 } 320 return true 321 } 322 323 // loadLastState loads the last known chain state from the database. This method 324 // assumes that the chain manager mutex is held. 325 func (bc *BlockChain) loadLastState() error { 326 // Restore the last known head block 327 head := rawdb.ReadHeadBlockHash(bc.db) 328 if head == (common.Hash{}) { 329 // Corrupt or empty database, init from scratch 330 log.Warn("Empty database, resetting chain") 331 return bc.Reset() 332 } 333 // Make sure the entire head block is available 334 currentBlock := bc.GetBlockByHash(head) 335 if currentBlock == nil { 336 // Corrupt or empty database, init from scratch 337 log.Warn("Head block missing, resetting chain", "hash", head) 338 return bc.Reset() 339 } 340 // Make sure the state associated with the block is available 341 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 342 // Dangling block without a state associated, init from scratch 343 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 344 if err := bc.repair(¤tBlock); err != nil { 345 return err 346 } 347 rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()) 348 } 349 // Everything seems to be fine, set as the head block 350 bc.currentBlock.Store(currentBlock) 351 headBlockGauge.Update(int64(currentBlock.NumberU64())) 352 353 // Restore the last known head header 354 currentHeader := currentBlock.Header() 355 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 356 if header := bc.GetHeaderByHash(head); header != nil { 357 currentHeader = header 358 } 359 } 360 bc.hc.SetCurrentHeader(currentHeader) 361 362 // Restore the last known head fast block 363 
bc.currentFastBlock.Store(currentBlock) 364 headFastBlockGauge.Update(int64(currentBlock.NumberU64())) 365 366 if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 367 if block := bc.GetBlockByHash(head); block != nil { 368 bc.currentFastBlock.Store(block) 369 headFastBlockGauge.Update(int64(block.NumberU64())) 370 } 371 } 372 // Issue a status log for the user 373 currentFastBlock := bc.CurrentFastBlock() 374 375 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 376 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 377 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 378 379 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) 380 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) 381 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) 382 383 return nil 384 } 385 386 // SetHead rewinds the local chain to a new head. In the case of headers, everything 387 // above the new head will be deleted and the new one set. In the case of blocks 388 // though, the head may be further rewound if block bodies are missing (non-archive 389 // nodes after a fast sync). 
390 func (bc *BlockChain) SetHead(head uint64) error { 391 log.Warn("Rewinding blockchain", "target", head) 392 393 bc.chainmu.Lock() 394 defer bc.chainmu.Unlock() 395 396 updateFn := func(db ethdb.KeyValueWriter, header *types.Header) { 397 // Rewind the block chain, ensuring we don't end up with a stateless head block 398 if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() { 399 newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) 400 if newHeadBlock == nil { 401 newHeadBlock = bc.genesisBlock 402 } else { 403 if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil { 404 // Rewound state missing, rolled back to before pivot, reset to genesis 405 newHeadBlock = bc.genesisBlock 406 } 407 } 408 rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash()) 409 bc.currentBlock.Store(newHeadBlock) 410 headBlockGauge.Update(int64(newHeadBlock.NumberU64())) 411 } 412 413 // Rewind the fast block in a simpleton way to the target head 414 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() { 415 newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) 416 // If either blocks reached nil, reset to the genesis state 417 if newHeadFastBlock == nil { 418 newHeadFastBlock = bc.genesisBlock 419 } 420 rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash()) 421 bc.currentFastBlock.Store(newHeadFastBlock) 422 headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64())) 423 } 424 } 425 426 // Rewind the header chain, deleting all block bodies until then 427 delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) { 428 // Ignore the error here since light client won't hit this path 429 frozen, _ := bc.db.Ancients() 430 if num+1 <= frozen { 431 // Truncate all relative data(header, total difficulty, body, receipt 432 // and canonical hash) from ancient store. 
433 if err := bc.db.TruncateAncients(num + 1); err != nil { 434 log.Crit("Failed to truncate ancient data", "number", num, "err", err) 435 } 436 437 // Remove the hash <-> number mapping from the active store. 438 rawdb.DeleteHeaderNumber(db, hash) 439 } else { 440 // Remove relative body and receipts from the active store. 441 // The header, total difficulty and canonical hash will be 442 // removed in the hc.SetHead function. 443 rawdb.DeleteBody(db, hash, num) 444 rawdb.DeleteReceipts(db, hash, num) 445 } 446 // Todo(rjl493456442) txlookup, bloombits, etc 447 } 448 bc.hc.SetHead(head, updateFn, delFn) 449 450 // Clear out any stale content from the caches 451 bc.bodyCache.Purge() 452 bc.bodyRLPCache.Purge() 453 bc.receiptsCache.Purge() 454 bc.blockCache.Purge() 455 bc.txLookupCache.Purge() 456 bc.futureBlocks.Purge() 457 458 return bc.loadLastState() 459 } 460 461 // FastSyncCommitHead sets the current head block to the one defined by the hash 462 // irrelevant what the chain contents were prior. 463 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { 464 // Make sure that both the block as well at its state trie exists 465 block := bc.GetBlockByHash(hash) 466 if block == nil { 467 return fmt.Errorf("non existent block [%x…]", hash[:4]) 468 } 469 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil { 470 return err 471 } 472 // If all checks out, manually set the head block 473 bc.chainmu.Lock() 474 bc.currentBlock.Store(block) 475 headBlockGauge.Update(int64(block.NumberU64())) 476 bc.chainmu.Unlock() 477 478 log.Info("Committed new head block", "number", block.Number(), "hash", hash) 479 return nil 480 } 481 482 // GasLimit returns the gas limit of the current HEAD block. 483 func (bc *BlockChain) GasLimit() uint64 { 484 return bc.CurrentBlock().GasLimit() 485 } 486 487 // CurrentBlock retrieves the current head block of the canonical chain. The 488 // block is retrieved from the blockchain's internal cache. 
489 func (bc *BlockChain) CurrentBlock() *types.Block { 490 return bc.currentBlock.Load().(*types.Block) 491 } 492 493 // CurrentFastBlock retrieves the current fast-sync head block of the canonical 494 // chain. The block is retrieved from the blockchain's internal cache. 495 func (bc *BlockChain) CurrentFastBlock() *types.Block { 496 return bc.currentFastBlock.Load().(*types.Block) 497 } 498 499 // Validator returns the current validator. 500 func (bc *BlockChain) Validator() Validator { 501 return bc.validator 502 } 503 504 // Processor returns the current processor. 505 func (bc *BlockChain) Processor() Processor { 506 return bc.processor 507 } 508 509 // State returns a new mutable state based on the current HEAD block. 510 func (bc *BlockChain) State() (*state.StateDB, error) { 511 return bc.StateAt(bc.CurrentBlock().Root()) 512 } 513 514 // StateAt returns a new mutable state based on a particular point in time. 515 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 516 return state.New(root, bc.stateCache) 517 } 518 519 // StateCache returns the caching database underpinning the blockchain instance. 520 func (bc *BlockChain) StateCache() state.Database { 521 return bc.stateCache 522 } 523 524 // Reset purges the entire blockchain, restoring it to its genesis state. 525 func (bc *BlockChain) Reset() error { 526 return bc.ResetWithGenesisBlock(bc.genesisBlock) 527 } 528 529 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 530 // specified genesis state. 
531 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 532 // Dump the entire block chain and purge the caches 533 if err := bc.SetHead(0); err != nil { 534 return err 535 } 536 bc.chainmu.Lock() 537 defer bc.chainmu.Unlock() 538 539 // Prepare the genesis block and reinitialise the chain 540 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 541 log.Crit("Failed to write genesis block TD", "err", err) 542 } 543 rawdb.WriteBlock(bc.db, genesis) 544 545 bc.genesisBlock = genesis 546 bc.insert(bc.genesisBlock) 547 bc.currentBlock.Store(bc.genesisBlock) 548 headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) 549 550 bc.hc.SetGenesis(bc.genesisBlock.Header()) 551 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 552 bc.currentFastBlock.Store(bc.genesisBlock) 553 headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) 554 555 return nil 556 } 557 558 // repair tries to repair the current blockchain by rolling back the current block 559 // until one with associated state is found. This is needed to fix incomplete db 560 // writes caused either by crashes/power outages, or simply non-committed tries. 561 // 562 // This method only rolls back the current block. The current header and current 563 // fast block are left intact. 
564 func (bc *BlockChain) repair(head **types.Block) error { 565 for { 566 // Abort if we've rewound to a head block that does have associated state 567 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 568 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 569 return nil 570 } 571 // Otherwise rewind one block and recheck state availability there 572 block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 573 if block == nil { 574 return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash()) 575 } 576 *head = block 577 } 578 } 579 580 // Export writes the active chain to the given writer. 581 func (bc *BlockChain) Export(w io.Writer) error { 582 return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) 583 } 584 585 // ExportN writes a subset of the active chain to the given writer. 586 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 587 bc.chainmu.RLock() 588 defer bc.chainmu.RUnlock() 589 590 if first > last { 591 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 592 } 593 log.Info("Exporting batch of blocks", "count", last-first+1) 594 595 start, reported := time.Now(), time.Now() 596 for nr := first; nr <= last; nr++ { 597 block := bc.GetBlockByNumber(nr) 598 if block == nil { 599 return fmt.Errorf("export failed on #%d: not found", nr) 600 } 601 if err := block.EncodeRLP(w); err != nil { 602 return err 603 } 604 if time.Since(reported) >= statsReportLimit { 605 log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) 606 reported = time.Now() 607 } 608 } 609 return nil 610 } 611 612 // insert injects a new head block into the current block chain. This method 613 // assumes that the block is indeed a true head. 
It will also reset the head 614 // header and the head fast sync block to this very same block if they are older 615 // or if they are on a different side chain. 616 // 617 // Note, this function assumes that the `mu` mutex is held! 618 func (bc *BlockChain) insert(block *types.Block) { 619 // If the block is on a side chain or an unknown one, force other heads onto it too 620 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 621 622 // Add the block to the canonical chain number scheme and mark as the head 623 rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) 624 rawdb.WriteHeadBlockHash(bc.db, block.Hash()) 625 626 bc.currentBlock.Store(block) 627 headBlockGauge.Update(int64(block.NumberU64())) 628 629 // If the block is better than our head or is on a different chain, force update heads 630 if updateHeads { 631 bc.hc.SetCurrentHeader(block.Header()) 632 rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) 633 634 bc.currentFastBlock.Store(block) 635 headFastBlockGauge.Update(int64(block.NumberU64())) 636 } 637 } 638 639 // Genesis retrieves the chain's genesis block. 640 func (bc *BlockChain) Genesis() *types.Block { 641 return bc.genesisBlock 642 } 643 644 // GetBody retrieves a block body (transactions and uncles) from the database by 645 // hash, caching it if found. 646 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 647 // Short circuit if the body's already in the cache, retrieve otherwise 648 if cached, ok := bc.bodyCache.Get(hash); ok { 649 body := cached.(*types.Body) 650 return body 651 } 652 number := bc.hc.GetBlockNumber(hash) 653 if number == nil { 654 return nil 655 } 656 body := rawdb.ReadBody(bc.db, hash, *number) 657 if body == nil { 658 return nil 659 } 660 // Cache the found body for next time and return 661 bc.bodyCache.Add(hash, body) 662 return body 663 } 664 665 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 666 // caching it if found. 
667 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 668 // Short circuit if the body's already in the cache, retrieve otherwise 669 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 670 return cached.(rlp.RawValue) 671 } 672 number := bc.hc.GetBlockNumber(hash) 673 if number == nil { 674 return nil 675 } 676 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 677 if len(body) == 0 { 678 return nil 679 } 680 // Cache the found body for next time and return 681 bc.bodyRLPCache.Add(hash, body) 682 return body 683 } 684 685 // HasBlock checks if a block is fully present in the database or not. 686 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 687 if bc.blockCache.Contains(hash) { 688 return true 689 } 690 return rawdb.HasBody(bc.db, hash, number) 691 } 692 693 // HasFastBlock checks if a fast block is fully present in the database or not. 694 func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { 695 if !bc.HasBlock(hash, number) { 696 return false 697 } 698 if bc.receiptsCache.Contains(hash) { 699 return true 700 } 701 return rawdb.HasReceipts(bc.db, hash, number) 702 } 703 704 // HasState checks if state trie is fully present in the database or not. 705 func (bc *BlockChain) HasState(hash common.Hash) bool { 706 _, err := bc.stateCache.OpenTrie(hash) 707 return err == nil 708 } 709 710 // HasBlockAndState checks if a block and associated state trie is fully present 711 // in the database or not, caching it if present. 712 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 713 // Check first that the block itself is known 714 block := bc.GetBlock(hash, number) 715 if block == nil { 716 return false 717 } 718 return bc.HasState(block.Root()) 719 } 720 721 // GetBlock retrieves a block from the database by hash and number, 722 // caching it if found. 
723 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 724 // Short circuit if the block's already in the cache, retrieve otherwise 725 if block, ok := bc.blockCache.Get(hash); ok { 726 return block.(*types.Block) 727 } 728 block := rawdb.ReadBlock(bc.db, hash, number) 729 if block == nil { 730 return nil 731 } 732 // Cache the found block for next time and return 733 bc.blockCache.Add(block.Hash(), block) 734 return block 735 } 736 737 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 738 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 739 number := bc.hc.GetBlockNumber(hash) 740 if number == nil { 741 return nil 742 } 743 return bc.GetBlock(hash, *number) 744 } 745 746 // GetBlockByNumber retrieves a block from the database by number, caching it 747 // (associated with its hash) if found. 748 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 749 hash := rawdb.ReadCanonicalHash(bc.db, number) 750 if hash == (common.Hash{}) { 751 return nil 752 } 753 return bc.GetBlock(hash, number) 754 } 755 756 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 757 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 758 if receipts, ok := bc.receiptsCache.Get(hash); ok { 759 return receipts.(types.Receipts) 760 } 761 number := rawdb.ReadHeaderNumber(bc.db, hash) 762 if number == nil { 763 return nil 764 } 765 receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) 766 if receipts == nil { 767 return nil 768 } 769 bc.receiptsCache.Add(hash, receipts) 770 return receipts 771 } 772 773 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 
774 // [deprecated by eth/62] 775 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 776 number := bc.hc.GetBlockNumber(hash) 777 if number == nil { 778 return nil 779 } 780 for i := 0; i < n; i++ { 781 block := bc.GetBlock(hash, *number) 782 if block == nil { 783 break 784 } 785 blocks = append(blocks, block) 786 hash = block.ParentHash() 787 *number-- 788 } 789 return 790 } 791 792 // GetUnclesInChain retrieves all the uncles from a given block backwards until 793 // a specific distance is reached. 794 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 795 uncles := []*types.Header{} 796 for i := 0; block != nil && i < length; i++ { 797 uncles = append(uncles, block.Uncles()...) 798 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 799 } 800 return uncles 801 } 802 803 // TrieNode retrieves a blob of data associated with a trie node (or code hash) 804 // either from ephemeral in-memory cache, or from persistent storage. 805 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 806 return bc.stateCache.TrieDB().Node(hash) 807 } 808 809 // Stop stops the blockchain service. If any imports are currently in progress 810 // it will abort them using the procInterrupt. 811 func (bc *BlockChain) Stop() { 812 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 813 return 814 } 815 // Unsubscribe all subscriptions registered from blockchain 816 bc.scope.Close() 817 close(bc.quit) 818 atomic.StoreInt32(&bc.procInterrupt, 1) 819 820 bc.wg.Wait() 821 822 // Ensure the state of a recent block is also stored to disk before exiting. 
823 // We're writing three different states to catch different restart scenarios: 824 // - HEAD: So we don't need to reprocess any blocks in the general case 825 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 826 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 827 if !bc.cacheConfig.TrieDirtyDisabled { 828 triedb := bc.stateCache.TrieDB() 829 830 for _, offset := range []uint64{0, 1, TriesInMemory - 1} { 831 if number := bc.CurrentBlock().NumberU64(); number > offset { 832 recent := bc.GetBlockByNumber(number - offset) 833 834 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 835 if err := triedb.Commit(recent.Root(), true); err != nil { 836 log.Error("Failed to commit recent state trie", "err", err) 837 } 838 } 839 } 840 for !bc.triegc.Empty() { 841 triedb.Dereference(bc.triegc.PopItem().(common.Hash)) 842 } 843 if size, _ := triedb.Size(); size != 0 { 844 log.Error("Dangling trie nodes after full cleanup") 845 } 846 } 847 log.Info("Blockchain manager stopped") 848 } 849 850 func (bc *BlockChain) procFutureBlocks() { 851 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 852 for _, hash := range bc.futureBlocks.Keys() { 853 if block, exist := bc.futureBlocks.Peek(hash); exist { 854 blocks = append(blocks, block.(*types.Block)) 855 } 856 } 857 if len(blocks) > 0 { 858 types.BlockBy(types.Number).Sort(blocks) 859 860 // Insert one by one as chain insertion needs contiguous ancestry between blocks 861 for i := range blocks { 862 bc.InsertChain(blocks[i : i+1]) 863 } 864 } 865 } 866 867 // WriteStatus status of write 868 type WriteStatus byte 869 870 const ( 871 NonStatTy WriteStatus = iota 872 CanonStatTy 873 SideStatTy 874 ) 875 876 // Rollback is designed to remove a chain of links from the database that aren't 877 // certain enough to be valid. 
878 func (bc *BlockChain) Rollback(chain []common.Hash) { 879 bc.chainmu.Lock() 880 defer bc.chainmu.Unlock() 881 882 for i := len(chain) - 1; i >= 0; i-- { 883 hash := chain[i] 884 885 currentHeader := bc.hc.CurrentHeader() 886 if currentHeader.Hash() == hash { 887 bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)) 888 } 889 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash { 890 newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) 891 rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash()) 892 bc.currentFastBlock.Store(newFastBlock) 893 headFastBlockGauge.Update(int64(newFastBlock.NumberU64())) 894 } 895 if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash { 896 newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) 897 rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash()) 898 bc.currentBlock.Store(newBlock) 899 headBlockGauge.Update(int64(newBlock.NumberU64())) 900 } 901 } 902 // Truncate ancient data which exceeds the current header. 903 // 904 // Notably, it can happen that system crashes without truncating the ancient data 905 // but the head indicator has been updated in the active store. Regarding this issue, 906 // system will self recovery by truncating the extra data during the setup phase. 907 if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil { 908 log.Crit("Truncate ancient store failed", "err", err) 909 } 910 } 911 912 // truncateAncient rewinds the blockchain to the specified header and deletes all 913 // data in the ancient store that exceeds the specified header. 914 func (bc *BlockChain) truncateAncient(head uint64) error { 915 frozen, err := bc.db.Ancients() 916 if err != nil { 917 return err 918 } 919 // Short circuit if there is no data to truncate in ancient store. 
920 if frozen <= head+1 { 921 return nil 922 } 923 // Truncate all the data in the freezer beyond the specified head 924 if err := bc.db.TruncateAncients(head + 1); err != nil { 925 return err 926 } 927 // Clear out any stale content from the caches 928 bc.hc.headerCache.Purge() 929 bc.hc.tdCache.Purge() 930 bc.hc.numberCache.Purge() 931 932 // Clear out any stale content from the caches 933 bc.bodyCache.Purge() 934 bc.bodyRLPCache.Purge() 935 bc.receiptsCache.Purge() 936 bc.blockCache.Purge() 937 bc.txLookupCache.Purge() 938 bc.futureBlocks.Purge() 939 940 log.Info("Rewind ancient data", "number", head) 941 return nil 942 } 943 944 // numberHash is just a container for a number and a hash, to represent a block 945 type numberHash struct { 946 number uint64 947 hash common.Hash 948 } 949 950 // InsertReceiptChain attempts to complete an already existing header chain with 951 // transaction and receipt data. 952 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) { 953 // We don't require the chainMu here since we want to maximize the 954 // concurrency of header insertion and receipt insertion. 
955 bc.wg.Add(1) 956 defer bc.wg.Done() 957 958 var ( 959 ancientBlocks, liveBlocks types.Blocks 960 ancientReceipts, liveReceipts []types.Receipts 961 ) 962 // Do a sanity check that the provided chain is actually ordered and linked 963 for i := 0; i < len(blockChain); i++ { 964 if i != 0 { 965 if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { 966 log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), 967 "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) 968 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), 969 blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) 970 } 971 } 972 if blockChain[i].NumberU64() <= ancientLimit { 973 ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i]) 974 } else { 975 liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i]) 976 } 977 } 978 979 var ( 980 stats = struct{ processed, ignored int32 }{} 981 start = time.Now() 982 size = 0 983 ) 984 // updateHead updates the head fast sync block if the inserted blocks are better 985 // and returns a indicator whether the inserted blocks are canonical. 986 updateHead := func(head *types.Block) bool { 987 bc.chainmu.Lock() 988 989 // Rewind may have occurred, skip in that case. 
990 if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 { 991 currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64()) 992 if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { 993 rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) 994 bc.currentFastBlock.Store(head) 995 headFastBlockGauge.Update(int64(head.NumberU64())) 996 bc.chainmu.Unlock() 997 return true 998 } 999 } 1000 bc.chainmu.Unlock() 1001 return false 1002 } 1003 // writeAncient writes blockchain and corresponding receipt chain into ancient store. 1004 // 1005 // this function only accepts canonical chain data. All side chain will be reverted 1006 // eventually. 1007 writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 1008 var ( 1009 previous = bc.CurrentFastBlock() 1010 batch = bc.db.NewBatch() 1011 ) 1012 // If any error occurs before updating the head or we are inserting a side chain, 1013 // all the data written this time wll be rolled back. 
1014 defer func() { 1015 if previous != nil { 1016 if err := bc.truncateAncient(previous.NumberU64()); err != nil { 1017 log.Crit("Truncate ancient store failed", "err", err) 1018 } 1019 } 1020 }() 1021 var deleted []*numberHash 1022 for i, block := range blockChain { 1023 // Short circuit insertion if shutting down or processing failed 1024 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1025 return 0, errInsertionInterrupted 1026 } 1027 // Short circuit insertion if it is required(used in testing only) 1028 if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) { 1029 return i, errors.New("insertion is terminated for testing purpose") 1030 } 1031 // Short circuit if the owner header is unknown 1032 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 1033 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 1034 } 1035 var ( 1036 start = time.Now() 1037 logged = time.Now() 1038 count int 1039 ) 1040 // Migrate all ancient blocks. This can happen if someone upgrades from Geth 1041 // 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the 1042 // long term. 1043 for { 1044 // We can ignore the error here since light client won't hit this code path. 1045 frozen, _ := bc.db.Ancients() 1046 if frozen >= block.NumberU64() { 1047 break 1048 } 1049 h := rawdb.ReadCanonicalHash(bc.db, frozen) 1050 b := rawdb.ReadBlock(bc.db, h, frozen) 1051 size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen)) 1052 count += 1 1053 1054 // Always keep genesis block in active database. 
1055 if b.NumberU64() != 0 { 1056 deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()}) 1057 } 1058 if time.Since(logged) > 8*time.Second { 1059 log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) 1060 logged = time.Now() 1061 } 1062 // Don't collect too much in-memory, write it out every 100K blocks 1063 if len(deleted) > 100000 { 1064 1065 // Sync the ancient store explicitly to ensure all data has been flushed to disk. 1066 if err := bc.db.Sync(); err != nil { 1067 return 0, err 1068 } 1069 // Wipe out canonical block data. 1070 for _, nh := range deleted { 1071 rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number) 1072 rawdb.DeleteCanonicalHash(batch, nh.number) 1073 } 1074 if err := batch.Write(); err != nil { 1075 return 0, err 1076 } 1077 batch.Reset() 1078 // Wipe out side chain too. 1079 for _, nh := range deleted { 1080 for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) { 1081 rawdb.DeleteBlock(batch, hash, nh.number) 1082 } 1083 } 1084 if err := batch.Write(); err != nil { 1085 return 0, err 1086 } 1087 batch.Reset() 1088 deleted = deleted[0:] 1089 } 1090 } 1091 if count > 0 { 1092 log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) 1093 } 1094 // Flush data into ancient database. 1095 size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64())) 1096 rawdb.WriteTxLookupEntries(batch, block) 1097 1098 stats.processed++ 1099 } 1100 // Flush all tx-lookup index data. 1101 size += batch.ValueSize() 1102 if err := batch.Write(); err != nil { 1103 return 0, err 1104 } 1105 batch.Reset() 1106 1107 // Sync the ancient store explicitly to ensure all data has been flushed to disk. 
1108 if err := bc.db.Sync(); err != nil { 1109 return 0, err 1110 } 1111 if !updateHead(blockChain[len(blockChain)-1]) { 1112 return 0, errors.New("side blocks can't be accepted as the ancient chain data") 1113 } 1114 previous = nil // disable rollback explicitly 1115 1116 // Wipe out canonical block data. 1117 for _, nh := range deleted { 1118 rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number) 1119 rawdb.DeleteCanonicalHash(batch, nh.number) 1120 } 1121 for _, block := range blockChain { 1122 // Always keep genesis block in active database. 1123 if block.NumberU64() != 0 { 1124 rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64()) 1125 rawdb.DeleteCanonicalHash(batch, block.NumberU64()) 1126 } 1127 } 1128 if err := batch.Write(); err != nil { 1129 return 0, err 1130 } 1131 batch.Reset() 1132 1133 // Wipe out side chain too. 1134 for _, nh := range deleted { 1135 for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) { 1136 rawdb.DeleteBlock(batch, hash, nh.number) 1137 } 1138 } 1139 for _, block := range blockChain { 1140 // Always keep genesis block in active database. 1141 if block.NumberU64() != 0 { 1142 for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) { 1143 rawdb.DeleteBlock(batch, hash, block.NumberU64()) 1144 } 1145 } 1146 } 1147 if err := batch.Write(); err != nil { 1148 return 0, err 1149 } 1150 return 0, nil 1151 } 1152 // writeLive writes blockchain and corresponding receipt chain into active store. 
1153 writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 1154 batch := bc.db.NewBatch() 1155 for i, block := range blockChain { 1156 // Short circuit insertion if shutting down or processing failed 1157 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1158 return 0, errInsertionInterrupted 1159 } 1160 // Short circuit if the owner header is unknown 1161 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 1162 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 1163 } 1164 if bc.HasBlock(block.Hash(), block.NumberU64()) { 1165 stats.ignored++ 1166 continue 1167 } 1168 // Write all the data out into the database 1169 rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) 1170 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i]) 1171 rawdb.WriteTxLookupEntries(batch, block) 1172 1173 stats.processed++ 1174 if batch.ValueSize() >= ethdb.IdealBatchSize { 1175 if err := batch.Write(); err != nil { 1176 return 0, err 1177 } 1178 size += batch.ValueSize() 1179 batch.Reset() 1180 } 1181 } 1182 if batch.ValueSize() > 0 { 1183 size += batch.ValueSize() 1184 if err := batch.Write(); err != nil { 1185 return 0, err 1186 } 1187 } 1188 updateHead(blockChain[len(blockChain)-1]) 1189 return 0, nil 1190 } 1191 // Write downloaded chain data and corresponding receipt chain data. 
1192 if len(ancientBlocks) > 0 { 1193 if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil { 1194 if err == errInsertionInterrupted { 1195 return 0, nil 1196 } 1197 return n, err 1198 } 1199 } 1200 if len(liveBlocks) > 0 { 1201 if n, err := writeLive(liveBlocks, liveReceipts); err != nil { 1202 if err == errInsertionInterrupted { 1203 return 0, nil 1204 } 1205 return n, err 1206 } 1207 } 1208 1209 head := blockChain[len(blockChain)-1] 1210 context := []interface{}{ 1211 "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), 1212 "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)), 1213 "size", common.StorageSize(size), 1214 } 1215 if stats.ignored > 0 { 1216 context = append(context, []interface{}{"ignored", stats.ignored}...) 1217 } 1218 log.Info("Imported new block receipts", context...) 1219 1220 return 0, nil 1221 } 1222 1223 var lastWrite uint64 1224 1225 // writeBlockWithoutState writes only the block and its metadata to the database, 1226 // but does not write any state. This is used to construct competing side forks 1227 // up to the point where they exceed the canonical total difficulty. 1228 func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) { 1229 bc.wg.Add(1) 1230 defer bc.wg.Done() 1231 1232 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { 1233 return err 1234 } 1235 rawdb.WriteBlock(bc.db, block) 1236 1237 return nil 1238 } 1239 1240 // writeKnownBlock updates the head block flag with a known block 1241 // and introduces chain reorg if necessary. 1242 func (bc *BlockChain) writeKnownBlock(block *types.Block) error { 1243 bc.wg.Add(1) 1244 defer bc.wg.Done() 1245 1246 current := bc.CurrentBlock() 1247 if block.ParentHash() != current.Hash() { 1248 if err := bc.reorg(current, block); err != nil { 1249 return err 1250 } 1251 } 1252 // Write the positional metadata for transaction/receipt lookups. 
1253 // Preimages here is empty, ignore it. 1254 rawdb.WriteTxLookupEntries(bc.db, block) 1255 1256 bc.insert(block) 1257 return nil 1258 } 1259 1260 // WriteBlockWithState writes the block and all associated state to the database. 1261 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) { 1262 bc.chainmu.Lock() 1263 defer bc.chainmu.Unlock() 1264 1265 return bc.writeBlockWithState(block, receipts, state) 1266 } 1267 1268 // writeBlockWithState writes the block and all associated state to the database, 1269 // but is expects the chain mutex to be held. 1270 func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) { 1271 bc.wg.Add(1) 1272 defer bc.wg.Done() 1273 1274 // Calculate the total difficulty of the block 1275 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 1276 if ptd == nil { 1277 return NonStatTy, consensus.ErrUnknownAncestor 1278 } 1279 // Make sure no inconsistent state is leaked during insertion 1280 currentBlock := bc.CurrentBlock() 1281 localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 1282 externTd := new(big.Int).Add(block.Difficulty(), ptd) 1283 1284 // Irrelevant of the canonical status, write the block itself to the database 1285 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { 1286 return NonStatTy, err 1287 } 1288 rawdb.WriteBlock(bc.db, block) 1289 1290 root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) 1291 if err != nil { 1292 return NonStatTy, err 1293 } 1294 triedb := bc.stateCache.TrieDB() 1295 1296 // If we're running an archive node, always flush 1297 if bc.cacheConfig.TrieDirtyDisabled { 1298 if err := triedb.Commit(root, false); err != nil { 1299 return NonStatTy, err 1300 } 1301 } else { 1302 // Full but not archive node, do proper garbage collection 1303 triedb.Reference(root, 
common.Hash{}) // metadata reference to keep trie alive 1304 bc.triegc.Push(root, -int64(block.NumberU64())) 1305 1306 if current := block.NumberU64(); current > TriesInMemory { 1307 // If we exceeded our memory allowance, flush matured singleton nodes to disk 1308 var ( 1309 nodes, imgs = triedb.Size() 1310 limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 1311 ) 1312 if nodes > limit || imgs > 4*1024*1024 { 1313 triedb.Cap(limit - ethdb.IdealBatchSize) 1314 } 1315 // Find the next state trie we need to commit 1316 chosen := current - TriesInMemory 1317 1318 // If we exceeded out time allowance, flush an entire trie to disk 1319 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 1320 // If the header is missing (canonical chain behind), we're reorging a low 1321 // diff sidechain. Suspend committing until this operation is completed. 1322 header := bc.GetHeaderByNumber(chosen) 1323 if header == nil { 1324 log.Warn("Reorg in progress, trie commit postponed", "number", chosen) 1325 } else { 1326 // If we're exceeding limits but haven't reached a large enough memory gap, 1327 // warn the user that the system is becoming unstable. 1328 if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 1329 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory) 1330 } 1331 // Flush an entire trie and restart the counters 1332 triedb.Commit(header.Root, true) 1333 lastWrite = chosen 1334 bc.gcproc = 0 1335 } 1336 } 1337 // Garbage collect anything below our required write retention 1338 for !bc.triegc.Empty() { 1339 root, number := bc.triegc.Pop() 1340 if uint64(-number) > chosen { 1341 bc.triegc.Push(root, number) 1342 break 1343 } 1344 triedb.Dereference(root.(common.Hash)) 1345 } 1346 } 1347 } 1348 1349 // Write other block data using a batch. 
1350 batch := bc.db.NewBatch() 1351 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) 1352 1353 // If the total difficulty is higher than our known, add it to the canonical chain 1354 // Second clause in the if statement reduces the vulnerability to selfish mining. 1355 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1356 reorg := externTd.Cmp(localTd) > 0 1357 currentBlock = bc.CurrentBlock() 1358 if !reorg && externTd.Cmp(localTd) == 0 { 1359 // Split same-difficulty blocks by number, then preferentially select 1360 // the block generated by the local miner as the canonical block. 1361 if block.NumberU64() < currentBlock.NumberU64() { 1362 reorg = true 1363 } else if block.NumberU64() == currentBlock.NumberU64() { 1364 var currentPreserve, blockPreserve bool 1365 if bc.shouldPreserve != nil { 1366 currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) 1367 } 1368 reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) 1369 } 1370 } 1371 if reorg { 1372 // Reorganise the chain if the parent is not the head block 1373 if block.ParentHash() != currentBlock.Hash() { 1374 if err := bc.reorg(currentBlock, block); err != nil { 1375 return NonStatTy, err 1376 } 1377 } 1378 // Write the positional metadata for transaction/receipt lookups and preimages 1379 rawdb.WriteTxLookupEntries(batch, block) 1380 rawdb.WritePreimages(batch, state.Preimages()) 1381 1382 status = CanonStatTy 1383 } else { 1384 status = SideStatTy 1385 } 1386 if err := batch.Write(); err != nil { 1387 return NonStatTy, err 1388 } 1389 1390 // Set new head. 1391 if status == CanonStatTy { 1392 bc.insert(block) 1393 } 1394 bc.futureBlocks.Remove(block.Hash()) 1395 return status, nil 1396 } 1397 1398 // addFutureBlock checks if the block is within the max allowed window to get 1399 // accepted for future processing, and returns an error if the block is too far 1400 // ahead and was not added. 
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
	// Reject blocks whose timestamp is more than maxTimeFutureBlocks seconds
	// ahead of the local wall clock; anything closer is buffered for retry.
	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
	if block.Time() > max {
		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
	}
	bc.futureBlocks.Add(block.Hash(), block)
	return nil
}

// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}

	// Announce block processing start/stop to feed subscribers.
	bc.blockProcFeed.Send(true)
	defer bc.blockProcFeed.Send(false)

	// Remove already known canon-blocks
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports: track the import in the
	// waitgroup (so Stop can wait for it) and hold the chain mutex throughout.
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, events, logs, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	// Fire the accumulated events only after the lock is released.
	bc.PostChainEvents(events, logs)
	return n, err
}

// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil, nil, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
1472 var ( 1473 stats = insertStats{startTime: mclock.Now()} 1474 events = make([]interface{}, 0, len(chain)) 1475 lastCanon *types.Block 1476 coalescedLogs []*types.Log 1477 ) 1478 // Start the parallel header verifier 1479 headers := make([]*types.Header, len(chain)) 1480 seals := make([]bool, len(chain)) 1481 1482 for i, block := range chain { 1483 headers[i] = block.Header() 1484 seals[i] = verifySeals 1485 } 1486 abort, results := bc.engine.VerifyHeaders(bc, headers, seals) 1487 defer close(abort) 1488 1489 // Peek the error for the first block to decide the directing import logic 1490 it := newInsertIterator(chain, results, bc.validator) 1491 1492 block, err := it.next() 1493 1494 // Left-trim all the known blocks 1495 if err == ErrKnownBlock { 1496 // First block (and state) is known 1497 // 1. We did a roll-back, and should now do a re-import 1498 // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot 1499 // from the canonical chain, which has not been verified. 1500 // Skip all known blocks that are behind us 1501 var ( 1502 current = bc.CurrentBlock() 1503 localTd = bc.GetTd(current.Hash(), current.NumberU64()) 1504 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil 1505 ) 1506 for block != nil && err == ErrKnownBlock { 1507 externTd = new(big.Int).Add(externTd, block.Difficulty()) 1508 if localTd.Cmp(externTd) < 0 { 1509 break 1510 } 1511 log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash()) 1512 stats.ignored++ 1513 1514 block, err = it.next() 1515 } 1516 // The remaining blocks are still known blocks, the only scenario here is: 1517 // During the fast sync, the pivot point is already submitted but rollback 1518 // happens. Then node resets the head full block to a lower height via `rollback` 1519 // and leaves a few known blocks in the database. 
1520 // 1521 // When node runs a fast sync again, it can re-import a batch of known blocks via 1522 // `insertChain` while a part of them have higher total difficulty than current 1523 // head full block(new pivot point). 1524 for block != nil && err == ErrKnownBlock { 1525 log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) 1526 if err := bc.writeKnownBlock(block); err != nil { 1527 return it.index, nil, nil, err 1528 } 1529 lastCanon = block 1530 1531 block, err = it.next() 1532 } 1533 // Falls through to the block import 1534 } 1535 switch { 1536 // First block is pruned, insert as sidechain and reorg only if TD grows enough 1537 case err == consensus.ErrPrunedAncestor: 1538 log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash()) 1539 return bc.insertSideChain(block, it) 1540 1541 // First block is future, shove it (and all children) to the future queue (unknown ancestor) 1542 case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())): 1543 for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) { 1544 log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash()) 1545 if err := bc.addFutureBlock(block); err != nil { 1546 return it.index, events, coalescedLogs, err 1547 } 1548 block, err = it.next() 1549 } 1550 stats.queued += it.processed() 1551 stats.ignored += it.remaining() 1552 1553 // If there are any still remaining, mark as ignored 1554 return it.index, events, coalescedLogs, err 1555 1556 // Some other error occurred, abort 1557 case err != nil: 1558 bc.futureBlocks.Remove(block.Hash()) 1559 stats.ignored += len(it.chain) 1560 bc.reportBlock(block, nil, err) 1561 return it.index, events, coalescedLogs, err 1562 } 1563 // No validation errors for the first block (or chain prefix skipped) 1564 for ; block != nil && err == nil || err == ErrKnownBlock; 
block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, events, coalescedLogs, ErrBlacklistedHash
		}
		// If the block is known (in the middle of the chain), it's a special case for
		// Clique blocks where they can share state among each other, so importing an
		// older block might complete the state of the subsequent one. In this case,
		// just skip the block (we already validated it once fully (and crashed), since
		// its header and body were already in the database).
		if err == ErrKnownBlock {
			// On proof-of-work chains a re-import of a known block is unexpected,
			// so escalate the log level when Clique is not configured.
			logger := log.Debug
			if bc.chainConfig.Clique == nil {
				logger = log.Warn
			}
			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"root", block.Root())

			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, nil, nil, err
			}
			stats.processed++

			// We can assume that logs are empty here, since the only way for consecutive
			// Clique blocks to have the same state is if there are no transactions.
			events = append(events, ChainEvent{block, block.Hash(), nil})
			lastCanon = block

			continue
		}
		// Retrieve the parent block and its state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
		}
		statedb, err := state.New(parent.Root, bc.stateCache)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// If we have a followup block, run that against the current state to pre-cache
		// transactions and probabilistically some of the account/storage trie nodes.
		// followupInterrupt is set to 1 on every exit path below to stop the prefetcher.
		var followupInterrupt uint32

		if !bc.cacheConfig.TrieCleanNoPrefetch {
			if followup, err := it.peek(); followup != nil && err == nil {
				go func(start time.Time) {
					// The throwaway state is discarded; only its warmed caches matter.
					throwaway, _ := state.New(parent.Root, bc.stateCache)
					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)

					blockPrefetchExecuteTimer.Update(time.Since(start))
					if atomic.LoadUint32(&followupInterrupt) == 1 {
						blockPrefetchInterruptMeter.Mark(1)
					}
				}(time.Now())
			}
		}
		// Process block using the parent state as reference point
		substart := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		// Update the metrics touched during block processing
		accountReadTimer.Update(statedb.AccountReads)     // Account reads are complete, we can mark them
		storageReadTimer.Update(statedb.StorageReads)     // Storage reads are complete, we can mark them
		accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
		storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them

		triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
		trieproc := statedb.AccountReads + statedb.AccountUpdates
		trieproc += statedb.StorageReads + statedb.StorageUpdates

		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

		// Validate the state using the default validator
		substart = time.Now()
		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		proctime := time.Since(start)

		// Update the metrics touched during block validation
		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them

		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))

		// Write the block to the chain and get the status.
		substart = time.Now()
		status, err := bc.writeBlockWithState(block, receipts, statedb)
		if err != nil {
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		// The block is committed; stop the followup prefetcher unconditionally.
		atomic.StoreUint32(&followupInterrupt, 1)

		// Update the metrics touched during block commit
		accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
		storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them

		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
		blockInsertTimer.UpdateSince(start)

		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			coalescedLogs = append(coalescedLogs, logs...)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
			events = append(events, ChainSideEvent{block})

		default:
			// This in theory is impossible, but let's be nice to our future selves and leave
			// a log, instead of trying to track down block imports that don't emit logs.
			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
		stats.processed++
		stats.usedGas += usedGas

		dirty, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, dirty)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && err == consensus.ErrFutureBlock {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, events, coalescedLogs, err
		}
		block, err = it.next()

		// Descendants of a queued future block are ErrUnknownAncestor; queue them too.
		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()

	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return it.index, events, coalescedLogs, err
}

// insertSideChain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other error means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.NumberU64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block which has its state pruned

				// Collect the TD of the block. Since we know it's a canon one,
				// we can get it directly, and not (like further below) use
				// the parent and then add the block on top
				externTd = bc.GetTd(block.Hash(), block.NumberU64())
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exists! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
			}
		}
		// Seed the running total difficulty from the parent on the first iteration.
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
				return it.index, nil, nil, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to database. Loop ended
	// either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(current.Hash(), current.NumberU64())
	if localTd.Cmp(externTd) > 0 {
		// Local chain still has more difficulty; nothing to reorg to. The loop-exit
		// err (nil or the non-pruned-ancestor error) is propagated to the caller.
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
		return it.index, nil, nil, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, nil, nil, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	// hashes/numbers were collected newest-first; walk backwards to import oldest-first.
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
				return 0, nil, nil, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil, nil, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil, nil, nil
}

// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block

		deletedTxs types.Transactions
		addedTxs   types.Transactions

		deletedLogs []*types.Log
		rebirthLogs []*types.Log

		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted or reborn
		collectLogs = func(hash common.Hash, removed bool) {
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					// Copy the log so the Removed flag doesn't mutate cached receipts.
					l := *log
					if removed {
						l.Removed = true
						deletedLogs = append(deletedLogs, &l)
					} else {
						rebirthLogs = append(rebirthLogs, &l)
					}
				}
			}
		}
	)
	// Reduce the longer chain to the same number as the shorter one
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// Old chain is longer, gather all transactions and logs as deleted ones
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash(), true)
		}
	} else {
		// New chain is longer, stash all blocks away for subsequent insertion
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("invalid new chain")
	}
	// Both sides of the reorg are at the same number, reduce both until the common
	// ancestor is found
	for {
		// If the common ancestor was found, bail out
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		// Remove an old block as well as stash away a new block
		oldChain = append(oldChain, oldBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash(), true)

		newChain = append(newChain, newBlock)

		// Step back with both chains
		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("invalid old chain")
		}
		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if newBlock == nil {
			return fmt.Errorf("invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Info
		msg := "Chain reorg detected"
		if len(oldChain) > 63 {
			msg = "Large chain reorg detected"
			logFn = log.Warn
		}
		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
		// NOTE(review): these Mark calls match their variable names, but at the top of
		// the file blockReorgAddMeter is registered as "chain/reorg/drop" and
		// blockReorgDropMeter as "chain/reorg/add" — the metric names look swapped;
		// confirm and fix the registrations.
		blockReorgAddMeter.Mark(int64(len(newChain)))
		blockReorgDropMeter.Mark(int64(len(oldChain)))
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain(except the head block(reverse order)),
	// taking care of the proper incremental order.
	for i := len(newChain) - 1; i >= 1; i-- {
		// Insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])

		// Collect reborn logs due to chain reorg
		collectLogs(newChain[i].Hash(), false)

		// Write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// When transactions get deleted from the database, the receipts that were
	// created in the fork must also be deleted
	batch := bc.db.NewBatch()
	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	// Delete any canonical number assignments above the new head
	number := bc.CurrentBlock().NumberU64()
	for i := number + 1; ; i++ {
		hash := rawdb.ReadCanonicalHash(bc.db, i)
		if hash == (common.Hash{}) {
			break
		}
		rawdb.DeleteCanonicalHash(batch, i)
	}
	// NOTE(review): the batch Write error is ignored here — confirm this is intentional.
	batch.Write()
	// If any logs need to be fired, do it now. In theory we could avoid creating
	// this goroutine if there are no events to fire, but realistically that only
	// ever happens if we're reorging empty blocks, which will only happen on idle
	// networks where performance is not an issue either way.
	//
	// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
	// event ordering?
	go func() {
		if len(deletedLogs) > 0 {
			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
		}
		if len(rebirthLogs) > 0 {
			bc.logsFeed.Send(rebirthLogs)
		}
		if len(oldChain) > 0 {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}
	}()
	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
2013 func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) { 2014 // post event logs for further processing 2015 if logs != nil { 2016 bc.logsFeed.Send(logs) 2017 } 2018 for _, event := range events { 2019 switch ev := event.(type) { 2020 case ChainEvent: 2021 bc.chainFeed.Send(ev) 2022 2023 case ChainHeadEvent: 2024 bc.chainHeadFeed.Send(ev) 2025 2026 case ChainSideEvent: 2027 bc.chainSideFeed.Send(ev) 2028 } 2029 } 2030 } 2031 2032 func (bc *BlockChain) update() { 2033 futureTimer := time.NewTicker(5 * time.Second) 2034 defer futureTimer.Stop() 2035 for { 2036 select { 2037 case <-futureTimer.C: 2038 bc.procFutureBlocks() 2039 case <-bc.quit: 2040 return 2041 } 2042 } 2043 } 2044 2045 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 2046 func (bc *BlockChain) BadBlocks() []*types.Block { 2047 blocks := make([]*types.Block, 0, bc.badBlocks.Len()) 2048 for _, hash := range bc.badBlocks.Keys() { 2049 if blk, exist := bc.badBlocks.Peek(hash); exist { 2050 block := blk.(*types.Block) 2051 blocks = append(blocks, block) 2052 } 2053 } 2054 return blocks 2055 } 2056 2057 // addBadBlock adds a bad block to the bad-block LRU cache 2058 func (bc *BlockChain) addBadBlock(block *types.Block) { 2059 bc.badBlocks.Add(block.Hash(), block) 2060 } 2061 2062 // reportBlock logs a bad block error. 
2063 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 2064 bc.addBadBlock(block) 2065 2066 var receiptString string 2067 for i, receipt := range receipts { 2068 receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", 2069 i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(), 2070 receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) 2071 } 2072 log.Error(fmt.Sprintf(` 2073 ########## BAD BLOCK ######### 2074 Chain config: %v 2075 2076 Number: %v 2077 Hash: 0x%x 2078 %v 2079 2080 Error: %v 2081 ############################## 2082 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 2083 } 2084 2085 // InsertHeaderChain attempts to insert the given header chain in to the local 2086 // chain, possibly creating a reorg. If an error is returned, it will return the 2087 // index number of the failing header as well an error describing what went wrong. 2088 // 2089 // The verify parameter can be used to fine tune whether nonce verification 2090 // should be done or not. The reason behind the optional check is because some 2091 // of the header retrieval mechanisms already need to verify nonces, as well as 2092 // because nonces can be verified sparsely, not needing to check each. 
2093 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 2094 start := time.Now() 2095 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 2096 return i, err 2097 } 2098 2099 // Make sure only one thread manipulates the chain at once 2100 bc.chainmu.Lock() 2101 defer bc.chainmu.Unlock() 2102 2103 bc.wg.Add(1) 2104 defer bc.wg.Done() 2105 2106 whFunc := func(header *types.Header) error { 2107 _, err := bc.hc.WriteHeader(header) 2108 return err 2109 } 2110 return bc.hc.InsertHeaderChain(chain, whFunc, start) 2111 } 2112 2113 // CurrentHeader retrieves the current head header of the canonical chain. The 2114 // header is retrieved from the HeaderChain's internal cache. 2115 func (bc *BlockChain) CurrentHeader() *types.Header { 2116 return bc.hc.CurrentHeader() 2117 } 2118 2119 // GetTd retrieves a block's total difficulty in the canonical chain from the 2120 // database by hash and number, caching it if found. 2121 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 2122 return bc.hc.GetTd(hash, number) 2123 } 2124 2125 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 2126 // database by hash, caching it if found. 2127 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 2128 return bc.hc.GetTdByHash(hash) 2129 } 2130 2131 // GetHeader retrieves a block header from the database by hash and number, 2132 // caching it if found. 2133 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 2134 return bc.hc.GetHeader(hash, number) 2135 } 2136 2137 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 2138 // found. 2139 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 2140 return bc.hc.GetHeaderByHash(hash) 2141 } 2142 2143 // HasHeader checks if a block header is present in the database or not, caching 2144 // it if present. 
// HasHeader delegates to the header chain; it does not touch block bodies.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetCanonicalHash returns the canonical hash for a given block number
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
	return bc.hc.GetCanonicalHash(number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// GetTransactionLookup retrieves the lookup associate with the given transaction
// hash from the cache or database.
2177 func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry { 2178 // Short circuit if the txlookup already in the cache, retrieve otherwise 2179 if lookup, exist := bc.txLookupCache.Get(hash); exist { 2180 return lookup.(*rawdb.LegacyTxLookupEntry) 2181 } 2182 tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash) 2183 if tx == nil { 2184 return nil 2185 } 2186 lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex} 2187 bc.txLookupCache.Add(hash, lookup) 2188 return lookup 2189 } 2190 2191 // Config retrieves the chain's fork configuration. 2192 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 2193 2194 // Engine retrieves the blockchain's consensus engine. 2195 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 2196 2197 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 2198 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 2199 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 2200 } 2201 2202 // SubscribeChainEvent registers a subscription of ChainEvent. 2203 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 2204 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 2205 } 2206 2207 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 2208 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 2209 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 2210 } 2211 2212 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 2213 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 2214 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 2215 } 2216 2217 // SubscribeLogsEvent registers a subscription of []*types.Log. 
// SubscribeLogsEvent registers ch on the logs feed; the subscription is tracked
// by the chain's scope so it is closed when the chain shuts down.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}