github.com/codingfuture/orig-energi3@v0.8.4/core/blockchain.go (about) 1 // Copyright 2018 The Energi Core Authors 2 // Copyright 2014 The go-ethereum Authors 3 // This file is part of the Energi Core library. 4 // 5 // The Energi Core library is free software: you can redistribute it and/or modify 6 // it under the terms of the GNU Lesser General Public License as published by 7 // the Free Software Foundation, either version 3 of the License, or 8 // (at your option) any later version. 9 // 10 // The Energi Core library is distributed in the hope that it will be useful, 11 // but WITHOUT ANY WARRANTY; without even the implied warranty of 12 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 // GNU Lesser General Public License for more details. 14 // 15 // You should have received a copy of the GNU Lesser General Public License 16 // along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>. 17 18 // Package core implements the Ethereum consensus protocol. 
19 package core 20 21 import ( 22 "errors" 23 "fmt" 24 "io" 25 "math/big" 26 mrand "math/rand" 27 "sync" 28 "sync/atomic" 29 "time" 30 31 "github.com/ethereum/go-ethereum/common" 32 "github.com/ethereum/go-ethereum/common/mclock" 33 "github.com/ethereum/go-ethereum/common/prque" 34 "github.com/ethereum/go-ethereum/consensus" 35 "github.com/ethereum/go-ethereum/core/rawdb" 36 "github.com/ethereum/go-ethereum/core/state" 37 "github.com/ethereum/go-ethereum/core/types" 38 "github.com/ethereum/go-ethereum/core/vm" 39 "github.com/ethereum/go-ethereum/crypto" 40 "github.com/ethereum/go-ethereum/ethdb" 41 "github.com/ethereum/go-ethereum/event" 42 "github.com/ethereum/go-ethereum/log" 43 "github.com/ethereum/go-ethereum/metrics" 44 "github.com/ethereum/go-ethereum/params" 45 "github.com/ethereum/go-ethereum/rlp" 46 "github.com/ethereum/go-ethereum/trie" 47 "github.com/hashicorp/golang-lru" 48 49 energi_params "energi.world/core/gen3/energi/params" 50 ) 51 52 var ( 53 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 54 blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) 55 blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) 56 blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) 57 58 ErrNoGenesis = errors.New("Genesis not found in chain") 59 ) 60 61 const ( 62 bodyCacheLimit = 256 63 blockCacheLimit = 256 64 receiptsCacheLimit = 32 65 maxFutureBlocks = 256 66 maxTimeFutureBlocks = 30 67 badBlockLimit = 10 68 triesInMemory = 160 69 70 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 
71 BlockChainVersion uint64 = 3 72 ) 73 74 func init() { 75 max_maturity_blocks := ((energi_params.MaturityPeriod / energi_params.MinBlockGap) + 1) 76 max_fork_blocks := ((energi_params.OldForkPeriod / energi_params.MinBlockGap) + 1) 77 78 if triesInMemory < (max_maturity_blocks + max_fork_blocks) { 79 panic("More Tries in memory is required!") 80 } 81 } 82 83 // CacheConfig contains the configuration values for the trie caching/pruning 84 // that's resident in a blockchain. 85 type CacheConfig struct { 86 Disabled bool // Whether to disable trie write caching (archive node) 87 TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory 88 TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk 89 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 90 TrieRapidLimit time.Duration // Similar to TrieTimeLimit, but for Engine with history requirements 91 } 92 93 // BlockChain represents the canonical chain given a database with a genesis 94 // block. The Blockchain manages chain imports, reverts, chain reorganisations. 95 // 96 // Importing blocks in to the block chain happens according to the set of rules 97 // defined by the two stage Validator. Processing of blocks is done using the 98 // Processor which processes the included transaction. The validation of the state 99 // is done in the second part of the Validator. Failing results in aborting of 100 // the import. 101 // 102 // The BlockChain also helps in returning blocks from **any** chain included 103 // in the database as well as blocks that represents the canonical chain. It's 104 // important to note that GetBlock can return any block and does not need to be 105 // included in the canonical one where as GetBlockByNumber always represents the 106 // canonical chain. 
107 type BlockChain struct { 108 chainConfig *params.ChainConfig // Chain & network configuration 109 cacheConfig *CacheConfig // Cache configuration for pruning 110 111 db ethdb.Database // Low level persistent database to store final content in 112 triegc *prque.Prque // Priority queue mapping block numbers to tries to gc 113 gcproc time.Duration // Accumulates canonical block processing for trie dumping 114 115 hc *HeaderChain 116 rmLogsFeed event.Feed 117 chainFeed event.Feed 118 chainSideFeed event.Feed 119 chainHeadFeed event.Feed 120 logsFeed event.Feed 121 scope event.SubscriptionScope 122 genesisBlock *types.Block 123 124 mu sync.RWMutex // global mutex for locking chain operations 125 chainmu sync.RWMutex // blockchain insertion lock 126 procmu sync.RWMutex // block processor lock 127 128 checkpoint int // checkpoint counts towards the new checkpoint 129 currentBlock atomic.Value // Current head of the block chain 130 currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) 
131 132 stateCache state.Database // State database to reuse between imports (contains state cache) 133 bodyCache *lru.Cache // Cache for the most recent block bodies 134 bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format 135 receiptsCache *lru.Cache // Cache for the most recent receipts per block 136 blockCache *lru.Cache // Cache for the most recent entire blocks 137 futureBlocks *lru.Cache // future blocks are blocks added for later processing 138 139 quit chan struct{} // blockchain quit channel 140 running int32 // running must be called atomically 141 // procInterrupt must be atomically called 142 procInterrupt int32 // interrupt signaler for block processing 143 wg sync.WaitGroup // chain processing wait group for shutting down 144 145 engine consensus.Engine 146 processor Processor // block processor interface 147 validator Validator // block and state validator interface 148 vmConfig vm.Config 149 150 badBlocks *lru.Cache // Bad block cache 151 shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. 152 153 checkpoints *checkpointManager 154 } 155 156 // NewBlockChain returns a fully initialised block chain using information 157 // available in the database. It initialises the default Ethereum Validator and 158 // Processor. 
159 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) { 160 if cacheConfig == nil { 161 cacheConfig = &CacheConfig{ 162 TrieCleanLimit: 256, 163 TrieDirtyLimit: 256, 164 TrieTimeLimit: 5 * time.Minute, 165 TrieRapidLimit: 10 * time.Second, 166 } 167 } 168 bodyCache, _ := lru.New(bodyCacheLimit) 169 bodyRLPCache, _ := lru.New(bodyCacheLimit) 170 receiptsCache, _ := lru.New(receiptsCacheLimit) 171 blockCache, _ := lru.New(blockCacheLimit) 172 futureBlocks, _ := lru.New(maxFutureBlocks) 173 badBlocks, _ := lru.New(badBlockLimit) 174 175 bc := &BlockChain{ 176 chainConfig: chainConfig, 177 cacheConfig: cacheConfig, 178 db: db, 179 triegc: prque.New(nil), 180 stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit), 181 quit: make(chan struct{}), 182 shouldPreserve: shouldPreserve, 183 bodyCache: bodyCache, 184 bodyRLPCache: bodyRLPCache, 185 receiptsCache: receiptsCache, 186 blockCache: blockCache, 187 futureBlocks: futureBlocks, 188 engine: engine, 189 vmConfig: vmConfig, 190 badBlocks: badBlocks, 191 } 192 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 193 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 194 195 var err error 196 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 197 if err != nil { 198 return nil, err 199 } 200 bc.checkpoints = bc.hc.checkpoints 201 bc.genesisBlock = bc.GetBlockByNumber(0) 202 if bc.genesisBlock == nil { 203 return nil, ErrNoGenesis 204 } 205 if err := bc.loadLastState(); err != nil { 206 return nil, err 207 } 208 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 209 for hash := range BadHashes { 210 if header := bc.GetHeaderByHash(hash); header != nil { 211 // get the canonical block corresponding to the offending header's number 212 headerByNumber := 
bc.GetHeaderByNumber(header.Number.Uint64()) 213 // make sure the headerByNumber (if present) is in our current canonical chain 214 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 215 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 216 bc.SetHead(header.Number.Uint64() - 1) 217 log.Error("Chain rewind was successful, resuming normal operation") 218 } 219 } 220 } 221 bc.checkpoints.setup(bc) 222 // Take ownership of this particular state 223 go bc.update() 224 return bc, nil 225 } 226 227 func (bc *BlockChain) getProcInterrupt() bool { 228 return atomic.LoadInt32(&bc.procInterrupt) == 1 229 } 230 231 // GetVMConfig returns the block chain VM config. 232 func (bc *BlockChain) GetVMConfig() *vm.Config { 233 return &bc.vmConfig 234 } 235 236 // loadLastState loads the last known chain state from the database. This method 237 // assumes that the chain manager mutex is held. 238 func (bc *BlockChain) loadLastState() error { 239 // Restore the last known head block 240 head := rawdb.ReadHeadBlockHash(bc.db) 241 if head == (common.Hash{}) { 242 // Corrupt or empty database, init from scratch 243 log.Warn("Empty database, resetting chain") 244 return bc.Reset() 245 } 246 // Make sure the entire head block is available 247 currentBlock := bc.GetBlockByHash(head) 248 if currentBlock == nil { 249 // Corrupt or empty database, init from scratch 250 log.Warn("Head block missing, resetting chain", "hash", head) 251 return bc.Reset() 252 } 253 // Make sure the state associated with the block is available 254 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 255 // Dangling block without a state associated, init from scratch 256 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 257 if err := bc.repair(¤tBlock); err != nil { 258 return err 259 } 260 } 261 // Everything seems to be fine, set as the head block 262 
bc.currentBlock.Store(currentBlock) 263 264 // Restore the last known head header 265 currentHeader := currentBlock.Header() 266 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 267 if header := bc.GetHeaderByHash(head); header != nil { 268 currentHeader = header 269 } 270 } 271 bc.hc.SetCurrentHeader(currentHeader) 272 273 // Restore the last known head fast block 274 bc.currentFastBlock.Store(currentBlock) 275 if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 276 if block := bc.GetBlockByHash(head); block != nil { 277 bc.currentFastBlock.Store(block) 278 } 279 } 280 281 // Issue a status log for the user 282 currentFastBlock := bc.CurrentFastBlock() 283 284 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 285 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 286 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 287 288 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) 289 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) 290 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) 291 292 return nil 293 } 294 295 // SetHead rewinds the local chain to a new head. In the case of headers, everything 296 // above the new head will be deleted and the new one set. In the case of blocks 297 // though, the head may be further rewound if block bodies are missing (non-archive 298 // nodes after a fast sync). 
299 func (bc *BlockChain) SetHead(head uint64) error { 300 log.Warn("Rewinding blockchain", "target", head) 301 302 bc.mu.Lock() 303 defer bc.mu.Unlock() 304 305 // Rewind the header chain, deleting all block bodies until then 306 delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) { 307 rawdb.DeleteBody(db, hash, num) 308 } 309 bc.hc.SetHead(head, delFn) 310 currentHeader := bc.hc.CurrentHeader() 311 312 // Clear out any stale content from the caches 313 bc.bodyCache.Purge() 314 bc.bodyRLPCache.Purge() 315 bc.receiptsCache.Purge() 316 bc.blockCache.Purge() 317 bc.futureBlocks.Purge() 318 319 // Rewind the block chain, ensuring we don't end up with a stateless head block 320 if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() { 321 bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) 322 } 323 if currentBlock := bc.CurrentBlock(); currentBlock != nil { 324 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 325 // Rewound state missing, rolled back to before pivot, reset to genesis 326 bc.currentBlock.Store(bc.genesisBlock) 327 } 328 } 329 // Rewind the fast block in a simpleton way to the target head 330 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() { 331 bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) 332 } 333 // If either blocks reached nil, reset to the genesis state 334 if currentBlock := bc.CurrentBlock(); currentBlock == nil { 335 bc.currentBlock.Store(bc.genesisBlock) 336 } 337 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil { 338 bc.currentFastBlock.Store(bc.genesisBlock) 339 } 340 currentBlock := bc.CurrentBlock() 341 currentFastBlock := bc.CurrentFastBlock() 342 343 rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()) 344 rawdb.WriteHeadFastBlockHash(bc.db, 
currentFastBlock.Hash()) 345 346 return bc.loadLastState() 347 } 348 349 // FastSyncCommitHead sets the current head block to the one defined by the hash 350 // irrelevant what the chain contents were prior. 351 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { 352 // Make sure that both the block as well at its state trie exists 353 block := bc.GetBlockByHash(hash) 354 if block == nil { 355 return fmt.Errorf("non existent block [%x…]", hash[:4]) 356 } 357 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil { 358 return err 359 } 360 // If all checks out, manually set the head block 361 bc.mu.Lock() 362 bc.currentBlock.Store(block) 363 bc.mu.Unlock() 364 365 log.Info("Committed new head block", "number", block.Number(), "hash", hash) 366 return nil 367 } 368 369 // GasLimit returns the gas limit of the current HEAD block. 370 func (bc *BlockChain) GasLimit() uint64 { 371 return bc.CurrentBlock().GasLimit() 372 } 373 374 // CurrentBlock retrieves the current head block of the canonical chain. The 375 // block is retrieved from the blockchain's internal cache. 376 func (bc *BlockChain) CurrentBlock() *types.Block { 377 return bc.currentBlock.Load().(*types.Block) 378 } 379 380 // CurrentFastBlock retrieves the current fast-sync head block of the canonical 381 // chain. The block is retrieved from the blockchain's internal cache. 382 func (bc *BlockChain) CurrentFastBlock() *types.Block { 383 return bc.currentFastBlock.Load().(*types.Block) 384 } 385 386 // SetProcessor sets the processor required for making state modifications. 387 func (bc *BlockChain) SetProcessor(processor Processor) { 388 bc.procmu.Lock() 389 defer bc.procmu.Unlock() 390 bc.processor = processor 391 } 392 393 // SetValidator sets the validator which is used to validate incoming blocks. 
394 func (bc *BlockChain) SetValidator(validator Validator) { 395 bc.procmu.Lock() 396 defer bc.procmu.Unlock() 397 bc.validator = validator 398 } 399 400 // Validator returns the current validator. 401 func (bc *BlockChain) Validator() Validator { 402 bc.procmu.RLock() 403 defer bc.procmu.RUnlock() 404 return bc.validator 405 } 406 407 // Processor returns the current processor. 408 func (bc *BlockChain) Processor() Processor { 409 bc.procmu.RLock() 410 defer bc.procmu.RUnlock() 411 return bc.processor 412 } 413 414 // State returns a new mutable state based on the current HEAD block. 415 func (bc *BlockChain) State() (*state.StateDB, error) { 416 return bc.StateAt(bc.CurrentBlock().Root()) 417 } 418 419 // StateAt returns a new mutable state based on a particular point in time. 420 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 421 return state.New(root, bc.stateCache) 422 } 423 424 // StateCache returns the caching database underpinning the blockchain instance. 425 func (bc *BlockChain) StateCache() state.Database { 426 return bc.stateCache 427 } 428 429 // Reset purges the entire blockchain, restoring it to its genesis state. 430 func (bc *BlockChain) Reset() error { 431 return bc.ResetWithGenesisBlock(bc.genesisBlock) 432 } 433 434 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 435 // specified genesis state. 
436 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 437 // Dump the entire block chain and purge the caches 438 if err := bc.SetHead(0); err != nil { 439 return err 440 } 441 bc.mu.Lock() 442 defer bc.mu.Unlock() 443 444 // Prepare the genesis block and reinitialise the chain 445 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 446 log.Crit("Failed to write genesis block TD", "err", err) 447 } 448 rawdb.WriteBlock(bc.db, genesis) 449 450 bc.genesisBlock = genesis 451 bc.insert(bc.genesisBlock) 452 bc.currentBlock.Store(bc.genesisBlock) 453 bc.hc.SetGenesis(bc.genesisBlock.Header()) 454 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 455 bc.currentFastBlock.Store(bc.genesisBlock) 456 457 return nil 458 } 459 460 // repair tries to repair the current blockchain by rolling back the current block 461 // until one with associated state is found. This is needed to fix incomplete db 462 // writes caused either by crashes/power outages, or simply non-committed tries. 463 // 464 // This method only rolls back the current block. The current header and current 465 // fast block are left intact. 466 func (bc *BlockChain) repair(head **types.Block) error { 467 for { 468 // Abort if we've rewound to a head block that does have associated state 469 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 470 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 471 return nil 472 } 473 // Otherwise rewind one block and recheck state availability there 474 block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 475 if block == nil { 476 return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash()) 477 } 478 (*head) = block 479 } 480 } 481 482 // Export writes the active chain to the given writer. 
483 func (bc *BlockChain) Export(w io.Writer) error { 484 return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) 485 } 486 487 // ExportN writes a subset of the active chain to the given writer. 488 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 489 bc.mu.RLock() 490 defer bc.mu.RUnlock() 491 492 if first > last { 493 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 494 } 495 log.Info("Exporting batch of blocks", "count", last-first+1) 496 497 start, reported := time.Now(), time.Now() 498 for nr := first; nr <= last; nr++ { 499 block := bc.GetBlockByNumber(nr) 500 if block == nil { 501 return fmt.Errorf("export failed on #%d: not found", nr) 502 } 503 if err := block.EncodeRLP(w); err != nil { 504 return err 505 } 506 if time.Since(reported) >= statsReportLimit { 507 log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) 508 reported = time.Now() 509 } 510 } 511 512 return nil 513 } 514 515 // insert injects a new head block into the current block chain. This method 516 // assumes that the block is indeed a true head. It will also reset the head 517 // header and the head fast sync block to this very same block if they are older 518 // or if they are on a different side chain. 519 // 520 // Note, this function assumes that the `mu` mutex is held! 
521 func (bc *BlockChain) insert(block *types.Block) { 522 // If the block is on a side chain or an unknown one, force other heads onto it too 523 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 524 525 // Add the block to the canonical chain number scheme and mark as the head 526 rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) 527 rawdb.WriteHeadBlockHash(bc.db, block.Hash()) 528 529 bc.currentBlock.Store(block) 530 531 // If the block is better than our head or is on a different chain, force update heads 532 if updateHeads { 533 bc.hc.SetCurrentHeader(block.Header()) 534 rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) 535 536 bc.currentFastBlock.Store(block) 537 } 538 } 539 540 // Genesis retrieves the chain's genesis block. 541 func (bc *BlockChain) Genesis() *types.Block { 542 return bc.genesisBlock 543 } 544 545 // GetBody retrieves a block body (transactions and uncles) from the database by 546 // hash, caching it if found. 547 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 548 // Short circuit if the body's already in the cache, retrieve otherwise 549 if cached, ok := bc.bodyCache.Get(hash); ok { 550 body := cached.(*types.Body) 551 return body 552 } 553 number := bc.hc.GetBlockNumber(hash) 554 if number == nil { 555 return nil 556 } 557 body := rawdb.ReadBody(bc.db, hash, *number) 558 if body == nil { 559 return nil 560 } 561 // Cache the found body for next time and return 562 bc.bodyCache.Add(hash, body) 563 return body 564 } 565 566 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 567 // caching it if found. 
568 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 569 // Short circuit if the body's already in the cache, retrieve otherwise 570 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 571 return cached.(rlp.RawValue) 572 } 573 number := bc.hc.GetBlockNumber(hash) 574 if number == nil { 575 return nil 576 } 577 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 578 if len(body) == 0 { 579 return nil 580 } 581 // Cache the found body for next time and return 582 bc.bodyRLPCache.Add(hash, body) 583 return body 584 } 585 586 // HasBlock checks if a block is fully present in the database or not. 587 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 588 if bc.blockCache.Contains(hash) { 589 return true 590 } 591 return rawdb.HasBody(bc.db, hash, number) 592 } 593 594 // HasFastBlock checks if a fast block is fully present in the database or not. 595 func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { 596 if !bc.HasBlock(hash, number) { 597 return false 598 } 599 if bc.receiptsCache.Contains(hash) { 600 return true 601 } 602 return rawdb.HasReceipts(bc.db, hash, number) 603 } 604 605 // HasState checks if state trie is fully present in the database or not. 606 func (bc *BlockChain) HasState(hash common.Hash) bool { 607 _, err := bc.stateCache.OpenTrie(hash) 608 return err == nil 609 } 610 611 // HasBlockAndState checks if a block and associated state trie is fully present 612 // in the database or not, caching it if present. 613 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 614 // Check first that the block itself is known 615 block := bc.GetBlock(hash, number) 616 if block == nil { 617 return false 618 } 619 return bc.HasState(block.Root()) 620 } 621 622 // GetBlock retrieves a block from the database by hash and number, 623 // caching it if found. 
624 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 625 // Short circuit if the block's already in the cache, retrieve otherwise 626 if block, ok := bc.blockCache.Get(hash); ok { 627 return block.(*types.Block) 628 } 629 block := rawdb.ReadBlock(bc.db, hash, number) 630 if block == nil { 631 return nil 632 } 633 // Cache the found block for next time and return 634 bc.blockCache.Add(block.Hash(), block) 635 return block 636 } 637 638 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 639 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 640 number := bc.hc.GetBlockNumber(hash) 641 if number == nil { 642 return nil 643 } 644 return bc.GetBlock(hash, *number) 645 } 646 647 // GetBlockByNumber retrieves a block from the database by number, caching it 648 // (associated with its hash) if found. 649 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 650 hash := rawdb.ReadCanonicalHash(bc.db, number) 651 if hash == (common.Hash{}) { 652 return nil 653 } 654 return bc.GetBlock(hash, number) 655 } 656 657 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 658 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 659 if receipts, ok := bc.receiptsCache.Get(hash); ok { 660 return receipts.(types.Receipts) 661 } 662 number := rawdb.ReadHeaderNumber(bc.db, hash) 663 if number == nil { 664 return nil 665 } 666 receipts := rawdb.ReadReceipts(bc.db, hash, *number) 667 bc.receiptsCache.Add(hash, receipts) 668 return receipts 669 } 670 671 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 
672 // [deprecated by eth/62] 673 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 674 number := bc.hc.GetBlockNumber(hash) 675 if number == nil { 676 return nil 677 } 678 for i := 0; i < n; i++ { 679 block := bc.GetBlock(hash, *number) 680 if block == nil { 681 break 682 } 683 blocks = append(blocks, block) 684 hash = block.ParentHash() 685 *number-- 686 } 687 return 688 } 689 690 // GetUnclesInChain retrieves all the uncles from a given block backwards until 691 // a specific distance is reached. 692 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 693 uncles := []*types.Header{} 694 for i := 0; block != nil && i < length; i++ { 695 uncles = append(uncles, block.Uncles()...) 696 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 697 } 698 return uncles 699 } 700 701 // TrieNode retrieves a blob of data associated with a trie node (or code hash) 702 // either from ephemeral in-memory cache, or from persistent storage. 703 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 704 return bc.stateCache.TrieDB().Node(hash) 705 } 706 707 // Stop stops the blockchain service. If any imports are currently in progress 708 // it will abort them using the procInterrupt. 709 func (bc *BlockChain) Stop() { 710 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 711 return 712 } 713 // Unsubscribe all subscriptions registered from blockchain 714 bc.scope.Close() 715 close(bc.quit) 716 atomic.StoreInt32(&bc.procInterrupt, 1) 717 718 bc.wg.Wait() 719 720 // Ensure the state of a recent block is also stored to disk before exiting. 
721 // We're writing three different states to catch different restart scenarios: 722 // - HEAD: So we don't need to reprocess any blocks in the general case 723 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 724 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 725 if !bc.cacheConfig.Disabled { 726 triedb := bc.stateCache.TrieDB() 727 728 for _, offset := range []uint64{0, 1, triesInMemory - 1} { 729 if number := bc.CurrentBlock().NumberU64(); number > offset { 730 recent := bc.GetBlockByNumber(number - offset) 731 732 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 733 if err := triedb.Commit(recent.Root(), true); err != nil { 734 log.Error("Failed to commit recent state trie", "err", err) 735 } 736 } 737 } 738 for !bc.triegc.Empty() { 739 triedb.Dereference(bc.triegc.PopItem().(common.Hash)) 740 } 741 if size, _ := triedb.Size(); size != 0 { 742 log.Error("Dangling trie nodes after full cleanup") 743 } 744 } 745 log.Info("Blockchain manager stopped") 746 } 747 748 func (bc *BlockChain) procFutureBlocks() { 749 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 750 for _, hash := range bc.futureBlocks.Keys() { 751 if block, exist := bc.futureBlocks.Peek(hash); exist { 752 blocks = append(blocks, block.(*types.Block)) 753 } 754 } 755 if len(blocks) > 0 { 756 types.BlockBy(types.Number).Sort(blocks) 757 758 // Insert one by one as chain insertion needs contiguous ancestry between blocks 759 for i := range blocks { 760 bc.InsertChain(blocks[i : i+1]) 761 } 762 } 763 } 764 765 // WriteStatus status of write 766 type WriteStatus byte 767 768 const ( 769 NonStatTy WriteStatus = iota 770 CanonStatTy 771 SideStatTy 772 ) 773 774 // Rollback is designed to remove a chain of links from the database that aren't 775 // certain enough to be valid. 
// Rollback is designed to remove a chain of links from the database that
// aren't certain enough to be valid. For every hash in the given chain (walked
// newest to oldest) the head header, head fast block and head block markers
// are rewound to the respective parent when they currently point at that hash.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
// (tx hash, contract address, per-tx gas used and derived log fields).
// It returns an error if the transaction and receipt counts do not match.
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		// Flush the batch whenever it grows past the ideal size; ValueSize is
		// accumulated into bytes before Reset so the final log is accurate.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

// lastWrite tracks the last block number whose trie was flushed to disk.
// NOTE(review): package-level mutable state — appears to be guarded by bc.mu
// in WriteBlockWithState, but verify if multiple chains share this package.
var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				// NOTE(review): Cap's error result is ignored here (as upstream).
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - triesInMemory

			// If we exceeded our time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			} else if bc.chainConfig.Energi != nil && bc.gcproc > bc.cacheConfig.TrieRapidLimit {
				// Energi requires state for PoS verification,
				// so ensure more often commit for faster recreation.

				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}

			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
	if block.Time() > max {
		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
	}
	bc.futureBlocks.Add(block.Hash(), block)
	return nil
}

// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, events, logs, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	bc.PostChainEvents(events, logs)
	return n, err
}

// insertChain is the internal implementation of insertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil, nil, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results, ready := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, ready, bc.Validator())

	block, err := it.next()
	switch {
	// First block is pruned, insert as sidechain and reorg only if TD grows enough
	case err == consensus.ErrPrunedAncestor:
		return bc.insertSidechain(block, it)

	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			block, err = it.next()
		}
		stats.queued += it.processed()
		stats.ignored += it.remaining()

		// If there are any still remaining, mark as ignored
		return it.index, events, coalescedLogs, err

	// First block (and state) is known
	//   1. We did a roll-back, and should now do a re-import
	//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
	//      from the canonical chain, which has not been verified.
	case err == ErrKnownBlock:
		// Skip all known blocks that are behind us
		current := bc.CurrentBlock().NumberU64()

		for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
			stats.ignored++
			block, err = it.next()
		}
		// Falls through to the block import

	// We are unable to process the block due to missing previous state.
	// Most likely, it got pruned. It is not a bad-block case.
	//
	// TODO: schedule retrieval of missing pieces and re-try only if
	// the current head is stalled.
	case err == consensus.ErrMissingState:
		log.Debug("Missing state at", "number", block.Number(), "hash", block.Hash())
		stats.ignored += len(it.chain)
		return it.index, events, coalescedLogs, err

	// DoS throttling. It does not mean that the block is invalid, but
	// we refuse to accept one at the current moment to mitigate DoS.
	case err == consensus.ErrDoSThrottle:
		log.Debug("DoS throttling at", "number", block.Number(), "hash", block.Hash())
		stats.ignored += len(it.chain)
		return it.index, events, coalescedLogs, err

	// Some other error occurred, abort
	case err != nil:
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return it.index, events, coalescedLogs, err
	}
	// No validation errors for the first block (or chain prefix skipped).
	// Note: state/receipts/status errors below intentionally shadow the loop's
	// err so the loop condition only sees iterator errors.
	for ; block != nil && err == nil; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, events, coalescedLogs, ErrBlacklistedHash
		}
		if err = bc.checkpoints.validate(bc, block.NumberU64(), block.Hash()); err != nil {
			bc.reportBlock(block, nil, err)
			return it.index, events, coalescedLogs, err
		}
		// Retrieve the parent block and its state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		}
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		t0 := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		t1 := time.Now()
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return it.index, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			return it.index, events, coalescedLogs, err
		}
		t2 := time.Now()
		proctime := time.Since(start)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		t3 := time.Now()
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		blockInsertTimer.UpdateSince(start)
		blockExecutionTimer.Update(t1.Sub(t0))
		blockValidationTimer.Update(t2.Sub(t1))
		blockWriteTimer.Update(t3.Sub(t2))
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			coalescedLogs = append(coalescedLogs, logs...)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
			events = append(events, ChainSideEvent{block})
		}
		blockInsertTimer.UpdateSince(start)
		stats.processed++
		stats.usedGas += usedGas

		cache, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, cache)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && err == consensus.ErrFutureBlock {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, events, coalescedLogs, err
		}
		block, err = it.next()

		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()

	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return it.index, events, coalescedLogs, err
}

// insertSidechain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock().NumberU64()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other errors means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current >= number {
			if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exist! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
			}
		}
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
				return it.index, nil, nil, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to database. Loop ended
	// either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
		return it.index, nil, nil, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, nil, nil, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
				return 0, nil, nil, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil, nil, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil, nil, nil
}

// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block

		deletedTxs types.Transactions
		addedTxs   types.Transactions

		deletedLogs []*types.Log
		rebirthLogs []*types.Log

		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted or reborn
		collectLogs = func(hash common.Hash, removed bool) {
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					l := *log
					if removed {
						l.Removed = true
						deletedLogs = append(deletedLogs, &l)
					} else {
						rebirthLogs = append(rebirthLogs, &l)
					}
				}
			}
		}
	)
	// Reduce the longer chain to the same number as the shorter one
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// Old chain is longer, gather all transactions and logs as deleted ones
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash(), true)
		}
	} else {
		// New chain is longer, stash all blocks away for subsequent insertion
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("invalid new chain")
	}
	// Both sides of the reorg are at the same number, reduce both until the common
	// ancestor is found
	for {
		// If the common ancestor was found, bail out
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		// Remove an old block as well as stash away a new block
		oldChain = append(oldChain, oldBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash(), true)

		newChain = append(newChain, newBlock)

		// Step back with both chains
		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("invalid old chain")
		}
		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if newBlock == nil {
			return fmt.Errorf("invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	for i := len(newChain) - 1; i >= 0; i-- {
		// Insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])

		// Collect reborn logs due to chain reorg (except head block (reverse order))
		if i != 0 {
			collectLogs(newChain[i].Hash(), false)
		}
		// Write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// When transactions get deleted from the database, the receipts that were
	// created in the fork must also be deleted
	batch := bc.db.NewBatch()
	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	// NOTE(review): batch.Write error is ignored here (as upstream).
	batch.Write()

	// If any logs need to be fired, do it now. In theory we could avoid creating
	// this goroutine if there are no events to fire, but realistically that only
	// ever happens if we're reorging empty blocks, which will only happen on idle
	// networks where performance is not an issue either way.
	//
	// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
	// event ordering?
	go func() {
		if len(deletedLogs) > 0 {
			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
		}
		if len(rebirthLogs) > 0 {
			bc.logsFeed.Send(rebirthLogs)
		}
		if len(oldChain) > 0 {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}
	}()
	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

// update periodically retries queued future blocks until the chain quits.
func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if blk, exist := bc.badBlocks.Peek(hash); exist {
			block := blk.(*types.Block)
			blocks = append(blocks, block)
		}
	}
	return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)

	var receiptString string
	for i, receipt := range receipts {
		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v

Number: %v
Hash: 0x%x
%v

Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain in to the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well an error describing what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
1667 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1668 start := time.Now() 1669 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1670 return i, err 1671 } 1672 1673 // Make sure only one thread manipulates the chain at once 1674 bc.chainmu.Lock() 1675 defer bc.chainmu.Unlock() 1676 1677 bc.wg.Add(1) 1678 defer bc.wg.Done() 1679 1680 whFunc := func(header *types.Header) error { 1681 bc.mu.Lock() 1682 defer bc.mu.Unlock() 1683 1684 _, err := bc.hc.WriteHeader(header) 1685 return err 1686 } 1687 1688 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1689 } 1690 1691 // writeHeader writes a header into the local chain, given that its parent is 1692 // already known. If the total difficulty of the newly inserted header becomes 1693 // greater than the current known TD, the canonical chain is re-routed. 1694 // 1695 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1696 // into the chain, as side effects caused by reorganisations cannot be emulated 1697 // without the real blocks. Hence, writing headers directly should only be done 1698 // in two scenarios: pure-header mode of operation (light clients), or properly 1699 // separated header/block phases (non-archive clients). 1700 func (bc *BlockChain) writeHeader(header *types.Header) error { 1701 bc.wg.Add(1) 1702 defer bc.wg.Done() 1703 1704 bc.mu.Lock() 1705 defer bc.mu.Unlock() 1706 1707 _, err := bc.hc.WriteHeader(header) 1708 return err 1709 } 1710 1711 // CurrentHeader retrieves the current head header of the canonical chain. The 1712 // header is retrieved from the HeaderChain's internal cache. 1713 func (bc *BlockChain) CurrentHeader() *types.Header { 1714 return bc.hc.CurrentHeader() 1715 } 1716 1717 // GetTd retrieves a block's total difficulty in the canonical chain from the 1718 // database by hash and number, caching it if found. 
1719 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1720 return bc.hc.GetTd(hash, number) 1721 } 1722 1723 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1724 // database by hash, caching it if found. 1725 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1726 return bc.hc.GetTdByHash(hash) 1727 } 1728 1729 // GetHeader retrieves a block header from the database by hash and number, 1730 // caching it if found. 1731 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1732 return bc.hc.GetHeader(hash, number) 1733 } 1734 1735 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1736 // found. 1737 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 1738 return bc.hc.GetHeaderByHash(hash) 1739 } 1740 1741 // HasHeader checks if a block header is present in the database or not, caching 1742 // it if present. 1743 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 1744 return bc.hc.HasHeader(hash, number) 1745 } 1746 1747 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 1748 // hash, fetching towards the genesis block. 1749 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 1750 return bc.hc.GetBlockHashesFromHash(hash, max) 1751 } 1752 1753 // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or 1754 // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the 1755 // number of blocks to be individually checked before we reach the canonical chain. 1756 // 1757 // Note: ancestor == 0 returns the same block, 1 returns its parent and so on. 
1758 func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { 1759 bc.chainmu.Lock() 1760 defer bc.chainmu.Unlock() 1761 1762 return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) 1763 } 1764 1765 // GetHeaderByNumber retrieves a block header from the database by number, 1766 // caching it (associated with its hash) if found. 1767 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1768 return bc.hc.GetHeaderByNumber(number) 1769 } 1770 1771 // Config retrieves the blockchain's chain configuration. 1772 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1773 1774 // Engine retrieves the blockchain's consensus engine. 1775 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1776 1777 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1778 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1779 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1780 } 1781 1782 // SubscribeChainEvent registers a subscription of ChainEvent. 1783 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1784 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1785 } 1786 1787 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1788 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1789 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1790 } 1791 1792 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1793 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1794 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1795 } 1796 1797 // SubscribeLogsEvent registers a subscription of []*types.Log. 
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// Retrieve or calculate block state based on available reference points.
//
// CalculateBlockState returns the state at block (hash, number). If the state
// is not directly available from the state cache, it walks back to the
// nearest ancestor whose state can be opened and replays the intermediate
// blocks on top of it. Returns nil on any failure (missing header/block, no
// reachable ancestor state, or a process/validate/commit error).
func (bc *BlockChain) CalculateBlockState(
	hash common.Hash,
	number uint64,
) *state.StateDB {
	header := bc.GetHeader(hash, number)

	if header == nil {
		return nil
	}

	statedb, err := state.New(header.Root, bc.stateCache)

	// Fast exit
	if err == nil {
		return statedb
	}

	// Stack of block hashes from the requested block (index 0) down to the
	// first ancestor whose state is available (last index).
	hh_stack := make([]common.Hash, 1, triesInMemory)
	hh_stack[0] = hash

	// Find the base header with state
	for {
		hh_stack = append(hh_stack, header.ParentHash)

		header = bc.GetHeader(header.ParentHash, header.Number.Uint64()-1)
		if header == nil {
			log.Error("Failed to find ancestor with state", "block", hash)
			return nil
		}

		statedb, err = state.New(header.Root, bc.stateCache)
		if err == nil {
			break
		}
		// Warns exactly once (when the stack first reaches triesInMemory);
		// the walk continues past this point regardless.
		if len(hh_stack) == triesInMemory {
			log.Warn("Ancestor with state is too far! Out-of-memory is possible.")
		}
	}

	// The last pushed hash is the ancestor whose state was successfully opened.
	tmp_hash := &hh_stack[len(hh_stack)-1]
	parent := bc.GetBlockByHash(*tmp_hash)
	if parent == nil {
		log.Error("Failed to read parent", "parent", *tmp_hash)
		return nil
	}

	log.Info("Re-creating historical state",
		"block", hash, "number", number,
		"len", len(hh_stack))

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// re-create state: replay each descendant block, oldest first, on top of
	// the ancestor's state until the requested block (index 0) is reached.
	for i := len(hh_stack) - 2; i >= 0; i-- {
		tmp_hash = &hh_stack[i]
		block := bc.GetBlockByHash(*tmp_hash)
		if block == nil {
			log.Error("Failed to read block", "block", *tmp_hash)
			return nil
		}

		receipts, _, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
		if err != nil {
			log.Error("Failed to re-process block", "block", *tmp_hash, "err", err)
			return nil
		}

		err = bc.Validator().ValidateState(block, parent, statedb, receipts, usedGas)
		if err != nil {
			log.Error("Failed to re-validate block", "block", *tmp_hash, "err", err)
			return nil
		}

		root, err := statedb.Commit(bc.chainConfig.IsEIP158(block.Number()))
		if err != nil {
			log.Error("Failed to commit state", "block", *tmp_hash, "err", err)
			return nil
		}

		// Help subsequent calls to avoid recalculation.
		// It may grow dramatically, but triedb GC is anticipated at some point.
		if i < triesInMemory {
			bc.stateCache.TrieDB().Reference(root, common.Hash{})
			bc.triegc.Push(root, -int64(block.NumberU64()))
		} else if i == (triesInMemory + 1) {
			// This should be a dead code, if periodic save is working properly
			// NOTE(review): i == triesInMemory matches neither branch — looks
			// like a possible off-by-one in the boundary; confirm intent.
			err = bc.stateCache.TrieDB().Commit(root, false)
			if err != nil {
				log.Error("Failed to commit trie", "block", *tmp_hash, "err", err)
				return nil
			}
		}

		// Take a fresh copy before replaying the next block — presumably to
		// detach from the just-committed state; TODO confirm necessity.
		statedb = statedb.Copy()
		parent = block
	}

	return statedb
}