gitee.com/moran666666/go-ubiq@v3.0.1+incompatible/core/blockchain.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package core implements the Ubiq consensus protocol. 18 package core 19 20 import ( 21 "errors" 22 "fmt" 23 "io" 24 "math/big" 25 mrand "math/rand" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/ubiq/go-ubiq/common" 31 "github.com/ubiq/go-ubiq/common/mclock" 32 "github.com/ubiq/go-ubiq/common/prque" 33 "github.com/ubiq/go-ubiq/consensus" 34 "github.com/ubiq/go-ubiq/core/rawdb" 35 "github.com/ubiq/go-ubiq/core/state" 36 "github.com/ubiq/go-ubiq/core/types" 37 "github.com/ubiq/go-ubiq/core/vm" 38 "github.com/ubiq/go-ubiq/crypto" 39 "github.com/ubiq/go-ubiq/ethdb" 40 "github.com/ubiq/go-ubiq/event" 41 "github.com/ubiq/go-ubiq/log" 42 "github.com/ubiq/go-ubiq/metrics" 43 "github.com/ubiq/go-ubiq/params" 44 "github.com/ubiq/go-ubiq/rlp" 45 "github.com/ubiq/go-ubiq/trie" 46 "github.com/hashicorp/golang-lru" 47 ) 48 49 var ( 50 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 51 blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil) 52 blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) 53 blockWriteTimer = 
metrics.NewRegisteredTimer("chain/write", nil) 54 55 ErrNoGenesis = errors.New("Genesis not found in chain") 56 ) 57 58 const ( 59 bodyCacheLimit = 256 60 blockCacheLimit = 256 61 receiptsCacheLimit = 32 62 maxFutureBlocks = 256 63 maxTimeFutureBlocks = 30 64 badBlockLimit = 10 65 triesInMemory = 128 66 67 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 68 BlockChainVersion uint64 = 3 69 ) 70 71 // CacheConfig contains the configuration values for the trie caching/pruning 72 // that's resident in a blockchain. 73 type CacheConfig struct { 74 Disabled bool // Whether to disable trie write caching (archive node) 75 TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory 76 TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk 77 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 78 } 79 80 // BlockChain represents the canonical chain given a database with a genesis 81 // block. The Blockchain manages chain imports, reverts, chain reorganisations. 82 // 83 // Importing blocks in to the block chain happens according to the set of rules 84 // defined by the two stage Validator. Processing of blocks is done using the 85 // Processor which processes the included transaction. The validation of the state 86 // is done in the second part of the Validator. Failing results in aborting of 87 // the import. 88 // 89 // The BlockChain also helps in returning blocks from **any** chain included 90 // in the database as well as blocks that represents the canonical chain. It's 91 // important to note that GetBlock can return any block and does not need to be 92 // included in the canonical one where as GetBlockByNumber always represents the 93 // canonical chain. 
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain // Canonical header chain; also the source of block-number lookups
	rmLogsFeed    event.Feed   // Feed for removed (reorged-out) logs
	chainFeed     event.Feed   // Feed for canonical chain events
	chainSideFeed event.Feed   // Feed for side-chain events
	chainHeadFeed event.Feed   // Feed for new chain head events
	logsFeed      event.Feed   // Feed for new log events
	scope         event.SubscriptionScope // Tracks all feed subscriptions so Stop can close them at once
	genesisBlock  *types.Block            // Cached genesis block (block number 0)

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine // Consensus engine used for sealing/verification
	processor Processor        // block processor interface
	validator Validator        // block and state validator interface
	vmConfig  vm.Config        // EVM configuration applied to block processing

	badBlocks      *lru.Cache               // Bad block cache
	shouldPreserve func(*types.Block) bool  // Function used to determine whether should preserve the given block.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
// NewBlockChain wires together the database, caches, header chain, validator and
// processor, verifies the genesis block exists, restores the last known head
// state, rewinds past any known-bad blocks, and starts the background update
// loop. Passing a nil cacheConfig selects the default (non-archive) cache
// settings. Returns ErrNoGenesis when the database has no block 0.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	// lru.New only fails for non-positive sizes; the limits are positive constants.
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				// NOTE(review): the "hash" key logs header.ParentHash, not header.Hash() — confirm intended.
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}

// getProcInterrupt reports whether block processing has been interrupted
// (set atomically by Stop).
func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
	return &bc.vmConfig
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}

	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
// SetHead rewinds the local chain to a new head. Header chain is rewound first
// (deleting block bodies along the way), then the full-block and fast-sync
// heads are pulled back to the new header, falling back to the genesis block
// when the rewound head (or its state) is unavailable. Finishes by reloading
// the last state from the database so all cached heads are consistent.
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well at its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
376 func (bc *BlockChain) SetValidator(validator Validator) { 377 bc.procmu.Lock() 378 defer bc.procmu.Unlock() 379 bc.validator = validator 380 } 381 382 // Validator returns the current validator. 383 func (bc *BlockChain) Validator() Validator { 384 bc.procmu.RLock() 385 defer bc.procmu.RUnlock() 386 return bc.validator 387 } 388 389 // Processor returns the current processor. 390 func (bc *BlockChain) Processor() Processor { 391 bc.procmu.RLock() 392 defer bc.procmu.RUnlock() 393 return bc.processor 394 } 395 396 // State returns a new mutable state based on the current HEAD block. 397 func (bc *BlockChain) State() (*state.StateDB, error) { 398 return bc.StateAt(bc.CurrentBlock().Root()) 399 } 400 401 // StateAt returns a new mutable state based on a particular point in time. 402 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 403 return state.New(root, bc.stateCache) 404 } 405 406 // StateCache returns the caching database underpinning the blockchain instance. 407 func (bc *BlockChain) StateCache() state.Database { 408 return bc.stateCache 409 } 410 411 // Reset purges the entire blockchain, restoring it to its genesis state. 412 func (bc *BlockChain) Reset() error { 413 return bc.ResetWithGenesisBlock(bc.genesisBlock) 414 } 415 416 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 417 // specified genesis state. 
418 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 419 // Dump the entire block chain and purge the caches 420 if err := bc.SetHead(0); err != nil { 421 return err 422 } 423 bc.mu.Lock() 424 defer bc.mu.Unlock() 425 426 // Prepare the genesis block and reinitialise the chain 427 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 428 log.Crit("Failed to write genesis block TD", "err", err) 429 } 430 rawdb.WriteBlock(bc.db, genesis) 431 432 bc.genesisBlock = genesis 433 bc.insert(bc.genesisBlock) 434 bc.currentBlock.Store(bc.genesisBlock) 435 bc.hc.SetGenesis(bc.genesisBlock.Header()) 436 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 437 bc.currentFastBlock.Store(bc.genesisBlock) 438 439 return nil 440 } 441 442 // repair tries to repair the current blockchain by rolling back the current block 443 // until one with associated state is found. This is needed to fix incomplete db 444 // writes caused either by crashes/power outages, or simply non-committed tries. 445 // 446 // This method only rolls back the current block. The current header and current 447 // fast block are left intact. 448 func (bc *BlockChain) repair(head **types.Block) error { 449 for { 450 // Abort if we've rewound to a head block that does have associated state 451 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 452 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 453 return nil 454 } 455 // Otherwise rewind one block and recheck state availability there 456 block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 457 if block == nil { 458 return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash()) 459 } 460 (*head) = block 461 } 462 } 463 464 // Export writes the active chain to the given writer. 
465 func (bc *BlockChain) Export(w io.Writer) error { 466 return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) 467 } 468 469 // ExportN writes a subset of the active chain to the given writer. 470 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 471 bc.mu.RLock() 472 defer bc.mu.RUnlock() 473 474 if first > last { 475 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 476 } 477 log.Info("Exporting batch of blocks", "count", last-first+1) 478 479 start, reported := time.Now(), time.Now() 480 for nr := first; nr <= last; nr++ { 481 block := bc.GetBlockByNumber(nr) 482 if block == nil { 483 return fmt.Errorf("export failed on #%d: not found", nr) 484 } 485 if err := block.EncodeRLP(w); err != nil { 486 return err 487 } 488 if time.Since(reported) >= statsReportLimit { 489 log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) 490 reported = time.Now() 491 } 492 } 493 494 return nil 495 } 496 497 // insert injects a new head block into the current block chain. This method 498 // assumes that the block is indeed a true head. It will also reset the head 499 // header and the head fast sync block to this very same block if they are older 500 // or if they are on a different side chain. 501 // 502 // Note, this function assumes that the `mu` mutex is held! 
503 func (bc *BlockChain) insert(block *types.Block) { 504 // If the block is on a side chain or an unknown one, force other heads onto it too 505 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 506 507 // Add the block to the canonical chain number scheme and mark as the head 508 rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) 509 rawdb.WriteHeadBlockHash(bc.db, block.Hash()) 510 511 bc.currentBlock.Store(block) 512 513 // If the block is better than our head or is on a different chain, force update heads 514 if updateHeads { 515 bc.hc.SetCurrentHeader(block.Header()) 516 rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) 517 518 bc.currentFastBlock.Store(block) 519 } 520 } 521 522 // Genesis retrieves the chain's genesis block. 523 func (bc *BlockChain) Genesis() *types.Block { 524 return bc.genesisBlock 525 } 526 527 // GetBody retrieves a block body (transactions and uncles) from the database by 528 // hash, caching it if found. 529 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 530 // Short circuit if the body's already in the cache, retrieve otherwise 531 if cached, ok := bc.bodyCache.Get(hash); ok { 532 body := cached.(*types.Body) 533 return body 534 } 535 number := bc.hc.GetBlockNumber(hash) 536 if number == nil { 537 return nil 538 } 539 body := rawdb.ReadBody(bc.db, hash, *number) 540 if body == nil { 541 return nil 542 } 543 // Cache the found body for next time and return 544 bc.bodyCache.Add(hash, body) 545 return body 546 } 547 548 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 549 // caching it if found. 
550 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 551 // Short circuit if the body's already in the cache, retrieve otherwise 552 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 553 return cached.(rlp.RawValue) 554 } 555 number := bc.hc.GetBlockNumber(hash) 556 if number == nil { 557 return nil 558 } 559 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 560 if len(body) == 0 { 561 return nil 562 } 563 // Cache the found body for next time and return 564 bc.bodyRLPCache.Add(hash, body) 565 return body 566 } 567 568 // HasBlock checks if a block is fully present in the database or not. 569 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 570 if bc.blockCache.Contains(hash) { 571 return true 572 } 573 return rawdb.HasBody(bc.db, hash, number) 574 } 575 576 // HasFastBlock checks if a fast block is fully present in the database or not. 577 func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { 578 if !bc.HasBlock(hash, number) { 579 return false 580 } 581 if bc.receiptsCache.Contains(hash) { 582 return true 583 } 584 return rawdb.HasReceipts(bc.db, hash, number) 585 } 586 587 // HasState checks if state trie is fully present in the database or not. 588 func (bc *BlockChain) HasState(hash common.Hash) bool { 589 _, err := bc.stateCache.OpenTrie(hash) 590 return err == nil 591 } 592 593 // HasBlockAndState checks if a block and associated state trie is fully present 594 // in the database or not, caching it if present. 595 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 596 // Check first that the block itself is known 597 block := bc.GetBlock(hash, number) 598 if block == nil { 599 return false 600 } 601 return bc.HasState(block.Root()) 602 } 603 604 // calcPastMedianTime calculates the median time of the previous few blocks 605 // prior to, and including, the passed block node. 
606 // 607 // Modified from btcsuite 608 func (bc *BlockChain) CalcPastMedianTime(number uint64, parent *types.Header) *big.Int { 609 return bc.hc.CalcPastMedianTime(number, parent) 610 } 611 612 // GetBlock retrieves a block from the database by hash and number, 613 // caching it if found. 614 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 615 // Short circuit if the block's already in the cache, retrieve otherwise 616 if block, ok := bc.blockCache.Get(hash); ok { 617 return block.(*types.Block) 618 } 619 block := rawdb.ReadBlock(bc.db, hash, number) 620 if block == nil { 621 return nil 622 } 623 // Cache the found block for next time and return 624 bc.blockCache.Add(block.Hash(), block) 625 return block 626 } 627 628 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 629 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 630 number := bc.hc.GetBlockNumber(hash) 631 if number == nil { 632 return nil 633 } 634 return bc.GetBlock(hash, *number) 635 } 636 637 // GetBlockByNumber retrieves a block from the database by number, caching it 638 // (associated with its hash) if found. 639 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 640 hash := rawdb.ReadCanonicalHash(bc.db, number) 641 if hash == (common.Hash{}) { 642 return nil 643 } 644 return bc.GetBlock(hash, number) 645 } 646 647 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 648 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 649 if receipts, ok := bc.receiptsCache.Get(hash); ok { 650 return receipts.(types.Receipts) 651 } 652 number := rawdb.ReadHeaderNumber(bc.db, hash) 653 if number == nil { 654 return nil 655 } 656 receipts := rawdb.ReadReceipts(bc.db, hash, *number) 657 bc.receiptsCache.Add(hash, receipts) 658 return receipts 659 } 660 661 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 
// [deprecated by eth/62]
// GetBlocksFromHash walks parent links starting at hash, returning up to n
// blocks (fewer if an ancestor is missing).
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	// Guarantee single execution: only the first caller flips running 0->1.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	// Wait for in-flight imports to notice the interrupt and drain.
	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		// Release every trie still queued for garbage collection.
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

// procFutureBlocks drains the future-block cache and attempts to import the
// queued blocks in ascending number order.
func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus status of write
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota // not written / no status
	CanonStatTy                  // written as canonical chain head
	SideStatTy                   // written as side-chain block
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
// Rollback removes the given chain of hashes (processed newest-first) from the
// head pointers: whichever of the current header, fast block or full block
// matches a hash is stepped back to its parent.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			// NOTE(review): GetBlock may return nil if the parent body is missing — confirm callers guarantee it exists.
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		// Flush the batch whenever it grows past the ideal size.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

// lastWrite tracks the block number of the last trie flush (see WriteBlockWithState).
var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
968 } else { 969 // Full but not archive node, do proper garbage collection 970 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 971 bc.triegc.Push(root, -int64(block.NumberU64())) 972 973 if current := block.NumberU64(); current > triesInMemory { 974 // If we exceeded our memory allowance, flush matured singleton nodes to disk 975 var ( 976 nodes, imgs = triedb.Size() 977 limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 978 ) 979 if nodes > limit || imgs > 4*1024*1024 { 980 triedb.Cap(limit - ethdb.IdealBatchSize) 981 } 982 // Find the next state trie we need to commit 983 chosen := current - triesInMemory 984 985 // If we exceeded out time allowance, flush an entire trie to disk 986 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 987 // If the header is missing (canonical chain behind), we're reorging a low 988 // diff sidechain. Suspend committing until this operation is completed. 989 header := bc.GetHeaderByNumber(chosen) 990 if header == nil { 991 log.Warn("Reorg in progress, trie commit postponed", "number", chosen) 992 } else { 993 // If we're exceeding limits but haven't reached a large enough memory gap, 994 // warn the user that the system is becoming unstable. 995 if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 996 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) 997 } 998 // Flush an entire trie and restart the counters 999 triedb.Commit(header.Root, true) 1000 lastWrite = chosen 1001 bc.gcproc = 0 1002 } 1003 } 1004 // Garbage collect anything below our required write retention 1005 for !bc.triegc.Empty() { 1006 root, number := bc.triegc.Pop() 1007 if uint64(-number) > chosen { 1008 bc.triegc.Push(root, number) 1009 break 1010 } 1011 triedb.Dereference(root.(common.Hash)) 1012 } 1013 } 1014 } 1015 1016 // Write other block data using a batch. 
1017 batch := bc.db.NewBatch() 1018 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) 1019 1020 // If the total difficulty is higher than our known, add it to the canonical chain 1021 // Second clause in the if statement reduces the vulnerability to selfish mining. 1022 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1023 reorg := externTd.Cmp(localTd) > 0 1024 currentBlock = bc.CurrentBlock() 1025 if !reorg && externTd.Cmp(localTd) == 0 { 1026 // Split same-difficulty blocks by number, then preferentially select 1027 // the block generated by the local miner as the canonical block. 1028 if block.NumberU64() < currentBlock.NumberU64() { 1029 reorg = true 1030 } else if block.NumberU64() == currentBlock.NumberU64() { 1031 var currentPreserve, blockPreserve bool 1032 if bc.shouldPreserve != nil { 1033 currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) 1034 } 1035 reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) 1036 } 1037 } 1038 if reorg { 1039 // Reorganise the chain if the parent is not the head block 1040 if block.ParentHash() != currentBlock.Hash() { 1041 if err := bc.reorg(currentBlock, block); err != nil { 1042 return NonStatTy, err 1043 } 1044 } 1045 // Write the positional metadata for transaction/receipt lookups and preimages 1046 rawdb.WriteTxLookupEntries(batch, block) 1047 rawdb.WritePreimages(batch, state.Preimages()) 1048 1049 status = CanonStatTy 1050 } else { 1051 status = SideStatTy 1052 } 1053 if err := batch.Write(); err != nil { 1054 return NonStatTy, err 1055 } 1056 1057 // Set new head. 1058 if status == CanonStatTy { 1059 bc.insert(block) 1060 } 1061 bc.futureBlocks.Remove(block.Hash()) 1062 return status, nil 1063 } 1064 1065 // addFutureBlock checks if the block is within the max allowed window to get 1066 // accepted for future processing, and returns an error if the block is too far 1067 // ahead and was not added. 
1068 func (bc *BlockChain) addFutureBlock(block *types.Block) error { 1069 max := uint64(time.Now().Unix() + maxTimeFutureBlocks) 1070 if block.Time() > max { 1071 return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) 1072 } 1073 bc.futureBlocks.Add(block.Hash(), block) 1074 return nil 1075 } 1076 1077 // InsertChain attempts to insert the given batch of blocks in to the canonical 1078 // chain or, otherwise, create a fork. If an error is returned it will return 1079 // the index number of the failing block as well an error describing what went 1080 // wrong. 1081 // 1082 // After insertion is done, all accumulated events will be fired. 1083 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1084 // Sanity check that we have something meaningful to import 1085 if len(chain) == 0 { 1086 return 0, nil 1087 } 1088 // Do a sanity check that the provided chain is actually ordered and linked 1089 for i := 1; i < len(chain); i++ { 1090 if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { 1091 // Chain broke ancestry, log a message (programming error) and skip insertion 1092 log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(), 1093 "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash()) 1094 1095 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), 1096 chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) 1097 } 1098 } 1099 // Pre-checks passed, start the full block imports 1100 bc.wg.Add(1) 1101 bc.chainmu.Lock() 1102 n, events, logs, err := bc.insertChain(chain, true) 1103 bc.chainmu.Unlock() 1104 bc.wg.Done() 1105 1106 bc.PostChainEvents(events, logs) 1107 return n, err 1108 } 1109 1110 // insertChain is the internal implementation of insertChain, which 
assumes that 1111 // 1) chains are contiguous, and 2) The chain mutex is held. 1112 // 1113 // This method is split out so that import batches that require re-injecting 1114 // historical blocks can do so without releasing the lock, which could lead to 1115 // racey behaviour. If a sidechain import is in progress, and the historic state 1116 // is imported, but then new canon-head is added before the actual sidechain 1117 // completes, then the historic state could be pruned again 1118 func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) { 1119 // If the chain is terminating, don't even bother starting u 1120 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1121 return 0, nil, nil, nil 1122 } 1123 // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) 1124 senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) 1125 1126 // A queued approach to delivering events. This is generally 1127 // faster than direct delivery and requires much less mutex 1128 // acquiring. 
1129 var ( 1130 stats = insertStats{startTime: mclock.Now()} 1131 events = make([]interface{}, 0, len(chain)) 1132 lastCanon *types.Block 1133 coalescedLogs []*types.Log 1134 ) 1135 // Start the parallel header verifier 1136 headers := make([]*types.Header, len(chain)) 1137 seals := make([]bool, len(chain)) 1138 1139 for i, block := range chain { 1140 headers[i] = block.Header() 1141 seals[i] = verifySeals 1142 } 1143 abort, results := bc.engine.VerifyHeaders(bc, headers, seals) 1144 defer close(abort) 1145 1146 // Peek the error for the first block to decide the directing import logic 1147 it := newInsertIterator(chain, results, bc.Validator()) 1148 1149 block, err := it.next() 1150 switch { 1151 // First block is pruned, insert as sidechain and reorg only if TD grows enough 1152 case err == consensus.ErrPrunedAncestor: 1153 return bc.insertSidechain(block, it) 1154 1155 // First block is future, shove it (and all children) to the future queue (unknown ancestor) 1156 case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())): 1157 for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) { 1158 if err := bc.addFutureBlock(block); err != nil { 1159 return it.index, events, coalescedLogs, err 1160 } 1161 block, err = it.next() 1162 } 1163 stats.queued += it.processed() 1164 stats.ignored += it.remaining() 1165 1166 // If there are any still remaining, mark as ignored 1167 return it.index, events, coalescedLogs, err 1168 1169 // First block (and state) is known 1170 // 1. We did a roll-back, and should now do a re-import 1171 // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot 1172 // from the canonical chain, which has not been verified. 
1173 case err == ErrKnownBlock: 1174 // Skip all known blocks that behind us 1175 current := bc.CurrentBlock().NumberU64() 1176 1177 for block != nil && err == ErrKnownBlock && current >= block.NumberU64() { 1178 stats.ignored++ 1179 block, err = it.next() 1180 } 1181 // Falls through to the block import 1182 1183 // Some other error occurred, abort 1184 case err != nil: 1185 stats.ignored += len(it.chain) 1186 bc.reportBlock(block, nil, err) 1187 return it.index, events, coalescedLogs, err 1188 } 1189 // No validation errors for the first block (or chain prefix skipped) 1190 for ; block != nil && err == nil; block, err = it.next() { 1191 // If the chain is terminating, stop processing blocks 1192 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1193 log.Debug("Premature abort during blocks processing") 1194 break 1195 } 1196 // If the header is a banned one, straight out abort 1197 if BadHashes[block.Hash()] { 1198 bc.reportBlock(block, nil, ErrBlacklistedHash) 1199 return it.index, events, coalescedLogs, ErrBlacklistedHash 1200 } 1201 // Retrieve the parent block and it's state to execute on top 1202 start := time.Now() 1203 1204 parent := it.previous() 1205 if parent == nil { 1206 parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 1207 } 1208 state, err := state.New(parent.Root(), bc.stateCache) 1209 if err != nil { 1210 return it.index, events, coalescedLogs, err 1211 } 1212 // Process block using the parent state as reference point. 
1213 t0 := time.Now() 1214 receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig) 1215 t1 := time.Now() 1216 if err != nil { 1217 bc.reportBlock(block, receipts, err) 1218 return it.index, events, coalescedLogs, err 1219 } 1220 // Validate the state using the default validator 1221 if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil { 1222 bc.reportBlock(block, receipts, err) 1223 return it.index, events, coalescedLogs, err 1224 } 1225 t2 := time.Now() 1226 proctime := time.Since(start) 1227 1228 // Write the block to the chain and get the status. 1229 status, err := bc.WriteBlockWithState(block, receipts, state) 1230 t3 := time.Now() 1231 if err != nil { 1232 return it.index, events, coalescedLogs, err 1233 } 1234 blockInsertTimer.UpdateSince(start) 1235 blockExecutionTimer.Update(t1.Sub(t0)) 1236 blockValidationTimer.Update(t2.Sub(t1)) 1237 blockWriteTimer.Update(t3.Sub(t2)) 1238 switch status { 1239 case CanonStatTy: 1240 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), 1241 "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), 1242 "elapsed", common.PrettyDuration(time.Since(start)), 1243 "root", block.Root()) 1244 1245 coalescedLogs = append(coalescedLogs, logs...) 
1246 events = append(events, ChainEvent{block, block.Hash(), logs}) 1247 lastCanon = block 1248 1249 // Only count canonical blocks for GC processing time 1250 bc.gcproc += proctime 1251 1252 case SideStatTy: 1253 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), 1254 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1255 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 1256 "root", block.Root()) 1257 events = append(events, ChainSideEvent{block}) 1258 } 1259 blockInsertTimer.UpdateSince(start) 1260 stats.processed++ 1261 stats.usedGas += usedGas 1262 1263 cache, _ := bc.stateCache.TrieDB().Size() 1264 stats.report(chain, it.index, cache) 1265 } 1266 // Any blocks remaining here? The only ones we care about are the future ones 1267 if block != nil && err == consensus.ErrFutureBlock { 1268 if err := bc.addFutureBlock(block); err != nil { 1269 return it.index, events, coalescedLogs, err 1270 } 1271 block, err = it.next() 1272 1273 for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() { 1274 if err := bc.addFutureBlock(block); err != nil { 1275 return it.index, events, coalescedLogs, err 1276 } 1277 stats.queued++ 1278 } 1279 } 1280 stats.ignored += it.remaining() 1281 1282 // Append a single chain head event if we've progressed the chain 1283 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1284 events = append(events, ChainHeadEvent{lastCanon}) 1285 } 1286 return it.index, events, coalescedLogs, err 1287 } 1288 1289 // insertSidechain is called when an import batch hits upon a pruned ancestor 1290 // error, which happens when a sidechain with a sufficiently old fork-block is 1291 // found. 1292 // 1293 // The method writes all (header-and-body-valid) blocks to disk, then tries to 1294 // switch over to the new chain if the TD exceeded the current chain. 
func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock().NumberU64()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other errors means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current >= number {
			if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exist! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
			}
		}
		// Lazily resolve the parent TD on the first iteration; afterwards just
		// accumulate each block's difficulty on top.
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
				return it.index, nil, nil, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to database. Loop ended
	// either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
		return it.index, nil, nil, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, nil, nil, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
				return 0, nil, nil, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil, nil, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil, nil, nil
}

// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block

		deletedTxs types.Transactions
		addedTxs   types.Transactions

		deletedLogs []*types.Log
		rebirthLogs []*types.Log

		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted or reborn
		collectLogs = func(hash common.Hash, removed bool) {
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					l := *log
					if removed {
						l.Removed = true
						deletedLogs = append(deletedLogs, &l)
					} else {
						rebirthLogs = append(rebirthLogs, &l)
					}
				}
			}
		}
	)
	// Reduce the longer chain to the same number as the shorter one
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// Old chain is longer, gather all transactions and logs as deleted ones
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash(), true)
		}
	} else {
		// New chain is longer, stash all blocks away for subsequent insertion
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("invalid new chain")
	}
	// Both sides of the reorg are at the same number, reduce both until the common
	// ancestor is found
	for {
		// If the common ancestor was found, bail out
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		// Remove an old block as well as stash away a new block
		oldChain = append(oldChain, oldBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash(), true)

		newChain = append(newChain, newBlock)

		// Step back with both chains
		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("invalid old chain")
		}
		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if newBlock == nil {
			return fmt.Errorf("invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	for i := len(newChain) - 1; i >= 0; i-- {
		// Insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])

		// Collect reborn logs due to chain reorg (except head block (reverse order))
		if i != 0 {
			collectLogs(newChain[i].Hash(), false)
		}
		// Write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// When transactions get deleted from the database, the receipts that were
	// created in the fork must also be deleted
	// NOTE(review): the batch write error is ignored here — a failed delete
	// would leave stale tx lookup entries behind; confirm this is acceptable.
	batch := bc.db.NewBatch()
	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	batch.Write()

	// If any logs need to be fired, do it now. In theory we could avoid creating
	// this goroutine if there are no events to fire, but realistically that only
	// ever happens if we're reorging empty blocks, which will only happen on idle
	// networks where performance is not an issue either way.
	//
	// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
	// event ordering?
	go func() {
		if len(deletedLogs) > 0 {
			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
		}
		if len(rebirthLogs) > 0 {
			bc.logsFeed.Send(rebirthLogs)
		}
		if len(oldChain) > 0 {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}
	}()
	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

// update periodically retries queued future blocks until the chain shuts down.
func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if blk, exist := bc.badBlocks.Peek(hash); exist {
			block := blk.(*types.Block)
			blocks = append(blocks, block)
		}
	}
	return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)

	var receiptString string
	for i, receipt := range receipts {
		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v

Number: %v
Hash: 0x%x
%v

Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain in to the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well an error describing what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()

	// Each individual header write additionally takes the short-lived mu lock
	// before delegating to the header chain.
	whFunc := func(header *types.Header) error {
		bc.mu.Lock()
		defer bc.mu.Unlock()

		_, err := bc.hc.WriteHeader(header)
		return err
	}

	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}

// writeHeader writes a header into the local chain, given that its parent is
// already known. If the total difficulty of the newly inserted header becomes
// greater than the current known TD, the canonical chain is re-routed.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (bc *BlockChain) writeHeader(header *types.Header) error {
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.mu.Lock()
	defer bc.mu.Unlock()

	_, err := bc.hc.WriteHeader(header)
	return err
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
1713 func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { 1714 bc.chainmu.Lock() 1715 defer bc.chainmu.Unlock() 1716 1717 return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) 1718 } 1719 1720 // GetHeaderByNumber retrieves a block header from the database by number, 1721 // caching it (associated with its hash) if found. 1722 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1723 return bc.hc.GetHeaderByNumber(number) 1724 } 1725 1726 // Config retrieves the blockchain's chain configuration. 1727 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1728 1729 // Engine retrieves the blockchain's consensus engine. 1730 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1731 1732 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1733 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1734 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1735 } 1736 1737 // SubscribeChainEvent registers a subscription of ChainEvent. 1738 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1739 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1740 } 1741 1742 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1743 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1744 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1745 } 1746 1747 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1748 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1749 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1750 } 1751 1752 // SubscribeLogsEvent registers a subscription of []*types.Log. 
1753 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 1754 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 1755 }