// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ShyftNetwork/go-empyrean/common"
	"github.com/ShyftNetwork/go-empyrean/common/mclock"
	"github.com/ShyftNetwork/go-empyrean/common/prque"
	"github.com/ShyftNetwork/go-empyrean/consensus"
	"github.com/ShyftNetwork/go-empyrean/core/rawdb"
	"github.com/ShyftNetwork/go-empyrean/core/state"
	"github.com/ShyftNetwork/go-empyrean/core/types"
	"github.com/ShyftNetwork/go-empyrean/core/vm"
	"github.com/ShyftNetwork/go-empyrean/crypto"
	"github.com/ShyftNetwork/go-empyrean/ethdb"
	"github.com/ShyftNetwork/go-empyrean/event"
	"github.com/ShyftNetwork/go-empyrean/log"
	"github.com/ShyftNetwork/go-empyrean/metrics"
	"github.com/ShyftNetwork/go-empyrean/params"
	"github.com/ShyftNetwork/go-empyrean/rlp"
	"github.com/ShyftNetwork/go-empyrean/trie"
	"github.com/hashicorp/golang-lru"
)

var (
	// Timers measuring the distinct phases of block import.
	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	// ErrNoGenesis is returned when the database contains no genesis block.
	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256 // entries in the block-body LRU caches
	blockCacheLimit     = 256 // entries in the whole-block LRU cache
	receiptsCacheLimit  = 32  // entries in the per-block receipts LRU cache
	maxFutureBlocks     = 256 // maximum queued not-yet-importable blocks
	maxTimeFutureBlocks = 30  // max seconds into the future a block may be to still be queued
	badBlockLimit       = 10  // entries in the bad-block LRU cache
	triesInMemory       = 128 // recent tries kept in memory before flushing to disk

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion uint64 = 3
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled       bool          // Whether to disable trie write caching (archive node)
	TrieCleanLimit int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieDirtyLimit int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieTimeLimit  time.Duration // Time limit after which to flush the current in-memory trie to disk
}

// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db      ethdb.Database  // Low level persistent database to store final content in
	shyftDb ethdb.SDatabase // Shyft Postgres instance

	triegc *prque.Prque  // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks      *lru.Cache              // Bad block cache
	shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
146 func NewBlockChain(db ethdb.Database, shyftDb ethdb.SDatabase, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) { 147 if cacheConfig == nil { 148 cacheConfig = &CacheConfig{ 149 TrieCleanLimit: 256, 150 TrieDirtyLimit: 256, 151 TrieTimeLimit: 5 * time.Minute, 152 } 153 } 154 bodyCache, _ := lru.New(bodyCacheLimit) 155 bodyRLPCache, _ := lru.New(bodyCacheLimit) 156 receiptsCache, _ := lru.New(receiptsCacheLimit) 157 blockCache, _ := lru.New(blockCacheLimit) 158 futureBlocks, _ := lru.New(maxFutureBlocks) 159 badBlocks, _ := lru.New(badBlockLimit) 160 161 bc := &BlockChain{ 162 chainConfig: chainConfig, 163 cacheConfig: cacheConfig, 164 db: db, 165 shyftDb: shyftDb, 166 triegc: prque.New(nil), 167 stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit), 168 quit: make(chan struct{}), 169 shouldPreserve: shouldPreserve, 170 bodyCache: bodyCache, 171 bodyRLPCache: bodyRLPCache, 172 receiptsCache: receiptsCache, 173 blockCache: blockCache, 174 futureBlocks: futureBlocks, 175 engine: engine, 176 vmConfig: vmConfig, 177 badBlocks: badBlocks, 178 } 179 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 180 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 181 182 var err error 183 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 184 if err != nil { 185 return nil, err 186 } 187 bc.genesisBlock = bc.GetBlockByNumber(0) 188 if bc.genesisBlock == nil { 189 return nil, ErrNoGenesis 190 } 191 if err := bc.loadLastState(); err != nil { 192 return nil, err 193 } 194 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 195 for hash := range BadHashes { 196 if header := bc.GetHeaderByHash(hash); header != nil { 197 // get the canonical block corresponding to the offending header's number 198 headerByNumber := 
bc.GetHeaderByNumber(header.Number.Uint64()) 199 // make sure the headerByNumber (if present) is in our current canonical chain 200 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 201 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 202 bc.SetHead(header.Number.Uint64() - 1) 203 log.Error("Chain rewind was successful, resuming normal operation") 204 } 205 } 206 } 207 // Take ownership of this particular state 208 go bc.update() 209 return bc, nil 210 } 211 212 func (bc *BlockChain) getProcInterrupt() bool { 213 return atomic.LoadInt32(&bc.procInterrupt) == 1 214 } 215 216 // GetVMConfig returns the block chain VM config. 217 func (bc *BlockChain) GetVMConfig() *vm.Config { 218 return &bc.vmConfig 219 } 220 221 // loadLastState loads the last known chain state from the database. This method 222 // assumes that the chain manager mutex is held. 223 func (bc *BlockChain) loadLastState() error { 224 // Restore the last known head block 225 head := rawdb.ReadHeadBlockHash(bc.db) 226 if head == (common.Hash{}) { 227 // Corrupt or empty database, init from scratch 228 log.Warn("Empty database, resetting chain") 229 return bc.Reset() 230 } 231 // Make sure the entire head block is available 232 currentBlock := bc.GetBlockByHash(head) 233 if currentBlock == nil { 234 // Corrupt or empty database, init from scratch 235 log.Warn("Head block missing, resetting chain", "hash", head) 236 return bc.Reset() 237 } 238 // Make sure the state associated with the block is available 239 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 240 // Dangling block without a state associated, init from scratch 241 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 242 if err := bc.repair(¤tBlock); err != nil { 243 return err 244 } 245 } 246 // Everything seems to be fine, set as the head block 247 bc.currentBlock.Store(currentBlock) 248 249 // 
Restore the last known head header 250 currentHeader := currentBlock.Header() 251 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 252 if header := bc.GetHeaderByHash(head); header != nil { 253 currentHeader = header 254 } 255 } 256 bc.hc.SetCurrentHeader(currentHeader) 257 258 // Restore the last known head fast block 259 bc.currentFastBlock.Store(currentBlock) 260 if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 261 if block := bc.GetBlockByHash(head); block != nil { 262 bc.currentFastBlock.Store(block) 263 } 264 } 265 266 // Issue a status log for the user 267 currentFastBlock := bc.CurrentFastBlock() 268 269 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 270 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 271 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 272 273 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0))) 274 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0))) 275 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0))) 276 277 return nil 278 } 279 280 // SetHead rewinds the local chain to a new head. In the case of headers, everything 281 // above the new head will be deleted and the new one set. In the case of blocks 282 // though, the head may be further rewound if block bodies are missing (non-archive 283 // nodes after a fast sync). 
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	// Persist the (possibly rewound) head markers before reloading state.
	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well at its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
// NOTE(review): the type assertion panics if the head was never stored; callers
// appear to rely on NewBlockChain/loadLastState always setting it first.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
379 func (bc *BlockChain) SetValidator(validator Validator) { 380 bc.procmu.Lock() 381 defer bc.procmu.Unlock() 382 bc.validator = validator 383 } 384 385 // Validator returns the current validator. 386 func (bc *BlockChain) Validator() Validator { 387 bc.procmu.RLock() 388 defer bc.procmu.RUnlock() 389 return bc.validator 390 } 391 392 // Processor returns the current processor. 393 func (bc *BlockChain) Processor() Processor { 394 bc.procmu.RLock() 395 defer bc.procmu.RUnlock() 396 return bc.processor 397 } 398 399 // State returns a new mutable state based on the current HEAD block. 400 func (bc *BlockChain) State() (*state.StateDB, error) { 401 return bc.StateAt(bc.CurrentBlock().Root()) 402 } 403 404 // StateAt returns a new mutable state based on a particular point in time. 405 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 406 return state.New(root, bc.stateCache) 407 } 408 409 // StateCache returns the caching database underpinning the blockchain instance. 410 func (bc *BlockChain) StateCache() state.Database { 411 return bc.stateCache 412 } 413 414 // Reset purges the entire blockchain, restoring it to its genesis state. 415 func (bc *BlockChain) Reset() error { 416 return bc.ResetWithGenesisBlock(bc.genesisBlock) 417 } 418 419 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 420 // specified genesis state. 
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	// Point every head marker (block, header, fast block) at the new genesis.
	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
		if block == nil {
			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
		}
		(*head) = block
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		// Throttled progress log; statsReportLimit is defined elsewhere in this file.
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}

	return nil
}

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
506 func (bc *BlockChain) insert(block *types.Block) { 507 // If the block is on a side chain or an unknown one, force other heads onto it too 508 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 509 510 // Add the block to the canonical chain number scheme and mark as the head 511 rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) 512 rawdb.WriteHeadBlockHash(bc.db, block.Hash()) 513 514 bc.currentBlock.Store(block) 515 516 // If the block is better than our head or is on a different chain, force update heads 517 if updateHeads { 518 bc.hc.SetCurrentHeader(block.Header()) 519 rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) 520 521 bc.currentFastBlock.Store(block) 522 } 523 } 524 525 // Genesis retrieves the chain's genesis block. 526 func (bc *BlockChain) Genesis() *types.Block { 527 return bc.genesisBlock 528 } 529 530 //GetBlockHashesSinceLastValidBlockHash returns a slice of invalid blockHashes 531 func (bc *BlockChain) GetBlockHashesSinceLastValidBlockHash(validHash common.Hash) (blockHashes []common.Hash, bHashes []string) { 532 //bNumber is VALID blockNumber 533 bNumber := bc.hc.GetBlockNumber(validHash) 534 //hNumber is the Current Header Block Number 535 hNumber := bc.hc.CurrentHeader().Number.Uint64() 536 //hash is the current Headers block hash 537 hash := bc.hc.CurrentHeader().Hash() 538 //Starting at block height bNumber loop until i <= hNumber 539 for i := hNumber; i > (*bNumber); i-- { 540 block := bc.GetBlock(hash, hNumber) 541 if block == nil { 542 break 543 } 544 //bHashes will be a slice of all invalid block hashs as []string 545 bHashes = append(bHashes, block.Hash().String()) 546 //blockHashes will be a slice of all invalid blockhashes this is returned 547 //to be passed into Rollback() 548 blockHashes = append(blockHashes, block.Hash()) 549 //set the new hash to the parentHash and continue loop 550 hash = block.ParentHash() 551 //decrease the hNumber to align with above parentHash 552 //necessary 
for GetBlock LN 517 553 hNumber-- 554 } 555 return blockHashes, bHashes 556 } 557 558 // GetBody retrieves a block body (transactions and uncles) from the database by 559 // hash, caching it if found. 560 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 561 // Short circuit if the body's already in the cache, retrieve otherwise 562 if cached, ok := bc.bodyCache.Get(hash); ok { 563 body := cached.(*types.Body) 564 return body 565 } 566 number := bc.hc.GetBlockNumber(hash) 567 if number == nil { 568 return nil 569 } 570 body := rawdb.ReadBody(bc.db, hash, *number) 571 if body == nil { 572 return nil 573 } 574 // Cache the found body for next time and return 575 bc.bodyCache.Add(hash, body) 576 return body 577 } 578 579 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 580 // caching it if found. 581 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 582 // Short circuit if the body's already in the cache, retrieve otherwise 583 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 584 return cached.(rlp.RawValue) 585 } 586 number := bc.hc.GetBlockNumber(hash) 587 if number == nil { 588 return nil 589 } 590 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 591 if len(body) == 0 { 592 return nil 593 } 594 // Cache the found body for next time and return 595 bc.bodyRLPCache.Add(hash, body) 596 return body 597 } 598 599 // HasBlock checks if a block is fully present in the database or not. 600 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 601 if bc.blockCache.Contains(hash) { 602 return true 603 } 604 return rawdb.HasBody(bc.db, hash, number) 605 } 606 607 // HasFastBlock checks if a fast block is fully present in the database or not. 
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
	if !bc.HasBlock(hash, number) {
		return false
	}
	if bc.receiptsCache.Contains(hash) {
		return true
	}
	return rawdb.HasReceipts(bc.db, hash, number)
}

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
// NOTE(review): a nil result from rawdb.ReadReceipts is cached as well — verify
// that callers tolerate a cached nil for blocks without receipts.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service.
// If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	// Only ever stop once; running doubles as the "stopped" flag.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

// procFutureBlocks drains the future-block cache and attempts to import the
// queued blocks in ascending block-number order.
func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus status of write
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()
	// Walk the chain newest-first, peeling back any head marker that matches.
	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]
		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if
it's actually needed 827 from, _ := types.Sender(signer, transactions[j]) 828 receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) 829 } 830 // The used gas can be calculated based on previous receipts 831 if j == 0 { 832 receipts[j].GasUsed = receipts[j].CumulativeGasUsed 833 } else { 834 receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed 835 } 836 // The derived log fields can simply be set from the block and transaction 837 for k := 0; k < len(receipts[j].Logs); k++ { 838 receipts[j].Logs[k].BlockNumber = block.NumberU64() 839 receipts[j].Logs[k].BlockHash = block.Hash() 840 receipts[j].Logs[k].TxHash = receipts[j].TxHash 841 receipts[j].Logs[k].TxIndex = uint(j) 842 receipts[j].Logs[k].Index = logIndex 843 logIndex++ 844 } 845 } 846 return nil 847 } 848 849 // InsertReceiptChain attempts to complete an already existing header chain with 850 // transaction and receipt data. 851 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 852 bc.wg.Add(1) 853 defer bc.wg.Done() 854 855 // Do a sanity check that the provided chain is actually ordered and linked 856 for i := 1; i < len(blockChain); i++ { 857 if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { 858 log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), 859 "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) 860 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), 861 blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) 862 } 863 } 864 865 var ( 866 stats = struct{ processed, ignored int32 }{} 867 start = time.Now() 868 bytes = 0 869 batch 
= bc.db.NewBatch() 870 ) 871 for i, block := range blockChain { 872 receipts := receiptChain[i] 873 // Short circuit insertion if shutting down or processing failed 874 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 875 return 0, nil 876 } 877 // Short circuit if the owner header is unknown 878 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 879 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 880 } 881 // Skip if the entire data is already known 882 if bc.HasBlock(block.Hash(), block.NumberU64()) { 883 stats.ignored++ 884 continue 885 } 886 // Compute all the non-consensus fields of the receipts 887 if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil { 888 return i, fmt.Errorf("failed to set receipts data: %v", err) 889 } 890 // Write all the data out into the database 891 rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) 892 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) 893 rawdb.WriteTxLookupEntries(batch, block) 894 895 stats.processed++ 896 897 if batch.ValueSize() >= ethdb.IdealBatchSize { 898 if err := batch.Write(); err != nil { 899 return 0, err 900 } 901 bytes += batch.ValueSize() 902 batch.Reset() 903 } 904 } 905 if batch.ValueSize() > 0 { 906 bytes += batch.ValueSize() 907 if err := batch.Write(); err != nil { 908 return 0, err 909 } 910 } 911 912 // Update the head fast sync block if better 913 bc.mu.Lock() 914 head := blockChain[len(blockChain)-1] 915 if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case 916 currentFastBlock := bc.CurrentFastBlock() 917 if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { 918 rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) 919 bc.currentFastBlock.Store(head) 920 } 921 } 922 bc.mu.Unlock() 923 924 context := []interface{}{ 925 "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), 926 "number", 
head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)), 927 "size", common.StorageSize(bytes), 928 } 929 if stats.ignored > 0 { 930 context = append(context, []interface{}{"ignored", stats.ignored}...) 931 } 932 log.Info("Imported new block receipts", context...) 933 934 return 0, nil 935 } 936 937 var lastWrite uint64 938 939 // WriteBlockWithoutState writes only the block and its metadata to the database, 940 // but does not write any state. This is used to construct competing side forks 941 // up to the point where they exceed the canonical total difficulty. 942 func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) { 943 bc.wg.Add(1) 944 defer bc.wg.Done() 945 946 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { 947 return err 948 } 949 rawdb.WriteBlock(bc.db, block) 950 951 return nil 952 } 953 954 // WriteBlockWithState writes the block and all associated state to the database. 955 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) { 956 bc.wg.Add(1) 957 defer bc.wg.Done() 958 959 // Calculate the total difficulty of the block 960 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 961 if ptd == nil { 962 return NonStatTy, consensus.ErrUnknownAncestor 963 } 964 // Make sure no inconsistent state is leaked during insertion 965 bc.mu.Lock() 966 defer bc.mu.Unlock() 967 968 currentBlock := bc.CurrentBlock() 969 localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 970 externTd := new(big.Int).Add(block.Difficulty(), ptd) 971 972 // Irrelevant of the canonical status, write the block itself to the database 973 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { 974 return NonStatTy, err 975 } 976 rawdb.WriteBlock(bc.db, block) 977 root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) 978 979 if err != nil { 980 
return NonStatTy, err 981 } 982 983 triedb := bc.stateCache.TrieDB() 984 985 // If we're running an archive node, always flush 986 if bc.cacheConfig.Disabled { 987 if err := triedb.Commit(root, false); err != nil { 988 return NonStatTy, err 989 } 990 } else { 991 // Full but not archive node, do proper garbage collection 992 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 993 bc.triegc.Push(root, -int64(block.NumberU64())) 994 995 if current := block.NumberU64(); current > triesInMemory { 996 // If we exceeded our memory allowance, flush matured singleton nodes to disk 997 var ( 998 nodes, imgs = triedb.Size() 999 limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 1000 ) 1001 if nodes > limit || imgs > 4*1024*1024 { 1002 triedb.Cap(limit - ethdb.IdealBatchSize) 1003 } 1004 // Find the next state trie we need to commit 1005 header := bc.GetHeaderByNumber(current - triesInMemory) 1006 chosen := header.Number.Uint64() 1007 1008 // If we exceeded out time allowance, flush an entire trie to disk 1009 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 1010 // If we're exceeding limits but haven't reached a large enough memory gap, 1011 // warn the user that the system is becoming unstable. 1012 if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 1013 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) 1014 } 1015 // Flush an entire trie and restart the counters 1016 triedb.Commit(header.Root, true) 1017 lastWrite = chosen 1018 bc.gcproc = 0 1019 } 1020 // Garbage collect anything below our required write retention 1021 for !bc.triegc.Empty() { 1022 root, number := bc.triegc.Pop() 1023 if uint64(-number) > chosen { 1024 bc.triegc.Push(root, number) 1025 break 1026 } 1027 triedb.Dereference(root.(common.Hash)) 1028 } 1029 } 1030 } 1031 1032 // Write other block data using a batch. 
1033 batch := bc.db.NewBatch() 1034 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) 1035 1036 // If the total difficulty is higher than our known, add it to the canonical chain 1037 // Second clause in the if statement reduces the vulnerability to selfish mining. 1038 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1039 reorg := externTd.Cmp(localTd) > 0 1040 currentBlock = bc.CurrentBlock() 1041 if !reorg && externTd.Cmp(localTd) == 0 { 1042 // Split same-difficulty blocks by number, then preferentially select 1043 // the block generated by the local miner as the canonical block. 1044 if block.NumberU64() < currentBlock.NumberU64() { 1045 reorg = true 1046 } else if block.NumberU64() == currentBlock.NumberU64() { 1047 var currentPreserve, blockPreserve bool 1048 if bc.shouldPreserve != nil { 1049 currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) 1050 } 1051 reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) 1052 } 1053 } 1054 if reorg { 1055 // Reorganise the chain if the parent is not the head block 1056 if block.ParentHash() != currentBlock.Hash() { 1057 if err := bc.reorg(currentBlock, block); err != nil { 1058 return NonStatTy, err 1059 } 1060 } 1061 // Write the positional metadata for transaction/receipt lookups and preimages 1062 rawdb.WriteTxLookupEntries(batch, block) 1063 rawdb.WritePreimages(batch, state.Preimages()) 1064 1065 status = CanonStatTy 1066 } else { 1067 status = SideStatTy 1068 } 1069 if err := batch.Write(); err != nil { 1070 return NonStatTy, err 1071 } 1072 1073 // Set new head. 
1074 if status == CanonStatTy { 1075 bc.insert(block) 1076 // NOTE:SHYFT - Write block data for block explorer 1077 if GlobalPG != "disconnect" { 1078 if err := SWriteBlock(bc.shyftDb, block, receipts); err != nil { 1079 return NonStatTy, err 1080 } 1081 } 1082 } 1083 1084 bc.futureBlocks.Remove(block.Hash()) 1085 return status, nil 1086 } 1087 1088 // addFutureBlock checks if the block is within the max allowed window to get 1089 // accepted for future processing, and returns an error if the block is too far 1090 // ahead and was not added. 1091 func (bc *BlockChain) addFutureBlock(block *types.Block) error { 1092 max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) 1093 if block.Time().Cmp(max) > 0 { 1094 return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) 1095 } 1096 bc.futureBlocks.Add(block.Hash(), block) 1097 return nil 1098 } 1099 1100 // InsertChain attempts to insert the given batch of blocks in to the canonical 1101 // chain or, otherwise, create a fork. If an error is returned it will return 1102 // the index number of the failing block as well an error describing what went 1103 // wrong. 1104 // 1105 // After insertion is done, all accumulated events will be fired. 
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, events, logs, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	// Fire accumulated events only after the chain lock has been released.
	bc.PostChainEvents(events, logs)
	return n, err
}

// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil, nil, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, bc.Validator())

	block, err := it.next()
	switch {
	// First block is pruned, insert as sidechain and reorg only if TD grows enough
	case err == consensus.ErrPrunedAncestor:
		return bc.insertSidechain(it)

	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			block, err = it.next()
		}
		stats.queued += it.processed()
		stats.ignored += it.remaining()

		// If there are any still remaining, mark as ignored
		return it.index, events, coalescedLogs, err

	// First block (and state) is known
	//   1. We did a roll-back, and should now do a re-import
	//   2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
	//      from the canonical chain, which has not been verified.
	case err == ErrKnownBlock:
		// Skip all known blocks that behind us
		current := bc.CurrentBlock().NumberU64()

		for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
			stats.ignored++
			block, err = it.next()
		}
		// Falls through to the block import

	// Some other error occurred, abort
	case err != nil:
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return it.index, events, coalescedLogs, err
	}
	// No validation errors for the first block (or chain prefix skipped)
	for ; block != nil && err == nil; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, events, coalescedLogs, ErrBlacklistedHash
		}
		// Retrieve the parent block and it's state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		}
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		t0 := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		t1 := time.Now()
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return it.index, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			return it.index, events, coalescedLogs, err
		}
		t2 := time.Now()
		proctime := time.Since(start)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		t3 := time.Now()
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		blockInsertTimer.UpdateSince(start)
		blockExecutionTimer.Update(t1.Sub(t0))
		blockValidationTimer.Update(t2.Sub(t1))
		blockWriteTimer.Update(t3.Sub(t2))
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			coalescedLogs = append(coalescedLogs, logs...)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
			events = append(events, ChainSideEvent{block})
		}
		blockInsertTimer.UpdateSince(start)
		stats.processed++
		stats.usedGas += usedGas

		cache, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, cache)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && err == consensus.ErrFutureBlock {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, events, coalescedLogs, err
		}
		block, err = it.next()

		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()

	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return it.index, events, coalescedLogs, err
}

// insertSidechain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
1318 func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) { 1319 var ( 1320 externTd *big.Int 1321 current = bc.CurrentBlock().NumberU64() 1322 ) 1323 // The first sidechain block error is already verified to be ErrPrunedAncestor. 1324 // Since we don't import them here, we expect ErrUnknownAncestor for the remaining 1325 // ones. Any other errors means that the block is invalid, and should not be written 1326 // to disk. 1327 block, err := it.current(), consensus.ErrPrunedAncestor 1328 for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() { 1329 // Check the canonical state root for that number 1330 if number := block.NumberU64(); current >= number { 1331 if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() { 1332 // This is most likely a shadow-state attack. When a fork is imported into the 1333 // database, and it eventually reaches a block height which is not pruned, we 1334 // just found that the state already exist! This means that the sidechain block 1335 // refers to a state which already exists in our canon chain. 1336 // 1337 // If left unchecked, we would now proceed importing the blocks, without actually 1338 // having verified the state of the previous blocks. 1339 log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root()) 1340 1341 // If someone legitimately side-mines blocks, they would still be imported as usual. However, 1342 // we cannot risk writing unverified blocks to disk when they obviously target the pruning 1343 // mechanism. 
1344 return it.index, nil, nil, errors.New("sidechain ghost-state attack") 1345 } 1346 } 1347 if externTd == nil { 1348 externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) 1349 } 1350 externTd = new(big.Int).Add(externTd, block.Difficulty()) 1351 1352 if !bc.HasBlock(block.Hash(), block.NumberU64()) { 1353 start := time.Now() 1354 if err := bc.WriteBlockWithoutState(block, externTd); err != nil { 1355 return it.index, nil, nil, err 1356 } 1357 log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(), 1358 "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), 1359 "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), 1360 "root", block.Root()) 1361 } 1362 } 1363 // At this point, we've written all sidechain blocks to database. Loop ended 1364 // either on some other error or all were processed. If there was some other 1365 // error, we can ignore the rest of those blocks. 1366 // 1367 // If the externTd was larger than our local TD, we now need to reimport the previous 1368 // blocks to regenerate the required state 1369 localTd := bc.GetTd(bc.CurrentBlock().Hash(), current) 1370 if localTd.Cmp(externTd) > 0 { 1371 log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd) 1372 return it.index, nil, nil, err 1373 } 1374 // Gather all the sidechain hashes (full blocks may be memory heavy) 1375 var ( 1376 hashes []common.Hash 1377 numbers []uint64 1378 ) 1379 parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64()) 1380 for parent != nil && !bc.HasState(parent.Root) { 1381 hashes = append(hashes, parent.Hash()) 1382 numbers = append(numbers, parent.Number.Uint64()) 1383 1384 parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1) 1385 } 1386 if parent == nil { 1387 return it.index, nil, nil, errors.New("missing parent") 1388 } 1389 // Import all the pruned 
blocks to make the state available 1390 var ( 1391 blocks []*types.Block 1392 memory common.StorageSize 1393 ) 1394 for i := len(hashes) - 1; i >= 0; i-- { 1395 // Append the next block to our batch 1396 block := bc.GetBlock(hashes[i], numbers[i]) 1397 1398 blocks = append(blocks, block) 1399 memory += block.Size() 1400 1401 // If memory use grew too large, import and continue. Sadly we need to discard 1402 // all raised events and logs from notifications since we're too heavy on the 1403 // memory here. 1404 if len(blocks) >= 2048 || memory > 64*1024*1024 { 1405 log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64()) 1406 if _, _, _, err := bc.insertChain(blocks, false); err != nil { 1407 return 0, nil, nil, err 1408 } 1409 blocks, memory = blocks[:0], 0 1410 1411 // If the chain is terminating, stop processing blocks 1412 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1413 log.Debug("Premature abort during blocks processing") 1414 return 0, nil, nil, nil 1415 } 1416 } 1417 } 1418 if len(blocks) > 0 { 1419 log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64()) 1420 return bc.insertChain(blocks, false) 1421 } 1422 return 0, nil, nil, nil 1423 } 1424 1425 // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them 1426 // to be part of the new canonical chain and accumulates potential missing transactions and post an 1427 // event about them 1428 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 1429 var ( 1430 newChain types.Blocks 1431 oldChain types.Blocks 1432 commonBlock *types.Block 1433 deletedTxs types.Transactions 1434 deletedLogs []*types.Log 1435 // collectLogs collects the logs that were generated during the 1436 // processing of the block that corresponds with the given hash. 1437 // These logs are later announced as deleted. 
1438 collectLogs = func(hash common.Hash) { 1439 // Coalesce logs and set 'Removed'. 1440 number := bc.hc.GetBlockNumber(hash) 1441 if number == nil { 1442 return 1443 } 1444 receipts := rawdb.ReadReceipts(bc.db, hash, *number) 1445 for _, receipt := range receipts { 1446 for _, log := range receipt.Logs { 1447 del := *log 1448 del.Removed = true 1449 deletedLogs = append(deletedLogs, &del) 1450 } 1451 } 1452 } 1453 ) 1454 1455 // first reduce whoever is higher bound 1456 if oldBlock.NumberU64() > newBlock.NumberU64() { 1457 // reduce old chain 1458 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 1459 oldChain = append(oldChain, oldBlock) 1460 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1461 1462 collectLogs(oldBlock.Hash()) 1463 } 1464 } else { 1465 // reduce new chain and append new chain blocks for inserting later on 1466 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 1467 newChain = append(newChain, newBlock) 1468 } 1469 } 1470 if oldBlock == nil { 1471 return fmt.Errorf("Invalid old chain") 1472 } 1473 if newBlock == nil { 1474 return fmt.Errorf("Invalid new chain") 1475 } 1476 1477 for { 1478 if oldBlock.Hash() == newBlock.Hash() { 1479 commonBlock = oldBlock 1480 break 1481 } 1482 1483 oldChain = append(oldChain, oldBlock) 1484 newChain = append(newChain, newBlock) 1485 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
1486 collectLogs(oldBlock.Hash()) 1487 1488 oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 1489 if oldBlock == nil { 1490 return fmt.Errorf("Invalid old chain") 1491 } 1492 if newBlock == nil { 1493 return fmt.Errorf("Invalid new chain") 1494 } 1495 } 1496 // Ensure the user sees large reorgs 1497 if len(oldChain) > 0 && len(newChain) > 0 { 1498 logFn := log.Debug 1499 if len(oldChain) > 63 { 1500 logFn = log.Warn 1501 } 1502 logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), 1503 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 1504 } else { 1505 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 1506 } 1507 // Insert the new chain, taking care of the proper incremental order 1508 var addedTxs types.Transactions 1509 for i := len(newChain) - 1; i >= 0; i-- { 1510 // insert the block in the canonical way, re-writing history 1511 bc.insert(newChain[i]) 1512 // write lookup entries for hash based transaction/receipt searches 1513 rawdb.WriteTxLookupEntries(bc.db, newChain[i]) 1514 addedTxs = append(addedTxs, newChain[i].Transactions()...) 
1515 } 1516 // calculate the difference between deleted and added transactions 1517 diff := types.TxDifference(deletedTxs, addedTxs) 1518 // When transactions get deleted from the database that means the 1519 // receipts that were created in the fork must also be deleted 1520 batch := bc.db.NewBatch() 1521 for _, tx := range diff { 1522 rawdb.DeleteTxLookupEntry(batch, tx.Hash()) 1523 } 1524 batch.Write() 1525 1526 if len(deletedLogs) > 0 { 1527 go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) 1528 } 1529 if len(oldChain) > 0 { 1530 go func() { 1531 for _, block := range oldChain { 1532 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1533 } 1534 }() 1535 } 1536 1537 return nil 1538 } 1539 1540 // PostChainEvents iterates over the events generated by a chain insertion and 1541 // posts them into the event feed. 1542 // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock. 1543 func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) { 1544 // post event logs for further processing 1545 if logs != nil { 1546 bc.logsFeed.Send(logs) 1547 } 1548 for _, event := range events { 1549 switch ev := event.(type) { 1550 case ChainEvent: 1551 bc.chainFeed.Send(ev) 1552 1553 case ChainHeadEvent: 1554 bc.chainHeadFeed.Send(ev) 1555 1556 case ChainSideEvent: 1557 bc.chainSideFeed.Send(ev) 1558 } 1559 } 1560 } 1561 1562 func (bc *BlockChain) update() { 1563 futureTimer := time.NewTicker(5 * time.Second) 1564 defer futureTimer.Stop() 1565 for { 1566 select { 1567 case <-futureTimer.C: 1568 bc.procFutureBlocks() 1569 case <-bc.quit: 1570 return 1571 } 1572 } 1573 } 1574 1575 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1576 func (bc *BlockChain) BadBlocks() []*types.Block { 1577 blocks := make([]*types.Block, 0, bc.badBlocks.Len()) 1578 for _, hash := range bc.badBlocks.Keys() { 1579 if blk, exist := bc.badBlocks.Peek(hash); exist { 1580 block := blk.(*types.Block) 1581 
blocks = append(blocks, block) 1582 } 1583 } 1584 return blocks 1585 } 1586 1587 // addBadBlock adds a bad block to the bad-block LRU cache 1588 func (bc *BlockChain) addBadBlock(block *types.Block) { 1589 bc.badBlocks.Add(block.Hash(), block) 1590 } 1591 1592 // reportBlock logs a bad block error. 1593 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1594 bc.addBadBlock(block) 1595 1596 var receiptString string 1597 for i, receipt := range receipts { 1598 receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", 1599 i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(), 1600 receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) 1601 } 1602 log.Error(fmt.Sprintf(` 1603 ########## BAD BLOCK ######### 1604 Chain config: %v 1605 1606 Number: %v 1607 Hash: 0x%x 1608 %v 1609 1610 Error: %v 1611 ############################## 1612 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1613 } 1614 1615 // InsertHeaderChain attempts to insert the given header chain in to the local 1616 // chain, possibly creating a reorg. If an error is returned, it will return the 1617 // index number of the failing header as well an error describing what went wrong. 1618 // 1619 // The verify parameter can be used to fine tune whether nonce verification 1620 // should be done or not. The reason behind the optional check is because some 1621 // of the header retrieval mechanisms already need to verify nonces, as well as 1622 // because nonces can be verified sparsely, not needing to check each. 
1623 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1624 start := time.Now() 1625 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1626 return i, err 1627 } 1628 1629 // Make sure only one thread manipulates the chain at once 1630 bc.chainmu.Lock() 1631 defer bc.chainmu.Unlock() 1632 1633 bc.wg.Add(1) 1634 defer bc.wg.Done() 1635 1636 whFunc := func(header *types.Header) error { 1637 bc.mu.Lock() 1638 defer bc.mu.Unlock() 1639 1640 _, err := bc.hc.WriteHeader(header) 1641 return err 1642 } 1643 1644 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1645 } 1646 1647 // writeHeader writes a header into the local chain, given that its parent is 1648 // already known. If the total difficulty of the newly inserted header becomes 1649 // greater than the current known TD, the canonical chain is re-routed. 1650 // 1651 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1652 // into the chain, as side effects caused by reorganisations cannot be emulated 1653 // without the real blocks. Hence, writing headers directly should only be done 1654 // in two scenarios: pure-header mode of operation (light clients), or properly 1655 // separated header/block phases (non-archive clients). 1656 func (bc *BlockChain) writeHeader(header *types.Header) error { 1657 bc.wg.Add(1) 1658 defer bc.wg.Done() 1659 1660 bc.mu.Lock() 1661 defer bc.mu.Unlock() 1662 1663 _, err := bc.hc.WriteHeader(header) 1664 return err 1665 } 1666 1667 // CurrentHeader retrieves the current head header of the canonical chain. The 1668 // header is retrieved from the HeaderChain's internal cache. 1669 func (bc *BlockChain) CurrentHeader() *types.Header { 1670 return bc.hc.CurrentHeader() 1671 } 1672 1673 // GetTd retrieves a block's total difficulty in the canonical chain from the 1674 // database by hash and number, caching it if found. 
1675 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1676 return bc.hc.GetTd(hash, number) 1677 } 1678 1679 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1680 // database by hash, caching it if found. 1681 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1682 return bc.hc.GetTdByHash(hash) 1683 } 1684 1685 // GetHeader retrieves a block header from the database by hash and number, 1686 // caching it if found. 1687 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1688 return bc.hc.GetHeader(hash, number) 1689 } 1690 1691 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1692 // found. 1693 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 1694 return bc.hc.GetHeaderByHash(hash) 1695 } 1696 1697 // HasHeader checks if a block header is present in the database or not, caching 1698 // it if present. 1699 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 1700 return bc.hc.HasHeader(hash, number) 1701 } 1702 1703 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 1704 // hash, fetching towards the genesis block. 1705 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 1706 return bc.hc.GetBlockHashesFromHash(hash, max) 1707 } 1708 1709 // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or 1710 // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the 1711 // number of blocks to be individually checked before we reach the canonical chain. 1712 // 1713 // Note: ancestor == 0 returns the same block, 1 returns its parent and so on. 
1714 func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { 1715 bc.chainmu.Lock() 1716 defer bc.chainmu.Unlock() 1717 1718 return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) 1719 } 1720 1721 // GetHeaderByNumber retrieves a block header from the database by number, 1722 // caching it (associated with its hash) if found. 1723 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1724 return bc.hc.GetHeaderByNumber(number) 1725 } 1726 1727 // Config retrieves the blockchain's chain configuration. 1728 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1729 1730 // Engine retrieves the blockchain's consensus engine. 1731 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1732 1733 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1734 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1735 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1736 } 1737 1738 // SubscribeChainEvent registers a subscription of ChainEvent. 1739 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1740 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1741 } 1742 1743 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1744 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1745 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1746 } 1747 1748 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1749 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1750 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1751 } 1752 1753 // SubscribeLogsEvent registers a subscription of []*types.Log. 
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// ShyftRollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid. For each hash in chain, whichever head pointers
// (header, fast block, full block) currently point at that hash are rewound to
// the parent, and the new head hash is persisted to the database.
//
// NOTE(review): the loop walks chain from index 0 upwards; the analogous
// upstream rollback walks the slice in reverse (highest block first) — confirm
// callers order the hashes so a forward walk unwinds the heads correctly.
// NOTE(review): GetHeader/GetBlock return nil when the parent is not in the
// database, in which case the subsequent Hash()/Store calls would panic —
// verify parents are always available when this is invoked.
func (bc *BlockChain) ShyftRollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()
	for i := 0; i <= len(chain)-1; i++ {
		hash := chain[i]
		// Rewind the head header if it is the link being removed.
		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		// Rewind the fast-sync head block and persist the new head hash.
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		// Rewind the full head block and persist the new head hash.
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}