github.com/alexanderbez/go-ethereum@v1.8.17-0.20181024144731-0a57b29f0c8e/core/blockchain.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package core implements the Ethereum consensus protocol. 18 package core 19 20 import ( 21 "errors" 22 "fmt" 23 "io" 24 "math/big" 25 mrand "math/rand" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/ethereum/go-ethereum/common" 31 "github.com/ethereum/go-ethereum/common/mclock" 32 "github.com/ethereum/go-ethereum/common/prque" 33 "github.com/ethereum/go-ethereum/consensus" 34 "github.com/ethereum/go-ethereum/core/rawdb" 35 "github.com/ethereum/go-ethereum/core/state" 36 "github.com/ethereum/go-ethereum/core/types" 37 "github.com/ethereum/go-ethereum/core/vm" 38 "github.com/ethereum/go-ethereum/crypto" 39 "github.com/ethereum/go-ethereum/ethdb" 40 "github.com/ethereum/go-ethereum/event" 41 "github.com/ethereum/go-ethereum/log" 42 "github.com/ethereum/go-ethereum/metrics" 43 "github.com/ethereum/go-ethereum/params" 44 "github.com/ethereum/go-ethereum/rlp" 45 "github.com/ethereum/go-ethereum/trie" 46 "github.com/hashicorp/golang-lru" 47 ) 48 49 var ( 50 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 51 52 ErrNoGenesis = 
errors.New("Genesis not found in chain") 53 ) 54 55 const ( 56 bodyCacheLimit = 256 57 blockCacheLimit = 256 58 maxFutureBlocks = 256 59 maxTimeFutureBlocks = 30 60 badBlockLimit = 10 61 triesInMemory = 128 62 63 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 64 BlockChainVersion = 3 65 ) 66 67 // CacheConfig contains the configuration values for the trie caching/pruning 68 // that's resident in a blockchain. 69 type CacheConfig struct { 70 Disabled bool // Whether to disable trie write caching (archive node) 71 TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk 72 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 73 } 74 75 // BlockChain represents the canonical chain given a database with a genesis 76 // block. The Blockchain manages chain imports, reverts, chain reorganisations. 77 // 78 // Importing blocks in to the block chain happens according to the set of rules 79 // defined by the two stage Validator. Processing of blocks is done using the 80 // Processor which processes the included transaction. The validation of the state 81 // is done in the second part of the Validator. Failing results in aborting of 82 // the import. 83 // 84 // The BlockChain also helps in returning blocks from **any** chain included 85 // in the database as well as blocks that represents the canonical chain. It's 86 // important to note that GetBlock can return any block and does not need to be 87 // included in the canonical one where as GetBlockByNumber always represents the 88 // canonical chain. 
89 type BlockChain struct { 90 chainConfig *params.ChainConfig // Chain & network configuration 91 cacheConfig *CacheConfig // Cache configuration for pruning 92 93 db ethdb.Database // Low level persistent database to store final content in 94 triegc *prque.Prque // Priority queue mapping block numbers to tries to gc 95 gcproc time.Duration // Accumulates canonical block processing for trie dumping 96 97 hc *HeaderChain 98 rmLogsFeed event.Feed 99 chainFeed event.Feed 100 chainSideFeed event.Feed 101 chainHeadFeed event.Feed 102 logsFeed event.Feed 103 scope event.SubscriptionScope 104 genesisBlock *types.Block 105 106 mu sync.RWMutex // global mutex for locking chain operations 107 chainmu sync.RWMutex // blockchain insertion lock 108 procmu sync.RWMutex // block processor lock 109 110 checkpoint int // checkpoint counts towards the new checkpoint 111 currentBlock atomic.Value // Current head of the block chain 112 currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) 
113 114 stateCache state.Database // State database to reuse between imports (contains state cache) 115 bodyCache *lru.Cache // Cache for the most recent block bodies 116 bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format 117 blockCache *lru.Cache // Cache for the most recent entire blocks 118 futureBlocks *lru.Cache // future blocks are blocks added for later processing 119 120 quit chan struct{} // blockchain quit channel 121 running int32 // running must be called atomically 122 // procInterrupt must be atomically called 123 procInterrupt int32 // interrupt signaler for block processing 124 wg sync.WaitGroup // chain processing wait group for shutting down 125 126 engine consensus.Engine 127 processor Processor // block processor interface 128 validator Validator // block and state validator interface 129 vmConfig vm.Config 130 131 badBlocks *lru.Cache // Bad block cache 132 shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. 133 } 134 135 // NewBlockChain returns a fully initialised block chain using information 136 // available in the database. It initialises the default Ethereum Validator and 137 // Processor. 
138 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) { 139 if cacheConfig == nil { 140 cacheConfig = &CacheConfig{ 141 TrieNodeLimit: 256 * 1024 * 1024, 142 TrieTimeLimit: 5 * time.Minute, 143 } 144 } 145 bodyCache, _ := lru.New(bodyCacheLimit) 146 bodyRLPCache, _ := lru.New(bodyCacheLimit) 147 blockCache, _ := lru.New(blockCacheLimit) 148 futureBlocks, _ := lru.New(maxFutureBlocks) 149 badBlocks, _ := lru.New(badBlockLimit) 150 151 bc := &BlockChain{ 152 chainConfig: chainConfig, 153 cacheConfig: cacheConfig, 154 db: db, 155 triegc: prque.New(nil), 156 stateCache: state.NewDatabase(db), 157 quit: make(chan struct{}), 158 shouldPreserve: shouldPreserve, 159 bodyCache: bodyCache, 160 bodyRLPCache: bodyRLPCache, 161 blockCache: blockCache, 162 futureBlocks: futureBlocks, 163 engine: engine, 164 vmConfig: vmConfig, 165 badBlocks: badBlocks, 166 } 167 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 168 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 169 170 var err error 171 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 172 if err != nil { 173 return nil, err 174 } 175 bc.genesisBlock = bc.GetBlockByNumber(0) 176 if bc.genesisBlock == nil { 177 return nil, ErrNoGenesis 178 } 179 if err := bc.loadLastState(); err != nil { 180 return nil, err 181 } 182 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 183 for hash := range BadHashes { 184 if header := bc.GetHeaderByHash(hash); header != nil { 185 // get the canonical block corresponding to the offending header's number 186 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 187 // make sure the headerByNumber (if present) is in our current canonical chain 188 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 189 
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 190 bc.SetHead(header.Number.Uint64() - 1) 191 log.Error("Chain rewind was successful, resuming normal operation") 192 } 193 } 194 } 195 // Take ownership of this particular state 196 go bc.update() 197 return bc, nil 198 } 199 200 func (bc *BlockChain) getProcInterrupt() bool { 201 return atomic.LoadInt32(&bc.procInterrupt) == 1 202 } 203 204 // loadLastState loads the last known chain state from the database. This method 205 // assumes that the chain manager mutex is held. 206 func (bc *BlockChain) loadLastState() error { 207 // Restore the last known head block 208 head := rawdb.ReadHeadBlockHash(bc.db) 209 if head == (common.Hash{}) { 210 // Corrupt or empty database, init from scratch 211 log.Warn("Empty database, resetting chain") 212 return bc.Reset() 213 } 214 // Make sure the entire head block is available 215 currentBlock := bc.GetBlockByHash(head) 216 if currentBlock == nil { 217 // Corrupt or empty database, init from scratch 218 log.Warn("Head block missing, resetting chain", "hash", head) 219 return bc.Reset() 220 } 221 // Make sure the state associated with the block is available 222 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 223 // Dangling block without a state associated, init from scratch 224 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 225 if err := bc.repair(¤tBlock); err != nil { 226 return err 227 } 228 } 229 // Everything seems to be fine, set as the head block 230 bc.currentBlock.Store(currentBlock) 231 232 // Restore the last known head header 233 currentHeader := currentBlock.Header() 234 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 235 if header := bc.GetHeaderByHash(head); header != nil { 236 currentHeader = header 237 } 238 } 239 bc.hc.SetCurrentHeader(currentHeader) 240 241 // Restore the last known head fast block 242 
bc.currentFastBlock.Store(currentBlock) 243 if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 244 if block := bc.GetBlockByHash(head); block != nil { 245 bc.currentFastBlock.Store(block) 246 } 247 } 248 249 // Issue a status log for the user 250 currentFastBlock := bc.CurrentFastBlock() 251 252 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 253 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 254 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 255 256 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0))) 257 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0))) 258 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0))) 259 260 return nil 261 } 262 263 // SetHead rewinds the local chain to a new head. In the case of headers, everything 264 // above the new head will be deleted and the new one set. In the case of blocks 265 // though, the head may be further rewound if block bodies are missing (non-archive 266 // nodes after a fast sync). 
267 func (bc *BlockChain) SetHead(head uint64) error { 268 log.Warn("Rewinding blockchain", "target", head) 269 270 bc.mu.Lock() 271 defer bc.mu.Unlock() 272 273 // Rewind the header chain, deleting all block bodies until then 274 delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) { 275 rawdb.DeleteBody(db, hash, num) 276 } 277 bc.hc.SetHead(head, delFn) 278 currentHeader := bc.hc.CurrentHeader() 279 280 // Clear out any stale content from the caches 281 bc.bodyCache.Purge() 282 bc.bodyRLPCache.Purge() 283 bc.blockCache.Purge() 284 bc.futureBlocks.Purge() 285 286 // Rewind the block chain, ensuring we don't end up with a stateless head block 287 if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() { 288 bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) 289 } 290 if currentBlock := bc.CurrentBlock(); currentBlock != nil { 291 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 292 // Rewound state missing, rolled back to before pivot, reset to genesis 293 bc.currentBlock.Store(bc.genesisBlock) 294 } 295 } 296 // Rewind the fast block in a simpleton way to the target head 297 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() { 298 bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) 299 } 300 // If either blocks reached nil, reset to the genesis state 301 if currentBlock := bc.CurrentBlock(); currentBlock == nil { 302 bc.currentBlock.Store(bc.genesisBlock) 303 } 304 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil { 305 bc.currentFastBlock.Store(bc.genesisBlock) 306 } 307 currentBlock := bc.CurrentBlock() 308 currentFastBlock := bc.CurrentFastBlock() 309 310 rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()) 311 rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()) 312 313 
return bc.loadLastState() 314 } 315 316 // FastSyncCommitHead sets the current head block to the one defined by the hash 317 // irrelevant what the chain contents were prior. 318 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { 319 // Make sure that both the block as well at its state trie exists 320 block := bc.GetBlockByHash(hash) 321 if block == nil { 322 return fmt.Errorf("non existent block [%x…]", hash[:4]) 323 } 324 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil { 325 return err 326 } 327 // If all checks out, manually set the head block 328 bc.mu.Lock() 329 bc.currentBlock.Store(block) 330 bc.mu.Unlock() 331 332 log.Info("Committed new head block", "number", block.Number(), "hash", hash) 333 return nil 334 } 335 336 // GasLimit returns the gas limit of the current HEAD block. 337 func (bc *BlockChain) GasLimit() uint64 { 338 return bc.CurrentBlock().GasLimit() 339 } 340 341 // CurrentBlock retrieves the current head block of the canonical chain. The 342 // block is retrieved from the blockchain's internal cache. 343 func (bc *BlockChain) CurrentBlock() *types.Block { 344 return bc.currentBlock.Load().(*types.Block) 345 } 346 347 // CurrentFastBlock retrieves the current fast-sync head block of the canonical 348 // chain. The block is retrieved from the blockchain's internal cache. 349 func (bc *BlockChain) CurrentFastBlock() *types.Block { 350 return bc.currentFastBlock.Load().(*types.Block) 351 } 352 353 // SetProcessor sets the processor required for making state modifications. 354 func (bc *BlockChain) SetProcessor(processor Processor) { 355 bc.procmu.Lock() 356 defer bc.procmu.Unlock() 357 bc.processor = processor 358 } 359 360 // SetValidator sets the validator which is used to validate incoming blocks. 361 func (bc *BlockChain) SetValidator(validator Validator) { 362 bc.procmu.Lock() 363 defer bc.procmu.Unlock() 364 bc.validator = validator 365 } 366 367 // Validator returns the current validator. 
368 func (bc *BlockChain) Validator() Validator { 369 bc.procmu.RLock() 370 defer bc.procmu.RUnlock() 371 return bc.validator 372 } 373 374 // Processor returns the current processor. 375 func (bc *BlockChain) Processor() Processor { 376 bc.procmu.RLock() 377 defer bc.procmu.RUnlock() 378 return bc.processor 379 } 380 381 // State returns a new mutable state based on the current HEAD block. 382 func (bc *BlockChain) State() (state.StateDB, error) { 383 return bc.StateAt(bc.CurrentBlock().Root()) 384 } 385 386 // StateAt returns a new mutable state based on a particular point in time. 387 func (bc *BlockChain) StateAt(root common.Hash) (state.StateDB, error) { 388 return state.New(root, bc.stateCache) 389 } 390 391 // Reset purges the entire blockchain, restoring it to its genesis state. 392 func (bc *BlockChain) Reset() error { 393 return bc.ResetWithGenesisBlock(bc.genesisBlock) 394 } 395 396 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 397 // specified genesis state. 398 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 399 // Dump the entire block chain and purge the caches 400 if err := bc.SetHead(0); err != nil { 401 return err 402 } 403 bc.mu.Lock() 404 defer bc.mu.Unlock() 405 406 // Prepare the genesis block and reinitialise the chain 407 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 408 log.Crit("Failed to write genesis block TD", "err", err) 409 } 410 rawdb.WriteBlock(bc.db, genesis) 411 412 bc.genesisBlock = genesis 413 bc.insert(bc.genesisBlock) 414 bc.currentBlock.Store(bc.genesisBlock) 415 bc.hc.SetGenesis(bc.genesisBlock.Header()) 416 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 417 bc.currentFastBlock.Store(bc.genesisBlock) 418 419 return nil 420 } 421 422 // repair tries to repair the current blockchain by rolling back the current block 423 // until one with associated state is found. 
This is needed to fix incomplete db 424 // writes caused either by crashes/power outages, or simply non-committed tries. 425 // 426 // This method only rolls back the current block. The current header and current 427 // fast block are left intact. 428 func (bc *BlockChain) repair(head **types.Block) error { 429 for { 430 // Abort if we've rewound to a head block that does have associated state 431 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 432 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 433 return nil 434 } 435 // Otherwise rewind one block and recheck state availability there 436 (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 437 } 438 } 439 440 // Export writes the active chain to the given writer. 441 func (bc *BlockChain) Export(w io.Writer) error { 442 return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) 443 } 444 445 // ExportN writes a subset of the active chain to the given writer. 446 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 447 bc.mu.RLock() 448 defer bc.mu.RUnlock() 449 450 if first > last { 451 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 452 } 453 log.Info("Exporting batch of blocks", "count", last-first+1) 454 455 start, reported := time.Now(), time.Now() 456 for nr := first; nr <= last; nr++ { 457 block := bc.GetBlockByNumber(nr) 458 if block == nil { 459 return fmt.Errorf("export failed on #%d: not found", nr) 460 } 461 if err := block.EncodeRLP(w); err != nil { 462 return err 463 } 464 if time.Since(reported) >= statsReportLimit { 465 log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) 466 reported = time.Now() 467 } 468 } 469 470 return nil 471 } 472 473 // insert injects a new head block into the current block chain. This method 474 // assumes that the block is indeed a true head. 
It will also reset the head 475 // header and the head fast sync block to this very same block if they are older 476 // or if they are on a different side chain. 477 // 478 // Note, this function assumes that the `mu` mutex is held! 479 func (bc *BlockChain) insert(block *types.Block) { 480 // If the block is on a side chain or an unknown one, force other heads onto it too 481 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 482 483 // Add the block to the canonical chain number scheme and mark as the head 484 rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) 485 rawdb.WriteHeadBlockHash(bc.db, block.Hash()) 486 487 bc.currentBlock.Store(block) 488 489 // If the block is better than our head or is on a different chain, force update heads 490 if updateHeads { 491 bc.hc.SetCurrentHeader(block.Header()) 492 rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) 493 494 bc.currentFastBlock.Store(block) 495 } 496 } 497 498 // Genesis retrieves the chain's genesis block. 499 func (bc *BlockChain) Genesis() *types.Block { 500 return bc.genesisBlock 501 } 502 503 // GetBody retrieves a block body (transactions and uncles) from the database by 504 // hash, caching it if found. 505 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 506 // Short circuit if the body's already in the cache, retrieve otherwise 507 if cached, ok := bc.bodyCache.Get(hash); ok { 508 body := cached.(*types.Body) 509 return body 510 } 511 number := bc.hc.GetBlockNumber(hash) 512 if number == nil { 513 return nil 514 } 515 body := rawdb.ReadBody(bc.db, hash, *number) 516 if body == nil { 517 return nil 518 } 519 // Cache the found body for next time and return 520 bc.bodyCache.Add(hash, body) 521 return body 522 } 523 524 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 525 // caching it if found. 
526 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 527 // Short circuit if the body's already in the cache, retrieve otherwise 528 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 529 return cached.(rlp.RawValue) 530 } 531 number := bc.hc.GetBlockNumber(hash) 532 if number == nil { 533 return nil 534 } 535 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 536 if len(body) == 0 { 537 return nil 538 } 539 // Cache the found body for next time and return 540 bc.bodyRLPCache.Add(hash, body) 541 return body 542 } 543 544 // HasBlock checks if a block is fully present in the database or not. 545 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 546 if bc.blockCache.Contains(hash) { 547 return true 548 } 549 return rawdb.HasBody(bc.db, hash, number) 550 } 551 552 // HasState checks if state trie is fully present in the database or not. 553 func (bc *BlockChain) HasState(hash common.Hash) bool { 554 _, err := bc.stateCache.OpenTrie(hash) 555 return err == nil 556 } 557 558 // HasBlockAndState checks if a block and associated state trie is fully present 559 // in the database or not, caching it if present. 560 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 561 // Check first that the block itself is known 562 block := bc.GetBlock(hash, number) 563 if block == nil { 564 return false 565 } 566 return bc.HasState(block.Root()) 567 } 568 569 // GetBlock retrieves a block from the database by hash and number, 570 // caching it if found. 
571 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 572 // Short circuit if the block's already in the cache, retrieve otherwise 573 if block, ok := bc.blockCache.Get(hash); ok { 574 return block.(*types.Block) 575 } 576 block := rawdb.ReadBlock(bc.db, hash, number) 577 if block == nil { 578 return nil 579 } 580 // Cache the found block for next time and return 581 bc.blockCache.Add(block.Hash(), block) 582 return block 583 } 584 585 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 586 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 587 number := bc.hc.GetBlockNumber(hash) 588 if number == nil { 589 return nil 590 } 591 return bc.GetBlock(hash, *number) 592 } 593 594 // GetBlockByNumber retrieves a block from the database by number, caching it 595 // (associated with its hash) if found. 596 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 597 hash := rawdb.ReadCanonicalHash(bc.db, number) 598 if hash == (common.Hash{}) { 599 return nil 600 } 601 return bc.GetBlock(hash, number) 602 } 603 604 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 605 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 606 number := rawdb.ReadHeaderNumber(bc.db, hash) 607 if number == nil { 608 return nil 609 } 610 return rawdb.ReadReceipts(bc.db, hash, *number) 611 } 612 613 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 
614 // [deprecated by eth/62] 615 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 616 number := bc.hc.GetBlockNumber(hash) 617 if number == nil { 618 return nil 619 } 620 for i := 0; i < n; i++ { 621 block := bc.GetBlock(hash, *number) 622 if block == nil { 623 break 624 } 625 blocks = append(blocks, block) 626 hash = block.ParentHash() 627 *number-- 628 } 629 return 630 } 631 632 // GetUnclesInChain retrieves all the uncles from a given block backwards until 633 // a specific distance is reached. 634 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 635 uncles := []*types.Header{} 636 for i := 0; block != nil && i < length; i++ { 637 uncles = append(uncles, block.Uncles()...) 638 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 639 } 640 return uncles 641 } 642 643 // TrieNode retrieves a blob of data associated with a trie node (or code hash) 644 // either from ephemeral in-memory cache, or from persistent storage. 645 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 646 return bc.stateCache.TrieDB().Node(hash) 647 } 648 649 // Stop stops the blockchain service. If any imports are currently in progress 650 // it will abort them using the procInterrupt. 651 func (bc *BlockChain) Stop() { 652 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 653 return 654 } 655 // Unsubscribe all subscriptions registered from blockchain 656 bc.scope.Close() 657 close(bc.quit) 658 atomic.StoreInt32(&bc.procInterrupt, 1) 659 660 bc.wg.Wait() 661 662 // Ensure the state of a recent block is also stored to disk before exiting. 
663 // We're writing three different states to catch different restart scenarios: 664 // - HEAD: So we don't need to reprocess any blocks in the general case 665 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 666 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 667 if !bc.cacheConfig.Disabled { 668 triedb := bc.stateCache.TrieDB() 669 670 for _, offset := range []uint64{0, 1, triesInMemory - 1} { 671 if number := bc.CurrentBlock().NumberU64(); number > offset { 672 recent := bc.GetBlockByNumber(number - offset) 673 674 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 675 if err := triedb.Commit(recent.Root(), true); err != nil { 676 log.Error("Failed to commit recent state trie", "err", err) 677 } 678 } 679 } 680 for !bc.triegc.Empty() { 681 triedb.Dereference(bc.triegc.PopItem().(common.Hash)) 682 } 683 if size, _ := triedb.Size(); size != 0 { 684 log.Error("Dangling trie nodes after full cleanup") 685 } 686 } 687 log.Info("Blockchain manager stopped") 688 } 689 690 func (bc *BlockChain) procFutureBlocks() { 691 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 692 for _, hash := range bc.futureBlocks.Keys() { 693 if block, exist := bc.futureBlocks.Peek(hash); exist { 694 blocks = append(blocks, block.(*types.Block)) 695 } 696 } 697 if len(blocks) > 0 { 698 types.BlockBy(types.Number).Sort(blocks) 699 700 // Insert one by one as chain insertion needs contiguous ancestry between blocks 701 for i := range blocks { 702 bc.InsertChain(blocks[i : i+1]) 703 } 704 } 705 } 706 707 // WriteStatus status of write 708 type WriteStatus byte 709 710 const ( 711 NonStatTy WriteStatus = iota 712 CanonStatTy 713 SideStatTy 714 ) 715 716 // Rollback is designed to remove a chain of links from the database that aren't 717 // certain enough to be valid. 
718 func (bc *BlockChain) Rollback(chain []common.Hash) { 719 bc.mu.Lock() 720 defer bc.mu.Unlock() 721 722 for i := len(chain) - 1; i >= 0; i-- { 723 hash := chain[i] 724 725 currentHeader := bc.hc.CurrentHeader() 726 if currentHeader.Hash() == hash { 727 bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)) 728 } 729 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash { 730 newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) 731 bc.currentFastBlock.Store(newFastBlock) 732 rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash()) 733 } 734 if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash { 735 newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) 736 bc.currentBlock.Store(newBlock) 737 rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash()) 738 } 739 } 740 } 741 742 // SetReceiptsData computes all the non-consensus fields of the receipts 743 func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error { 744 signer := types.MakeSigner(config, block.Number()) 745 746 transactions, logIndex := block.Transactions(), uint(0) 747 if len(transactions) != len(receipts) { 748 return errors.New("transaction and receipt count mismatch") 749 } 750 751 for j := 0; j < len(receipts); j++ { 752 // The transaction hash can be retrieved from the transaction itself 753 receipts[j].TxHash = transactions[j].Hash() 754 755 // The contract address can be derived from the transaction itself 756 if transactions[j].To() == nil { 757 // Deriving the signer is expensive, only do if it's actually needed 758 from, _ := types.Sender(signer, transactions[j]) 759 receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) 760 } 761 // The used gas can be calculated based on previous receipts 762 if j == 0 { 763 receipts[j].GasUsed = receipts[j].CumulativeGasUsed 764 } else { 765 
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex // log index is block-wide, not per-transaction
			logIndex++
		}
	}
	return nil
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data. It is used by fast sync, which downloads
// bodies/receipts for headers that were validated separately. Returns the index
// of the first failing element and an error, or (0, nil) on success.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0 // total bytes actually flushed to the database (for the log line)
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed.
		// NOTE(review): a shutdown aborts with (0, nil), indistinguishable from
		// success for the caller — confirm this is intended.
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		// Flush the batch whenever it grows past the ideal size, accumulating
		// the flushed byte count before resetting.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	// Flush any leftover data that did not reach the ideal batch size.
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		// Only advance the fast-sync head if the imported segment has a higher
		// total difficulty than the current one.
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

// lastWrite remembers the block number at which a full trie commit was last
// performed (see WriteBlockWithState's garbage collection below).
var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Persist the total difficulty first, then the block itself.
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
// It decides whether the block extends the canonical chain (CanonStatTy),
// becomes a side fork (SideStatTy), and performs in-memory trie garbage
// collection for non-archive nodes.
//
// NOTE(review): the state parameter is a non-pointer state.StateDB here, while
// upstream go-ethereum uses *state.StateDB — presumably this fork declares it
// as an interface; confirm against the state package.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd) // TD of the inserted block = parent TD + own difficulty

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		// Roots are pushed with a negated block number so that older roots
		// surface first when popping (see the drain loop below).
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()

			// If we exceeded out time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
				}
				// Flush an entire trie and restart the counters
				triedb.Commit(header.Root, true)
				lastWrite = chosen
				bc.gcproc = 0
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					// Too young to dereference yet; put it back and stop.
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			// Coin-flip tie break when neither side is preferred.
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// insertChain does the actual work; events/logs are accumulated there and
	// published here unconditionally, even when insertion failed partway.
	n, events, logs, err := bc.insertChain(chain)
	bc.PostChainEvents(events, logs)
	return n, err
}

// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements. It returns the index of the failing block, the
// accumulated chain events, the coalesced logs, and an error.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil, nil, nil
	}
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		// results delivers verification outcomes in chain order, one per block.
		err := <-results
		if err == nil {
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
			// the chain is discarded and processed at a later time if given.
			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
			if block.Time().Cmp(max) > 0 {
				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
			}
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
			// Parent is itself queued as a future block, so queue this one too.
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrPrunedAncestor:
			// Block competing with the canonical chain, store in the db, but don't process
			// until the competitor TD goes above the canonical TD
			currentBlock := bc.CurrentBlock()
			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
			if localTd.Cmp(externTd) > 0 {
				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
					return i, events, coalescedLogs, err
				}
				continue
			}
			// Competitor chain beat canonical, gather all blocks from the common ancestor
			var winner []*types.Block

			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
			for !bc.HasState(parent.Root()) {
				winner = append(winner, parent)
				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
			}
			// Reverse winner in place: it was collected newest-first, but must
			// be imported oldest-first.
			for j := 0; j < len(winner)/2; j++ {
				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
			}
			// Import all the pruned blocks to make the state available.
			// chainmu is released around the recursive call since insertChain
			// re-acquires it.
			bc.chainmu.Unlock()
			_, evs, logs, err := bc.insertChain(winner)
			bc.chainmu.Lock()
			// NOTE(review): this replaces (not appends to) any previously
			// accumulated events/logs from earlier iterations — confirm that
			// dropping them is intended.
			events, coalescedLogs = evs, logs

			if err != nil {
				return i, events, coalescedLogs, err
			}
			// No continue: fall through to process the current block now that
			// its ancestor state is available.

		case err != nil:
			bc.reportBlock(block, nil, err)
			return i, events, coalescedLogs, err
		}
		// Create a new statedb using the parent block and report an
		// error if it fails.
		var parent *types.Block
		if i == 0 {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		} else {
			parent = chain[i-1]
		}
		// Note: the local variable shadows the state package for the rest of
		// this iteration.
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		proctime := time.Since(bstart)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))

			coalescedLogs = append(coalescedLogs, logs...)
			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))

			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas

		cache, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, i, cache)
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return 0, events, coalescedLogs, nil
}

// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int    // block counters for the current reporting window
	usedGas                    uint64 // cumulative gas used by processed blocks
	lastIndex                  int    // index of the first block in the current window
	startTime                  mclock.AbsTime
}

// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
1231 func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) { 1232 // Fetch the timings for the batch 1233 var ( 1234 now = mclock.Now() 1235 elapsed = time.Duration(now) - time.Duration(st.startTime) 1236 ) 1237 // If we're at the last block of the batch or report period reached, log 1238 if index == len(chain)-1 || elapsed >= statsReportLimit { 1239 var ( 1240 end = chain[index] 1241 txs = countTransactions(chain[st.lastIndex : index+1]) 1242 ) 1243 context := []interface{}{ 1244 "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, 1245 "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), 1246 "number", end.Number(), "hash", end.Hash(), 1247 } 1248 if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute { 1249 context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) 1250 } 1251 context = append(context, []interface{}{"cache", cache}...) 1252 1253 if st.queued > 0 { 1254 context = append(context, []interface{}{"queued", st.queued}...) 1255 } 1256 if st.ignored > 0 { 1257 context = append(context, []interface{}{"ignored", st.ignored}...) 1258 } 1259 log.Info("Imported new chain segment", context...) 
1260 1261 *st = insertStats{startTime: now, lastIndex: index + 1} 1262 } 1263 } 1264 1265 func countTransactions(chain []*types.Block) (c int) { 1266 for _, b := range chain { 1267 c += len(b.Transactions()) 1268 } 1269 return c 1270 } 1271 1272 // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them 1273 // to be part of the new canonical chain and accumulates potential missing transactions and post an 1274 // event about them 1275 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 1276 var ( 1277 newChain types.Blocks 1278 oldChain types.Blocks 1279 commonBlock *types.Block 1280 deletedTxs types.Transactions 1281 deletedLogs []*types.Log 1282 // collectLogs collects the logs that were generated during the 1283 // processing of the block that corresponds with the given hash. 1284 // These logs are later announced as deleted. 1285 collectLogs = func(hash common.Hash) { 1286 // Coalesce logs and set 'Removed'. 1287 number := bc.hc.GetBlockNumber(hash) 1288 if number == nil { 1289 return 1290 } 1291 receipts := rawdb.ReadReceipts(bc.db, hash, *number) 1292 for _, receipt := range receipts { 1293 for _, log := range receipt.Logs { 1294 del := *log 1295 del.Removed = true 1296 deletedLogs = append(deletedLogs, &del) 1297 } 1298 } 1299 } 1300 ) 1301 1302 // first reduce whoever is higher bound 1303 if oldBlock.NumberU64() > newBlock.NumberU64() { 1304 // reduce old chain 1305 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 1306 oldChain = append(oldChain, oldBlock) 1307 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
1308 1309 collectLogs(oldBlock.Hash()) 1310 } 1311 } else { 1312 // reduce new chain and append new chain blocks for inserting later on 1313 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 1314 newChain = append(newChain, newBlock) 1315 } 1316 } 1317 if oldBlock == nil { 1318 return fmt.Errorf("Invalid old chain") 1319 } 1320 if newBlock == nil { 1321 return fmt.Errorf("Invalid new chain") 1322 } 1323 1324 for { 1325 if oldBlock.Hash() == newBlock.Hash() { 1326 commonBlock = oldBlock 1327 break 1328 } 1329 1330 oldChain = append(oldChain, oldBlock) 1331 newChain = append(newChain, newBlock) 1332 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1333 collectLogs(oldBlock.Hash()) 1334 1335 oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 1336 if oldBlock == nil { 1337 return fmt.Errorf("Invalid old chain") 1338 } 1339 if newBlock == nil { 1340 return fmt.Errorf("Invalid new chain") 1341 } 1342 } 1343 // Ensure the user sees large reorgs 1344 if len(oldChain) > 0 && len(newChain) > 0 { 1345 logFn := log.Debug 1346 if len(oldChain) > 63 { 1347 logFn = log.Warn 1348 } 1349 logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), 1350 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 1351 } else { 1352 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 1353 } 1354 // Insert the new chain, taking care of the proper incremental order 1355 var addedTxs types.Transactions 1356 for i := len(newChain) - 1; i >= 0; i-- { 1357 // insert the block in the canonical way, re-writing history 1358 bc.insert(newChain[i]) 1359 // write lookup entries for hash based transaction/receipt 
searches 1360 rawdb.WriteTxLookupEntries(bc.db, newChain[i]) 1361 addedTxs = append(addedTxs, newChain[i].Transactions()...) 1362 } 1363 // calculate the difference between deleted and added transactions 1364 diff := types.TxDifference(deletedTxs, addedTxs) 1365 // When transactions get deleted from the database that means the 1366 // receipts that were created in the fork must also be deleted 1367 batch := bc.db.NewBatch() 1368 for _, tx := range diff { 1369 rawdb.DeleteTxLookupEntry(batch, tx.Hash()) 1370 } 1371 batch.Write() 1372 1373 if len(deletedLogs) > 0 { 1374 go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) 1375 } 1376 if len(oldChain) > 0 { 1377 go func() { 1378 for _, block := range oldChain { 1379 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1380 } 1381 }() 1382 } 1383 1384 return nil 1385 } 1386 1387 // PostChainEvents iterates over the events generated by a chain insertion and 1388 // posts them into the event feed. 1389 // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock. 
1390 func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) { 1391 // post event logs for further processing 1392 if logs != nil { 1393 bc.logsFeed.Send(logs) 1394 } 1395 for _, event := range events { 1396 switch ev := event.(type) { 1397 case ChainEvent: 1398 bc.chainFeed.Send(ev) 1399 1400 case ChainHeadEvent: 1401 bc.chainHeadFeed.Send(ev) 1402 1403 case ChainSideEvent: 1404 bc.chainSideFeed.Send(ev) 1405 } 1406 } 1407 } 1408 1409 func (bc *BlockChain) update() { 1410 futureTimer := time.NewTicker(5 * time.Second) 1411 defer futureTimer.Stop() 1412 for { 1413 select { 1414 case <-futureTimer.C: 1415 bc.procFutureBlocks() 1416 case <-bc.quit: 1417 return 1418 } 1419 } 1420 } 1421 1422 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1423 func (bc *BlockChain) BadBlocks() []*types.Block { 1424 blocks := make([]*types.Block, 0, bc.badBlocks.Len()) 1425 for _, hash := range bc.badBlocks.Keys() { 1426 if blk, exist := bc.badBlocks.Peek(hash); exist { 1427 block := blk.(*types.Block) 1428 blocks = append(blocks, block) 1429 } 1430 } 1431 return blocks 1432 } 1433 1434 // addBadBlock adds a bad block to the bad-block LRU cache 1435 func (bc *BlockChain) addBadBlock(block *types.Block) { 1436 bc.badBlocks.Add(block.Hash(), block) 1437 } 1438 1439 // reportBlock logs a bad block error. 
1440 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1441 bc.addBadBlock(block) 1442 1443 var receiptString string 1444 for _, receipt := range receipts { 1445 receiptString += fmt.Sprintf("\t%v\n", receipt) 1446 } 1447 log.Error(fmt.Sprintf(` 1448 ########## BAD BLOCK ######### 1449 Chain config: %v 1450 1451 Number: %v 1452 Hash: 0x%x 1453 %v 1454 1455 Error: %v 1456 ############################## 1457 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1458 } 1459 1460 // InsertHeaderChain attempts to insert the given header chain in to the local 1461 // chain, possibly creating a reorg. If an error is returned, it will return the 1462 // index number of the failing header as well an error describing what went wrong. 1463 // 1464 // The verify parameter can be used to fine tune whether nonce verification 1465 // should be done or not. The reason behind the optional check is because some 1466 // of the header retrieval mechanisms already need to verify nonces, as well as 1467 // because nonces can be verified sparsely, not needing to check each. 1468 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1469 start := time.Now() 1470 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1471 return i, err 1472 } 1473 1474 // Make sure only one thread manipulates the chain at once 1475 bc.chainmu.Lock() 1476 defer bc.chainmu.Unlock() 1477 1478 bc.wg.Add(1) 1479 defer bc.wg.Done() 1480 1481 whFunc := func(header *types.Header) error { 1482 bc.mu.Lock() 1483 defer bc.mu.Unlock() 1484 1485 _, err := bc.hc.WriteHeader(header) 1486 return err 1487 } 1488 1489 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1490 } 1491 1492 // writeHeader writes a header into the local chain, given that its parent is 1493 // already known. 
If the total difficulty of the newly inserted header becomes 1494 // greater than the current known TD, the canonical chain is re-routed. 1495 // 1496 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1497 // into the chain, as side effects caused by reorganisations cannot be emulated 1498 // without the real blocks. Hence, writing headers directly should only be done 1499 // in two scenarios: pure-header mode of operation (light clients), or properly 1500 // separated header/block phases (non-archive clients). 1501 func (bc *BlockChain) writeHeader(header *types.Header) error { 1502 bc.wg.Add(1) 1503 defer bc.wg.Done() 1504 1505 bc.mu.Lock() 1506 defer bc.mu.Unlock() 1507 1508 _, err := bc.hc.WriteHeader(header) 1509 return err 1510 } 1511 1512 // CurrentHeader retrieves the current head header of the canonical chain. The 1513 // header is retrieved from the HeaderChain's internal cache. 1514 func (bc *BlockChain) CurrentHeader() *types.Header { 1515 return bc.hc.CurrentHeader() 1516 } 1517 1518 // GetTd retrieves a block's total difficulty in the canonical chain from the 1519 // database by hash and number, caching it if found. 1520 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1521 return bc.hc.GetTd(hash, number) 1522 } 1523 1524 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1525 // database by hash, caching it if found. 1526 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1527 return bc.hc.GetTdByHash(hash) 1528 } 1529 1530 // GetHeader retrieves a block header from the database by hash and number, 1531 // caching it if found. 1532 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1533 return bc.hc.GetHeader(hash, number) 1534 } 1535 1536 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1537 // found. 
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	// Pure delegation to the underlying header chain.
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	// Unlike the other delegating getters, this one takes the chain mutex
	// before descending into the header chain.
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}