github.com/4000d/go-ethereum@v1.8.2-0.20180223170251-423c8bb1d821/core/blockchain.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package core implements the Ethereum consensus protocol. 18 package core 19 20 import ( 21 "errors" 22 "fmt" 23 "io" 24 "math/big" 25 mrand "math/rand" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/ethereum/go-ethereum/common" 31 "github.com/ethereum/go-ethereum/common/mclock" 32 "github.com/ethereum/go-ethereum/consensus" 33 "github.com/ethereum/go-ethereum/core/state" 34 "github.com/ethereum/go-ethereum/core/types" 35 "github.com/ethereum/go-ethereum/core/vm" 36 "github.com/ethereum/go-ethereum/crypto" 37 "github.com/ethereum/go-ethereum/ethdb" 38 "github.com/ethereum/go-ethereum/event" 39 "github.com/ethereum/go-ethereum/log" 40 "github.com/ethereum/go-ethereum/metrics" 41 "github.com/ethereum/go-ethereum/params" 42 "github.com/ethereum/go-ethereum/rlp" 43 "github.com/ethereum/go-ethereum/trie" 44 "github.com/hashicorp/golang-lru" 45 "gopkg.in/karalabe/cookiejar.v2/collections/prque" 46 ) 47 48 var ( 49 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 50 51 ErrNoGenesis = errors.New("Genesis not found in chain") 52 ) 53 54 const ( 55 bodyCacheLimit = 256 56 blockCacheLimit = 256 57 maxFutureBlocks = 256 58 maxTimeFutureBlocks = 30 59 badBlockLimit = 10 60 triesInMemory = 128 61 62 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 63 BlockChainVersion = 3 64 ) 65 66 // CacheConfig contains the configuration values for the trie caching/pruning 67 // that's resident in a blockchain. 68 type CacheConfig struct { 69 Disabled bool // Whether to disable trie write caching (archive node) 70 TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk 71 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 72 } 73 74 // BlockChain represents the canonical chain given a database with a genesis 75 // block. The Blockchain manages chain imports, reverts, chain reorganisations. 76 // 77 // Importing blocks in to the block chain happens according to the set of rules 78 // defined by the two stage Validator. Processing of blocks is done using the 79 // Processor which processes the included transaction. The validation of the state 80 // is done in the second part of the Validator. Failing results in aborting of 81 // the import. 82 // 83 // The BlockChain also helps in returning blocks from **any** chain included 84 // in the database as well as blocks that represents the canonical chain. 
It's 85 // important to note that GetBlock can return any block and does not need to be 86 // included in the canonical one where as GetBlockByNumber always represents the 87 // canonical chain. 88 type BlockChain struct { 89 chainConfig *params.ChainConfig // Chain & network configuration 90 cacheConfig *CacheConfig // Cache configuration for pruning 91 92 db ethdb.Database // Low level persistent database to store final content in 93 triegc *prque.Prque // Priority queue mapping block numbers to tries to gc 94 gcproc time.Duration // Accumulates canonical block processing for trie dumping 95 96 hc *HeaderChain 97 rmLogsFeed event.Feed 98 chainFeed event.Feed 99 chainSideFeed event.Feed 100 chainHeadFeed event.Feed 101 logsFeed event.Feed 102 scope event.SubscriptionScope 103 genesisBlock *types.Block 104 105 mu sync.RWMutex // global mutex for locking chain operations 106 chainmu sync.RWMutex // blockchain insertion lock 107 procmu sync.RWMutex // block processor lock 108 109 checkpoint int // checkpoint counts towards the new checkpoint 110 currentBlock *types.Block // Current head of the block chain 111 currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!) 112 113 stateCache state.Database // State database to reuse between imports (contains state cache) 114 bodyCache *lru.Cache // Cache for the most recent block bodies 115 bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format 116 blockCache *lru.Cache // Cache for the most recent entire blocks 117 futureBlocks *lru.Cache // future blocks are blocks added for later processing 118 119 quit chan struct{} // blockchain quit channel 120 running int32 // running must be called atomically 121 // procInterrupt must be atomically called 122 procInterrupt int32 // interrupt signaler for block processing 123 wg sync.WaitGroup // chain processing wait group for shutting down 124 125 engine consensus.Engine 126 processor Processor // block processor interface 127 validator Validator // block and state validator interface 128 vmConfig vm.Config 129 130 badBlocks *lru.Cache // Bad block cache 131 } 132 133 // NewBlockChain returns a fully initialised block chain using information 134 // available in the database. It initialises the default Ethereum Validator and 135 // Processor. 
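//
// A minimal construction sketch, modelled on this package's tests; the in-memory
// database, params.TestChainConfig and ethash.NewFaker used here are assumptions of
// the sketch rather than requirements of the constructor:
//
//	db, _ := ethdb.NewMemDatabase()
//	gspec := &Genesis{Config: params.TestChainConfig}
//	gspec.MustCommit(db)
//	chain, err := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
//	if err != nil {
//		panic(err)
//	}
//	defer chain.Stop()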
136 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) { 137 if cacheConfig == nil { 138 cacheConfig = &CacheConfig{ 139 TrieNodeLimit: 256 * 1024 * 1024, 140 TrieTimeLimit: 5 * time.Minute, 141 } 142 } 143 bodyCache, _ := lru.New(bodyCacheLimit) 144 bodyRLPCache, _ := lru.New(bodyCacheLimit) 145 blockCache, _ := lru.New(blockCacheLimit) 146 futureBlocks, _ := lru.New(maxFutureBlocks) 147 badBlocks, _ := lru.New(badBlockLimit) 148 149 bc := &BlockChain{ 150 chainConfig: chainConfig, 151 cacheConfig: cacheConfig, 152 db: db, 153 triegc: prque.New(), 154 stateCache: state.NewDatabase(db), 155 quit: make(chan struct{}), 156 bodyCache: bodyCache, 157 bodyRLPCache: bodyRLPCache, 158 blockCache: blockCache, 159 futureBlocks: futureBlocks, 160 engine: engine, 161 vmConfig: vmConfig, 162 badBlocks: badBlocks, 163 } 164 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 165 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 166 167 var err error 168 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 169 if err != nil { 170 return nil, err 171 } 172 bc.genesisBlock = bc.GetBlockByNumber(0) 173 if bc.genesisBlock == nil { 174 return nil, ErrNoGenesis 175 } 176 if err := bc.loadLastState(); err != nil { 177 return nil, err 178 } 179 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 180 for hash := range BadHashes { 181 if header := bc.GetHeaderByHash(hash); header != nil { 182 // get the canonical block corresponding to the offending header's number 183 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 184 // make sure the headerByNumber (if present) is in our current canonical chain 185 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 186 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 187 bc.SetHead(header.Number.Uint64() - 1) 188 log.Error("Chain rewind was successful, resuming normal operation") 189 } 190 } 191 } 192 // Take ownership of this particular state 193 go bc.update() 194 return bc, nil 195 } 196 197 func (bc *BlockChain) getProcInterrupt() bool { 198 return atomic.LoadInt32(&bc.procInterrupt) == 1 199 } 200 201 // loadLastState loads the last known chain state from the database. This method 202 // assumes that the chain manager mutex is held. 
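// Recovery proceeds head block first (repairing it if its state is missing), then
// the head header and the fast-sync head, each of which falls back to the restored
// full block when its stored hash cannot be resolved from the database.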
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := GetHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock = currentBlock

	// Restore the last known head header
	currentHeader := bc.currentBlock.Header()
	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock = bc.currentBlock
	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock = block
		}
	}

	// Issue a status log for the user
	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
	fastTd := bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
	log.Info("Loaded most recent local full block", "number", bc.currentBlock.Number(), "hash", bc.currentBlock.Hash(), "td", blockTd)
	log.Info("Loaded most recent local fast block", "number", bc.currentFastBlock.Number(), "hash", bc.currentFastBlock.Hash(), "td", fastTd)

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
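//
// An illustrative rewind, continuing the construction sketch in the NewBlockChain
// documentation (the target number is arbitrary):
//
//	if err := chain.SetHead(1000000); err != nil {
//		log.Error("Rewind failed", "err", err)
//	}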
262 func (bc *BlockChain) SetHead(head uint64) error { 263 log.Warn("Rewinding blockchain", "target", head) 264 265 bc.mu.Lock() 266 defer bc.mu.Unlock() 267 268 // Rewind the header chain, deleting all block bodies until then 269 delFn := func(hash common.Hash, num uint64) { 270 DeleteBody(bc.db, hash, num) 271 } 272 bc.hc.SetHead(head, delFn) 273 currentHeader := bc.hc.CurrentHeader() 274 275 // Clear out any stale content from the caches 276 bc.bodyCache.Purge() 277 bc.bodyRLPCache.Purge() 278 bc.blockCache.Purge() 279 bc.futureBlocks.Purge() 280 281 // Rewind the block chain, ensuring we don't end up with a stateless head block 282 if bc.currentBlock != nil && currentHeader.Number.Uint64() < bc.currentBlock.NumberU64() { 283 bc.currentBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()) 284 } 285 if bc.currentBlock != nil { 286 if _, err := state.New(bc.currentBlock.Root(), bc.stateCache); err != nil { 287 // Rewound state missing, rolled back to before pivot, reset to genesis 288 bc.currentBlock = nil 289 } 290 } 291 // Rewind the fast block in a simpleton way to the target head 292 if bc.currentFastBlock != nil && currentHeader.Number.Uint64() < bc.currentFastBlock.NumberU64() { 293 bc.currentFastBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()) 294 } 295 // If either blocks reached nil, reset to the genesis state 296 if bc.currentBlock == nil { 297 bc.currentBlock = bc.genesisBlock 298 } 299 if bc.currentFastBlock == nil { 300 bc.currentFastBlock = bc.genesisBlock 301 } 302 if err := WriteHeadBlockHash(bc.db, bc.currentBlock.Hash()); err != nil { 303 log.Crit("Failed to reset head full block", "err", err) 304 } 305 if err := WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash()); err != nil { 306 log.Crit("Failed to reset head fast block", "err", err) 307 } 308 return bc.loadLastState() 309 } 310 311 // FastSyncCommitHead sets the current head block to the one defined by the hash 312 // irrelevant what the chain contents were prior. 313 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { 314 // Make sure that both the block as well at its state trie exists 315 block := bc.GetBlockByHash(hash) 316 if block == nil { 317 return fmt.Errorf("non existent block [%x…]", hash[:4]) 318 } 319 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil { 320 return err 321 } 322 // If all checks out, manually set the head block 323 bc.mu.Lock() 324 bc.currentBlock = block 325 bc.mu.Unlock() 326 327 log.Info("Committed new head block", "number", block.Number(), "hash", hash) 328 return nil 329 } 330 331 // GasLimit returns the gas limit of the current HEAD block. 332 func (bc *BlockChain) GasLimit() uint64 { 333 bc.mu.RLock() 334 defer bc.mu.RUnlock() 335 336 return bc.currentBlock.GasLimit() 337 } 338 339 // CurrentBlock retrieves the current head block of the canonical chain. The 340 // block is retrieved from the blockchain's internal cache. 341 func (bc *BlockChain) CurrentBlock() *types.Block { 342 bc.mu.RLock() 343 defer bc.mu.RUnlock() 344 345 return bc.currentBlock 346 } 347 348 // CurrentFastBlock retrieves the current fast-sync head block of the canonical 349 // chain. The block is retrieved from the blockchain's internal cache. 350 func (bc *BlockChain) CurrentFastBlock() *types.Block { 351 bc.mu.RLock() 352 defer bc.mu.RUnlock() 353 354 return bc.currentFastBlock 355 } 356 357 // SetProcessor sets the processor required for making state modifications. 
358 func (bc *BlockChain) SetProcessor(processor Processor) { 359 bc.procmu.Lock() 360 defer bc.procmu.Unlock() 361 bc.processor = processor 362 } 363 364 // SetValidator sets the validator which is used to validate incoming blocks. 365 func (bc *BlockChain) SetValidator(validator Validator) { 366 bc.procmu.Lock() 367 defer bc.procmu.Unlock() 368 bc.validator = validator 369 } 370 371 // Validator returns the current validator. 372 func (bc *BlockChain) Validator() Validator { 373 bc.procmu.RLock() 374 defer bc.procmu.RUnlock() 375 return bc.validator 376 } 377 378 // Processor returns the current processor. 379 func (bc *BlockChain) Processor() Processor { 380 bc.procmu.RLock() 381 defer bc.procmu.RUnlock() 382 return bc.processor 383 } 384 385 // State returns a new mutable state based on the current HEAD block. 386 func (bc *BlockChain) State() (*state.StateDB, error) { 387 return bc.StateAt(bc.CurrentBlock().Root()) 388 } 389 390 // StateAt returns a new mutable state based on a particular point in time. 391 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 392 return state.New(root, bc.stateCache) 393 } 394 395 // Reset purges the entire blockchain, restoring it to its genesis state. 396 func (bc *BlockChain) Reset() error { 397 return bc.ResetWithGenesisBlock(bc.genesisBlock) 398 } 399 400 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 401 // specified genesis state. 402 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 403 // Dump the entire block chain and purge the caches 404 if err := bc.SetHead(0); err != nil { 405 return err 406 } 407 bc.mu.Lock() 408 defer bc.mu.Unlock() 409 410 // Prepare the genesis block and reinitialise the chain 411 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 412 log.Crit("Failed to write genesis block TD", "err", err) 413 } 414 if err := WriteBlock(bc.db, genesis); err != nil { 415 log.Crit("Failed to write genesis block", "err", err) 416 } 417 bc.genesisBlock = genesis 418 bc.insert(bc.genesisBlock) 419 bc.currentBlock = bc.genesisBlock 420 bc.hc.SetGenesis(bc.genesisBlock.Header()) 421 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 422 bc.currentFastBlock = bc.genesisBlock 423 424 return nil 425 } 426 427 // repair tries to repair the current blockchain by rolling back the current block 428 // until one with associated state is found. This is needed to fix incomplete db 429 // writes caused either by crashes/power outages, or simply non-committed tries. 430 // 431 // This method only rolls back the current block. The current header and current 432 // fast block are left intact. 433 func (bc *BlockChain) repair(head **types.Block) error { 434 for { 435 // Abort if we've rewound to a head block that does have associated state 436 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 437 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 438 return nil 439 } 440 // Otherwise rewind one block and recheck state availability there 441 (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 442 } 443 } 444 445 // Export writes the active chain to the given writer. 446 func (bc *BlockChain) Export(w io.Writer) error { 447 return bc.ExportN(w, uint64(0), bc.currentBlock.NumberU64()) 448 } 449 450 // ExportN writes a subset of the active chain to the given writer. 
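//
// An illustrative export of the first thousand blocks to a file (os.Create and the
// file name are assumptions of this sketch):
//
//	f, err := os.Create("chain-segment.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := chain.ExportN(f, 0, 1000); err != nil {
//		return err
//	}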
451 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 452 bc.mu.RLock() 453 defer bc.mu.RUnlock() 454 455 if first > last { 456 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 457 } 458 log.Info("Exporting batch of blocks", "count", last-first+1) 459 460 for nr := first; nr <= last; nr++ { 461 block := bc.GetBlockByNumber(nr) 462 if block == nil { 463 return fmt.Errorf("export failed on #%d: not found", nr) 464 } 465 466 if err := block.EncodeRLP(w); err != nil { 467 return err 468 } 469 } 470 471 return nil 472 } 473 474 // insert injects a new head block into the current block chain. This method 475 // assumes that the block is indeed a true head. It will also reset the head 476 // header and the head fast sync block to this very same block if they are older 477 // or if they are on a different side chain. 478 // 479 // Note, this function assumes that the `mu` mutex is held! 480 func (bc *BlockChain) insert(block *types.Block) { 481 // If the block is on a side chain or an unknown one, force other heads onto it too 482 updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 483 484 // Add the block to the canonical chain number scheme and mark as the head 485 if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil { 486 log.Crit("Failed to insert block number", "err", err) 487 } 488 if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil { 489 log.Crit("Failed to insert head block hash", "err", err) 490 } 491 bc.currentBlock = block 492 493 // If the block is better than our head or is on a different chain, force update heads 494 if updateHeads { 495 bc.hc.SetCurrentHeader(block.Header()) 496 497 if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil { 498 log.Crit("Failed to insert head fast block hash", "err", err) 499 } 500 bc.currentFastBlock = block 501 } 502 } 503 504 // Genesis retrieves the chain's genesis block. 505 func (bc *BlockChain) Genesis() *types.Block { 506 return bc.genesisBlock 507 } 508 509 // GetBody retrieves a block body (transactions and uncles) from the database by 510 // hash, caching it if found. 511 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 512 // Short circuit if the body's already in the cache, retrieve otherwise 513 if cached, ok := bc.bodyCache.Get(hash); ok { 514 body := cached.(*types.Body) 515 return body 516 } 517 body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash)) 518 if body == nil { 519 return nil 520 } 521 // Cache the found body for next time and return 522 bc.bodyCache.Add(hash, body) 523 return body 524 } 525 526 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 527 // caching it if found. 528 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 529 // Short circuit if the body's already in the cache, retrieve otherwise 530 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 531 return cached.(rlp.RawValue) 532 } 533 body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash)) 534 if len(body) == 0 { 535 return nil 536 } 537 // Cache the found body for next time and return 538 bc.bodyRLPCache.Add(hash, body) 539 return body 540 } 541 542 // HasBlock checks if a block is fully present in the database or not. 
543 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 544 if bc.blockCache.Contains(hash) { 545 return true 546 } 547 ok, _ := bc.db.Has(blockBodyKey(hash, number)) 548 return ok 549 } 550 551 // HasState checks if state trie is fully present in the database or not. 552 func (bc *BlockChain) HasState(hash common.Hash) bool { 553 _, err := bc.stateCache.OpenTrie(hash) 554 return err == nil 555 } 556 557 // HasBlockAndState checks if a block and associated state trie is fully present 558 // in the database or not, caching it if present. 559 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 560 // Check first that the block itself is known 561 block := bc.GetBlock(hash, number) 562 if block == nil { 563 return false 564 } 565 return bc.HasState(block.Root()) 566 } 567 568 // GetBlock retrieves a block from the database by hash and number, 569 // caching it if found. 570 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 571 // Short circuit if the block's already in the cache, retrieve otherwise 572 if block, ok := bc.blockCache.Get(hash); ok { 573 return block.(*types.Block) 574 } 575 block := GetBlock(bc.db, hash, number) 576 if block == nil { 577 return nil 578 } 579 // Cache the found block for next time and return 580 bc.blockCache.Add(block.Hash(), block) 581 return block 582 } 583 584 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 585 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 586 return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash)) 587 } 588 589 // GetBlockByNumber retrieves a block from the database by number, caching it 590 // (associated with its hash) if found. 591 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 592 hash := GetCanonicalHash(bc.db, number) 593 if hash == (common.Hash{}) { 594 return nil 595 } 596 return bc.GetBlock(hash, number) 597 } 598 599 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 600 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 601 return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash)) 602 } 603 604 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 605 // [deprecated by eth/62] 606 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 607 number := bc.hc.GetBlockNumber(hash) 608 for i := 0; i < n; i++ { 609 block := bc.GetBlock(hash, number) 610 if block == nil { 611 break 612 } 613 blocks = append(blocks, block) 614 hash = block.ParentHash() 615 number-- 616 } 617 return 618 } 619 620 // GetUnclesInChain retrieves all the uncles from a given block backwards until 621 // a specific distance is reached. 622 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 623 uncles := []*types.Header{} 624 for i := 0; block != nil && i < length; i++ { 625 uncles = append(uncles, block.Uncles()...) 626 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 627 } 628 return uncles 629 } 630 631 // TrieNode retrieves a blob of data associated with a trie node (or code hash) 632 // either from ephemeral in-memory cache, or from persistent storage. 633 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 634 return bc.stateCache.TrieDB().Node(hash) 635 } 636 637 // Stop stops the blockchain service. If any imports are currently in progress 638 // it will abort them using the procInterrupt. 
639 func (bc *BlockChain) Stop() { 640 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 641 return 642 } 643 // Unsubscribe all subscriptions registered from blockchain 644 bc.scope.Close() 645 close(bc.quit) 646 atomic.StoreInt32(&bc.procInterrupt, 1) 647 648 bc.wg.Wait() 649 650 // Ensure the state of a recent block is also stored to disk before exiting. 651 // We're writing three different states to catch different restart scenarios: 652 // - HEAD: So we don't need to reprocess any blocks in the general case 653 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 654 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 655 if !bc.cacheConfig.Disabled { 656 triedb := bc.stateCache.TrieDB() 657 658 for _, offset := range []uint64{0, 1, triesInMemory - 1} { 659 if number := bc.CurrentBlock().NumberU64(); number > offset { 660 recent := bc.GetBlockByNumber(number - offset) 661 662 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 663 if err := triedb.Commit(recent.Root(), true); err != nil { 664 log.Error("Failed to commit recent state trie", "err", err) 665 } 666 } 667 } 668 for !bc.triegc.Empty() { 669 triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{}) 670 } 671 if size := triedb.Size(); size != 0 { 672 log.Error("Dangling trie nodes after full cleanup") 673 } 674 } 675 log.Info("Blockchain manager stopped") 676 } 677 678 func (bc *BlockChain) procFutureBlocks() { 679 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 680 for _, hash := range bc.futureBlocks.Keys() { 681 if block, exist := bc.futureBlocks.Peek(hash); exist { 682 blocks = append(blocks, block.(*types.Block)) 683 } 684 } 685 if len(blocks) > 0 { 686 types.BlockBy(types.Number).Sort(blocks) 687 688 // Insert one by one as chain insertion needs contiguous ancestry between blocks 689 for i := range blocks { 690 bc.InsertChain(blocks[i : i+1]) 691 } 692 } 693 } 694 695 // WriteStatus status of write 696 type WriteStatus byte 697 698 const ( 699 NonStatTy WriteStatus = iota 700 CanonStatTy 701 SideStatTy 702 ) 703 704 // Rollback is designed to remove a chain of links from the database that aren't 705 // certain enough to be valid. 
706 func (bc *BlockChain) Rollback(chain []common.Hash) { 707 bc.mu.Lock() 708 defer bc.mu.Unlock() 709 710 for i := len(chain) - 1; i >= 0; i-- { 711 hash := chain[i] 712 713 currentHeader := bc.hc.CurrentHeader() 714 if currentHeader.Hash() == hash { 715 bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)) 716 } 717 if bc.currentFastBlock.Hash() == hash { 718 bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash(), bc.currentFastBlock.NumberU64()-1) 719 WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash()) 720 } 721 if bc.currentBlock.Hash() == hash { 722 bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash(), bc.currentBlock.NumberU64()-1) 723 WriteHeadBlockHash(bc.db, bc.currentBlock.Hash()) 724 } 725 } 726 } 727 728 // SetReceiptsData computes all the non-consensus fields of the receipts 729 func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) { 730 signer := types.MakeSigner(config, block.Number()) 731 732 transactions, logIndex := block.Transactions(), uint(0) 733 734 for j := 0; j < len(receipts); j++ { 735 // The transaction hash can be retrieved from the transaction itself 736 receipts[j].TxHash = transactions[j].Hash() 737 738 // The contract address can be derived from the transaction itself 739 if transactions[j].To() == nil { 740 // Deriving the signer is expensive, only do if it's actually needed 741 from, _ := types.Sender(signer, transactions[j]) 742 receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) 743 } 744 // The used gas can be calculated based on previous receipts 745 if j == 0 { 746 receipts[j].GasUsed = receipts[j].CumulativeGasUsed 747 } else { 748 receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed 749 } 750 // The derived log fields can simply be set from the block and transaction 751 for k := 0; k < len(receipts[j].Logs); k++ { 752 receipts[j].Logs[k].BlockNumber = block.NumberU64() 753 receipts[j].Logs[k].BlockHash = block.Hash() 754 receipts[j].Logs[k].TxHash = receipts[j].TxHash 755 receipts[j].Logs[k].TxIndex = uint(j) 756 receipts[j].Logs[k].Index = logIndex 757 logIndex++ 758 } 759 } 760 } 761 762 // InsertReceiptChain attempts to complete an already existing header chain with 763 // transaction and receipt data. 
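// Only block bodies, receipts and transaction lookup entries are written here; the
// head of the full chain is left untouched, and the fast-sync head is only advanced
// when the imported segment carries a higher total difficulty.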
764 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 765 bc.wg.Add(1) 766 defer bc.wg.Done() 767 768 // Do a sanity check that the provided chain is actually ordered and linked 769 for i := 1; i < len(blockChain); i++ { 770 if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { 771 log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), 772 "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) 773 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), 774 blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) 775 } 776 } 777 778 var ( 779 stats = struct{ processed, ignored int32 }{} 780 start = time.Now() 781 bytes = 0 782 batch = bc.db.NewBatch() 783 ) 784 for i, block := range blockChain { 785 receipts := receiptChain[i] 786 // Short circuit insertion if shutting down or processing failed 787 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 788 return 0, nil 789 } 790 // Short circuit if the owner header is unknown 791 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 792 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 793 } 794 // Skip if the entire data is already known 795 if bc.HasBlock(block.Hash(), block.NumberU64()) { 796 stats.ignored++ 797 continue 798 } 799 // Compute all the non-consensus fields of the receipts 800 SetReceiptsData(bc.chainConfig, block, receipts) 801 // Write all the data out into the database 802 if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil { 803 return i, fmt.Errorf("failed to write block body: %v", err) 804 } 805 if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { 806 return i, fmt.Errorf("failed to write block receipts: %v", err) 807 } 808 if err := WriteTxLookupEntries(batch, block); err != nil { 809 return i, fmt.Errorf("failed to write lookup metadata: %v", err) 810 } 811 stats.processed++ 812 813 if batch.ValueSize() >= ethdb.IdealBatchSize { 814 if err := batch.Write(); err != nil { 815 return 0, err 816 } 817 bytes += batch.ValueSize() 818 batch.Reset() 819 } 820 } 821 if batch.ValueSize() > 0 { 822 bytes += batch.ValueSize() 823 if err := batch.Write(); err != nil { 824 return 0, err 825 } 826 } 827 828 // Update the head fast sync block if better 829 bc.mu.Lock() 830 head := blockChain[len(blockChain)-1] 831 if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case 832 if bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64()).Cmp(td) < 0 { 833 if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil { 834 log.Crit("Failed to update head fast block hash", "err", err) 835 } 836 bc.currentFastBlock = head 837 } 838 } 839 bc.mu.Unlock() 840 841 log.Info("Imported new block receipts", 842 "count", stats.processed, 843 "elapsed", common.PrettyDuration(time.Since(start)), 844 "number", head.Number(), 845 "hash", head.Hash(), 846 "size", common.StorageSize(bytes), 847 "ignored", stats.ignored) 848 return 0, nil 849 } 850 851 var lastWrite uint64 852 853 // WriteBlockWithoutState writes only the block and its metadata to the database, 854 // but does 
not write any state. This is used to construct competing side forks 855 // up to the point where they exceed the canonical total difficulty. 856 func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) { 857 bc.wg.Add(1) 858 defer bc.wg.Done() 859 860 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { 861 return err 862 } 863 if err := WriteBlock(bc.db, block); err != nil { 864 return err 865 } 866 return nil 867 } 868 869 // WriteBlockWithState writes the block and all associated state to the database. 870 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) { 871 bc.wg.Add(1) 872 defer bc.wg.Done() 873 874 // Calculate the total difficulty of the block 875 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 876 if ptd == nil { 877 return NonStatTy, consensus.ErrUnknownAncestor 878 } 879 // Make sure no inconsistent state is leaked during insertion 880 bc.mu.Lock() 881 defer bc.mu.Unlock() 882 883 localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64()) 884 externTd := new(big.Int).Add(block.Difficulty(), ptd) 885 886 // Irrelevant of the canonical status, write the block itself to the database 887 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { 888 return NonStatTy, err 889 } 890 // Write other block data using a batch. 891 batch := bc.db.NewBatch() 892 if err := WriteBlock(batch, block); err != nil { 893 return NonStatTy, err 894 } 895 root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) 896 if err != nil { 897 return NonStatTy, err 898 } 899 triedb := bc.stateCache.TrieDB() 900 901 // If we're running an archive node, always flush 902 if bc.cacheConfig.Disabled { 903 if err := triedb.Commit(root, false); err != nil { 904 return NonStatTy, err 905 } 906 } else { 907 // Full but not archive node, do proper garbage collection 908 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 909 bc.triegc.Push(root, -float32(block.NumberU64())) 910 911 if current := block.NumberU64(); current > triesInMemory { 912 // Find the next state trie we need to commit 913 header := bc.GetHeaderByNumber(current - triesInMemory) 914 chosen := header.Number.Uint64() 915 916 // Only write to disk if we exceeded our memory allowance *and* also have at 917 // least a given number of tries gapped. 918 var ( 919 size = triedb.Size() 920 limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024 921 ) 922 if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit { 923 // If we're exceeding limits but haven't reached a large enough memory gap, 924 // warn the user that the system is becoming unstable. 
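// (The actual flush happens in the condition a few lines below: a commit to disk is
// forced once the gap since the last write reaches triesInMemory blocks, or once the
// hard ceilings of twice the memory limit or twice the TrieTimeLimit allowance are hit.)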
925 if chosen < lastWrite+triesInMemory { 926 switch { 927 case size >= 2*limit: 928 log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory) 929 case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit: 930 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) 931 } 932 } 933 // If optimum or critical limits reached, write to disk 934 if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 935 triedb.Commit(header.Root, true) 936 lastWrite = chosen 937 bc.gcproc = 0 938 } 939 } 940 // Garbage collect anything below our required write retention 941 for !bc.triegc.Empty() { 942 root, number := bc.triegc.Pop() 943 if uint64(-number) > chosen { 944 bc.triegc.Push(root, number) 945 break 946 } 947 triedb.Dereference(root.(common.Hash), common.Hash{}) 948 } 949 } 950 } 951 if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { 952 return NonStatTy, err 953 } 954 // If the total difficulty is higher than our known, add it to the canonical chain 955 // Second clause in the if statement reduces the vulnerability to selfish mining. 956 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 957 reorg := externTd.Cmp(localTd) > 0 958 if !reorg && externTd.Cmp(localTd) == 0 { 959 // Split same-difficulty blocks by number, then at random 960 reorg = block.NumberU64() < bc.currentBlock.NumberU64() || (block.NumberU64() == bc.currentBlock.NumberU64() && mrand.Float64() < 0.5) 961 } 962 if reorg { 963 // Reorganise the chain if the parent is not the head block 964 if block.ParentHash() != bc.currentBlock.Hash() { 965 if err := bc.reorg(bc.currentBlock, block); err != nil { 966 return NonStatTy, err 967 } 968 } 969 // Write the positional metadata for transaction and receipt lookups 970 if err := WriteTxLookupEntries(batch, block); err != nil { 971 return NonStatTy, err 972 } 973 // Write hash preimages 974 if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil { 975 return NonStatTy, err 976 } 977 status = CanonStatTy 978 } else { 979 status = SideStatTy 980 } 981 if err := batch.Write(); err != nil { 982 return NonStatTy, err 983 } 984 985 // Set new head. 986 if status == CanonStatTy { 987 bc.insert(block) 988 } 989 bc.futureBlocks.Remove(block.Hash()) 990 return status, nil 991 } 992 993 // InsertChain attempts to insert the given batch of blocks in to the canonical 994 // chain or, otherwise, create a fork. If an error is returned it will return 995 // the index number of the failing block as well an error describing what went 996 // wrong. 997 // 998 // After insertion is done, all accumulated events will be fired. 999 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1000 n, events, logs, err := bc.insertChain(chain) 1001 bc.PostChainEvents(events, logs) 1002 return n, err 1003 } 1004 1005 // insertChain will execute the actual chain insertion and event aggregation. The 1006 // only reason this method exists as a separate one is to make locking cleaner 1007 // with deferred statements. 
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		err := <-results
		if err == nil {
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
			// is exceeded, the chain is discarded and processed at a later time if given.
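			// Blocks inside the allowance are parked in the futureBlocks cache and retried
			// by procFutureBlocks, which the update loop further down triggers every five seconds.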
1078 max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) 1079 if block.Time().Cmp(max) > 0 { 1080 return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max) 1081 } 1082 bc.futureBlocks.Add(block.Hash(), block) 1083 stats.queued++ 1084 continue 1085 1086 case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()): 1087 bc.futureBlocks.Add(block.Hash(), block) 1088 stats.queued++ 1089 continue 1090 1091 case err == consensus.ErrPrunedAncestor: 1092 // Block competing with the canonical chain, store in the db, but don't process 1093 // until the competitor TD goes above the canonical TD 1094 localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64()) 1095 externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty()) 1096 if localTd.Cmp(externTd) > 0 { 1097 if err = bc.WriteBlockWithoutState(block, externTd); err != nil { 1098 return i, events, coalescedLogs, err 1099 } 1100 continue 1101 } 1102 // Competitor chain beat canonical, gather all blocks from the common ancestor 1103 var winner []*types.Block 1104 1105 parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 1106 for !bc.HasState(parent.Root()) { 1107 winner = append(winner, parent) 1108 parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) 1109 } 1110 for j := 0; j < len(winner)/2; j++ { 1111 winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j] 1112 } 1113 // Import all the pruned blocks to make the state available 1114 bc.chainmu.Unlock() 1115 _, evs, logs, err := bc.insertChain(winner) 1116 bc.chainmu.Lock() 1117 events, coalescedLogs = evs, logs 1118 1119 if err != nil { 1120 return i, events, coalescedLogs, err 1121 } 1122 1123 case err != nil: 1124 bc.reportBlock(block, nil, err) 1125 return i, events, coalescedLogs, err 1126 } 1127 // Create a new statedb using the parent block and report an 1128 // error if it fails. 1129 var parent *types.Block 1130 if i == 0 { 1131 parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 1132 } else { 1133 parent = chain[i-1] 1134 } 1135 state, err := state.New(parent.Root(), bc.stateCache) 1136 if err != nil { 1137 return i, events, coalescedLogs, err 1138 } 1139 // Process block using the parent state as reference point. 1140 receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig) 1141 if err != nil { 1142 bc.reportBlock(block, receipts, err) 1143 return i, events, coalescedLogs, err 1144 } 1145 // Validate the state using the default validator 1146 err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas) 1147 if err != nil { 1148 bc.reportBlock(block, receipts, err) 1149 return i, events, coalescedLogs, err 1150 } 1151 proctime := time.Since(bstart) 1152 1153 // Write the block to the chain and get the status. 1154 status, err := bc.WriteBlockWithState(block, receipts, state) 1155 if err != nil { 1156 return i, events, coalescedLogs, err 1157 } 1158 switch status { 1159 case CanonStatTy: 1160 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()), 1161 "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart))) 1162 1163 coalescedLogs = append(coalescedLogs, logs...) 
1164 blockInsertTimer.UpdateSince(bstart) 1165 events = append(events, ChainEvent{block, block.Hash(), logs}) 1166 lastCanon = block 1167 1168 // Only count canonical blocks for GC processing time 1169 bc.gcproc += proctime 1170 1171 case SideStatTy: 1172 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed", 1173 common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles())) 1174 1175 blockInsertTimer.UpdateSince(bstart) 1176 events = append(events, ChainSideEvent{block}) 1177 } 1178 stats.processed++ 1179 stats.usedGas += usedGas 1180 stats.report(chain, i, bc.stateCache.TrieDB().Size()) 1181 } 1182 // Append a single chain head event if we've progressed the chain 1183 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1184 events = append(events, ChainHeadEvent{lastCanon}) 1185 } 1186 return 0, events, coalescedLogs, nil 1187 } 1188 1189 // insertStats tracks and reports on block insertion. 1190 type insertStats struct { 1191 queued, processed, ignored int 1192 usedGas uint64 1193 lastIndex int 1194 startTime mclock.AbsTime 1195 } 1196 1197 // statsReportLimit is the time limit during import after which we always print 1198 // out progress. This avoids the user wondering what's going on. 1199 const statsReportLimit = 8 * time.Second 1200 1201 // report prints statistics if some number of blocks have been processed 1202 // or more than a few seconds have passed since the last message. 1203 func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) { 1204 // Fetch the timings for the batch 1205 var ( 1206 now = mclock.Now() 1207 elapsed = time.Duration(now) - time.Duration(st.startTime) 1208 ) 1209 // If we're at the last block of the batch or report period reached, log 1210 if index == len(chain)-1 || elapsed >= statsReportLimit { 1211 var ( 1212 end = chain[index] 1213 txs = countTransactions(chain[st.lastIndex : index+1]) 1214 ) 1215 context := []interface{}{ 1216 "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, 1217 "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), 1218 "number", end.Number(), "hash", end.Hash(), "cache", cache, 1219 } 1220 if st.queued > 0 { 1221 context = append(context, []interface{}{"queued", st.queued}...) 1222 } 1223 if st.ignored > 0 { 1224 context = append(context, []interface{}{"ignored", st.ignored}...) 1225 } 1226 log.Info("Imported new chain segment", context...) 1227 1228 *st = insertStats{startTime: now, lastIndex: index + 1} 1229 } 1230 } 1231 1232 func countTransactions(chain []*types.Block) (c int) { 1233 for _, b := range chain { 1234 c += len(b.Transactions()) 1235 } 1236 return c 1237 } 1238 1239 // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them 1240 // to be part of the new canonical chain and accumulates potential missing transactions and post an 1241 // event about them 1242 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 1243 var ( 1244 newChain types.Blocks 1245 oldChain types.Blocks 1246 commonBlock *types.Block 1247 deletedTxs types.Transactions 1248 deletedLogs []*types.Log 1249 // collectLogs collects the logs that were generated during the 1250 // processing of the block that corresponds with the given hash. 1251 // These logs are later announced as deleted. 
1252 collectLogs = func(h common.Hash) { 1253 // Coalesce logs and set 'Removed'. 1254 receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h)) 1255 for _, receipt := range receipts { 1256 for _, log := range receipt.Logs { 1257 del := *log 1258 del.Removed = true 1259 deletedLogs = append(deletedLogs, &del) 1260 } 1261 } 1262 } 1263 ) 1264 1265 // first reduce whoever is higher bound 1266 if oldBlock.NumberU64() > newBlock.NumberU64() { 1267 // reduce old chain 1268 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 1269 oldChain = append(oldChain, oldBlock) 1270 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1271 1272 collectLogs(oldBlock.Hash()) 1273 } 1274 } else { 1275 // reduce new chain and append new chain blocks for inserting later on 1276 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 1277 newChain = append(newChain, newBlock) 1278 } 1279 } 1280 if oldBlock == nil { 1281 return fmt.Errorf("Invalid old chain") 1282 } 1283 if newBlock == nil { 1284 return fmt.Errorf("Invalid new chain") 1285 } 1286 1287 for { 1288 if oldBlock.Hash() == newBlock.Hash() { 1289 commonBlock = oldBlock 1290 break 1291 } 1292 1293 oldChain = append(oldChain, oldBlock) 1294 newChain = append(newChain, newBlock) 1295 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1296 collectLogs(oldBlock.Hash()) 1297 1298 oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 1299 if oldBlock == nil { 1300 return fmt.Errorf("Invalid old chain") 1301 } 1302 if newBlock == nil { 1303 return fmt.Errorf("Invalid new chain") 1304 } 1305 } 1306 // Ensure the user sees large reorgs 1307 if len(oldChain) > 0 && len(newChain) > 0 { 1308 logFn := log.Debug 1309 if len(oldChain) > 63 { 1310 logFn = log.Warn 1311 } 1312 logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), 1313 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 1314 } else { 1315 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 1316 } 1317 // Insert the new chain, taking care of the proper incremental order 1318 var addedTxs types.Transactions 1319 for i := len(newChain) - 1; i >= 0; i-- { 1320 // insert the block in the canonical way, re-writing history 1321 bc.insert(newChain[i]) 1322 // write lookup entries for hash based transaction/receipt searches 1323 if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil { 1324 return err 1325 } 1326 addedTxs = append(addedTxs, newChain[i].Transactions()...) 
1327 } 1328 // calculate the difference between deleted and added transactions 1329 diff := types.TxDifference(deletedTxs, addedTxs) 1330 // When transactions get deleted from the database that means the 1331 // receipts that were created in the fork must also be deleted 1332 for _, tx := range diff { 1333 DeleteTxLookupEntry(bc.db, tx.Hash()) 1334 } 1335 if len(deletedLogs) > 0 { 1336 go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) 1337 } 1338 if len(oldChain) > 0 { 1339 go func() { 1340 for _, block := range oldChain { 1341 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1342 } 1343 }() 1344 } 1345 1346 return nil 1347 } 1348 1349 // PostChainEvents iterates over the events generated by a chain insertion and 1350 // posts them into the event feed. 1351 // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock. 1352 func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) { 1353 // post event logs for further processing 1354 if logs != nil { 1355 bc.logsFeed.Send(logs) 1356 } 1357 for _, event := range events { 1358 switch ev := event.(type) { 1359 case ChainEvent: 1360 bc.chainFeed.Send(ev) 1361 1362 case ChainHeadEvent: 1363 bc.chainHeadFeed.Send(ev) 1364 1365 case ChainSideEvent: 1366 bc.chainSideFeed.Send(ev) 1367 } 1368 } 1369 } 1370 1371 func (bc *BlockChain) update() { 1372 futureTimer := time.NewTicker(5 * time.Second) 1373 defer futureTimer.Stop() 1374 for { 1375 select { 1376 case <-futureTimer.C: 1377 bc.procFutureBlocks() 1378 case <-bc.quit: 1379 return 1380 } 1381 } 1382 } 1383 1384 // BadBlockArgs represents the entries in the list returned when bad blocks are queried. 1385 type BadBlockArgs struct { 1386 Hash common.Hash `json:"hash"` 1387 Header *types.Header `json:"header"` 1388 } 1389 1390 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1391 func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) { 1392 headers := make([]BadBlockArgs, 0, bc.badBlocks.Len()) 1393 for _, hash := range bc.badBlocks.Keys() { 1394 if hdr, exist := bc.badBlocks.Peek(hash); exist { 1395 header := hdr.(*types.Header) 1396 headers = append(headers, BadBlockArgs{header.Hash(), header}) 1397 } 1398 } 1399 return headers, nil 1400 } 1401 1402 // addBadBlock adds a bad block to the bad-block LRU cache 1403 func (bc *BlockChain) addBadBlock(block *types.Block) { 1404 bc.badBlocks.Add(block.Header().Hash(), block.Header()) 1405 } 1406 1407 // reportBlock logs a bad block error. 1408 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1409 bc.addBadBlock(block) 1410 1411 var receiptString string 1412 for _, receipt := range receipts { 1413 receiptString += fmt.Sprintf("\t%v\n", receipt) 1414 } 1415 log.Error(fmt.Sprintf(` 1416 ########## BAD BLOCK ######### 1417 Chain config: %v 1418 1419 Number: %v 1420 Hash: 0x%x 1421 %v 1422 1423 Error: %v 1424 ############################## 1425 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1426 } 1427 1428 // InsertHeaderChain attempts to insert the given header chain in to the local 1429 // chain, possibly creating a reorg. If an error is returned, it will return the 1430 // index number of the failing header as well an error describing what went wrong. 1431 // 1432 // The verify parameter can be used to fine tune whether nonce verification 1433 // should be done or not. 
The reason behind the optional check is because some 1434 // of the header retrieval mechanisms already need to verify nonces, as well as 1435 // because nonces can be verified sparsely, not needing to check each. 1436 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1437 start := time.Now() 1438 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1439 return i, err 1440 } 1441 1442 // Make sure only one thread manipulates the chain at once 1443 bc.chainmu.Lock() 1444 defer bc.chainmu.Unlock() 1445 1446 bc.wg.Add(1) 1447 defer bc.wg.Done() 1448 1449 whFunc := func(header *types.Header) error { 1450 bc.mu.Lock() 1451 defer bc.mu.Unlock() 1452 1453 _, err := bc.hc.WriteHeader(header) 1454 return err 1455 } 1456 1457 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1458 } 1459 1460 // writeHeader writes a header into the local chain, given that its parent is 1461 // already known. If the total difficulty of the newly inserted header becomes 1462 // greater than the current known TD, the canonical chain is re-routed. 1463 // 1464 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1465 // into the chain, as side effects caused by reorganisations cannot be emulated 1466 // without the real blocks. Hence, writing headers directly should only be done 1467 // in two scenarios: pure-header mode of operation (light clients), or properly 1468 // separated header/block phases (non-archive clients). 1469 func (bc *BlockChain) writeHeader(header *types.Header) error { 1470 bc.wg.Add(1) 1471 defer bc.wg.Done() 1472 1473 bc.mu.Lock() 1474 defer bc.mu.Unlock() 1475 1476 _, err := bc.hc.WriteHeader(header) 1477 return err 1478 } 1479 1480 // CurrentHeader retrieves the current head header of the canonical chain. The 1481 // header is retrieved from the HeaderChain's internal cache. 1482 func (bc *BlockChain) CurrentHeader() *types.Header { 1483 bc.mu.RLock() 1484 defer bc.mu.RUnlock() 1485 1486 return bc.hc.CurrentHeader() 1487 } 1488 1489 // GetTd retrieves a block's total difficulty in the canonical chain from the 1490 // database by hash and number, caching it if found. 1491 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1492 return bc.hc.GetTd(hash, number) 1493 } 1494 1495 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1496 // database by hash, caching it if found. 1497 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1498 return bc.hc.GetTdByHash(hash) 1499 } 1500 1501 // GetHeader retrieves a block header from the database by hash and number, 1502 // caching it if found. 1503 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1504 return bc.hc.GetHeader(hash, number) 1505 } 1506 1507 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1508 // found. 1509 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 1510 return bc.hc.GetHeaderByHash(hash) 1511 } 1512 1513 // HasHeader checks if a block header is present in the database or not, caching 1514 // it if present. 1515 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 1516 return bc.hc.HasHeader(hash, number) 1517 } 1518 1519 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 1520 // hash, fetching towards the genesis block. 
1521 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 1522 return bc.hc.GetBlockHashesFromHash(hash, max) 1523 } 1524 1525 // GetHeaderByNumber retrieves a block header from the database by number, 1526 // caching it (associated with its hash) if found. 1527 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1528 return bc.hc.GetHeaderByNumber(number) 1529 } 1530 1531 // Config retrieves the blockchain's chain configuration. 1532 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1533 1534 // Engine retrieves the blockchain's consensus engine. 1535 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1536 1537 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1538 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1539 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1540 } 1541 1542 // SubscribeChainEvent registers a subscription of ChainEvent. 1543 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1544 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1545 } 1546 1547 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1548 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1549 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1550 } 1551 1552 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1553 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1554 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1555 } 1556 1557 // SubscribeLogsEvent registers a subscription of []*types.Log. 1558 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 1559 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 1560 }
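// A compact usage sketch tying the calls above together; the chain value continues
// the construction sketch in the NewBlockChain documentation, and the blocks slice
// and channel size are assumptions of the sketch (errors elided for brevity):
//
//	heads := make(chan ChainHeadEvent, 16)
//	sub := chain.SubscribeChainHeadEvent(heads)
//	defer sub.Unsubscribe()
//
//	if n, err := chain.InsertChain(blocks); err != nil {
//		log.Error("Import failed", "index", n, "err", err)
//	}
//	<-heads // a ChainHeadEvent is posted via PostChainEvents after a successful import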