github.com/anthdm/go-ethereum@v1.8.4-0.20180412101906-60516c83b011/core/blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/hashicorp/golang-lru"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)

	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
	triesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion = 3
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled      bool          // Whether to disable trie write caching (archive node)
	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}

// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the state
// is done in the second part of the Validator. A failure results in the import
// being aborted.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain.
// It's important to note that GetBlock can return any block and does not need to be
// included in the canonical one, whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache   state.Database // State database to reuse between imports (contains state cache)
	bodyCache    *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	blockCache   *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be accessed atomically
	// procInterrupt must be accessed atomically
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks *lru.Cache // Bad block cache
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieNodeLimit: 256 * 1024 * 1024,
			TrieTimeLimit: 5 * time.Minute,
		}
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:  chainConfig,
		cacheConfig:  cacheConfig,
		db:           db,
		triegc:       prque.New(),
		stateCache:   state.NewDatabase(db),
		quit:         make(chan struct{}),
		bodyCache:    bodyCache,
		bodyRLPCache: bodyRLPCache,
		blockCache:   blockCache,
		futureBlocks: futureBlocks,
		engine:       engine,
		vmConfig:     vmConfig,
		badBlocks:    badBlocks,
	}
	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}

func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}
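
// exampleOpenChain is an illustrative sketch (not part of the original file) of how a
// caller typically wires up NewBlockChain. It assumes the genesis block has already been
// committed to db elsewhere (otherwise NewBlockChain returns ErrNoGenesis), that engine
// is any consensus.Engine supplied by the caller, and it uses params.MainnetChainConfig
// purely for illustration.
func exampleOpenChain(db ethdb.Database, engine consensus.Engine) (*BlockChain, error) {
	// A nil CacheConfig selects the defaults above: a 256MB in-memory trie cache flushed
	// at least every 5 minutes. Pass &CacheConfig{Disabled: true} for archive behaviour.
	chain, err := NewBlockChain(db, nil, params.MainnetChainConfig, engine, vm.Config{})
	if err != nil {
		return nil, err
	}
	// Callers are expected to Stop the chain on shutdown so cached tries get flushed.
	return chain, nil
}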

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := GetHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}

	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(hash common.Hash, num uint64) {
		DeleteBody(bc.db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either block reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()
	if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
		log.Crit("Failed to reset head full block", "err", err)
	}
	if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
		log.Crit("Failed to reset head fast block", "err", err)
	}
	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// irrespective of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exist
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
func (bc *BlockChain) SetValidator(validator Validator) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.validator = validator
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	if err := WriteBlock(bc.db, genesis); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
	}
}
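
// exampleBalanceAt is an illustrative sketch (not part of the original file) showing how
// State and StateAt are typically used: open a read-only view of the state at the current
// head and query an account from it.
func exampleBalanceAt(bc *BlockChain, addr common.Address) (*big.Int, error) {
	statedb, err := bc.State() // equivalent to bc.StateAt(bc.CurrentBlock().Root())
	if err != nil {
		return nil, err // the head state has been pruned or is otherwise unavailable
	}
	return statedb.GetBalance(addr), nil
}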

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}

		if err := block.EncodeRLP(w); err != nil {
			return err
		}
	}

	return nil
}

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
		log.Crit("Failed to insert block number", "err", err)
	}
	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
		log.Crit("Failed to insert head block hash", "err", err)
	}
	bc.currentBlock.Store(block)

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())

		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
			log.Crit("Failed to insert head fast block hash", "err", err)
		}
		bc.currentFastBlock.Store(block)
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash))
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}
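
// exampleExportRange is an illustrative sketch (not part of the original file) of the
// Export/ExportN API above: blocks [first, last] are RLP-encoded to any io.Writer the
// caller supplies (a file, a compressor, a network connection, ...). Export is simply
// the special case covering block 0 through the current head.
func exampleExportRange(bc *BlockChain, w io.Writer, first, last uint64) error {
	if head := bc.CurrentBlock().NumberU64(); last > head {
		last = head // clamp to the locally known head
	}
	return bc.ExportN(w, first, last)
}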

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	ok, _ := bc.db.Has(blockBodyKey(hash, number))
	return ok
}

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := GetBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := GetCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash))
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
		}
		if size := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status of a block write operation.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)
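
// exampleWriteStatusString is an illustrative sketch (not part of the original file)
// spelling out how the WriteStatus values are used by WriteBlockWithState below:
// CanonStatTy means the block became part of the canonical chain, SideStatTy means it
// was stored on a side fork, and NonStatTy typically accompanies an error.
func exampleWriteStatusString(status WriteStatus) string {
	switch status {
	case CanonStatTy:
		return "canonical"
	case SideStatTy:
		return "side chain"
	default:
		return "none"
	}
}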

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}
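
// examplePerTxGas is an illustrative sketch (not part of the original file) of the
// cumulative-gas arithmetic used by SetReceiptsData above: each receipt only stores the
// cumulative gas used up to and including its transaction, so the per-transaction figure
// is the difference between consecutive receipts.
func examplePerTxGas(receipts types.Receipts) []uint64 {
	gas := make([]uint64, len(receipts))
	for i, receipt := range receipts {
		if i == 0 {
			gas[i] = receipt.CumulativeGasUsed
		} else {
			gas[i] = receipt.CumulativeGasUsed - receipts[i-1].CumulativeGasUsed
		}
	}
	return gas
}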

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
			return i, fmt.Errorf("failed to write block body: %v", err)
		}
		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
			return i, fmt.Errorf("failed to write block receipts: %v", err)
		}
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
		}
		stats.processed++

		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
				log.Crit("Failed to update head fast block hash", "err", err)
			}
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	log.Info("Imported new block receipts",
		"count", stats.processed,
		"elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(),
		"hash", head.Hash(),
		"size", common.StorageSize(bytes),
		"ignored", stats.ignored)
	return 0, nil
}
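
// exampleBatchedPut is an illustrative sketch (not part of the original file) of the
// write-batching pattern used by InsertReceiptChain above: accumulate writes in a batch
// and flush whenever it grows past ethdb.IdealBatchSize to bound memory usage. The keys
// and values are arbitrary placeholders supplied by the caller and assumed equal length.
func exampleBatchedPut(db ethdb.Database, keys, values [][]byte) error {
	batch := db.NewBatch()
	for i := range keys {
		if err := batch.Put(keys[i], values[i]); err != nil {
			return err
		}
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return batch.Write() // flush whatever is left
}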

var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	if err := WriteBlock(bc.db, block); err != nil {
		return err
	}
	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrespective of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	if err := WriteBlock(batch, block); err != nil {
		return NonStatTy, err
	}
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -float32(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// Find the next state trie we need to commit
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()

			// Only write to disk if we exceeded our memory allowance *and* also have at
			// least a given number of tries gapped.
			var (
				size  = triedb.Size()
				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory {
					switch {
					case size >= 2*limit:
						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
				}
				// If optimum or critical limits reached, write to disk
				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash), common.Hash{})
			}
		}
	}
	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
		return NonStatTy, err
	}
	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then at random
		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction and receipt lookups
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return NonStatTy, err
		}
		// Write hash preimages
		if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
			return NonStatTy, err
		}
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned, it will return
// the index number of the failing block as well as an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	n, events, logs, err := bc.insertChain(chain)
	bc.PostChainEvents(events, logs)
	return n, err
}
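
// exampleCanonPreference is an illustrative sketch (not part of the original file) of the
// fork-choice rule applied in WriteBlockWithState above: a block becomes canonical when
// its total difficulty exceeds the local head's, and ties are broken by preferring the
// lower block number, then by a coin flip to blunt selfish-mining strategies.
func exampleCanonPreference(localTd, externTd *big.Int, localNumber, externNumber uint64) bool {
	if cmp := externTd.Cmp(localTd); cmp > 0 {
		return true
	} else if cmp < 0 {
		return false
	}
	return externNumber < localNumber || (externNumber == localNumber && mrand.Float64() < 0.5)
}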

// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		err := <-results
		if err == nil {
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
			// is exceeded the chain is discarded and processed at a later time if given.
			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
			if block.Time().Cmp(max) > 0 {
				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
			}
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrPrunedAncestor:
			// Block competing with the canonical chain, store in the db, but don't process
			// until the competitor TD goes above the canonical TD
			currentBlock := bc.CurrentBlock()
			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
			if localTd.Cmp(externTd) > 0 {
				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
					return i, events, coalescedLogs, err
				}
				continue
			}
			// Competitor chain beat canonical, gather all blocks from the common ancestor
			var winner []*types.Block

			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
			for !bc.HasState(parent.Root()) {
				winner = append(winner, parent)
				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
			}
			for j := 0; j < len(winner)/2; j++ {
				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
			}
			// Import all the pruned blocks to make the state available
			bc.chainmu.Unlock()
			_, evs, logs, err := bc.insertChain(winner)
			bc.chainmu.Lock()
			events, coalescedLogs = evs, logs

			if err != nil {
				return i, events, coalescedLogs, err
			}

		case err != nil:
			bc.reportBlock(block, nil, err)
			return i, events, coalescedLogs, err
		}
		// Create a new statedb using the parent block and report an
		// error if it fails.
		var parent *types.Block
		if i == 0 {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		} else {
			parent = chain[i-1]
		}
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		proctime := time.Since(bstart)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))

			coalescedLogs = append(coalescedLogs, logs...)
			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))

			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas
		stats.report(chain, i, bc.stateCache.TrieDB().Size())
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return 0, events, coalescedLogs, nil
}

// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int
	usedGas                    uint64
	lastIndex                  int
	startTime                  mclock.AbsTime
}

// statsReportLimit is the time limit during import after which we always print
// out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = time.Duration(now) - time.Duration(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		var (
			end = chain[index]
			txs = countTransactions(chain[st.lastIndex : index+1])
		)
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(), "cache", cache,
		}
		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		log.Info("Imported new chain segment", context...)

		*st = insertStats{startTime: now, lastIndex: index + 1}
	}
}

func countTransactions(chain []*types.Block) (c int) {
	for _, b := range chain {
		c += len(b.Transactions())
	}
	return c
}

// reorg takes two blocks, an old chain and a new chain, and will reconstruct the
// blocks and insert them to be part of the new canonical chain, accumulating
// potential missing transactions and posting an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block
		deletedTxs  types.Transactions
		deletedLogs []*types.Log
		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(h common.Hash) {
			// Coalesce logs and set 'Removed'.
			receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)

	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

			collectLogs(oldBlock.Hash())
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}

		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())

		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])
		// write lookup entries for hash based transaction/receipt searches
		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
			return err
		}
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		DeleteTxLookupEntry(bc.db, tx.Hash())
	}
	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}

	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
type BadBlockArgs struct {
	Hash   common.Hash   `json:"hash"`
	Header *types.Header `json:"header"`
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if hdr, exist := bc.badBlocks.Peek(hash); exist {
			header := hdr.(*types.Header)
			headers = append(headers, BadBlockArgs{header.Hash(), header})
		}
	}
	return headers, nil
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Header().Hash(), block.Header())
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)

	var receiptString string
	for _, receipt := range receipts {
		receiptString += fmt.Sprintf("\t%v\n", receipt)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v

Number: %v
Hash: 0x%x
%v

Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well as an error describing what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not.
// The reason the check is optional is that some of the header retrieval mechanisms
// already need to verify nonces, and also because nonces can be verified sparsely,
// without needing to check each one.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()

	whFunc := func(header *types.Header) error {
		bc.mu.Lock()
		defer bc.mu.Unlock()

		_, err := bc.hc.WriteHeader(header)
		return err
	}

	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}

// writeHeader writes a header into the local chain, given that its parent is
// already known. If the total difficulty of the newly inserted header becomes
// greater than the current known TD, the canonical chain is re-routed.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (bc *BlockChain) writeHeader(header *types.Header) error {
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.mu.Lock()
	defer bc.mu.Unlock()

	_, err := bc.hc.WriteHeader(header)
	return err
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
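
// exampleWatchHead is an illustrative sketch (not part of the original file) of the event
// subscriptions above: it listens for ChainHeadEvent notifications until the subscription
// is torn down (e.g. by bc.Stop closing the scope) and hands each new head to onHead,
// a callback supplied by the caller.
func exampleWatchHead(bc *BlockChain, onHead func(*types.Block)) {
	heads := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(heads)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-heads:
			onHead(ev.Block)
		case <-sub.Err():
			// subscription closed, stop watching
			return
		}
	}
}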