github.com/samgwo/go-ethereum@v1.8.2-0.20180302101319-49bcb5fbd55e/core/blockchain.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package core implements the Ethereum consensus protocol. 18 package core 19 20 import ( 21 "errors" 22 "fmt" 23 "io" 24 "math/big" 25 mrand "math/rand" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/ethereum/go-ethereum/common" 31 "github.com/ethereum/go-ethereum/common/mclock" 32 "github.com/ethereum/go-ethereum/consensus" 33 "github.com/ethereum/go-ethereum/core/state" 34 "github.com/ethereum/go-ethereum/core/types" 35 "github.com/ethereum/go-ethereum/core/vm" 36 "github.com/ethereum/go-ethereum/crypto" 37 "github.com/ethereum/go-ethereum/ethdb" 38 "github.com/ethereum/go-ethereum/event" 39 "github.com/ethereum/go-ethereum/log" 40 "github.com/ethereum/go-ethereum/metrics" 41 "github.com/ethereum/go-ethereum/params" 42 "github.com/ethereum/go-ethereum/rlp" 43 "github.com/ethereum/go-ethereum/trie" 44 "github.com/hashicorp/golang-lru" 45 "gopkg.in/karalabe/cookiejar.v2/collections/prque" 46 ) 47 48 var ( 49 blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) 50 51 ErrNoGenesis = errors.New("Genesis not found in chain") 52 ) 53 54 const ( 55 bodyCacheLimit = 256 56 blockCacheLimit = 256 57 maxFutureBlocks = 256 58 maxTimeFutureBlocks = 30 59 badBlockLimit = 10 60 triesInMemory = 128 61 62 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 63 BlockChainVersion = 3 64 ) 65 66 // CacheConfig contains the configuration values for the trie caching/pruning 67 // that's resident in a blockchain. 68 type CacheConfig struct { 69 Disabled bool // Whether to disable trie write caching (archive node) 70 TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk 71 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk 72 } 73 74 // BlockChain represents the canonical chain given a database with a genesis 75 // block. The Blockchain manages chain imports, reverts, chain reorganisations. 76 // 77 // Importing blocks in to the block chain happens according to the set of rules 78 // defined by the two stage Validator. Processing of blocks is done using the 79 // Processor which processes the included transaction. The validation of the state 80 // is done in the second part of the Validator. Failing results in aborting of 81 // the import. 82 // 83 // The BlockChain also helps in returning blocks from **any** chain included 84 // in the database as well as blocks that represents the canonical chain. 
// It is important to note that GetBlock can return any block and does not need to be
// included in the canonical chain, whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache   state.Database // State database to reuse between imports (contains state cache)
	bodyCache    *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	blockCache   *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be accessed atomically
	// procInterrupt must be accessed atomically
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks *lru.Cache // Bad block cache
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
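A typical construction is sketched below. This is illustrative only: openBlockChain is a hypothetical helper, the database and consensus engine are assumed to be supplied by the caller, and SetupGenesisBlock/DefaultGenesisBlock come from this package's genesis handling rather than from this file. A nil *CacheConfig selects the defaults set in NewBlockChain, while Disabled switches the chain into archive mode so every trie is flushed to disk.

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
)

// openBlockChain is a hypothetical helper: db and engine (e.g. LevelDB plus an
// ethash instance) are assumed to be constructed by the caller.
func openBlockChain(db ethdb.Database, engine consensus.Engine, archive bool) (*core.BlockChain, error) {
	// Write the genesis block if the database is empty; without one,
	// NewBlockChain fails with ErrNoGenesis.
	chainConfig, _, err := core.SetupGenesisBlock(db, core.DefaultGenesisBlock())
	if err != nil {
		return nil, err
	}
	var cache *core.CacheConfig // nil picks the defaults set in NewBlockChain
	if archive {
		cache = &core.CacheConfig{Disabled: true} // keep every historical state trie
	}
	return core.NewBlockChain(db, cache, chainConfig, engine, vm.Config{})
}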
136 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) { 137 if cacheConfig == nil { 138 cacheConfig = &CacheConfig{ 139 TrieNodeLimit: 256 * 1024 * 1024, 140 TrieTimeLimit: 5 * time.Minute, 141 } 142 } 143 bodyCache, _ := lru.New(bodyCacheLimit) 144 bodyRLPCache, _ := lru.New(bodyCacheLimit) 145 blockCache, _ := lru.New(blockCacheLimit) 146 futureBlocks, _ := lru.New(maxFutureBlocks) 147 badBlocks, _ := lru.New(badBlockLimit) 148 149 bc := &BlockChain{ 150 chainConfig: chainConfig, 151 cacheConfig: cacheConfig, 152 db: db, 153 triegc: prque.New(), 154 stateCache: state.NewDatabase(db), 155 quit: make(chan struct{}), 156 bodyCache: bodyCache, 157 bodyRLPCache: bodyRLPCache, 158 blockCache: blockCache, 159 futureBlocks: futureBlocks, 160 engine: engine, 161 vmConfig: vmConfig, 162 badBlocks: badBlocks, 163 } 164 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 165 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 166 167 var err error 168 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 169 if err != nil { 170 return nil, err 171 } 172 bc.genesisBlock = bc.GetBlockByNumber(0) 173 if bc.genesisBlock == nil { 174 return nil, ErrNoGenesis 175 } 176 if err := bc.loadLastState(); err != nil { 177 return nil, err 178 } 179 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 180 for hash := range BadHashes { 181 if header := bc.GetHeaderByHash(hash); header != nil { 182 // get the canonical block corresponding to the offending header's number 183 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 184 // make sure the headerByNumber (if present) is in our current canonical chain 185 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 186 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 187 bc.SetHead(header.Number.Uint64() - 1) 188 log.Error("Chain rewind was successful, resuming normal operation") 189 } 190 } 191 } 192 // Take ownership of this particular state 193 go bc.update() 194 return bc, nil 195 } 196 197 func (bc *BlockChain) getProcInterrupt() bool { 198 return atomic.LoadInt32(&bc.procInterrupt) == 1 199 } 200 201 // loadLastState loads the last known chain state from the database. This method 202 // assumes that the chain manager mutex is held. 
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := GetHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}

	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
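SetHead, defined next, is the administrative rewind primitive (the debug_setHead RPC is one caller). A hedged usage sketch follows; rewindTo is a hypothetical helper. Because the head can end up below the requested number when state or bodies are missing, the caller re-reads CurrentBlock rather than assuming the target was reached.

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/log"
)

// rewindTo is an illustrative helper, not part of this file.
func rewindTo(bc *core.BlockChain, target uint64) error {
	if err := bc.SetHead(target); err != nil {
		return err
	}
	// The rewind may have gone past target (missing state or bodies), so report
	// the head that was actually reached.
	head := bc.CurrentBlock()
	log.Info("Chain rewound", "requested", target, "head", head.NumberU64(), "hash", head.Hash())
	return nil
}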
264 func (bc *BlockChain) SetHead(head uint64) error { 265 log.Warn("Rewinding blockchain", "target", head) 266 267 bc.mu.Lock() 268 defer bc.mu.Unlock() 269 270 // Rewind the header chain, deleting all block bodies until then 271 delFn := func(hash common.Hash, num uint64) { 272 DeleteBody(bc.db, hash, num) 273 } 274 bc.hc.SetHead(head, delFn) 275 currentHeader := bc.hc.CurrentHeader() 276 277 // Clear out any stale content from the caches 278 bc.bodyCache.Purge() 279 bc.bodyRLPCache.Purge() 280 bc.blockCache.Purge() 281 bc.futureBlocks.Purge() 282 283 // Rewind the block chain, ensuring we don't end up with a stateless head block 284 if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() { 285 bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) 286 } 287 if currentBlock := bc.CurrentBlock(); currentBlock != nil { 288 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 289 // Rewound state missing, rolled back to before pivot, reset to genesis 290 bc.currentBlock.Store(bc.genesisBlock) 291 } 292 } 293 // Rewind the fast block in a simpleton way to the target head 294 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() { 295 bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())) 296 } 297 // If either blocks reached nil, reset to the genesis state 298 if currentBlock := bc.CurrentBlock(); currentBlock == nil { 299 bc.currentBlock.Store(bc.genesisBlock) 300 } 301 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil { 302 bc.currentFastBlock.Store(bc.genesisBlock) 303 } 304 currentBlock := bc.CurrentBlock() 305 currentFastBlock := bc.CurrentFastBlock() 306 if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil { 307 log.Crit("Failed to reset head full block", "err", err) 308 } 309 if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil { 310 log.Crit("Failed to reset head fast block", "err", err) 311 } 312 return bc.loadLastState() 313 } 314 315 // FastSyncCommitHead sets the current head block to the one defined by the hash 316 // irrelevant what the chain contents were prior. 317 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { 318 // Make sure that both the block as well at its state trie exists 319 block := bc.GetBlockByHash(hash) 320 if block == nil { 321 return fmt.Errorf("non existent block [%x…]", hash[:4]) 322 } 323 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil { 324 return err 325 } 326 // If all checks out, manually set the head block 327 bc.mu.Lock() 328 bc.currentBlock.Store(block) 329 bc.mu.Unlock() 330 331 log.Info("Committed new head block", "number", block.Number(), "hash", hash) 332 return nil 333 } 334 335 // GasLimit returns the gas limit of the current HEAD block. 336 func (bc *BlockChain) GasLimit() uint64 { 337 return bc.CurrentBlock().GasLimit() 338 } 339 340 // CurrentBlock retrieves the current head block of the canonical chain. The 341 // block is retrieved from the blockchain's internal cache. 342 func (bc *BlockChain) CurrentBlock() *types.Block { 343 return bc.currentBlock.Load().(*types.Block) 344 } 345 346 // CurrentFastBlock retrieves the current fast-sync head block of the canonical 347 // chain. The block is retrieved from the blockchain's internal cache. 
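As the struct comment notes, the fast-sync head may sit above the full head while bodies and receipts are imported ahead of the state. The sketch below (hypothetical helper names; the pivot hash is assumed to be chosen by the sync logic) reports the gap between the two heads and finally promotes the pivot block with FastSyncCommitHead once its state trie is on disk.

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/log"
)

// reportSyncGap shows the two heads tracked by BlockChain during a fast sync.
func reportSyncGap(bc *core.BlockChain) {
	full := bc.CurrentBlock()     // last block whose state was fully processed
	fast := bc.CurrentFastBlock() // last block with imported body and receipts
	log.Info("Sync status", "full", full.NumberU64(), "fast", fast.NumberU64(), "gaslimit", bc.GasLimit())
}

// commitPivot promotes a downloaded block to the full head at the end of a fast
// sync; FastSyncCommitHead verifies the block and its state trie exist first.
func commitPivot(bc *core.BlockChain, pivot common.Hash) error {
	return bc.FastSyncCommitHead(pivot)
}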
348 func (bc *BlockChain) CurrentFastBlock() *types.Block { 349 return bc.currentFastBlock.Load().(*types.Block) 350 } 351 352 // SetProcessor sets the processor required for making state modifications. 353 func (bc *BlockChain) SetProcessor(processor Processor) { 354 bc.procmu.Lock() 355 defer bc.procmu.Unlock() 356 bc.processor = processor 357 } 358 359 // SetValidator sets the validator which is used to validate incoming blocks. 360 func (bc *BlockChain) SetValidator(validator Validator) { 361 bc.procmu.Lock() 362 defer bc.procmu.Unlock() 363 bc.validator = validator 364 } 365 366 // Validator returns the current validator. 367 func (bc *BlockChain) Validator() Validator { 368 bc.procmu.RLock() 369 defer bc.procmu.RUnlock() 370 return bc.validator 371 } 372 373 // Processor returns the current processor. 374 func (bc *BlockChain) Processor() Processor { 375 bc.procmu.RLock() 376 defer bc.procmu.RUnlock() 377 return bc.processor 378 } 379 380 // State returns a new mutable state based on the current HEAD block. 381 func (bc *BlockChain) State() (*state.StateDB, error) { 382 return bc.StateAt(bc.CurrentBlock().Root()) 383 } 384 385 // StateAt returns a new mutable state based on a particular point in time. 386 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 387 return state.New(root, bc.stateCache) 388 } 389 390 // Reset purges the entire blockchain, restoring it to its genesis state. 391 func (bc *BlockChain) Reset() error { 392 return bc.ResetWithGenesisBlock(bc.genesisBlock) 393 } 394 395 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 396 // specified genesis state. 397 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 398 // Dump the entire block chain and purge the caches 399 if err := bc.SetHead(0); err != nil { 400 return err 401 } 402 bc.mu.Lock() 403 defer bc.mu.Unlock() 404 405 // Prepare the genesis block and reinitialise the chain 406 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 407 log.Crit("Failed to write genesis block TD", "err", err) 408 } 409 if err := WriteBlock(bc.db, genesis); err != nil { 410 log.Crit("Failed to write genesis block", "err", err) 411 } 412 bc.genesisBlock = genesis 413 bc.insert(bc.genesisBlock) 414 bc.currentBlock.Store(bc.genesisBlock) 415 bc.hc.SetGenesis(bc.genesisBlock.Header()) 416 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 417 bc.currentFastBlock.Store(bc.genesisBlock) 418 419 return nil 420 } 421 422 // repair tries to repair the current blockchain by rolling back the current block 423 // until one with associated state is found. This is needed to fix incomplete db 424 // writes caused either by crashes/power outages, or simply non-committed tries. 425 // 426 // This method only rolls back the current block. The current header and current 427 // fast block are left intact. 428 func (bc *BlockChain) repair(head **types.Block) error { 429 for { 430 // Abort if we've rewound to a head block that does have associated state 431 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 432 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 433 return nil 434 } 435 // Otherwise rewind one block and recheck state availability there 436 (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 437 } 438 } 439 440 // Export writes the active chain to the given writer. 
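Before the export helpers, a usage note on the state accessors above: State and StateAt return mutable StateDB instances backed by the shared stateCache, and reading through them leaves the chain untouched. A hedged sketch that queries an account balance at an older canonical block (on a pruning node the historical root may already be gone, so the error path matters):

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

// balanceAt is an illustrative helper: it resolves a historical state root via
// the canonical block number and reads a single account from it.
func balanceAt(bc *core.BlockChain, number uint64, addr common.Address) (*big.Int, error) {
	block := bc.GetBlockByNumber(number)
	if block == nil {
		return nil, fmt.Errorf("block %d is not in the canonical chain", number)
	}
	statedb, err := bc.StateAt(block.Root())
	if err != nil {
		return nil, err // state pruned or never synced
	}
	return statedb.GetBalance(addr), nil
}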
441 func (bc *BlockChain) Export(w io.Writer) error { 442 return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) 443 } 444 445 // ExportN writes a subset of the active chain to the given writer. 446 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 447 bc.mu.RLock() 448 defer bc.mu.RUnlock() 449 450 if first > last { 451 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 452 } 453 log.Info("Exporting batch of blocks", "count", last-first+1) 454 455 for nr := first; nr <= last; nr++ { 456 block := bc.GetBlockByNumber(nr) 457 if block == nil { 458 return fmt.Errorf("export failed on #%d: not found", nr) 459 } 460 461 if err := block.EncodeRLP(w); err != nil { 462 return err 463 } 464 } 465 466 return nil 467 } 468 469 // insert injects a new head block into the current block chain. This method 470 // assumes that the block is indeed a true head. It will also reset the head 471 // header and the head fast sync block to this very same block if they are older 472 // or if they are on a different side chain. 473 // 474 // Note, this function assumes that the `mu` mutex is held! 475 func (bc *BlockChain) insert(block *types.Block) { 476 // If the block is on a side chain or an unknown one, force other heads onto it too 477 updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 478 479 // Add the block to the canonical chain number scheme and mark as the head 480 if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil { 481 log.Crit("Failed to insert block number", "err", err) 482 } 483 if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil { 484 log.Crit("Failed to insert head block hash", "err", err) 485 } 486 bc.currentBlock.Store(block) 487 488 // If the block is better than our head or is on a different chain, force update heads 489 if updateHeads { 490 bc.hc.SetCurrentHeader(block.Header()) 491 492 if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil { 493 log.Crit("Failed to insert head fast block hash", "err", err) 494 } 495 bc.currentFastBlock.Store(block) 496 } 497 } 498 499 // Genesis retrieves the chain's genesis block. 500 func (bc *BlockChain) Genesis() *types.Block { 501 return bc.genesisBlock 502 } 503 504 // GetBody retrieves a block body (transactions and uncles) from the database by 505 // hash, caching it if found. 506 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 507 // Short circuit if the body's already in the cache, retrieve otherwise 508 if cached, ok := bc.bodyCache.Get(hash); ok { 509 body := cached.(*types.Body) 510 return body 511 } 512 body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash)) 513 if body == nil { 514 return nil 515 } 516 // Cache the found body for next time and return 517 bc.bodyCache.Add(hash, body) 518 return body 519 } 520 521 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 522 // caching it if found. 523 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 524 // Short circuit if the body's already in the cache, retrieve otherwise 525 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 526 return cached.(rlp.RawValue) 527 } 528 body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash)) 529 if len(body) == 0 { 530 return nil 531 } 532 // Cache the found body for next time and return 533 bc.bodyRLPCache.Add(hash, body) 534 return body 535 } 536 537 // HasBlock checks if a block is fully present in the database or not. 
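ExportN above streams RLP-encoded canonical blocks to any io.Writer, which is the primitive behind chain export in the CLI. A minimal sketch (plain file output, no compression; the helper name is illustrative):

import (
	"os"

	"github.com/ethereum/go-ethereum/core"
)

// exportRange writes the inclusive block range [first, last] to a file.
func exportRange(bc *core.BlockChain, path string, first, last uint64) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return bc.ExportN(f, first, last)
}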
538 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 539 if bc.blockCache.Contains(hash) { 540 return true 541 } 542 ok, _ := bc.db.Has(blockBodyKey(hash, number)) 543 return ok 544 } 545 546 // HasState checks if state trie is fully present in the database or not. 547 func (bc *BlockChain) HasState(hash common.Hash) bool { 548 _, err := bc.stateCache.OpenTrie(hash) 549 return err == nil 550 } 551 552 // HasBlockAndState checks if a block and associated state trie is fully present 553 // in the database or not, caching it if present. 554 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 555 // Check first that the block itself is known 556 block := bc.GetBlock(hash, number) 557 if block == nil { 558 return false 559 } 560 return bc.HasState(block.Root()) 561 } 562 563 // GetBlock retrieves a block from the database by hash and number, 564 // caching it if found. 565 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 566 // Short circuit if the block's already in the cache, retrieve otherwise 567 if block, ok := bc.blockCache.Get(hash); ok { 568 return block.(*types.Block) 569 } 570 block := GetBlock(bc.db, hash, number) 571 if block == nil { 572 return nil 573 } 574 // Cache the found block for next time and return 575 bc.blockCache.Add(block.Hash(), block) 576 return block 577 } 578 579 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 580 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 581 return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash)) 582 } 583 584 // GetBlockByNumber retrieves a block from the database by number, caching it 585 // (associated with its hash) if found. 586 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 587 hash := GetCanonicalHash(bc.db, number) 588 if hash == (common.Hash{}) { 589 return nil 590 } 591 return bc.GetBlock(hash, number) 592 } 593 594 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 595 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 596 return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash)) 597 } 598 599 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 600 // [deprecated by eth/62] 601 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 602 number := bc.hc.GetBlockNumber(hash) 603 for i := 0; i < n; i++ { 604 block := bc.GetBlock(hash, number) 605 if block == nil { 606 break 607 } 608 blocks = append(blocks, block) 609 hash = block.ParentHash() 610 number-- 611 } 612 return 613 } 614 615 // GetUnclesInChain retrieves all the uncles from a given block backwards until 616 // a specific distance is reached. 617 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 618 uncles := []*types.Header{} 619 for i := 0; block != nil && i < length; i++ { 620 uncles = append(uncles, block.Uncles()...) 621 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 622 } 623 return uncles 624 } 625 626 // TrieNode retrieves a blob of data associated with a trie node (or code hash) 627 // either from ephemeral in-memory cache, or from persistent storage. 628 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 629 return bc.stateCache.TrieDB().Node(hash) 630 } 631 632 // Stop stops the blockchain service. If any imports are currently in progress 633 // it will abort them using the procInterrupt. 
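The getters above consult small LRU caches (bodyCache, bodyRLPCache, blockCache) before hitting the database, so repeated lookups of recent blocks stay cheap. As an illustrative sketch, here is a helper that walks back over the most recent canonical blocks and counts their logs via GetReceiptsByHash (helper name and depth are assumptions):

import "github.com/ethereum/go-ethereum/core"

// countRecentLogs tallies the logs emitted in the last `depth` canonical blocks.
func countRecentLogs(bc *core.BlockChain, depth uint64) int {
	total := 0
	head := bc.CurrentBlock().NumberU64()
	for n := head; n > 0 && head-n < depth; n-- {
		block := bc.GetBlockByNumber(n)
		if block == nil {
			break
		}
		for _, receipt := range bc.GetReceiptsByHash(block.Hash()) {
			total += len(receipt.Logs)
		}
	}
	return total
}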
634 func (bc *BlockChain) Stop() { 635 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 636 return 637 } 638 // Unsubscribe all subscriptions registered from blockchain 639 bc.scope.Close() 640 close(bc.quit) 641 atomic.StoreInt32(&bc.procInterrupt, 1) 642 643 bc.wg.Wait() 644 645 // Ensure the state of a recent block is also stored to disk before exiting. 646 // We're writing three different states to catch different restart scenarios: 647 // - HEAD: So we don't need to reprocess any blocks in the general case 648 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 649 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 650 if !bc.cacheConfig.Disabled { 651 triedb := bc.stateCache.TrieDB() 652 653 for _, offset := range []uint64{0, 1, triesInMemory - 1} { 654 if number := bc.CurrentBlock().NumberU64(); number > offset { 655 recent := bc.GetBlockByNumber(number - offset) 656 657 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 658 if err := triedb.Commit(recent.Root(), true); err != nil { 659 log.Error("Failed to commit recent state trie", "err", err) 660 } 661 } 662 } 663 for !bc.triegc.Empty() { 664 triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{}) 665 } 666 if size := triedb.Size(); size != 0 { 667 log.Error("Dangling trie nodes after full cleanup") 668 } 669 } 670 log.Info("Blockchain manager stopped") 671 } 672 673 func (bc *BlockChain) procFutureBlocks() { 674 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 675 for _, hash := range bc.futureBlocks.Keys() { 676 if block, exist := bc.futureBlocks.Peek(hash); exist { 677 blocks = append(blocks, block.(*types.Block)) 678 } 679 } 680 if len(blocks) > 0 { 681 types.BlockBy(types.Number).Sort(blocks) 682 683 // Insert one by one as chain insertion needs contiguous ancestry between blocks 684 for i := range blocks { 685 bc.InsertChain(blocks[i : i+1]) 686 } 687 } 688 } 689 690 // WriteStatus status of write 691 type WriteStatus byte 692 693 const ( 694 NonStatTy WriteStatus = iota 695 CanonStatTy 696 SideStatTy 697 ) 698 699 // Rollback is designed to remove a chain of links from the database that aren't 700 // certain enough to be valid. 
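Before moving on to Rollback and the receipt-chain import, a note on shutdown: Stop, defined above, must run after every block source has stopped feeding the chain; it waits on the internal wait group and, on non-archive nodes, persists the HEAD, HEAD-1 and HEAD-127 tries so a restart does not have to re-execute up to 128 blocks. A hedged ordering sketch, with a hypothetical stopImporters callback:

import "github.com/ethereum/go-ethereum/core"

// shutdown quiesces block producers first, then stops the chain itself.
func shutdown(bc *core.BlockChain, stopImporters func()) {
	stopImporters() // hypothetical: stop the downloader, miner, fetcher, etc.
	bc.Stop()       // aborts in-flight imports and flushes recent tries to disk
}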
701 func (bc *BlockChain) Rollback(chain []common.Hash) { 702 bc.mu.Lock() 703 defer bc.mu.Unlock() 704 705 for i := len(chain) - 1; i >= 0; i-- { 706 hash := chain[i] 707 708 currentHeader := bc.hc.CurrentHeader() 709 if currentHeader.Hash() == hash { 710 bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)) 711 } 712 if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash { 713 newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) 714 bc.currentFastBlock.Store(newFastBlock) 715 WriteHeadFastBlockHash(bc.db, newFastBlock.Hash()) 716 } 717 if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash { 718 newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) 719 bc.currentBlock.Store(newBlock) 720 WriteHeadBlockHash(bc.db, newBlock.Hash()) 721 } 722 } 723 } 724 725 // SetReceiptsData computes all the non-consensus fields of the receipts 726 func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) { 727 signer := types.MakeSigner(config, block.Number()) 728 729 transactions, logIndex := block.Transactions(), uint(0) 730 731 for j := 0; j < len(receipts); j++ { 732 // The transaction hash can be retrieved from the transaction itself 733 receipts[j].TxHash = transactions[j].Hash() 734 735 // The contract address can be derived from the transaction itself 736 if transactions[j].To() == nil { 737 // Deriving the signer is expensive, only do if it's actually needed 738 from, _ := types.Sender(signer, transactions[j]) 739 receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) 740 } 741 // The used gas can be calculated based on previous receipts 742 if j == 0 { 743 receipts[j].GasUsed = receipts[j].CumulativeGasUsed 744 } else { 745 receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed 746 } 747 // The derived log fields can simply be set from the block and transaction 748 for k := 0; k < len(receipts[j].Logs); k++ { 749 receipts[j].Logs[k].BlockNumber = block.NumberU64() 750 receipts[j].Logs[k].BlockHash = block.Hash() 751 receipts[j].Logs[k].TxHash = receipts[j].TxHash 752 receipts[j].Logs[k].TxIndex = uint(j) 753 receipts[j].Logs[k].Index = logIndex 754 logIndex++ 755 } 756 } 757 } 758 759 // InsertReceiptChain attempts to complete an already existing header chain with 760 // transaction and receipt data. 
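InsertReceiptChain, next, relies on SetReceiptsData above to rebuild the receipt fields that are not stored on disk: per-transaction gas is the difference of consecutive CumulativeGasUsed values, contract addresses are re-derived for creation transactions, and log positions are renumbered. The cumulative-gas arithmetic in isolation, as a small illustrative function mirroring the loop above:

import "github.com/ethereum/go-ethereum/core/types"

// gasUsedPerTx recovers each transaction's gas usage from cumulative values,
// mirroring the arithmetic in SetReceiptsData.
func gasUsedPerTx(receipts types.Receipts) []uint64 {
	used := make([]uint64, len(receipts))
	for i, r := range receipts {
		if i == 0 {
			used[i] = r.CumulativeGasUsed
		} else {
			used[i] = r.CumulativeGasUsed - receipts[i-1].CumulativeGasUsed
		}
	}
	return used
}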
761 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 762 bc.wg.Add(1) 763 defer bc.wg.Done() 764 765 // Do a sanity check that the provided chain is actually ordered and linked 766 for i := 1; i < len(blockChain); i++ { 767 if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { 768 log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), 769 "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) 770 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), 771 blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) 772 } 773 } 774 775 var ( 776 stats = struct{ processed, ignored int32 }{} 777 start = time.Now() 778 bytes = 0 779 batch = bc.db.NewBatch() 780 ) 781 for i, block := range blockChain { 782 receipts := receiptChain[i] 783 // Short circuit insertion if shutting down or processing failed 784 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 785 return 0, nil 786 } 787 // Short circuit if the owner header is unknown 788 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 789 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 790 } 791 // Skip if the entire data is already known 792 if bc.HasBlock(block.Hash(), block.NumberU64()) { 793 stats.ignored++ 794 continue 795 } 796 // Compute all the non-consensus fields of the receipts 797 SetReceiptsData(bc.chainConfig, block, receipts) 798 // Write all the data out into the database 799 if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil { 800 return i, fmt.Errorf("failed to write block body: %v", err) 801 } 802 if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { 803 return i, fmt.Errorf("failed to write block receipts: %v", err) 804 } 805 if err := WriteTxLookupEntries(batch, block); err != nil { 806 return i, fmt.Errorf("failed to write lookup metadata: %v", err) 807 } 808 stats.processed++ 809 810 if batch.ValueSize() >= ethdb.IdealBatchSize { 811 if err := batch.Write(); err != nil { 812 return 0, err 813 } 814 bytes += batch.ValueSize() 815 batch.Reset() 816 } 817 } 818 if batch.ValueSize() > 0 { 819 bytes += batch.ValueSize() 820 if err := batch.Write(); err != nil { 821 return 0, err 822 } 823 } 824 825 // Update the head fast sync block if better 826 bc.mu.Lock() 827 head := blockChain[len(blockChain)-1] 828 if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case 829 currentFastBlock := bc.CurrentFastBlock() 830 if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { 831 if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil { 832 log.Crit("Failed to update head fast block hash", "err", err) 833 } 834 bc.currentFastBlock.Store(head) 835 } 836 } 837 bc.mu.Unlock() 838 839 log.Info("Imported new block receipts", 840 "count", stats.processed, 841 "elapsed", common.PrettyDuration(time.Since(start)), 842 "number", head.Number(), 843 "hash", head.Hash(), 844 "size", common.StorageSize(bytes), 845 "ignored", stats.ignored) 846 return 0, nil 847 } 848 849 var lastWrite uint64 850 851 // WriteBlockWithoutState writes only the block and 
its metadata to the database, 852 // but does not write any state. This is used to construct competing side forks 853 // up to the point where they exceed the canonical total difficulty. 854 func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) { 855 bc.wg.Add(1) 856 defer bc.wg.Done() 857 858 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { 859 return err 860 } 861 if err := WriteBlock(bc.db, block); err != nil { 862 return err 863 } 864 return nil 865 } 866 867 // WriteBlockWithState writes the block and all associated state to the database. 868 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) { 869 bc.wg.Add(1) 870 defer bc.wg.Done() 871 872 // Calculate the total difficulty of the block 873 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 874 if ptd == nil { 875 return NonStatTy, consensus.ErrUnknownAncestor 876 } 877 // Make sure no inconsistent state is leaked during insertion 878 bc.mu.Lock() 879 defer bc.mu.Unlock() 880 881 currentBlock := bc.CurrentBlock() 882 localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 883 externTd := new(big.Int).Add(block.Difficulty(), ptd) 884 885 // Irrelevant of the canonical status, write the block itself to the database 886 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { 887 return NonStatTy, err 888 } 889 // Write other block data using a batch. 890 batch := bc.db.NewBatch() 891 if err := WriteBlock(batch, block); err != nil { 892 return NonStatTy, err 893 } 894 root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) 895 if err != nil { 896 return NonStatTy, err 897 } 898 triedb := bc.stateCache.TrieDB() 899 900 // If we're running an archive node, always flush 901 if bc.cacheConfig.Disabled { 902 if err := triedb.Commit(root, false); err != nil { 903 return NonStatTy, err 904 } 905 } else { 906 // Full but not archive node, do proper garbage collection 907 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 908 bc.triegc.Push(root, -float32(block.NumberU64())) 909 910 if current := block.NumberU64(); current > triesInMemory { 911 // Find the next state trie we need to commit 912 header := bc.GetHeaderByNumber(current - triesInMemory) 913 chosen := header.Number.Uint64() 914 915 // Only write to disk if we exceeded our memory allowance *and* also have at 916 // least a given number of tries gapped. 917 var ( 918 size = triedb.Size() 919 limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024 920 ) 921 if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit { 922 // If we're exceeding limits but haven't reached a large enough memory gap, 923 // warn the user that the system is becoming unstable. 
924 if chosen < lastWrite+triesInMemory { 925 switch { 926 case size >= 2*limit: 927 log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory) 928 case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit: 929 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) 930 } 931 } 932 // If optimum or critical limits reached, write to disk 933 if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 934 triedb.Commit(header.Root, true) 935 lastWrite = chosen 936 bc.gcproc = 0 937 } 938 } 939 // Garbage collect anything below our required write retention 940 for !bc.triegc.Empty() { 941 root, number := bc.triegc.Pop() 942 if uint64(-number) > chosen { 943 bc.triegc.Push(root, number) 944 break 945 } 946 triedb.Dereference(root.(common.Hash), common.Hash{}) 947 } 948 } 949 } 950 if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { 951 return NonStatTy, err 952 } 953 // If the total difficulty is higher than our known, add it to the canonical chain 954 // Second clause in the if statement reduces the vulnerability to selfish mining. 955 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 956 reorg := externTd.Cmp(localTd) > 0 957 currentBlock = bc.CurrentBlock() 958 if !reorg && externTd.Cmp(localTd) == 0 { 959 // Split same-difficulty blocks by number, then at random 960 reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5) 961 } 962 if reorg { 963 // Reorganise the chain if the parent is not the head block 964 if block.ParentHash() != currentBlock.Hash() { 965 if err := bc.reorg(currentBlock, block); err != nil { 966 return NonStatTy, err 967 } 968 } 969 // Write the positional metadata for transaction and receipt lookups 970 if err := WriteTxLookupEntries(batch, block); err != nil { 971 return NonStatTy, err 972 } 973 // Write hash preimages 974 if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil { 975 return NonStatTy, err 976 } 977 status = CanonStatTy 978 } else { 979 status = SideStatTy 980 } 981 if err := batch.Write(); err != nil { 982 return NonStatTy, err 983 } 984 985 // Set new head. 986 if status == CanonStatTy { 987 bc.insert(block) 988 } 989 bc.futureBlocks.Remove(block.Hash()) 990 return status, nil 991 } 992 993 // InsertChain attempts to insert the given batch of blocks in to the canonical 994 // chain or, otherwise, create a fork. If an error is returned it will return 995 // the index number of the failing block as well an error describing what went 996 // wrong. 997 // 998 // After insertion is done, all accumulated events will be fired. 999 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1000 n, events, logs, err := bc.insertChain(chain) 1001 bc.PostChainEvents(events, logs) 1002 return n, err 1003 } 1004 1005 // insertChain will execute the actual chain insertion and event aggregation. The 1006 // only reason this method exists as a separate one is to make locking cleaner 1007 // with deferred statements. 
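InsertChain, declared just above, is the public entry point for full block import; insertChain below does the work under chainmu and returns the index of the offending block on failure. A hedged caller sketch (the blocks are assumed to arrive ordered and linked, e.g. from the downloader or from this package's chain generator):

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// importBlocks is an illustrative wrapper around InsertChain.
func importBlocks(bc *core.BlockChain, blocks types.Blocks) error {
	n, err := bc.InsertChain(blocks)
	if err != nil {
		if n < len(blocks) {
			// n reports which block in the batch the import failed on.
			log.Error("Block import failed", "index", n, "number", blocks[n].NumberU64(), "hash", blocks[n].Hash(), "err", err)
		}
		return err
	}
	return nil
}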
1008 func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) { 1009 // Do a sanity check that the provided chain is actually ordered and linked 1010 for i := 1; i < len(chain); i++ { 1011 if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { 1012 // Chain broke ancestry, log a messge (programming error) and skip insertion 1013 log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(), 1014 "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash()) 1015 1016 return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), 1017 chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) 1018 } 1019 } 1020 // Pre-checks passed, start the full block imports 1021 bc.wg.Add(1) 1022 defer bc.wg.Done() 1023 1024 bc.chainmu.Lock() 1025 defer bc.chainmu.Unlock() 1026 1027 // A queued approach to delivering events. This is generally 1028 // faster than direct delivery and requires much less mutex 1029 // acquiring. 1030 var ( 1031 stats = insertStats{startTime: mclock.Now()} 1032 events = make([]interface{}, 0, len(chain)) 1033 lastCanon *types.Block 1034 coalescedLogs []*types.Log 1035 ) 1036 // Start the parallel header verifier 1037 headers := make([]*types.Header, len(chain)) 1038 seals := make([]bool, len(chain)) 1039 1040 for i, block := range chain { 1041 headers[i] = block.Header() 1042 seals[i] = true 1043 } 1044 abort, results := bc.engine.VerifyHeaders(bc, headers, seals) 1045 defer close(abort) 1046 1047 // Iterate over the blocks and insert when the verifier permits 1048 for i, block := range chain { 1049 // If the chain is terminating, stop processing blocks 1050 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 1051 log.Debug("Premature abort during blocks processing") 1052 break 1053 } 1054 // If the header is a banned one, straight out abort 1055 if BadHashes[block.Hash()] { 1056 bc.reportBlock(block, nil, ErrBlacklistedHash) 1057 return i, events, coalescedLogs, ErrBlacklistedHash 1058 } 1059 // Wait for the block's verification to complete 1060 bstart := time.Now() 1061 1062 err := <-results 1063 if err == nil { 1064 err = bc.Validator().ValidateBody(block) 1065 } 1066 switch { 1067 case err == ErrKnownBlock: 1068 // Block and state both already known. However if the current block is below 1069 // this number we did a rollback and we should reimport it nonetheless. 1070 if bc.CurrentBlock().NumberU64() >= block.NumberU64() { 1071 stats.ignored++ 1072 continue 1073 } 1074 1075 case err == consensus.ErrFutureBlock: 1076 // Allow up to MaxFuture second in the future blocks. If this limit is exceeded 1077 // the chain is discarded and processed at a later time if given. 
1078 max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) 1079 if block.Time().Cmp(max) > 0 { 1080 return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max) 1081 } 1082 bc.futureBlocks.Add(block.Hash(), block) 1083 stats.queued++ 1084 continue 1085 1086 case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()): 1087 bc.futureBlocks.Add(block.Hash(), block) 1088 stats.queued++ 1089 continue 1090 1091 case err == consensus.ErrPrunedAncestor: 1092 // Block competing with the canonical chain, store in the db, but don't process 1093 // until the competitor TD goes above the canonical TD 1094 currentBlock := bc.CurrentBlock() 1095 localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 1096 externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty()) 1097 if localTd.Cmp(externTd) > 0 { 1098 if err = bc.WriteBlockWithoutState(block, externTd); err != nil { 1099 return i, events, coalescedLogs, err 1100 } 1101 continue 1102 } 1103 // Competitor chain beat canonical, gather all blocks from the common ancestor 1104 var winner []*types.Block 1105 1106 parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 1107 for !bc.HasState(parent.Root()) { 1108 winner = append(winner, parent) 1109 parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) 1110 } 1111 for j := 0; j < len(winner)/2; j++ { 1112 winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j] 1113 } 1114 // Import all the pruned blocks to make the state available 1115 bc.chainmu.Unlock() 1116 _, evs, logs, err := bc.insertChain(winner) 1117 bc.chainmu.Lock() 1118 events, coalescedLogs = evs, logs 1119 1120 if err != nil { 1121 return i, events, coalescedLogs, err 1122 } 1123 1124 case err != nil: 1125 bc.reportBlock(block, nil, err) 1126 return i, events, coalescedLogs, err 1127 } 1128 // Create a new statedb using the parent block and report an 1129 // error if it fails. 1130 var parent *types.Block 1131 if i == 0 { 1132 parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 1133 } else { 1134 parent = chain[i-1] 1135 } 1136 state, err := state.New(parent.Root(), bc.stateCache) 1137 if err != nil { 1138 return i, events, coalescedLogs, err 1139 } 1140 // Process block using the parent state as reference point. 1141 receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig) 1142 if err != nil { 1143 bc.reportBlock(block, receipts, err) 1144 return i, events, coalescedLogs, err 1145 } 1146 // Validate the state using the default validator 1147 err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas) 1148 if err != nil { 1149 bc.reportBlock(block, receipts, err) 1150 return i, events, coalescedLogs, err 1151 } 1152 proctime := time.Since(bstart) 1153 1154 // Write the block to the chain and get the status. 1155 status, err := bc.WriteBlockWithState(block, receipts, state) 1156 if err != nil { 1157 return i, events, coalescedLogs, err 1158 } 1159 switch status { 1160 case CanonStatTy: 1161 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()), 1162 "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart))) 1163 1164 coalescedLogs = append(coalescedLogs, logs...) 
1165 blockInsertTimer.UpdateSince(bstart) 1166 events = append(events, ChainEvent{block, block.Hash(), logs}) 1167 lastCanon = block 1168 1169 // Only count canonical blocks for GC processing time 1170 bc.gcproc += proctime 1171 1172 case SideStatTy: 1173 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed", 1174 common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles())) 1175 1176 blockInsertTimer.UpdateSince(bstart) 1177 events = append(events, ChainSideEvent{block}) 1178 } 1179 stats.processed++ 1180 stats.usedGas += usedGas 1181 stats.report(chain, i, bc.stateCache.TrieDB().Size()) 1182 } 1183 // Append a single chain head event if we've progressed the chain 1184 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1185 events = append(events, ChainHeadEvent{lastCanon}) 1186 } 1187 return 0, events, coalescedLogs, nil 1188 } 1189 1190 // insertStats tracks and reports on block insertion. 1191 type insertStats struct { 1192 queued, processed, ignored int 1193 usedGas uint64 1194 lastIndex int 1195 startTime mclock.AbsTime 1196 } 1197 1198 // statsReportLimit is the time limit during import after which we always print 1199 // out progress. This avoids the user wondering what's going on. 1200 const statsReportLimit = 8 * time.Second 1201 1202 // report prints statistics if some number of blocks have been processed 1203 // or more than a few seconds have passed since the last message. 1204 func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) { 1205 // Fetch the timings for the batch 1206 var ( 1207 now = mclock.Now() 1208 elapsed = time.Duration(now) - time.Duration(st.startTime) 1209 ) 1210 // If we're at the last block of the batch or report period reached, log 1211 if index == len(chain)-1 || elapsed >= statsReportLimit { 1212 var ( 1213 end = chain[index] 1214 txs = countTransactions(chain[st.lastIndex : index+1]) 1215 ) 1216 context := []interface{}{ 1217 "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, 1218 "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), 1219 "number", end.Number(), "hash", end.Hash(), "cache", cache, 1220 } 1221 if st.queued > 0 { 1222 context = append(context, []interface{}{"queued", st.queued}...) 1223 } 1224 if st.ignored > 0 { 1225 context = append(context, []interface{}{"ignored", st.ignored}...) 1226 } 1227 log.Info("Imported new chain segment", context...) 1228 1229 *st = insertStats{startTime: now, lastIndex: index + 1} 1230 } 1231 } 1232 1233 func countTransactions(chain []*types.Block) (c int) { 1234 for _, b := range chain { 1235 c += len(b.Transactions()) 1236 } 1237 return c 1238 } 1239 1240 // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them 1241 // to be part of the new canonical chain and accumulates potential missing transactions and post an 1242 // event about them 1243 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 1244 var ( 1245 newChain types.Blocks 1246 oldChain types.Blocks 1247 commonBlock *types.Block 1248 deletedTxs types.Transactions 1249 deletedLogs []*types.Log 1250 // collectLogs collects the logs that were generated during the 1251 // processing of the block that corresponds with the given hash. 1252 // These logs are later announced as deleted. 
1253 collectLogs = func(h common.Hash) { 1254 // Coalesce logs and set 'Removed'. 1255 receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h)) 1256 for _, receipt := range receipts { 1257 for _, log := range receipt.Logs { 1258 del := *log 1259 del.Removed = true 1260 deletedLogs = append(deletedLogs, &del) 1261 } 1262 } 1263 } 1264 ) 1265 1266 // first reduce whoever is higher bound 1267 if oldBlock.NumberU64() > newBlock.NumberU64() { 1268 // reduce old chain 1269 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 1270 oldChain = append(oldChain, oldBlock) 1271 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1272 1273 collectLogs(oldBlock.Hash()) 1274 } 1275 } else { 1276 // reduce new chain and append new chain blocks for inserting later on 1277 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 1278 newChain = append(newChain, newBlock) 1279 } 1280 } 1281 if oldBlock == nil { 1282 return fmt.Errorf("Invalid old chain") 1283 } 1284 if newBlock == nil { 1285 return fmt.Errorf("Invalid new chain") 1286 } 1287 1288 for { 1289 if oldBlock.Hash() == newBlock.Hash() { 1290 commonBlock = oldBlock 1291 break 1292 } 1293 1294 oldChain = append(oldChain, oldBlock) 1295 newChain = append(newChain, newBlock) 1296 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1297 collectLogs(oldBlock.Hash()) 1298 1299 oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 1300 if oldBlock == nil { 1301 return fmt.Errorf("Invalid old chain") 1302 } 1303 if newBlock == nil { 1304 return fmt.Errorf("Invalid new chain") 1305 } 1306 } 1307 // Ensure the user sees large reorgs 1308 if len(oldChain) > 0 && len(newChain) > 0 { 1309 logFn := log.Debug 1310 if len(oldChain) > 63 { 1311 logFn = log.Warn 1312 } 1313 logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), 1314 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 1315 } else { 1316 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 1317 } 1318 // Insert the new chain, taking care of the proper incremental order 1319 var addedTxs types.Transactions 1320 for i := len(newChain) - 1; i >= 0; i-- { 1321 // insert the block in the canonical way, re-writing history 1322 bc.insert(newChain[i]) 1323 // write lookup entries for hash based transaction/receipt searches 1324 if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil { 1325 return err 1326 } 1327 addedTxs = append(addedTxs, newChain[i].Transactions()...) 
1328 } 1329 // calculate the difference between deleted and added transactions 1330 diff := types.TxDifference(deletedTxs, addedTxs) 1331 // When transactions get deleted from the database that means the 1332 // receipts that were created in the fork must also be deleted 1333 for _, tx := range diff { 1334 DeleteTxLookupEntry(bc.db, tx.Hash()) 1335 } 1336 if len(deletedLogs) > 0 { 1337 go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) 1338 } 1339 if len(oldChain) > 0 { 1340 go func() { 1341 for _, block := range oldChain { 1342 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1343 } 1344 }() 1345 } 1346 1347 return nil 1348 } 1349 1350 // PostChainEvents iterates over the events generated by a chain insertion and 1351 // posts them into the event feed. 1352 // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock. 1353 func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) { 1354 // post event logs for further processing 1355 if logs != nil { 1356 bc.logsFeed.Send(logs) 1357 } 1358 for _, event := range events { 1359 switch ev := event.(type) { 1360 case ChainEvent: 1361 bc.chainFeed.Send(ev) 1362 1363 case ChainHeadEvent: 1364 bc.chainHeadFeed.Send(ev) 1365 1366 case ChainSideEvent: 1367 bc.chainSideFeed.Send(ev) 1368 } 1369 } 1370 } 1371 1372 func (bc *BlockChain) update() { 1373 futureTimer := time.NewTicker(5 * time.Second) 1374 defer futureTimer.Stop() 1375 for { 1376 select { 1377 case <-futureTimer.C: 1378 bc.procFutureBlocks() 1379 case <-bc.quit: 1380 return 1381 } 1382 } 1383 } 1384 1385 // BadBlockArgs represents the entries in the list returned when bad blocks are queried. 1386 type BadBlockArgs struct { 1387 Hash common.Hash `json:"hash"` 1388 Header *types.Header `json:"header"` 1389 } 1390 1391 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1392 func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) { 1393 headers := make([]BadBlockArgs, 0, bc.badBlocks.Len()) 1394 for _, hash := range bc.badBlocks.Keys() { 1395 if hdr, exist := bc.badBlocks.Peek(hash); exist { 1396 header := hdr.(*types.Header) 1397 headers = append(headers, BadBlockArgs{header.Hash(), header}) 1398 } 1399 } 1400 return headers, nil 1401 } 1402 1403 // addBadBlock adds a bad block to the bad-block LRU cache 1404 func (bc *BlockChain) addBadBlock(block *types.Block) { 1405 bc.badBlocks.Add(block.Header().Hash(), block.Header()) 1406 } 1407 1408 // reportBlock logs a bad block error. 1409 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1410 bc.addBadBlock(block) 1411 1412 var receiptString string 1413 for _, receipt := range receipts { 1414 receiptString += fmt.Sprintf("\t%v\n", receipt) 1415 } 1416 log.Error(fmt.Sprintf(` 1417 ########## BAD BLOCK ######### 1418 Chain config: %v 1419 1420 Number: %v 1421 Hash: 0x%x 1422 %v 1423 1424 Error: %v 1425 ############################## 1426 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1427 } 1428 1429 // InsertHeaderChain attempts to insert the given header chain in to the local 1430 // chain, possibly creating a reorg. If an error is returned, it will return the 1431 // index number of the failing header as well an error describing what went wrong. 1432 // 1433 // The verify parameter can be used to fine tune whether nonce verification 1434 // should be done or not. 
The reason behind the optional check is because some 1435 // of the header retrieval mechanisms already need to verify nonces, as well as 1436 // because nonces can be verified sparsely, not needing to check each. 1437 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1438 start := time.Now() 1439 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1440 return i, err 1441 } 1442 1443 // Make sure only one thread manipulates the chain at once 1444 bc.chainmu.Lock() 1445 defer bc.chainmu.Unlock() 1446 1447 bc.wg.Add(1) 1448 defer bc.wg.Done() 1449 1450 whFunc := func(header *types.Header) error { 1451 bc.mu.Lock() 1452 defer bc.mu.Unlock() 1453 1454 _, err := bc.hc.WriteHeader(header) 1455 return err 1456 } 1457 1458 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1459 } 1460 1461 // writeHeader writes a header into the local chain, given that its parent is 1462 // already known. If the total difficulty of the newly inserted header becomes 1463 // greater than the current known TD, the canonical chain is re-routed. 1464 // 1465 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1466 // into the chain, as side effects caused by reorganisations cannot be emulated 1467 // without the real blocks. Hence, writing headers directly should only be done 1468 // in two scenarios: pure-header mode of operation (light clients), or properly 1469 // separated header/block phases (non-archive clients). 1470 func (bc *BlockChain) writeHeader(header *types.Header) error { 1471 bc.wg.Add(1) 1472 defer bc.wg.Done() 1473 1474 bc.mu.Lock() 1475 defer bc.mu.Unlock() 1476 1477 _, err := bc.hc.WriteHeader(header) 1478 return err 1479 } 1480 1481 // CurrentHeader retrieves the current head header of the canonical chain. The 1482 // header is retrieved from the HeaderChain's internal cache. 1483 func (bc *BlockChain) CurrentHeader() *types.Header { 1484 return bc.hc.CurrentHeader() 1485 } 1486 1487 // GetTd retrieves a block's total difficulty in the canonical chain from the 1488 // database by hash and number, caching it if found. 1489 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1490 return bc.hc.GetTd(hash, number) 1491 } 1492 1493 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1494 // database by hash, caching it if found. 1495 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1496 return bc.hc.GetTdByHash(hash) 1497 } 1498 1499 // GetHeader retrieves a block header from the database by hash and number, 1500 // caching it if found. 1501 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1502 return bc.hc.GetHeader(hash, number) 1503 } 1504 1505 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1506 // found. 1507 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 1508 return bc.hc.GetHeaderByHash(hash) 1509 } 1510 1511 // HasHeader checks if a block header is present in the database or not, caching 1512 // it if present. 1513 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 1514 return bc.hc.HasHeader(hash, number) 1515 } 1516 1517 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 1518 // hash, fetching towards the genesis block. 
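One more usage note before the remaining delegating getters: InsertHeaderChain above is the header-only import used by light clients and by fast sync before bodies arrive; checkFreq tunes how sparsely seals are verified, as described in its comment. A hedged sketch with an arbitrary check frequency:

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// importHeaders is an illustrative helper: headers are assumed to be ordered and linked.
func importHeaders(bc *core.BlockChain, headers []*types.Header) error {
	n, err := bc.InsertHeaderChain(headers, 100) // verify roughly one seal per hundred headers
	if err != nil {
		if n < len(headers) {
			log.Error("Header import failed", "index", n, "number", headers[n].Number, "err", err)
		}
		return err
	}
	return nil
}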
1519 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 1520 return bc.hc.GetBlockHashesFromHash(hash, max) 1521 } 1522 1523 // GetHeaderByNumber retrieves a block header from the database by number, 1524 // caching it (associated with its hash) if found. 1525 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1526 return bc.hc.GetHeaderByNumber(number) 1527 } 1528 1529 // Config retrieves the blockchain's chain configuration. 1530 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1531 1532 // Engine retrieves the blockchain's consensus engine. 1533 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1534 1535 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1536 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1537 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1538 } 1539 1540 // SubscribeChainEvent registers a subscription of ChainEvent. 1541 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1542 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1543 } 1544 1545 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1546 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1547 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1548 } 1549 1550 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1551 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1552 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1553 } 1554 1555 // SubscribeLogsEvent registers a subscription of []*types.Log. 1556 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 1557 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 1558 }
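Finally, the Subscribe* methods expose the internal event feeds; every subscription is tracked by the SubscriptionScope, so they are all torn down when Stop closes the scope. Because the feed's Send blocks until each subscriber channel accepts the value, consumers should use a buffered channel and drain it promptly. A consumption sketch for head events (helper name and buffer size are assumptions):

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/log"
)

// watchHeads logs every new canonical head until the subscription is closed
// (for example when bc.Stop() closes the subscription scope).
func watchHeads(bc *core.BlockChain) {
	heads := make(chan core.ChainHeadEvent, 16) // buffered so the feed's Send is not stalled
	sub := bc.SubscribeChainHeadEvent(heads)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-heads:
			log.Info("New chain head", "number", ev.Block.NumberU64(), "hash", ev.Block.Hash())
		case <-sub.Err():
			return
		}
	}
}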