// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/NiluPlatform/go-nilu/common"
	"github.com/NiluPlatform/go-nilu/common/mclock"
	"github.com/NiluPlatform/go-nilu/consensus"
	"github.com/NiluPlatform/go-nilu/core/rawdb"
	"github.com/NiluPlatform/go-nilu/core/state"
	"github.com/NiluPlatform/go-nilu/core/types"
	"github.com/NiluPlatform/go-nilu/core/vm"
	"github.com/NiluPlatform/go-nilu/crypto"
	"github.com/NiluPlatform/go-nilu/ethdb"
	"github.com/NiluPlatform/go-nilu/event"
	"github.com/NiluPlatform/go-nilu/log"
	"github.com/NiluPlatform/go-nilu/metrics"
	"github.com/NiluPlatform/go-nilu/params"
	"github.com/NiluPlatform/go-nilu/rlp"
	"github.com/NiluPlatform/go-nilu/trie"
	"github.com/hashicorp/golang-lru"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	// blockInsertTimer tracks the time spent inserting blocks into the chain.
	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)

	// ErrNoGenesis is returned when the database does not contain a genesis
	// block, i.e. the chain has never been initialised.
	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit  = 256 // entries kept in bodyCache / bodyRLPCache
	blockCacheLimit = 256 // entries kept in blockCache
	maxFutureBlocks = 256 // entries kept in futureBlocks
	// maxTimeFutureBlocks bounds how far ahead a future block may be queued;
	// presumably a window in seconds over the wall clock — the consuming code
	// is not in this file, so confirm against the future-block check.
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10  // entries kept in badBlocks
	triesInMemory       = 128 // recent tries retained in memory before GC/flush

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion = 3
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled      bool          // Whether to disable trie write caching (archive node)
	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}

// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain            // Header chain backing the block chain
	rmLogsFeed    event.Feed              // Feed of logs removed during reorgs
	chainFeed     event.Feed              // Feed of canonical chain insertions
	chainSideFeed event.Feed              // Feed of side-chain (non-canonical) insertions
	chainHeadFeed event.Feed              // Feed of new chain head events
	logsFeed      event.Feed              // Feed of newly mined logs
	scope         event.SubscriptionScope // Tracks all subscriptions for bulk shutdown
	genesisBlock  *types.Block            // Block number zero of this chain

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain (*types.Block)
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache   state.Database // State database to reuse between imports (contains state cache)
	bodyCache    *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	blockCache   *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel, closed by Stop
	running int32         // 0 while running, 1 once stopped; accessed atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine // Consensus engine used for sealing/verification
	processor Processor        // block processor interface
	validator Validator        // block and state validator interface
	vmConfig  vm.Config        // EVM configuration passed to block processing

	badBlocks *lru.Cache // Bad block cache
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
137 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) { 138 if cacheConfig == nil { 139 cacheConfig = &CacheConfig{ 140 TrieNodeLimit: 256 * 1024 * 1024, 141 TrieTimeLimit: 5 * time.Minute, 142 } 143 } 144 bodyCache, _ := lru.New(bodyCacheLimit) 145 bodyRLPCache, _ := lru.New(bodyCacheLimit) 146 blockCache, _ := lru.New(blockCacheLimit) 147 futureBlocks, _ := lru.New(maxFutureBlocks) 148 badBlocks, _ := lru.New(badBlockLimit) 149 150 bc := &BlockChain{ 151 chainConfig: chainConfig, 152 cacheConfig: cacheConfig, 153 db: db, 154 triegc: prque.New(), 155 stateCache: state.NewDatabase(db), 156 quit: make(chan struct{}), 157 bodyCache: bodyCache, 158 bodyRLPCache: bodyRLPCache, 159 blockCache: blockCache, 160 futureBlocks: futureBlocks, 161 engine: engine, 162 vmConfig: vmConfig, 163 badBlocks: badBlocks, 164 } 165 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 166 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 167 168 var err error 169 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 170 if err != nil { 171 return nil, err 172 } 173 bc.genesisBlock = bc.GetBlockByNumber(0) 174 if bc.genesisBlock == nil { 175 return nil, ErrNoGenesis 176 } 177 if err := bc.loadLastState(); err != nil { 178 return nil, err 179 } 180 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 181 for hash := range BadHashes { 182 if header := bc.GetHeaderByHash(hash); header != nil { 183 // get the canonical block corresponding to the offending header's number 184 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 185 // make sure the headerByNumber (if present) is in our current canonical chain 186 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 187 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", 
header.ParentHash) 188 bc.SetHead(header.Number.Uint64() - 1) 189 log.Error("Chain rewind was successful, resuming normal operation") 190 } 191 } 192 } 193 // Take ownership of this particular state 194 go bc.update() 195 return bc, nil 196 } 197 198 func (bc *BlockChain) getProcInterrupt() bool { 199 return atomic.LoadInt32(&bc.procInterrupt) == 1 200 } 201 202 // loadLastState loads the last known chain state from the database. This method 203 // assumes that the chain manager mutex is held. 204 func (bc *BlockChain) loadLastState() error { 205 // Restore the last known head block 206 head := rawdb.ReadHeadBlockHash(bc.db) 207 if head == (common.Hash{}) { 208 // Corrupt or empty database, init from scratch 209 log.Warn("Empty database, resetting chain") 210 return bc.Reset() 211 } 212 // Make sure the entire head block is available 213 currentBlock := bc.GetBlockByHash(head) 214 if currentBlock == nil { 215 // Corrupt or empty database, init from scratch 216 log.Warn("Head block missing, resetting chain", "hash", head) 217 return bc.Reset() 218 } 219 // Make sure the state associated with the block is available 220 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 221 // Dangling block without a state associated, init from scratch 222 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 223 if err := bc.repair(¤tBlock); err != nil { 224 return err 225 } 226 } 227 // Everything seems to be fine, set as the head block 228 bc.currentBlock.Store(currentBlock) 229 230 // Restore the last known head header 231 currentHeader := currentBlock.Header() 232 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 233 if header := bc.GetHeaderByHash(head); header != nil { 234 currentHeader = header 235 } 236 } 237 bc.hc.SetCurrentHeader(currentHeader) 238 239 // Restore the last known head fast block 240 bc.currentFastBlock.Store(currentBlock) 241 if head := 
rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 242 if block := bc.GetBlockByHash(head); block != nil { 243 bc.currentFastBlock.Store(block) 244 } 245 } 246 247 // Issue a status log for the user 248 currentFastBlock := bc.CurrentFastBlock() 249 250 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 251 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 252 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 253 254 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd) 255 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd) 256 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd) 257 258 return nil 259 } 260 261 // SetHead rewinds the local chain to a new head. In the case of headers, everything 262 // above the new head will be deleted and the new one set. In the case of blocks 263 // though, the head may be further rewound if block bodies are missing (non-archive 264 // nodes after a fast sync). 
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(hash common.Hash, num uint64) {
		rawdb.DeleteBody(bc.db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	// Persist the (possibly further rewound) heads before reloading state.
	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well at its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
// NOTE(review): the type assertion panics if no head was ever stored; callers
// appear to rely on loadLastState/Reset always storing one first.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
func (bc *BlockChain) SetValidator(validator Validator) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.validator = validator
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there.
		// NOTE(review): GetBlock returning nil here would panic on the next
		// iteration; presumably the genesis state is always present — confirm.
		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}

		if err := block.EncodeRLP(w); err != nil {
			return err
		}
	}

	return nil
}

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
473 func (bc *BlockChain) insert(block *types.Block) { 474 // If the block is on a side chain or an unknown one, force other heads onto it too 475 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 476 477 // Add the block to the canonical chain number scheme and mark as the head 478 rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) 479 rawdb.WriteHeadBlockHash(bc.db, block.Hash()) 480 481 bc.currentBlock.Store(block) 482 483 // If the block is better than our head or is on a different chain, force update heads 484 if updateHeads { 485 bc.hc.SetCurrentHeader(block.Header()) 486 rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) 487 488 bc.currentFastBlock.Store(block) 489 } 490 } 491 492 // Genesis retrieves the chain's genesis block. 493 func (bc *BlockChain) Genesis() *types.Block { 494 return bc.genesisBlock 495 } 496 497 // GetBody retrieves a block body (transactions and uncles) from the database by 498 // hash, caching it if found. 499 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 500 // Short circuit if the body's already in the cache, retrieve otherwise 501 if cached, ok := bc.bodyCache.Get(hash); ok { 502 body := cached.(*types.Body) 503 return body 504 } 505 number := bc.hc.GetBlockNumber(hash) 506 if number == nil { 507 return nil 508 } 509 body := rawdb.ReadBody(bc.db, hash, *number) 510 if body == nil { 511 return nil 512 } 513 // Cache the found body for next time and return 514 bc.bodyCache.Add(hash, body) 515 return body 516 } 517 518 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 519 // caching it if found. 
520 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 521 // Short circuit if the body's already in the cache, retrieve otherwise 522 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 523 return cached.(rlp.RawValue) 524 } 525 number := bc.hc.GetBlockNumber(hash) 526 if number == nil { 527 return nil 528 } 529 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 530 if len(body) == 0 { 531 return nil 532 } 533 // Cache the found body for next time and return 534 bc.bodyRLPCache.Add(hash, body) 535 return body 536 } 537 538 // HasBlock checks if a block is fully present in the database or not. 539 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 540 if bc.blockCache.Contains(hash) { 541 return true 542 } 543 return rawdb.HasBody(bc.db, hash, number) 544 } 545 546 // HasState checks if state trie is fully present in the database or not. 547 func (bc *BlockChain) HasState(hash common.Hash) bool { 548 _, err := bc.stateCache.OpenTrie(hash) 549 return err == nil 550 } 551 552 // HasBlockAndState checks if a block and associated state trie is fully present 553 // in the database or not, caching it if present. 554 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 555 // Check first that the block itself is known 556 block := bc.GetBlock(hash, number) 557 if block == nil { 558 return false 559 } 560 return bc.HasState(block.Root()) 561 } 562 563 // GetBlock retrieves a block from the database by hash and number, 564 // caching it if found. 
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	return rawdb.ReadReceipts(bc.db, hash, *number)
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		// Walk one step towards the genesis for the next iteration.
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	// CAS on running makes Stop idempotent: only the first caller proceeds.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	// Wait for in-flight processing to drain before flushing state.
	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		// Release every trie still queued for garbage collection.
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
		}
		if size := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

// procFutureBlocks re-attempts insertion of all queued future blocks, in
// ascending block-number order.
func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus status of write
type WriteStatus byte

const (
	NonStatTy   WriteStatus = iota // not written
	CanonStatTy                    // written as part of the canonical chain
	SideStatTy                     // written as a side-chain block
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Walk the hashes newest-first, unwinding whichever heads point at them.
	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		// Flush the batch whenever it grows past the ideal size.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	// Flush any leftover data from the final partial batch.
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	log.Info("Imported new block receipts",
		"count", stats.processed,
		"elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(),
		"hash", head.Hash(),
		"size", common.StorageSize(bytes),
		"ignored", stats.ignored)
	return 0, nil
}

// lastWrite tracks the block number of the last flushed trie (used by the
// pruning logic further down in this file).
var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any
// state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	// Register the write with bc.wg so pending writes can be awaited
	// (same pattern as every other writer in this file).
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Persist the caller-supplied total difficulty first, then the block itself.
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
//
// It derives the block's total difficulty from its parent, persists the block,
// receipts and committed state (flushing or garbage collecting tries depending
// on the cache configuration), and returns whether the block became part of
// the canonical chain (CanonStatTy) or a side fork (SideStatTy). bc.mu is held
// for the whole operation so no inconsistent state is observable mid-insert.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		// Parent TD unknown: we cannot chain this block onto anything we have.
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteBlock(batch, block)

	// Commit the mutated state; the deletion flag follows the chain's EIP-158
	// activation at this block number.
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		// Priorities are negated block numbers, so the oldest (lowest-numbered)
		// roots pop first from the priority queue below.
		bc.triegc.Push(root, -float32(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// Find the next state trie we need to commit
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()

			// Only write to disk if we exceeded our memory allowance *and* also have at
			// least a given number of tries gapped.
			var (
				size  = triedb.Size()
				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory {
					switch {
					case size >= 2*limit:
						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
				}
				// If optimum or critical limits reached, write to disk
				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					// NOTE(review): the error returned by Commit is ignored here —
					// confirm this is intentional before relying on durability.
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention.
			// Negating the popped priority recovers the block number; anything
			// newer than 'chosen' is pushed back and retained.
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash), common.Hash{})
			}
		}
	}
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then at random
		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	n, events, logs, err := bc.insertChain(chain)
	bc.PostChainEvents(events, logs)
	return n, err
}

// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier; every header gets full seal checking.
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		err := <-results
		if err == nil {
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
			// the chain is discarded and processed at a later time if given.
			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
			if block.Time().Cmp(max) > 0 {
				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
			}
			// Queue for retry by the background future-block processor.
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
			// Parent is itself queued as a future block; queue this one behind it.
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrPrunedAncestor:
			// Block competing with the canonical chain, store in the db, but don't process
			// until the competitor TD goes above the canonical TD
			currentBlock := bc.CurrentBlock()
			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
			if localTd.Cmp(externTd) > 0 {
				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
					return i, events, coalescedLogs, err
				}
				continue
			}
			// Competitor chain beat canonical, gather all blocks from the common ancestor
			// (walk back until a block whose state is still present, collecting in
			// reverse, then flip into ascending order).
			var winner []*types.Block

			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
			for !bc.HasState(parent.Root()) {
				winner = append(winner, parent)
				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
			}
			for j := 0; j < len(winner)/2; j++ {
				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
			}
			// Import all the pruned blocks to make the state available.
			// chainmu is released around the recursive call to avoid self-deadlock.
			bc.chainmu.Unlock()
			_, evs, logs, err := bc.insertChain(winner)
			bc.chainmu.Lock()
			// NOTE(review): events/coalescedLogs are overwritten (not appended) with
			// the recursive results — confirm this is the intended aggregation.
			events, coalescedLogs = evs, logs

			if err != nil {
				return i, events, coalescedLogs, err
			}

		case err != nil:
			bc.reportBlock(block, nil, err)
			return i, events, coalescedLogs, err
		}
		// Create a new statedb using the parent block and report an
		// error if it fails.
		var parent *types.Block
		if i == 0 {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		} else {
			parent = chain[i-1]
		}
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		proctime := time.Since(bstart)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))

			coalescedLogs = append(coalescedLogs, logs...)
			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))

			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas
		stats.report(chain, i, bc.stateCache.TrieDB().Size())
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return 0, events, coalescedLogs, nil
}

// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int            // block counters per outcome within the current report window
	usedGas                    uint64         // cumulative gas consumed by processed blocks
	lastIndex                  int            // chain index just past the last reported block
	startTime                  mclock.AbsTime // start of the current report window
}

// statsReportLimit is the time limit during import after which we always print
// out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = time.Duration(now) - time.Duration(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		var (
			end = chain[index]
			txs = countTransactions(chain[st.lastIndex : index+1])
		)
		// mgas = total gas in millions; mgasps = million gas per second
		// (elapsed is in nanoseconds, hence the *1000 factor).
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(), "cache", cache,
		}
		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		log.Info("Imported new chain segment", context...)

		// Reset the window so the next report covers only fresh blocks.
		*st = insertStats{startTime: now, lastIndex: index + 1}
	}
}

// countTransactions sums the transaction counts of all blocks in the slice.
func countTransactions(chain []*types.Block) (c int) {
	for _, b := range chain {
		c += len(b.Transactions())
	}
	return c
}

// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// to be part of the new canonical chain and accumulates potential missing transactions and post an
// event about them
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block
		deletedTxs  types.Transactions
		deletedLogs []*types.Log
		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(hash common.Hash) {
			// Coalesce logs and set 'Removed'.
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					// Copy the log so the stored receipt is not mutated.
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)

	// Phase 1: walk the longer side back until both chains are at equal height.
	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

			collectLogs(oldBlock.Hash())
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	// Phase 2: step both chains back in lockstep until the common ancestor.
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}

		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())

		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Phase 3: insert the new chain, taking care of the proper incremental order
	// (newChain was collected head-first, so iterate it in reverse).
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])
		// write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		rawdb.DeleteTxLookupEntry(bc.db, tx.Hash())
	}
	// Fire removal/side-chain events asynchronously so reorg (called under
	// bc.mu) does not block on slow subscribers.
	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}

	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	// Dispatch each event to the feed matching its concrete type.
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

// update periodically retries queued future blocks until the chain shuts down.
func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
1391 type BadBlockArgs struct { 1392 Hash common.Hash `json:"hash"` 1393 Header *types.Header `json:"header"` 1394 } 1395 1396 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1397 func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) { 1398 headers := make([]BadBlockArgs, 0, bc.badBlocks.Len()) 1399 for _, hash := range bc.badBlocks.Keys() { 1400 if hdr, exist := bc.badBlocks.Peek(hash); exist { 1401 header := hdr.(*types.Header) 1402 headers = append(headers, BadBlockArgs{header.Hash(), header}) 1403 } 1404 } 1405 return headers, nil 1406 } 1407 1408 // addBadBlock adds a bad block to the bad-block LRU cache 1409 func (bc *BlockChain) addBadBlock(block *types.Block) { 1410 bc.badBlocks.Add(block.Header().Hash(), block.Header()) 1411 } 1412 1413 // reportBlock logs a bad block error. 1414 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1415 bc.addBadBlock(block) 1416 1417 var receiptString string 1418 for _, receipt := range receipts { 1419 receiptString += fmt.Sprintf("\t%v\n", receipt) 1420 } 1421 log.Error(fmt.Sprintf(` 1422 ########## BAD BLOCK ######### 1423 Chain config: %v 1424 1425 Number: %v 1426 Hash: 0x%x 1427 %v 1428 1429 Error: %v 1430 ############################## 1431 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1432 } 1433 1434 // InsertHeaderChain attempts to insert the given header chain in to the local 1435 // chain, possibly creating a reorg. If an error is returned, it will return the 1436 // index number of the failing header as well an error describing what went wrong. 1437 // 1438 // The verify parameter can be used to fine tune whether nonce verification 1439 // should be done or not. The reason behind the optional check is because some 1440 // of the header retrieval mechanisms already need to verify nonces, as well as 1441 // because nonces can be verified sparsely, not needing to check each. 
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	// Validate up front; on failure, i is the index of the offending header.
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()

	// whFunc writes a single header under bc.mu; the header chain calls it
	// once per header during the insertion below.
	whFunc := func(header *types.Header) error {
		bc.mu.Lock()
		defer bc.mu.Unlock()

		_, err := bc.hc.WriteHeader(header)
		return err
	}

	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}

// writeHeader writes a header into the local chain, given that its parent is
// already known. If the total difficulty of the newly inserted header becomes
// greater than the current known TD, the canonical chain is re-routed.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (bc *BlockChain) writeHeader(header *types.Header) error {
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.mu.Lock()
	defer bc.mu.Unlock()

	_, err := bc.hc.WriteHeader(header)
	return err
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
// All accessors below are thin delegations to the embedded header chain (bc.hc).
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
1538 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1539 1540 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1541 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1542 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1543 } 1544 1545 // SubscribeChainEvent registers a subscription of ChainEvent. 1546 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1547 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1548 } 1549 1550 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1551 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1552 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1553 } 1554 1555 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1556 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1557 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1558 } 1559 1560 // SubscribeLogsEvent registers a subscription of []*types.Log. 1561 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 1562 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 1563 }