github.com/vantum/vantum@v0.0.0-20180815184342-fe37d5f7a990/core/blockchain.go

1 // Copyright 2014 The go-ethereum Authors
2 // This file is part of the go-ethereum library.
3 //
4 // The go-ethereum library is free software: you can redistribute it and/or modify
5 // it under the terms of the GNU Lesser General Public License as published by
6 // the Free Software Foundation, either version 3 of the License, or
7 // (at your option) any later version.
8 //
9 // The go-ethereum library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 // GNU Lesser General Public License for more details.
13 //
14 // You should have received a copy of the GNU Lesser General Public License
15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
16
17 // Package core implements the Ethereum consensus protocol.
18 package core
19
20 import (
21 "errors"
22 "fmt"
23 "io"
24 "math/big"
25 mrand "math/rand"
26 "sync"
27 "sync/atomic"
28 "time"
29
30 "github.com/vantum/vantum/common"
31 "github.com/vantum/vantum/common/mclock"
32 "github.com/vantum/vantum/consensus"
33 "github.com/vantum/vantum/core/state"
34 "github.com/vantum/vantum/core/types"
35 "github.com/vantum/vantum/core/vm"
36 "github.com/vantum/vantum/crypto"
37 "github.com/vantum/vantum/ethdb"
38 "github.com/vantum/vantum/event"
39 "github.com/vantum/vantum/log"
40 "github.com/vantum/vantum/metrics"
41 "github.com/vantum/vantum/params"
42 "github.com/vantum/vantum/rlp"
43 "github.com/vantum/vantum/trie"
44 "github.com/hashicorp/golang-lru"
45 "gopkg.in/karalabe/cookiejar.v2/collections/prque"
46 )
47
48 var (
49 blockInsertTimer = metrics.NewTimer("chain/inserts")
50
51 ErrNoGenesis = errors.New("Genesis not found in chain")
52 )
53
54 const (
55 bodyCacheLimit = 256
56 blockCacheLimit = 256
57 maxFutureBlocks = 256
58 maxTimeFutureBlocks = 30
59 badBlockLimit = 10
60 triesInMemory = 128
61
62 // BlockChainVersion ensures that an incompatible database forces a resync from scratch.
63 BlockChainVersion = 3
64 )
65
66 // CacheConfig contains the configuration values for the trie caching/pruning
67 // that's resident in a blockchain.
68 type CacheConfig struct {
69 Disabled bool // Whether to disable trie write caching (archive node)
70 TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk
71 TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
72 }
73
74 // BlockChain represents the canonical chain given a database with a genesis
75 // block. The BlockChain manages chain imports, reverts and chain reorganisations.
76 //
77 // Importing blocks into the block chain happens according to the set of rules
78 // defined by the two stage Validator. Processing of blocks is done using the
79 // Processor which processes the included transactions. The validation of the state
80 // is done in the second part of the Validator. Failures result in aborting
81 // the import.
82 //
83 // The BlockChain also helps in returning blocks from **any** chain included
84 // in the database as well as blocks that represent the canonical chain. It is
85 // important to note that GetBlock can return any block and does not need to be
86 // part of the canonical chain, whereas GetBlockByNumber always represents the
87 // canonical chain.
88 type BlockChain struct { 89 chainConfig *params.ChainConfig // Chain & network configuration 90 cacheConfig *CacheConfig // Cache configuration for pruning 91 92 db ethdb.Database // Low level persistent database to store final content in 93 triegc *prque.Prque // Priority queue mapping block numbers to tries to gc 94 gcproc time.Duration // Accumulates canonical block processing for trie dumping 95 96 hc *HeaderChain 97 rmLogsFeed event.Feed 98 chainFeed event.Feed 99 chainSideFeed event.Feed 100 chainHeadFeed event.Feed 101 logsFeed event.Feed 102 scope event.SubscriptionScope 103 genesisBlock *types.Block 104 105 mu sync.RWMutex // global mutex for locking chain operations 106 chainmu sync.RWMutex // blockchain insertion lock 107 procmu sync.RWMutex // block processor lock 108 109 checkpoint int // checkpoint counts towards the new checkpoint 110 currentBlock *types.Block // Current head of the block chain 111 currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!) 112 113 stateCache state.Database // State database to reuse between imports (contains state cache) 114 bodyCache *lru.Cache // Cache for the most recent block bodies 115 bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format 116 blockCache *lru.Cache // Cache for the most recent entire blocks 117 futureBlocks *lru.Cache // future blocks are blocks added for later processing 118 119 quit chan struct{} // blockchain quit channel 120 running int32 // running must be called atomically 121 // procInterrupt must be atomically called 122 procInterrupt int32 // interrupt signaler for block processing 123 wg sync.WaitGroup // chain processing wait group for shutting down 124 125 engine consensus.Engine 126 processor Processor // block processor interface 127 validator Validator // block and state validator interface 128 vmConfig vm.Config 129 130 badBlocks *lru.Cache // Bad block cache 131 } 132 133 // NewBlockChain returns a fully initialised block chain using information 134 // available in the database. It initialises the default Ethereum Validator and 135 // Processor. 
136 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) { 137 if cacheConfig == nil { 138 cacheConfig = &CacheConfig{ 139 TrieNodeLimit: 256 * 1024 * 1024, 140 TrieTimeLimit: 5 * time.Minute, 141 } 142 } 143 bodyCache, _ := lru.New(bodyCacheLimit) 144 bodyRLPCache, _ := lru.New(bodyCacheLimit) 145 blockCache, _ := lru.New(blockCacheLimit) 146 futureBlocks, _ := lru.New(maxFutureBlocks) 147 badBlocks, _ := lru.New(badBlockLimit) 148 149 bc := &BlockChain{ 150 chainConfig: chainConfig, 151 cacheConfig: cacheConfig, 152 db: db, 153 triegc: prque.New(), 154 stateCache: state.NewDatabase(db), 155 quit: make(chan struct{}), 156 bodyCache: bodyCache, 157 bodyRLPCache: bodyRLPCache, 158 blockCache: blockCache, 159 futureBlocks: futureBlocks, 160 engine: engine, 161 vmConfig: vmConfig, 162 badBlocks: badBlocks, 163 } 164 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 165 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 166 167 var err error 168 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 169 if err != nil { 170 return nil, err 171 } 172 bc.genesisBlock = bc.GetBlockByNumber(0) 173 if bc.genesisBlock == nil { 174 return nil, ErrNoGenesis 175 } 176 if err := bc.loadLastState(); err != nil { 177 return nil, err 178 } 179 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 180 for hash := range BadHashes { 181 if header := bc.GetHeaderByHash(hash); header != nil { 182 // get the canonical block corresponding to the offending header's number 183 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 184 // make sure the headerByNumber (if present) is in our current canonical chain 185 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 186 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 187 bc.SetHead(header.Number.Uint64() - 1) 188 log.Error("Chain rewind was successful, resuming normal operation") 189 } 190 } 191 } 192 // Take ownership of this particular state 193 go bc.update() 194 return bc, nil 195 } 196 197 func (bc *BlockChain) getProcInterrupt() bool { 198 return atomic.LoadInt32(&bc.procInterrupt) == 1 199 } 200 201 // loadLastState loads the last known chain state from the database. This method 202 // assumes that the chain manager mutex is held. 
203 func (bc *BlockChain) loadLastState() error {
204 // Restore the last known head block
205 head := GetHeadBlockHash(bc.db)
206 if head == (common.Hash{}) {
207 // Corrupt or empty database, init from scratch
208 log.Warn("Empty database, resetting chain")
209 return bc.Reset()
210 }
211 // Make sure the entire head block is available
212 currentBlock := bc.GetBlockByHash(head)
213 if currentBlock == nil {
214 // Corrupt or empty database, init from scratch
215 log.Warn("Head block missing, resetting chain", "hash", head)
216 return bc.Reset()
217 }
218 // Make sure the state associated with the block is available
219 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
220 // Dangling block without a state associated, init from scratch
221 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
222 if err := bc.repair(&currentBlock); err != nil {
223 return err
224 }
225 }
226 // Everything seems to be fine, set as the head block
227 bc.currentBlock = currentBlock
228
229 // Restore the last known head header
230 currentHeader := bc.currentBlock.Header()
231 if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
232 if header := bc.GetHeaderByHash(head); header != nil {
233 currentHeader = header
234 }
235 }
236 bc.hc.SetCurrentHeader(currentHeader)
237
238 // Restore the last known head fast block
239 bc.currentFastBlock = bc.currentBlock
240 if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
241 if block := bc.GetBlockByHash(head); block != nil {
242 bc.currentFastBlock = block
243 }
244 }
245
246 // Issue a status log for the user
247 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
248 blockTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
249 fastTd := bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64())
250
251 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
252 log.Info("Loaded most recent local full block", "number", bc.currentBlock.Number(), "hash", bc.currentBlock.Hash(), "td", blockTd)
253 log.Info("Loaded most recent local fast block", "number", bc.currentFastBlock.Number(), "hash", bc.currentFastBlock.Hash(), "td", fastTd)
254
255 return nil
256 }
257
258 // SetHead rewinds the local chain to a new head. In the case of headers, everything
259 // above the new head will be deleted and the new one set. In the case of blocks
260 // though, the head may be further rewound if block bodies are missing (non-archive
261 // nodes after a fast sync).
262 func (bc *BlockChain) SetHead(head uint64) error {
263 log.Warn("Rewinding blockchain", "target", head)
264
265 bc.mu.Lock()
266 defer bc.mu.Unlock()
267
268 // Rewind the header chain, deleting all block bodies until then
269 delFn := func(hash common.Hash, num uint64) {
270 DeleteBody(bc.db, hash, num)
271 }
272 bc.hc.SetHead(head, delFn)
273 currentHeader := bc.hc.CurrentHeader()
274
275 // Clear out any stale content from the caches
276 bc.bodyCache.Purge()
277 bc.bodyRLPCache.Purge()
278 bc.blockCache.Purge()
279 bc.futureBlocks.Purge()
280
281 // Rewind the block chain, ensuring we don't end up with a stateless head block
282 if bc.currentBlock != nil && currentHeader.Number.Uint64() < bc.currentBlock.NumberU64() {
283 bc.currentBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
284 }
285 if bc.currentBlock != nil {
286 if _, err := state.New(bc.currentBlock.Root(), bc.stateCache); err != nil {
287 // Rewound state missing, rolled back to before pivot, reset to genesis
288 bc.currentBlock = nil
289 }
290 }
291 // Rewind the fast block in a simpleton way to the target head
292 if bc.currentFastBlock != nil && currentHeader.Number.Uint64() < bc.currentFastBlock.NumberU64() {
293 bc.currentFastBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
294 }
295 // If either block reached nil, reset to the genesis state
296 if bc.currentBlock == nil {
297 bc.currentBlock = bc.genesisBlock
298 }
299 if bc.currentFastBlock == nil {
300 bc.currentFastBlock = bc.genesisBlock
301 }
302 if err := WriteHeadBlockHash(bc.db, bc.currentBlock.Hash()); err != nil {
303 log.Crit("Failed to reset head full block", "err", err)
304 }
305 if err := WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash()); err != nil {
306 log.Crit("Failed to reset head fast block", "err", err)
307 }
308 return bc.loadLastState()
309 }
310
311 // FastSyncCommitHead sets the current head block to the one defined by the hash,
312 // regardless of what the chain contents were prior.
313 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
314 // Make sure that both the block as well as its state trie exist
315 block := bc.GetBlockByHash(hash)
316 if block == nil {
317 return fmt.Errorf("non existent block [%x…]", hash[:4])
318 }
319 if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
320 return err
321 }
322 // If all checks out, manually set the head block
323 bc.mu.Lock()
324 bc.currentBlock = block
325 bc.mu.Unlock()
326
327 log.Info("Committed new head block", "number", block.Number(), "hash", hash)
328 return nil
329 }
330
331 // GasLimit returns the gas limit of the current HEAD block.
332 func (bc *BlockChain) GasLimit() uint64 {
333 bc.mu.RLock()
334 defer bc.mu.RUnlock()
335
336 return bc.currentBlock.GasLimit()
337 }
338
339 // CurrentBlock retrieves the current head block of the canonical chain. The
340 // block is retrieved from the blockchain's internal cache.
341 func (bc *BlockChain) CurrentBlock() *types.Block {
342 bc.mu.RLock()
343 defer bc.mu.RUnlock()
344
345 return bc.currentBlock
346 }
347
348 // CurrentFastBlock retrieves the current fast-sync head block of the canonical
349 // chain. The block is retrieved from the blockchain's internal cache.
350 func (bc *BlockChain) CurrentFastBlock() *types.Block {
351 bc.mu.RLock()
352 defer bc.mu.RUnlock()
353
354 return bc.currentFastBlock
355 }
356
357 // SetProcessor sets the processor required for making state modifications.
358 func (bc *BlockChain) SetProcessor(processor Processor) { 359 bc.procmu.Lock() 360 defer bc.procmu.Unlock() 361 bc.processor = processor 362 } 363 364 // SetValidator sets the validator which is used to validate incoming blocks. 365 func (bc *BlockChain) SetValidator(validator Validator) { 366 bc.procmu.Lock() 367 defer bc.procmu.Unlock() 368 bc.validator = validator 369 } 370 371 // Validator returns the current validator. 372 func (bc *BlockChain) Validator() Validator { 373 bc.procmu.RLock() 374 defer bc.procmu.RUnlock() 375 return bc.validator 376 } 377 378 // Processor returns the current processor. 379 func (bc *BlockChain) Processor() Processor { 380 bc.procmu.RLock() 381 defer bc.procmu.RUnlock() 382 return bc.processor 383 } 384 385 // State returns a new mutable state based on the current HEAD block. 386 func (bc *BlockChain) State() (*state.StateDB, error) { 387 return bc.StateAt(bc.CurrentBlock().Root()) 388 } 389 390 // StateAt returns a new mutable state based on a particular point in time. 391 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 392 return state.New(root, bc.stateCache) 393 } 394 395 // Reset purges the entire blockchain, restoring it to its genesis state. 396 func (bc *BlockChain) Reset() error { 397 return bc.ResetWithGenesisBlock(bc.genesisBlock) 398 } 399 400 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 401 // specified genesis state. 402 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 403 // Dump the entire block chain and purge the caches 404 if err := bc.SetHead(0); err != nil { 405 return err 406 } 407 bc.mu.Lock() 408 defer bc.mu.Unlock() 409 410 // Prepare the genesis block and reinitialise the chain 411 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 412 log.Crit("Failed to write genesis block TD", "err", err) 413 } 414 if err := WriteBlock(bc.db, genesis); err != nil { 415 log.Crit("Failed to write genesis block", "err", err) 416 } 417 bc.genesisBlock = genesis 418 bc.insert(bc.genesisBlock) 419 bc.currentBlock = bc.genesisBlock 420 bc.hc.SetGenesis(bc.genesisBlock.Header()) 421 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 422 bc.currentFastBlock = bc.genesisBlock 423 424 return nil 425 } 426 427 // repair tries to repair the current blockchain by rolling back the current block 428 // until one with associated state is found. This is needed to fix incomplete db 429 // writes caused either by crashes/power outages, or simply non-committed tries. 430 // 431 // This method only rolls back the current block. The current header and current 432 // fast block are left intact. 433 func (bc *BlockChain) repair(head **types.Block) error { 434 for { 435 // Abort if we've rewound to a head block that does have associated state 436 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 437 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 438 return nil 439 } 440 // Otherwise rewind one block and recheck state availability there 441 (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 442 } 443 } 444 445 // Export writes the active chain to the given writer. 446 func (bc *BlockChain) Export(w io.Writer) error { 447 return bc.ExportN(w, uint64(0), bc.currentBlock.NumberU64()) 448 } 449 450 // ExportN writes a subset of the active chain to the given writer. 
451 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 452 bc.mu.RLock() 453 defer bc.mu.RUnlock() 454 455 if first > last { 456 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 457 } 458 log.Info("Exporting batch of blocks", "count", last-first+1) 459 460 for nr := first; nr <= last; nr++ { 461 block := bc.GetBlockByNumber(nr) 462 if block == nil { 463 return fmt.Errorf("export failed on #%d: not found", nr) 464 } 465 466 if err := block.EncodeRLP(w); err != nil { 467 return err 468 } 469 } 470 471 return nil 472 } 473 474 // insert injects a new head block into the current block chain. This method 475 // assumes that the block is indeed a true head. It will also reset the head 476 // header and the head fast sync block to this very same block if they are older 477 // or if they are on a different side chain. 478 // 479 // Note, this function assumes that the `mu` mutex is held! 480 func (bc *BlockChain) insert(block *types.Block) { 481 // If the block is on a side chain or an unknown one, force other heads onto it too 482 updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 483 484 // Add the block to the canonical chain number scheme and mark as the head 485 if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil { 486 log.Crit("Failed to insert block number", "err", err) 487 } 488 if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil { 489 log.Crit("Failed to insert head block hash", "err", err) 490 } 491 bc.currentBlock = block 492 493 // If the block is better than our head or is on a different chain, force update heads 494 if updateHeads { 495 bc.hc.SetCurrentHeader(block.Header()) 496 497 if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil { 498 log.Crit("Failed to insert head fast block hash", "err", err) 499 } 500 bc.currentFastBlock = block 501 } 502 } 503 504 // Genesis retrieves the chain's genesis block. 505 func (bc *BlockChain) Genesis() *types.Block { 506 return bc.genesisBlock 507 } 508 509 // GetBody retrieves a block body (transactions and uncles) from the database by 510 // hash, caching it if found. 511 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 512 // Short circuit if the body's already in the cache, retrieve otherwise 513 if cached, ok := bc.bodyCache.Get(hash); ok { 514 body := cached.(*types.Body) 515 return body 516 } 517 body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash)) 518 if body == nil { 519 return nil 520 } 521 // Cache the found body for next time and return 522 bc.bodyCache.Add(hash, body) 523 return body 524 } 525 526 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 527 // caching it if found. 528 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 529 // Short circuit if the body's already in the cache, retrieve otherwise 530 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 531 return cached.(rlp.RawValue) 532 } 533 body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash)) 534 if len(body) == 0 { 535 return nil 536 } 537 // Cache the found body for next time and return 538 bc.bodyRLPCache.Add(hash, body) 539 return body 540 } 541 542 // HasBlock checks if a block is fully present in the database or not. 
543 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 544 if bc.blockCache.Contains(hash) { 545 return true 546 } 547 ok, _ := bc.db.Has(blockBodyKey(hash, number)) 548 return ok 549 } 550 551 // HasState checks if state trie is fully present in the database or not. 552 func (bc *BlockChain) HasState(hash common.Hash) bool { 553 _, err := bc.stateCache.OpenTrie(hash) 554 return err == nil 555 } 556 557 // HasBlockAndState checks if a block and associated state trie is fully present 558 // in the database or not, caching it if present. 559 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 560 // Check first that the block itself is known 561 block := bc.GetBlock(hash, number) 562 if block == nil { 563 return false 564 } 565 return bc.HasState(block.Root()) 566 } 567 568 // GetBlock retrieves a block from the database by hash and number, 569 // caching it if found. 570 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 571 // Short circuit if the block's already in the cache, retrieve otherwise 572 if block, ok := bc.blockCache.Get(hash); ok { 573 return block.(*types.Block) 574 } 575 block := GetBlock(bc.db, hash, number) 576 if block == nil { 577 return nil 578 } 579 // Cache the found block for next time and return 580 bc.blockCache.Add(block.Hash(), block) 581 return block 582 } 583 584 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 585 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 586 return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash)) 587 } 588 589 // GetBlockByNumber retrieves a block from the database by number, caching it 590 // (associated with its hash) if found. 591 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 592 hash := GetCanonicalHash(bc.db, number) 593 if hash == (common.Hash{}) { 594 return nil 595 } 596 return bc.GetBlock(hash, number) 597 } 598 599 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 600 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 601 return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash)) 602 } 603 604 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 605 // [deprecated by eth/62] 606 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 607 number := bc.hc.GetBlockNumber(hash) 608 for i := 0; i < n; i++ { 609 block := bc.GetBlock(hash, number) 610 if block == nil { 611 break 612 } 613 blocks = append(blocks, block) 614 hash = block.ParentHash() 615 number-- 616 } 617 return 618 } 619 620 // GetUnclesInChain retrieves all the uncles from a given block backwards until 621 // a specific distance is reached. 622 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 623 uncles := []*types.Header{} 624 for i := 0; block != nil && i < length; i++ { 625 uncles = append(uncles, block.Uncles()...) 626 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 627 } 628 return uncles 629 } 630 631 // TrieNode retrieves a blob of data associated with a trie node (or code hash) 632 // either from ephemeral in-memory cache, or from persistent storage. 
633 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 634 return bc.stateCache.TrieDB().Node(hash) 635 } 636 637 func (bc *BlockChain) Stop() { 638 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 639 return 640 } 641 // Unsubscribe all subscriptions registered from blockchain 642 bc.scope.Close() 643 close(bc.quit) 644 atomic.StoreInt32(&bc.procInterrupt, 1) 645 646 bc.wg.Wait() 647 648 // Ensure the state of a recent block is also stored to disk before exiting. 649 // We're writing three different states to catch different restart scenarios: 650 // - HEAD: So we don't need to reprocess any blocks in the general case 651 // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle 652 // - HEAD-127: So we have a hard limit on the number of blocks reexecuted 653 654 655 656 657 if !bc.cacheConfig.Disabled { 658 triedb := bc.stateCache.TrieDB() 659 for _, offset := range []uint64{0, 1, triesInMemory - 1} { 660 if number := bc.CurrentBlock().NumberU64(); number > offset { 661 recent := bc.GetBlockByNumber(number - offset) 662 663 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 664 if err := triedb.Commit(recent.Root(), true); err != nil { 665 log.Error("Failed to commit recent state trie", "err", err) 666 } 667 } 668 } 669 for !bc.triegc.Empty() { 670 triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{}) 671 } 672 if size := triedb.Size(); size != 0 { 673 log.Error("Dangling trie nodes after full cleanup") 674 } 675 } 676 log.Info("Blockchain manager stopped") 677 } 678 679 // Stop stops the blockchain service. If any imports are currently in progress 680 // it will abort them using the procInterrupt. 681 func (bc *BlockChain) StopO() { 682 if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { 683 return 684 } 685 // Unsubscribe all subscriptions registered from blockchain 686 bc.scope.Close() 687 close(bc.quit) 688 atomic.StoreInt32(&bc.procInterrupt, 1) 689 690 bc.wg.Wait() 691 692 // Ensure the state of a recent block is also stored to disk before exiting. 693 // It is fine if this state does not exist (fast start/stop cycle), but it is 694 // advisable to leave an N block gap from the head so 1) a restart loads up 695 // the last N blocks as sync assistance to remote nodes; 2) a restart during 696 // a (small) reorg doesn't require deep reprocesses; 3) chain "repair" from 697 // missing states are constantly tested. 698 // 699 // This may be tuned a bit on mainnet if its too annoying to reprocess the last 700 // N blocks. 
701 if !bc.cacheConfig.Disabled { 702 triedb := bc.stateCache.TrieDB() 703 if number := bc.CurrentBlock().NumberU64(); number >= triesInMemory { 704 recent := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - triesInMemory + 1) 705 706 log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) 707 if err := triedb.Commit(recent.Root(), true); err != nil { 708 log.Error("Failed to commit recent state trie", "err", err) 709 } 710 } 711 for !bc.triegc.Empty() { 712 triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{}) 713 } 714 if size := triedb.Size(); size != 0 { 715 log.Error("Dangling trie nodes after full cleanup") 716 } 717 } 718 log.Info("Blockchain manager stopped") 719 } 720 721 func (bc *BlockChain) procFutureBlocks() { 722 blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) 723 for _, hash := range bc.futureBlocks.Keys() { 724 if block, exist := bc.futureBlocks.Peek(hash); exist { 725 blocks = append(blocks, block.(*types.Block)) 726 } 727 } 728 if len(blocks) > 0 { 729 types.BlockBy(types.Number).Sort(blocks) 730 731 // Insert one by one as chain insertion needs contiguous ancestry between blocks 732 for i := range blocks { 733 bc.InsertChain(blocks[i : i+1]) 734 } 735 } 736 } 737 738 // WriteStatus status of write 739 type WriteStatus byte 740 741 const ( 742 NonStatTy WriteStatus = iota 743 CanonStatTy 744 SideStatTy 745 ) 746 747 // Rollback is designed to remove a chain of links from the database that aren't 748 // certain enough to be valid. 749 func (bc *BlockChain) Rollback(chain []common.Hash) { 750 bc.mu.Lock() 751 defer bc.mu.Unlock() 752 753 for i := len(chain) - 1; i >= 0; i-- { 754 hash := chain[i] 755 756 currentHeader := bc.hc.CurrentHeader() 757 if currentHeader.Hash() == hash { 758 bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)) 759 } 760 if bc.currentFastBlock.Hash() == hash { 761 bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash(), bc.currentFastBlock.NumberU64()-1) 762 WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash()) 763 } 764 if bc.currentBlock.Hash() == hash { 765 bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash(), bc.currentBlock.NumberU64()-1) 766 WriteHeadBlockHash(bc.db, bc.currentBlock.Hash()) 767 } 768 } 769 } 770 771 // SetReceiptsData computes all the non-consensus fields of the receipts 772 func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) { 773 signer := types.MakeSigner(config, block.Number()) 774 775 transactions, logIndex := block.Transactions(), uint(0) 776 777 for j := 0; j < len(receipts); j++ { 778 // The transaction hash can be retrieved from the transaction itself 779 receipts[j].TxHash = transactions[j].Hash() 780 781 // The contract address can be derived from the transaction itself 782 if transactions[j].To() == nil { 783 // Deriving the signer is expensive, only do if it's actually needed 784 from, _ := types.Sender(signer, transactions[j]) 785 receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) 786 } 787 // The used gas can be calculated based on previous receipts 788 if j == 0 { 789 receipts[j].GasUsed = receipts[j].CumulativeGasUsed 790 } else { 791 receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed 792 } 793 // The derived log fields can simply be set from the block and transaction 794 for k := 0; k < len(receipts[j].Logs); k++ { 795 receipts[j].Logs[k].BlockNumber = 
block.NumberU64() 796 receipts[j].Logs[k].BlockHash = block.Hash() 797 receipts[j].Logs[k].TxHash = receipts[j].TxHash 798 receipts[j].Logs[k].TxIndex = uint(j) 799 receipts[j].Logs[k].Index = logIndex 800 logIndex++ 801 } 802 } 803 } 804 805 // InsertReceiptChain attempts to complete an already existing header chain with 806 // transaction and receipt data. 807 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { 808 bc.wg.Add(1) 809 defer bc.wg.Done() 810 811 // Do a sanity check that the provided chain is actually ordered and linked 812 for i := 1; i < len(blockChain); i++ { 813 if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { 814 log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), 815 "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) 816 return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), 817 blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) 818 } 819 } 820 821 var ( 822 stats = struct{ processed, ignored int32 }{} 823 start = time.Now() 824 bytes = 0 825 batch = bc.db.NewBatch() 826 ) 827 for i, block := range blockChain { 828 receipts := receiptChain[i] 829 // Short circuit insertion if shutting down or processing failed 830 if atomic.LoadInt32(&bc.procInterrupt) == 1 { 831 return 0, nil 832 } 833 // Short circuit if the owner header is unknown 834 if !bc.HasHeader(block.Hash(), block.NumberU64()) { 835 return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) 836 } 837 // Skip if the entire data is already known 838 if bc.HasBlock(block.Hash(), block.NumberU64()) { 839 stats.ignored++ 840 continue 841 } 842 // Compute all the non-consensus fields of the receipts 843 SetReceiptsData(bc.chainConfig, block, receipts) 844 // Write all the data out into the database 845 if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil { 846 return i, fmt.Errorf("failed to write block body: %v", err) 847 } 848 if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { 849 return i, fmt.Errorf("failed to write block receipts: %v", err) 850 } 851 if err := WriteTxLookupEntries(batch, block); err != nil { 852 return i, fmt.Errorf("failed to write lookup metadata: %v", err) 853 } 854 stats.processed++ 855 856 if batch.ValueSize() >= ethdb.IdealBatchSize { 857 if err := batch.Write(); err != nil { 858 return 0, err 859 } 860 bytes += batch.ValueSize() 861 batch.Reset() 862 } 863 } 864 if batch.ValueSize() > 0 { 865 bytes += batch.ValueSize() 866 if err := batch.Write(); err != nil { 867 return 0, err 868 } 869 } 870 871 // Update the head fast sync block if better 872 bc.mu.Lock() 873 head := blockChain[len(blockChain)-1] 874 if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case 875 if bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64()).Cmp(td) < 0 { 876 if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil { 877 log.Crit("Failed to update head fast block hash", "err", err) 878 } 879 bc.currentFastBlock = head 880 } 881 } 882 bc.mu.Unlock() 883 884 log.Info("Imported new block receipts", 885 
"count", stats.processed, 886 "elapsed", common.PrettyDuration(time.Since(start)), 887 "number", head.Number(), 888 "hash", head.Hash(), 889 "size", common.StorageSize(bytes), 890 "ignored", stats.ignored) 891 return 0, nil 892 } 893 894 var lastWrite uint64 895 896 // WriteBlockWithoutState writes only the block and its metadata to the database, 897 // but does not write any state. This is used to construct competing side forks 898 // up to the point where they exceed the canonical total difficulty. 899 func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) { 900 bc.wg.Add(1) 901 defer bc.wg.Done() 902 903 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { 904 return err 905 } 906 if err := WriteBlock(bc.db, block); err != nil { 907 return err 908 } 909 return nil 910 } 911 912 // WriteBlockWithState writes the block and all associated state to the database. 913 func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) { 914 bc.wg.Add(1) 915 defer bc.wg.Done() 916 917 // Calculate the total difficulty of the block 918 ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) 919 if ptd == nil { 920 return NonStatTy, consensus.ErrUnknownAncestor 921 } 922 // Make sure no inconsistent state is leaked during insertion 923 bc.mu.Lock() 924 defer bc.mu.Unlock() 925 926 localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64()) 927 externTd := new(big.Int).Add(block.Difficulty(), ptd) 928 929 // Irrelevant of the canonical status, write the block itself to the database 930 if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { 931 return NonStatTy, err 932 } 933 // Write other block data using a batch. 934 batch := bc.db.NewBatch() 935 if err := WriteBlock(batch, block); err != nil { 936 return NonStatTy, err 937 } 938 root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) 939 if err != nil { 940 return NonStatTy, err 941 } 942 triedb := bc.stateCache.TrieDB() 943 944 // If we're running an archive node, always flush 945 if bc.cacheConfig.Disabled { 946 if err := triedb.Commit(root, false); err != nil { 947 return NonStatTy, err 948 } 949 } else { 950 // Full but not archive node, do proper garbage collection 951 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 952 bc.triegc.Push(root, -float32(block.NumberU64())) 953 954 if current := block.NumberU64(); current > triesInMemory { 955 // Find the next state trie we need to commit 956 header := bc.GetHeaderByNumber(current - triesInMemory) 957 chosen := header.Number.Uint64() 958 959 // Only write to disk if we exceeded our memory allowance *and* also have at 960 // least a given number of tries gapped. 961 var ( 962 size = triedb.Size() 963 limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024 964 ) 965 if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit { 966 // If we're exceeding limits but haven't reached a large enough memory gap, 967 // warn the user that the system is becoming unstable. 
968 if chosen < lastWrite+triesInMemory { 969 switch { 970 case size >= 2*limit: 971 log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory) 972 case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit: 973 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) 974 } 975 } 976 // If optimum or critical limits reached, write to disk 977 if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 978 triedb.Commit(header.Root, true) 979 lastWrite = chosen 980 bc.gcproc = 0 981 } 982 } 983 // Garbage collect anything below our required write retention 984 for !bc.triegc.Empty() { 985 root, number := bc.triegc.Pop() 986 if uint64(-number) > chosen { 987 bc.triegc.Push(root, number) 988 break 989 } 990 triedb.Dereference(root.(common.Hash), common.Hash{}) 991 } 992 } 993 } 994 if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { 995 return NonStatTy, err 996 } 997 // If the total difficulty is higher than our known, add it to the canonical chain 998 // Second clause in the if statement reduces the vulnerability to selfish mining. 999 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1000 reorg := externTd.Cmp(localTd) > 0 1001 if !reorg && externTd.Cmp(localTd) == 0 { 1002 // Split same-difficulty blocks by number, then at random 1003 reorg = block.NumberU64() < bc.currentBlock.NumberU64() || (block.NumberU64() == bc.currentBlock.NumberU64() && mrand.Float64() < 0.5) 1004 } 1005 if reorg { 1006 // Reorganise the chain if the parent is not the head block 1007 if block.ParentHash() != bc.currentBlock.Hash() { 1008 if err := bc.reorg(bc.currentBlock, block); err != nil { 1009 return NonStatTy, err 1010 } 1011 } 1012 // Write the positional metadata for transaction and receipt lookups 1013 if err := WriteTxLookupEntries(batch, block); err != nil { 1014 return NonStatTy, err 1015 } 1016 // Write hash preimages 1017 if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil { 1018 return NonStatTy, err 1019 } 1020 status = CanonStatTy 1021 } else { 1022 status = SideStatTy 1023 } 1024 if err := batch.Write(); err != nil { 1025 return NonStatTy, err 1026 } 1027 1028 // Set new head. 1029 if status == CanonStatTy { 1030 bc.insert(block) 1031 } 1032 bc.futureBlocks.Remove(block.Hash()) 1033 return status, nil 1034 } 1035 1036 // InsertChain attempts to insert the given batch of blocks in to the canonical 1037 // chain or, otherwise, create a fork. If an error is returned it will return 1038 // the index number of the failing block as well an error describing what went 1039 // wrong. 1040 // 1041 // After insertion is done, all accumulated events will be fired. 1042 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1043 n, events, logs, err := bc.insertChain(chain) 1044 bc.PostChainEvents(events, logs) 1045 return n, err 1046 } 1047 1048 // insertChain will execute the actual chain insertion and event aggregation. The 1049 // only reason this method exists as a separate one is to make locking cleaner 1050 // with deferred statements. 
1051 func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
1052 // Do a sanity check that the provided chain is actually ordered and linked
1053 for i := 1; i < len(chain); i++ {
1054 if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
1055 // Chain broke ancestry, log a message (programming error) and skip insertion
1056 log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
1057 "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
1058
1059 return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
1060 chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
1061 }
1062 }
1063 // Pre-checks passed, start the full block imports
1064 bc.wg.Add(1)
1065 defer bc.wg.Done()
1066
1067 bc.chainmu.Lock()
1068 defer bc.chainmu.Unlock()
1069
1070 // A queued approach to delivering events. This is generally
1071 // faster than direct delivery and requires much less mutex
1072 // acquiring.
1073 var (
1074 stats = insertStats{startTime: mclock.Now()}
1075 events = make([]interface{}, 0, len(chain))
1076 lastCanon *types.Block
1077 coalescedLogs []*types.Log
1078 )
1079 // Start the parallel header verifier
1080 headers := make([]*types.Header, len(chain))
1081 seals := make([]bool, len(chain))
1082
1083 for i, block := range chain {
1084 headers[i] = block.Header()
1085 seals[i] = true
1086 }
1087 abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
1088 defer close(abort)
1089
1090 // Iterate over the blocks and insert when the verifier permits
1091 for i, block := range chain {
1092 // If the chain is terminating, stop processing blocks
1093 if atomic.LoadInt32(&bc.procInterrupt) == 1 {
1094 log.Debug("Premature abort during blocks processing")
1095 break
1096 }
1097 // If the header is a banned one, straight out abort
1098 if BadHashes[block.Hash()] {
1099 bc.reportBlock(block, nil, ErrBlacklistedHash)
1100 return i, events, coalescedLogs, ErrBlacklistedHash
1101 }
1102 // Wait for the block's verification to complete
1103 bstart := time.Now()
1104
1105 err := <-results
1106 if err == nil {
1107 err = bc.Validator().ValidateBody(block)
1108 }
1109 switch {
1110 case err == ErrKnownBlock:
1111 // Block and state both already known. However if the current block is below
1112 // this number we did a rollback and we should reimport it nonetheless.
1113 if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
1114 stats.ignored++
1115 continue
1116 }
1117
1118 case err == consensus.ErrFutureBlock:
1119 // Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit is
1120 // exceeded the block is discarded, otherwise it is queued for processing at a later time.
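// (maxTimeFutureBlocks is 30, so a block timestamped more than 30 seconds
// ahead of local time is rejected outright instead of being queued.)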
1121 max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) 1122 if block.Time().Cmp(max) > 0 { 1123 return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max) 1124 } 1125 bc.futureBlocks.Add(block.Hash(), block) 1126 stats.queued++ 1127 continue 1128 1129 case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()): 1130 bc.futureBlocks.Add(block.Hash(), block) 1131 stats.queued++ 1132 continue 1133 1134 case err == consensus.ErrPrunedAncestor: 1135 // Block competing with the canonical chain, store in the db, but don't process 1136 // until the competitor TD goes above the canonical TD 1137 localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64()) 1138 externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty()) 1139 if localTd.Cmp(externTd) > 0 { 1140 if err = bc.WriteBlockWithoutState(block, externTd); err != nil { 1141 return i, events, coalescedLogs, err 1142 } 1143 continue 1144 } 1145 // Competitor chain beat canonical, gather all blocks from the common ancestor 1146 var winner []*types.Block 1147 1148 parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 1149 for !bc.HasState(parent.Root()) { 1150 winner = append(winner, parent) 1151 parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) 1152 } 1153 for j := 0; j < len(winner)/2; j++ { 1154 winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j] 1155 } 1156 // Import all the pruned blocks to make the state available 1157 bc.chainmu.Unlock() 1158 _, evs, logs, err := bc.insertChain(winner) 1159 bc.chainmu.Lock() 1160 events, coalescedLogs = evs, logs 1161 1162 if err != nil { 1163 return i, events, coalescedLogs, err 1164 } 1165 1166 case err != nil: 1167 bc.reportBlock(block, nil, err) 1168 return i, events, coalescedLogs, err 1169 } 1170 // Create a new statedb using the parent block and report an 1171 // error if it fails. 1172 var parent *types.Block 1173 if i == 0 { 1174 parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 1175 } else { 1176 parent = chain[i-1] 1177 } 1178 state, err := state.New(parent.Root(), bc.stateCache) 1179 if err != nil { 1180 return i, events, coalescedLogs, err 1181 } 1182 // Process block using the parent state as reference point. 1183 receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig) 1184 if err != nil { 1185 bc.reportBlock(block, receipts, err) 1186 return i, events, coalescedLogs, err 1187 } 1188 // Validate the state using the default validator 1189 err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas) 1190 if err != nil { 1191 bc.reportBlock(block, receipts, err) 1192 return i, events, coalescedLogs, err 1193 } 1194 proctime := time.Since(bstart) 1195 1196 // Write the block to the chain and get the status. 1197 status, err := bc.WriteBlockWithState(block, receipts, state) 1198 if err != nil { 1199 return i, events, coalescedLogs, err 1200 } 1201 switch status { 1202 case CanonStatTy: 1203 log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()), 1204 "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart))) 1205 1206 coalescedLogs = append(coalescedLogs, logs...) 
1207 blockInsertTimer.UpdateSince(bstart) 1208 events = append(events, ChainEvent{block, block.Hash(), logs}) 1209 lastCanon = block 1210 1211 // Only count canonical blocks for GC processing time 1212 bc.gcproc += proctime 1213 1214 case SideStatTy: 1215 log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed", 1216 common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles())) 1217 1218 blockInsertTimer.UpdateSince(bstart) 1219 events = append(events, ChainSideEvent{block}) 1220 } 1221 stats.processed++ 1222 stats.usedGas += usedGas 1223 stats.report(chain, i, bc.stateCache.TrieDB().Size()) 1224 } 1225 // Append a single chain head event if we've progressed the chain 1226 if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { 1227 events = append(events, ChainHeadEvent{lastCanon}) 1228 } 1229 return 0, events, coalescedLogs, nil 1230 } 1231 1232 // insertStats tracks and reports on block insertion. 1233 type insertStats struct { 1234 queued, processed, ignored int 1235 usedGas uint64 1236 lastIndex int 1237 startTime mclock.AbsTime 1238 } 1239 1240 // statsReportLimit is the time limit during import after which we always print 1241 // out progress. This avoids the user wondering what's going on. 1242 const statsReportLimit = 8 * time.Second 1243 1244 // report prints statistics if some number of blocks have been processed 1245 // or more than a few seconds have passed since the last message. 1246 func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) { 1247 // Fetch the timings for the batch 1248 var ( 1249 now = mclock.Now() 1250 elapsed = time.Duration(now) - time.Duration(st.startTime) 1251 ) 1252 // If we're at the last block of the batch or report period reached, log 1253 if index == len(chain)-1 || elapsed >= statsReportLimit { 1254 var ( 1255 end = chain[index] 1256 txs = countTransactions(chain[st.lastIndex : index+1]) 1257 ) 1258 context := []interface{}{ 1259 "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, 1260 "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), 1261 "number", end.Number(), "hash", end.Hash(), "cache", cache, 1262 } 1263 if st.queued > 0 { 1264 context = append(context, []interface{}{"queued", st.queued}...) 1265 } 1266 if st.ignored > 0 { 1267 context = append(context, []interface{}{"ignored", st.ignored}...) 1268 } 1269 log.Info("Imported new chain segment", context...) 1270 1271 *st = insertStats{startTime: now, lastIndex: index + 1} 1272 } 1273 } 1274 1275 func countTransactions(chain []*types.Block) (c int) { 1276 for _, b := range chain { 1277 c += len(b.Transactions()) 1278 } 1279 return c 1280 } 1281 1282 // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them 1283 // to be part of the new canonical chain and accumulates potential missing transactions and post an 1284 // event about them 1285 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 1286 var ( 1287 newChain types.Blocks 1288 oldChain types.Blocks 1289 commonBlock *types.Block 1290 deletedTxs types.Transactions 1291 deletedLogs []*types.Log 1292 // collectLogs collects the logs that were generated during the 1293 // processing of the block that corresponds with the given hash. 1294 // These logs are later announced as deleted. 
1295 collectLogs = func(h common.Hash) { 1296 // Coalesce logs and set 'Removed'. 1297 receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h)) 1298 for _, receipt := range receipts { 1299 for _, log := range receipt.Logs { 1300 del := *log 1301 del.Removed = true 1302 deletedLogs = append(deletedLogs, &del) 1303 } 1304 } 1305 } 1306 ) 1307 1308 // first reduce whoever is higher bound 1309 if oldBlock.NumberU64() > newBlock.NumberU64() { 1310 // reduce old chain 1311 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 1312 oldChain = append(oldChain, oldBlock) 1313 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1314 1315 collectLogs(oldBlock.Hash()) 1316 } 1317 } else { 1318 // reduce new chain and append new chain blocks for inserting later on 1319 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 1320 newChain = append(newChain, newBlock) 1321 } 1322 } 1323 if oldBlock == nil { 1324 return fmt.Errorf("Invalid old chain") 1325 } 1326 if newBlock == nil { 1327 return fmt.Errorf("Invalid new chain") 1328 } 1329 1330 for { 1331 if oldBlock.Hash() == newBlock.Hash() { 1332 commonBlock = oldBlock 1333 break 1334 } 1335 1336 oldChain = append(oldChain, oldBlock) 1337 newChain = append(newChain, newBlock) 1338 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1339 collectLogs(oldBlock.Hash()) 1340 1341 oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 1342 if oldBlock == nil { 1343 return fmt.Errorf("Invalid old chain") 1344 } 1345 if newBlock == nil { 1346 return fmt.Errorf("Invalid new chain") 1347 } 1348 } 1349 // Ensure the user sees large reorgs 1350 if len(oldChain) > 0 && len(newChain) > 0 { 1351 logFn := log.Debug 1352 if len(oldChain) > 63 { 1353 logFn = log.Warn 1354 } 1355 logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), 1356 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 1357 } else { 1358 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 1359 } 1360 // Insert the new chain, taking care of the proper incremental order 1361 var addedTxs types.Transactions 1362 for i := len(newChain) - 1; i >= 0; i-- { 1363 // insert the block in the canonical way, re-writing history 1364 bc.insert(newChain[i]) 1365 // write lookup entries for hash based transaction/receipt searches 1366 if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil { 1367 return err 1368 } 1369 addedTxs = append(addedTxs, newChain[i].Transactions()...) 
1370 } 1371 // calculate the difference between deleted and added transactions 1372 diff := types.TxDifference(deletedTxs, addedTxs) 1373 // When transactions get deleted from the database that means the 1374 // receipts that were created in the fork must also be deleted 1375 for _, tx := range diff { 1376 DeleteTxLookupEntry(bc.db, tx.Hash()) 1377 } 1378 if len(deletedLogs) > 0 { 1379 go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) 1380 } 1381 if len(oldChain) > 0 { 1382 go func() { 1383 for _, block := range oldChain { 1384 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1385 } 1386 }() 1387 } 1388 1389 return nil 1390 } 1391 1392 // PostChainEvents iterates over the events generated by a chain insertion and 1393 // posts them into the event feed. 1394 // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock. 1395 func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) { 1396 // post event logs for further processing 1397 if logs != nil { 1398 bc.logsFeed.Send(logs) 1399 } 1400 for _, event := range events { 1401 switch ev := event.(type) { 1402 case ChainEvent: 1403 bc.chainFeed.Send(ev) 1404 1405 case ChainHeadEvent: 1406 bc.chainHeadFeed.Send(ev) 1407 1408 case ChainSideEvent: 1409 bc.chainSideFeed.Send(ev) 1410 } 1411 } 1412 } 1413 1414 func (bc *BlockChain) update() { 1415 futureTimer := time.NewTicker(5 * time.Second) 1416 defer futureTimer.Stop() 1417 for { 1418 select { 1419 case <-futureTimer.C: 1420 bc.procFutureBlocks() 1421 case <-bc.quit: 1422 return 1423 } 1424 } 1425 } 1426 1427 // BadBlockArgs represents the entries in the list returned when bad blocks are queried. 1428 type BadBlockArgs struct { 1429 Hash common.Hash `json:"hash"` 1430 Header *types.Header `json:"header"` 1431 } 1432 1433 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1434 func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) { 1435 headers := make([]BadBlockArgs, 0, bc.badBlocks.Len()) 1436 for _, hash := range bc.badBlocks.Keys() { 1437 if hdr, exist := bc.badBlocks.Peek(hash); exist { 1438 header := hdr.(*types.Header) 1439 headers = append(headers, BadBlockArgs{header.Hash(), header}) 1440 } 1441 } 1442 return headers, nil 1443 } 1444 1445 // addBadBlock adds a bad block to the bad-block LRU cache 1446 func (bc *BlockChain) addBadBlock(block *types.Block) { 1447 bc.badBlocks.Add(block.Header().Hash(), block.Header()) 1448 } 1449 1450 // reportBlock logs a bad block error. 1451 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1452 bc.addBadBlock(block) 1453 1454 var receiptString string 1455 for _, receipt := range receipts { 1456 receiptString += fmt.Sprintf("\t%v\n", receipt) 1457 } 1458 log.Error(fmt.Sprintf(` 1459 ########## BAD BLOCK ######### 1460 Chain config: %v 1461 1462 Number: %v 1463 Hash: 0x%x 1464 %v 1465 1466 Error: %v 1467 ############################## 1468 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1469 } 1470 1471 // InsertHeaderChain attempts to insert the given header chain in to the local 1472 // chain, possibly creating a reorg. If an error is returned, it will return the 1473 // index number of the failing header as well an error describing what went wrong. 1474 // 1475 // The verify parameter can be used to fine tune whether nonce verification 1476 // should be done or not. 
The reason behind the optional check is because some 1477 // of the header retrieval mechanisms already need to verify nonces, as well as 1478 // because nonces can be verified sparsely, not needing to check each. 1479 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1480 start := time.Now() 1481 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1482 return i, err 1483 } 1484 1485 // Make sure only one thread manipulates the chain at once 1486 bc.chainmu.Lock() 1487 defer bc.chainmu.Unlock() 1488 1489 bc.wg.Add(1) 1490 defer bc.wg.Done() 1491 1492 whFunc := func(header *types.Header) error { 1493 bc.mu.Lock() 1494 defer bc.mu.Unlock() 1495 1496 _, err := bc.hc.WriteHeader(header) 1497 return err 1498 } 1499 1500 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1501 } 1502 1503 // writeHeader writes a header into the local chain, given that its parent is 1504 // already known. If the total difficulty of the newly inserted header becomes 1505 // greater than the current known TD, the canonical chain is re-routed. 1506 // 1507 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1508 // into the chain, as side effects caused by reorganisations cannot be emulated 1509 // without the real blocks. Hence, writing headers directly should only be done 1510 // in two scenarios: pure-header mode of operation (light clients), or properly 1511 // separated header/block phases (non-archive clients). 1512 func (bc *BlockChain) writeHeader(header *types.Header) error { 1513 bc.wg.Add(1) 1514 defer bc.wg.Done() 1515 1516 bc.mu.Lock() 1517 defer bc.mu.Unlock() 1518 1519 _, err := bc.hc.WriteHeader(header) 1520 return err 1521 } 1522 1523 // CurrentHeader retrieves the current head header of the canonical chain. The 1524 // header is retrieved from the HeaderChain's internal cache. 1525 func (bc *BlockChain) CurrentHeader() *types.Header { 1526 bc.mu.RLock() 1527 defer bc.mu.RUnlock() 1528 1529 return bc.hc.CurrentHeader() 1530 } 1531 1532 // GetTd retrieves a block's total difficulty in the canonical chain from the 1533 // database by hash and number, caching it if found. 1534 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1535 return bc.hc.GetTd(hash, number) 1536 } 1537 1538 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1539 // database by hash, caching it if found. 1540 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1541 return bc.hc.GetTdByHash(hash) 1542 } 1543 1544 // GetHeader retrieves a block header from the database by hash and number, 1545 // caching it if found. 1546 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1547 return bc.hc.GetHeader(hash, number) 1548 } 1549 1550 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1551 // found. 1552 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 1553 return bc.hc.GetHeaderByHash(hash) 1554 } 1555 1556 // HasHeader checks if a block header is present in the database or not, caching 1557 // it if present. 1558 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 1559 return bc.hc.HasHeader(hash, number) 1560 } 1561 1562 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 1563 // hash, fetching towards the genesis block. 
1564 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 1565 return bc.hc.GetBlockHashesFromHash(hash, max) 1566 } 1567 1568 // GetHeaderByNumber retrieves a block header from the database by number, 1569 // caching it (associated with its hash) if found. 1570 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1571 return bc.hc.GetHeaderByNumber(number) 1572 } 1573 1574 // Config retrieves the blockchain's chain configuration. 1575 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1576 1577 // Engine retrieves the blockchain's consensus engine. 1578 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1579 1580 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1581 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1582 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1583 } 1584 1585 // SubscribeChainEvent registers a subscription of ChainEvent. 1586 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1587 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1588 } 1589 1590 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1591 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1592 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1593 } 1594 1595 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1596 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1597 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1598 } 1599 1600 // SubscribeLogsEvent registers a subscription of []*types.Log. 1601 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 1602 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 1603 }
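// Usage sketch: assuming the surrounding program already provides an open
// ethdb.Database (db), a consensus.Engine (engine), a *params.ChainConfig
// (config) and a batch of blocks to import (blocks), wiring up a BlockChain
// and listening for new canonical heads might look roughly like this:
//
//	chain, err := NewBlockChain(db, nil, config, engine, vm.Config{})
//	if err != nil {
//		log.Crit("Failed to initialise chain", "err", err)
//	}
//	defer chain.Stop()
//
//	heads := make(chan ChainHeadEvent, 16)
//	sub := chain.SubscribeChainHeadEvent(heads)
//	defer sub.Unsubscribe()
//
//	if _, err := chain.InsertChain(blocks); err != nil {
//		log.Error("Block import failed", "err", err)
//	}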