// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/hashicorp/golang-lru"
)

var (
	// blockInsertTimer records the duration of full block insertions under the
	// "chain/inserts" metric name.
	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)

	// ErrNoGenesis is returned when the database contains no genesis block.
	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256 // Maximum entries in the block body / body-RLP LRU caches
	blockCacheLimit     = 256 // Maximum entries in the whole-block LRU cache
	receiptsCacheLimit  = 32  // Maximum entries in the per-block receipts LRU cache
	maxFutureBlocks     = 256 // Maximum blocks queued for future processing
	maxTimeFutureBlocks = 30  // Max seconds ahead of local clock a future block may be
	badBlockLimit       = 10  // Maximum entries in the bad-block LRU cache
	triesInMemory       = 128 // Number of recent tries kept in memory before GC

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion = 3
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled      bool          // Whether to disable trie write caching (archive node)
	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}

// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain // Canonical header chain (headers only, used below the block chain)
	rmLogsFeed    event.Feed   // Feed for removed-log events during reorgs
	chainFeed     event.Feed   // Feed for canonical chain events
	chainSideFeed event.Feed   // Feed for side-chain events
	chainHeadFeed event.Feed   // Feed for new-head events
	logsFeed      event.Feed   // Feed for new log events
	scope         event.SubscriptionScope // Tracks all subscriptions for bulk unsubscribe on Stop
	genesisBlock  *types.Block // Genesis block of this chain, loaded at construction time

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine // Consensus engine used for header/seal verification
	processor Processor        // block processor interface
	validator Validator        // block and state validator interface
	vmConfig  vm.Config        // Configuration handed to the EVM on each block execution

	badBlocks      *lru.Cache              // Bad block cache
	shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.

	// Quorum: separate state database holding the private (non-public) state
	// that mirrors the public chain.
	privateStateCache state.Database // Private state database to reuse between imports (contains state cache)
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
143 func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) { 144 if cacheConfig == nil { 145 cacheConfig = &CacheConfig{ 146 TrieNodeLimit: 256, 147 TrieTimeLimit: 5 * time.Minute, 148 } 149 } 150 bodyCache, _ := lru.New(bodyCacheLimit) 151 bodyRLPCache, _ := lru.New(bodyCacheLimit) 152 receiptsCache, _ := lru.New(receiptsCacheLimit) 153 blockCache, _ := lru.New(blockCacheLimit) 154 futureBlocks, _ := lru.New(maxFutureBlocks) 155 badBlocks, _ := lru.New(badBlockLimit) 156 157 bc := &BlockChain{ 158 chainConfig: chainConfig, 159 cacheConfig: cacheConfig, 160 db: db, 161 triegc: prque.New(nil), 162 stateCache: state.NewDatabase(db), 163 quit: make(chan struct{}), 164 shouldPreserve: shouldPreserve, 165 bodyCache: bodyCache, 166 bodyRLPCache: bodyRLPCache, 167 receiptsCache: receiptsCache, 168 blockCache: blockCache, 169 futureBlocks: futureBlocks, 170 engine: engine, 171 vmConfig: vmConfig, 172 badBlocks: badBlocks, 173 privateStateCache: state.NewDatabase(db), 174 } 175 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 176 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 177 178 var err error 179 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 180 if err != nil { 181 return nil, err 182 } 183 bc.genesisBlock = bc.GetBlockByNumber(0) 184 if bc.genesisBlock == nil { 185 return nil, ErrNoGenesis 186 } 187 if err := bc.loadLastState(); err != nil { 188 return nil, err 189 } 190 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 191 for hash := range BadHashes { 192 if header := bc.GetHeaderByHash(hash); header != nil { 193 // get the canonical block corresponding to the offending header's number 194 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 195 // make sure the headerByNumber (if present) 
is in our current canonical chain 196 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 197 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) 198 bc.SetHead(header.Number.Uint64() - 1) 199 log.Error("Chain rewind was successful, resuming normal operation") 200 } 201 } 202 } 203 // Take ownership of this particular state 204 go bc.update() 205 return bc, nil 206 } 207 208 func (bc *BlockChain) getProcInterrupt() bool { 209 return atomic.LoadInt32(&bc.procInterrupt) == 1 210 } 211 212 // loadLastState loads the last known chain state from the database. This method 213 // assumes that the chain manager mutex is held. 214 func (bc *BlockChain) loadLastState() error { 215 // Restore the last known head block 216 head := rawdb.ReadHeadBlockHash(bc.db) 217 if head == (common.Hash{}) { 218 // Corrupt or empty database, init from scratch 219 log.Warn("Empty database, resetting chain") 220 return bc.Reset() 221 } 222 // Make sure the entire head block is available 223 currentBlock := bc.GetBlockByHash(head) 224 if currentBlock == nil { 225 // Corrupt or empty database, init from scratch 226 log.Warn("Head block missing, resetting chain", "hash", head) 227 return bc.Reset() 228 } 229 // Make sure the state associated with the block is available 230 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 231 // Dangling block without a state associated, init from scratch 232 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 233 if err := bc.repair(¤tBlock); err != nil { 234 return err 235 } 236 } 237 238 // Quorum 239 if _, err := state.New(GetPrivateStateRoot(bc.db, currentBlock.Root()), bc.privateStateCache); err != nil { 240 log.Warn("Head private state missing, resetting chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 241 return bc.Reset() 242 } 243 // /Quorum 244 245 // Everything seems to be fine, set as the 
head block 246 bc.currentBlock.Store(currentBlock) 247 248 // Restore the last known head header 249 currentHeader := currentBlock.Header() 250 if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { 251 if header := bc.GetHeaderByHash(head); header != nil { 252 currentHeader = header 253 } 254 } 255 bc.hc.SetCurrentHeader(currentHeader) 256 257 // Restore the last known head fast block 258 bc.currentFastBlock.Store(currentBlock) 259 if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { 260 if block := bc.GetBlockByHash(head); block != nil { 261 bc.currentFastBlock.Store(block) 262 } 263 } 264 265 // Issue a status log for the user 266 currentFastBlock := bc.CurrentFastBlock() 267 268 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 269 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 270 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 271 272 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0))) 273 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0))) 274 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0))) 275 276 return nil 277 } 278 279 // SetHead rewinds the local chain to a new head. In the case of headers, everything 280 // above the new head will be deleted and the new one set. In the case of blocks 281 // though, the head may be further rewound if block bodies are missing (non-archive 282 // nodes after a fast sync). 
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	// Persist the rewound head markers before reloading the chain state.
	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well at its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	// Quorum networks have no meaningful block gas limit; report an effectively
	// unbounded value instead of the header field.
	if bc.Config().IsQuorum {
		return math.MaxBig256.Uint64() // HACK(joel) a very large number
	} else {
		return bc.CurrentBlock().GasLimit()
	}
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
func (bc *BlockChain) SetValidator(validator Validator) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.validator = validator
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
// Quorum: it returns both the public and the private state of the head block.
func (bc *BlockChain) State() (*state.StateDB, *state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
// Quorum: the first return value is the public state, the second the private
// state looked up via GetPrivateStateRoot for the same public root.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, *state.StateDB, error) {
	publicStateDb, publicStateDbErr := state.New(root, bc.stateCache)
	if publicStateDbErr != nil {
		return nil, nil, publicStateDbErr
	}
	privateStateDb, privateStateDbErr := state.New(GetPrivateStateRoot(bc.db, root), bc.privateStateCache)
	if privateStateDbErr != nil {
		return nil, nil, privateStateDbErr
	}

	return publicStateDb, privateStateDb, nil
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
479 func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { 480 bc.mu.RLock() 481 defer bc.mu.RUnlock() 482 483 if first > last { 484 return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) 485 } 486 log.Info("Exporting batch of blocks", "count", last-first+1) 487 488 start, reported := time.Now(), time.Now() 489 for nr := first; nr <= last; nr++ { 490 block := bc.GetBlockByNumber(nr) 491 if block == nil { 492 return fmt.Errorf("export failed on #%d: not found", nr) 493 } 494 if err := block.EncodeRLP(w); err != nil { 495 return err 496 } 497 if time.Since(reported) >= statsReportLimit { 498 log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) 499 reported = time.Now() 500 } 501 } 502 503 return nil 504 } 505 506 // insert injects a new head block into the current block chain. This method 507 // assumes that the block is indeed a true head. It will also reset the head 508 // header and the head fast sync block to this very same block if they are older 509 // or if they are on a different side chain. 510 // 511 // Note, this function assumes that the `mu` mutex is held! 512 func (bc *BlockChain) insert(block *types.Block) { 513 // If the block is on a side chain or an unknown one, force other heads onto it too 514 updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() 515 516 // Add the block to the canonical chain number scheme and mark as the head 517 rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) 518 rawdb.WriteHeadBlockHash(bc.db, block.Hash()) 519 520 bc.currentBlock.Store(block) 521 522 // If the block is better than our head or is on a different chain, force update heads 523 if updateHeads { 524 bc.hc.SetCurrentHeader(block.Header()) 525 rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) 526 527 bc.currentFastBlock.Store(block) 528 } 529 } 530 531 // Genesis retrieves the chain's genesis block. 
532 func (bc *BlockChain) Genesis() *types.Block { 533 return bc.genesisBlock 534 } 535 536 // GetBody retrieves a block body (transactions and uncles) from the database by 537 // hash, caching it if found. 538 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { 539 // Short circuit if the body's already in the cache, retrieve otherwise 540 if cached, ok := bc.bodyCache.Get(hash); ok { 541 body := cached.(*types.Body) 542 return body 543 } 544 number := bc.hc.GetBlockNumber(hash) 545 if number == nil { 546 return nil 547 } 548 body := rawdb.ReadBody(bc.db, hash, *number) 549 if body == nil { 550 return nil 551 } 552 // Cache the found body for next time and return 553 bc.bodyCache.Add(hash, body) 554 return body 555 } 556 557 // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, 558 // caching it if found. 559 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 560 // Short circuit if the body's already in the cache, retrieve otherwise 561 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 562 return cached.(rlp.RawValue) 563 } 564 number := bc.hc.GetBlockNumber(hash) 565 if number == nil { 566 return nil 567 } 568 body := rawdb.ReadBodyRLP(bc.db, hash, *number) 569 if len(body) == 0 { 570 return nil 571 } 572 // Cache the found body for next time and return 573 bc.bodyRLPCache.Add(hash, body) 574 return body 575 } 576 577 // HasBlock checks if a block is fully present in the database or not. 578 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 579 if bc.blockCache.Contains(hash) { 580 return true 581 } 582 return rawdb.HasBody(bc.db, hash, number) 583 } 584 585 // HasState checks if state trie is fully present in the database or not. 
586 func (bc *BlockChain) HasState(hash common.Hash) bool { 587 _, err := bc.stateCache.OpenTrie(hash) 588 return err == nil 589 } 590 591 // HasBlockAndState checks if a block and associated state trie is fully present 592 // in the database or not, caching it if present. 593 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 594 // Check first that the block itself is known 595 block := bc.GetBlock(hash, number) 596 if block == nil { 597 return false 598 } 599 return bc.HasState(block.Root()) 600 } 601 602 // GetBlock retrieves a block from the database by hash and number, 603 // caching it if found. 604 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 605 // Short circuit if the block's already in the cache, retrieve otherwise 606 if block, ok := bc.blockCache.Get(hash); ok { 607 return block.(*types.Block) 608 } 609 block := rawdb.ReadBlock(bc.db, hash, number) 610 if block == nil { 611 return nil 612 } 613 // Cache the found block for next time and return 614 bc.blockCache.Add(block.Hash(), block) 615 return block 616 } 617 618 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 619 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 620 number := bc.hc.GetBlockNumber(hash) 621 if number == nil { 622 return nil 623 } 624 return bc.GetBlock(hash, *number) 625 } 626 627 // GetBlockByNumber retrieves a block from the database by number, caching it 628 // (associated with its hash) if found. 629 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 630 hash := rawdb.ReadCanonicalHash(bc.db, number) 631 if hash == (common.Hash{}) { 632 return nil 633 } 634 return bc.GetBlock(hash, number) 635 } 636 637 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 
638 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 639 if receipts, ok := bc.receiptsCache.Get(hash); ok { 640 return receipts.(types.Receipts) 641 } 642 643 number := rawdb.ReadHeaderNumber(bc.db, hash) 644 if number == nil { 645 return nil 646 } 647 648 receipts := rawdb.ReadReceipts(bc.db, hash, *number) 649 bc.receiptsCache.Add(hash, receipts) 650 return receipts 651 } 652 653 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 654 // [deprecated by eth/62] 655 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 656 number := bc.hc.GetBlockNumber(hash) 657 if number == nil { 658 return nil 659 } 660 for i := 0; i < n; i++ { 661 block := bc.GetBlock(hash, *number) 662 if block == nil { 663 break 664 } 665 blocks = append(blocks, block) 666 hash = block.ParentHash() 667 *number-- 668 } 669 return 670 } 671 672 // GetUnclesInChain retrieves all the uncles from a given block backwards until 673 // a specific distance is reached. 674 func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { 675 uncles := []*types.Header{} 676 for i := 0; block != nil && i < length; i++ { 677 uncles = append(uncles, block.Uncles()...) 678 block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) 679 } 680 return uncles 681 } 682 683 // TrieNode retrieves a blob of data associated with a trie node (or code hash) 684 // either from ephemeral in-memory cache, or from persistent storage. 685 func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { 686 return bc.stateCache.TrieDB().Node(hash) 687 } 688 689 // Stop stops the blockchain service. If any imports are currently in progress 690 // it will abort them using the procInterrupt. 
func (bc *BlockChain) Stop() {
	// Only the first caller performs the shutdown; later calls are no-ops.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	// Wait for any in-flight chain processing to drain before touching the tries.
	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		// Release every trie still queued for garbage collection.
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

// procFutureBlocks drains the future-block cache and attempts to insert the
// queued blocks in ascending number order.
func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus status of write
type WriteStatus byte

const (
	NonStatTy  WriteStatus = iota // Block was not written (error or unknown ancestor)
	CanonStatTy                   // Block was written as part of the canonical chain
	SideStatTy                    // Block was written to a side chain
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Walk the hashes newest-first, stepping each head marker back to its
	// parent whenever it sits on a rolled-back hash.
	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		// Flush whenever the batch grows past the ideal size to bound memory.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

// lastWrite tracks the block number of the most recent trie flush to disk.
var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
// Quorum: also commits the private state alongside the public one.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state, privateState *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))

	if err != nil {
		return NonStatTy, err
	}
954 triedb := bc.stateCache.TrieDB() 955 956 // Explicit commit for privateStateTriedb to handle Raft db issues 957 if privateState != nil { 958 privateRoot, err := privateState.Commit(bc.chainConfig.IsEIP158(block.Number())) 959 if err != nil { 960 return NonStatTy, err 961 } 962 privateTriedb := bc.privateStateCache.TrieDB() 963 if err := privateTriedb.Commit(privateRoot, false); err != nil { 964 return NonStatTy, err 965 } 966 } 967 968 // If we're running an archive node, always flush 969 if bc.cacheConfig.Disabled { 970 if err := triedb.Commit(root, false); err != nil { 971 return NonStatTy, err 972 } 973 974 } else { 975 // Full but not archive node, do proper garbage collection 976 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive 977 bc.triegc.Push(root, -int64(block.NumberU64())) 978 979 if current := block.NumberU64(); current > triesInMemory { 980 // If we exceeded our memory allowance, flush matured singleton nodes to disk 981 var ( 982 nodes, imgs = triedb.Size() 983 limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024 984 ) 985 if nodes > limit || imgs > 4*1024*1024 { 986 triedb.Cap(limit - ethdb.IdealBatchSize) 987 } 988 // Find the next state trie we need to commit 989 header := bc.GetHeaderByNumber(current - triesInMemory) 990 chosen := header.Number.Uint64() 991 992 // If we exceeded out time allowance, flush an entire trie to disk 993 if bc.gcproc > bc.cacheConfig.TrieTimeLimit { 994 // If we're exceeding limits but haven't reached a large enough memory gap, 995 // warn the user that the system is becoming unstable. 
996 if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { 997 log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) 998 } 999 // Flush an entire trie and restart the counters 1000 triedb.Commit(header.Root, true) 1001 lastWrite = chosen 1002 bc.gcproc = 0 1003 } 1004 // Garbage collect anything below our required write retention 1005 for !bc.triegc.Empty() { 1006 root, number := bc.triegc.Pop() 1007 if uint64(-number) > chosen { 1008 bc.triegc.Push(root, number) 1009 break 1010 } 1011 triedb.Dereference(root.(common.Hash)) 1012 } 1013 } 1014 } 1015 1016 // Write other block data using a batch. 1017 batch := bc.db.NewBatch() 1018 rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) 1019 1020 // If the total difficulty is higher than our known, add it to the canonical chain 1021 // Second clause in the if statement reduces the vulnerability to selfish mining. 1022 // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf 1023 reorg := externTd.Cmp(localTd) > 0 1024 currentBlock = bc.CurrentBlock() 1025 if !reorg && externTd.Cmp(localTd) == 0 { 1026 // Split same-difficulty blocks by number, then preferentially select 1027 // the block generated by the local miner as the canonical block. 
1028 if block.NumberU64() < currentBlock.NumberU64() { 1029 reorg = true 1030 } else if block.NumberU64() == currentBlock.NumberU64() { 1031 var currentPreserve, blockPreserve bool 1032 if bc.shouldPreserve != nil { 1033 currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) 1034 } 1035 reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) 1036 } 1037 } 1038 if reorg { 1039 // Reorganise the chain if the parent is not the head block 1040 if block.ParentHash() != currentBlock.Hash() { 1041 if err := bc.reorg(currentBlock, block); err != nil { 1042 return NonStatTy, err 1043 } 1044 } 1045 // Write the positional metadata for transaction/receipt lookups and preimages 1046 rawdb.WriteTxLookupEntries(batch, block) 1047 rawdb.WritePreimages(batch, state.Preimages()) 1048 1049 status = CanonStatTy 1050 } else { 1051 status = SideStatTy 1052 } 1053 if err := batch.Write(); err != nil { 1054 return NonStatTy, err 1055 } 1056 1057 // Set new head. 1058 if status == CanonStatTy { 1059 bc.insert(block) 1060 } 1061 bc.futureBlocks.Remove(block.Hash()) 1062 return status, nil 1063 } 1064 1065 // InsertChain attempts to insert the given batch of blocks in to the canonical 1066 // chain or, otherwise, create a fork. If an error is returned it will return 1067 // the index number of the failing block as well an error describing what went 1068 // wrong. 1069 // 1070 // After insertion is done, all accumulated events will be fired. 1071 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { 1072 n, events, logs, err := bc.insertChain(chain) 1073 bc.PostChainEvents(events, logs) 1074 return n, err 1075 } 1076 1077 // Given a slice of public receipts and an overlapping (smaller) slice of 1078 // private receipts, return a new slice where the default for each location is 1079 // the public receipt but we take the private receipt in each place we have 1080 // one. 
// mergeReceipts merges the public and private receipt slices (Quorum).
// Private receipts override the public receipt at the same tx hash; the
// result preserves the ordering of the public slice.
func mergeReceipts(pub, priv types.Receipts) types.Receipts {
	m := make(map[common.Hash]*types.Receipt)
	for _, receipt := range pub {
		m[receipt.TxHash] = receipt
	}
	for _, receipt := range priv {
		m[receipt.TxHash] = receipt
	}

	ret := make(types.Receipts, 0, len(pub))
	for _, pubReceipt := range pub {
		ret = append(ret, m[pubReceipt.TxHash])
	}

	return ret
}

// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil, nil, nil
	}
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		err := <-results
		if err == nil {
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
			// the chain is discarded and processed at a later time if given.
			// Quorum: the time cap is waived when IsQuorum is set.
			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
			if block.Time().Cmp(max) > 0 && !bc.chainConfig.IsQuorum {
				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
			}
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrPrunedAncestor:
			// Block competing with the canonical chain, store in the db, but don't process
			// until the competitor TD goes above the canonical TD
			currentBlock := bc.CurrentBlock()
			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
			if localTd.Cmp(externTd) > 0 {
				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
					return i, events, coalescedLogs, err
				}
				continue
			}
			// Competitor chain beat canonical, gather all blocks from the common ancestor
			var winner []*types.Block

			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
			for !bc.HasState(parent.Root()) {
				winner = append(winner, parent)
				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
			}
			// Reverse winner so the oldest missing block is imported first.
			for j := 0; j < len(winner)/2; j++ {
				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
			}
			// Import all the pruned blocks to make the state available.
			// The lock is released around the recursive call to avoid
			// self-deadlock on chainmu.
			bc.chainmu.Unlock()
			_, evs, logs, err := bc.insertChain(winner)
			bc.chainmu.Lock()
			// NOTE(review): this overwrites (not appends to) the events and
			// logs accumulated so far — confirm that is the intended policy.
			events, coalescedLogs = evs, logs

			if err != nil {
				return i, events, coalescedLogs, err
			}

		case err != nil:
			bc.reportBlock(block, nil, err)
			return i, events, coalescedLogs, err
		}
		// Create a new statedb using the parent block and report an
		// error if it fails.
		var parent *types.Block
		if i == 0 {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		} else {
			parent = chain[i-1]
		}

		// alias state.New because we introduce a variable named state on the next line
		stateNew := state.New

		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}

		// Quorum
		privateStateRoot := GetPrivateStateRoot(bc.db, parent.Root())
		privateState, err := stateNew(privateStateRoot, bc.privateStateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		// /Quorum

		// Process block using the parent state as reference point.
		receipts, privateReceipts, logs, usedGas, err := bc.processor.Process(block, state, privateState, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}

		// Quorum
		// Write private state changes to database
		if privateStateRoot, err = privateState.Commit(bc.Config().IsEIP158(block.Number())); err != nil {
			return i, events, coalescedLogs, err
		}
		if err := WritePrivateStateRoot(bc.db, block.Root(), privateStateRoot); err != nil {
			return i, events, coalescedLogs, err
		}
		// /Quorum

		allReceipts := mergeReceipts(receipts, privateReceipts)

		proctime := time.Since(bstart)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, allReceipts, state, privateState)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		if err := WritePrivateBlockBloom(bc.db, block.NumberU64(), privateReceipts); err != nil {
			return i, events, coalescedLogs, err
		}
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))

			coalescedLogs = append(coalescedLogs, logs...)
			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))

			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas

		cache, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, i, cache)
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return 0, events, coalescedLogs, nil
}

// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int           // block counters for the current reporting window
	usedGas                    uint64        // cumulative gas used by processed blocks
	lastIndex                  int           // index after the last reported block
	startTime                  mclock.AbsTime // start of the current reporting window
}

// statsReportLimit is the time limit during import and export after which we
// always print out progress.
This avoids the user wondering what's going on. 1331 const statsReportLimit = 8 * time.Second 1332 1333 // report prints statistics if some number of blocks have been processed 1334 // or more than a few seconds have passed since the last message. 1335 func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) { 1336 // Fetch the timings for the batch 1337 var ( 1338 now = mclock.Now() 1339 elapsed = time.Duration(now) - time.Duration(st.startTime) 1340 ) 1341 // If we're at the last block of the batch or report period reached, log 1342 if index == len(chain)-1 || elapsed >= statsReportLimit { 1343 var ( 1344 end = chain[index] 1345 txs = countTransactions(chain[st.lastIndex : index+1]) 1346 ) 1347 context := []interface{}{ 1348 "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, 1349 "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), 1350 "number", end.Number(), "hash", end.Hash(), 1351 } 1352 if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute { 1353 context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) 1354 } 1355 context = append(context, []interface{}{"cache", cache}...) 1356 1357 if st.queued > 0 { 1358 context = append(context, []interface{}{"queued", st.queued}...) 1359 } 1360 if st.ignored > 0 { 1361 context = append(context, []interface{}{"ignored", st.ignored}...) 1362 } 1363 log.Info("Imported new chain segment", context...) 
1364 1365 *st = insertStats{startTime: now, lastIndex: index + 1} 1366 } 1367 } 1368 1369 func countTransactions(chain []*types.Block) (c int) { 1370 for _, b := range chain { 1371 c += len(b.Transactions()) 1372 } 1373 return c 1374 } 1375 1376 // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them 1377 // to be part of the new canonical chain and accumulates potential missing transactions and post an 1378 // event about them 1379 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { 1380 var ( 1381 newChain types.Blocks 1382 oldChain types.Blocks 1383 commonBlock *types.Block 1384 deletedTxs types.Transactions 1385 deletedLogs []*types.Log 1386 // collectLogs collects the logs that were generated during the 1387 // processing of the block that corresponds with the given hash. 1388 // These logs are later announced as deleted. 1389 collectLogs = func(hash common.Hash) { 1390 // Coalesce logs and set 'Removed'. 1391 number := bc.hc.GetBlockNumber(hash) 1392 if number == nil { 1393 return 1394 } 1395 receipts := rawdb.ReadReceipts(bc.db, hash, *number) 1396 for _, receipt := range receipts { 1397 for _, log := range receipt.Logs { 1398 del := *log 1399 del.Removed = true 1400 deletedLogs = append(deletedLogs, &del) 1401 } 1402 } 1403 } 1404 ) 1405 1406 // first reduce whoever is higher bound 1407 if oldBlock.NumberU64() > newBlock.NumberU64() { 1408 // reduce old chain 1409 for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { 1410 oldChain = append(oldChain, oldBlock) 1411 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
1412 1413 collectLogs(oldBlock.Hash()) 1414 } 1415 } else { 1416 // reduce new chain and append new chain blocks for inserting later on 1417 for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { 1418 newChain = append(newChain, newBlock) 1419 } 1420 } 1421 if oldBlock == nil { 1422 return fmt.Errorf("Invalid old chain") 1423 } 1424 if newBlock == nil { 1425 return fmt.Errorf("Invalid new chain") 1426 } 1427 1428 for { 1429 if oldBlock.Hash() == newBlock.Hash() { 1430 commonBlock = oldBlock 1431 break 1432 } 1433 1434 oldChain = append(oldChain, oldBlock) 1435 newChain = append(newChain, newBlock) 1436 deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 1437 collectLogs(oldBlock.Hash()) 1438 1439 oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) 1440 if oldBlock == nil { 1441 return fmt.Errorf("Invalid old chain") 1442 } 1443 if newBlock == nil { 1444 return fmt.Errorf("Invalid new chain") 1445 } 1446 } 1447 // Ensure the user sees large reorgs 1448 if len(oldChain) > 0 && len(newChain) > 0 { 1449 logFn := log.Debug 1450 if len(oldChain) > 63 { 1451 logFn = log.Warn 1452 } 1453 logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), 1454 "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) 1455 } else { 1456 log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) 1457 } 1458 // Insert the new chain, taking care of the proper incremental order 1459 var addedTxs types.Transactions 1460 for i := len(newChain) - 1; i >= 0; i-- { 1461 // insert the block in the canonical way, re-writing history 1462 bc.insert(newChain[i]) 1463 // write lookup entries for hash based transaction/receipt 
searches 1464 rawdb.WriteTxLookupEntries(bc.db, newChain[i]) 1465 addedTxs = append(addedTxs, newChain[i].Transactions()...) 1466 } 1467 // calculate the difference between deleted and added transactions 1468 diff := types.TxDifference(deletedTxs, addedTxs) 1469 // When transactions get deleted from the database that means the 1470 // receipts that were created in the fork must also be deleted 1471 batch := bc.db.NewBatch() 1472 for _, tx := range diff { 1473 rawdb.DeleteTxLookupEntry(batch, tx.Hash()) 1474 } 1475 batch.Write() 1476 1477 if len(deletedLogs) > 0 { 1478 go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) 1479 } 1480 if len(oldChain) > 0 { 1481 go func() { 1482 for _, block := range oldChain { 1483 bc.chainSideFeed.Send(ChainSideEvent{Block: block}) 1484 } 1485 }() 1486 } 1487 1488 return nil 1489 } 1490 1491 // PostChainEvents iterates over the events generated by a chain insertion and 1492 // posts them into the event feed. 1493 // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock. 
1494 func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) { 1495 // post event logs for further processing 1496 if logs != nil { 1497 bc.logsFeed.Send(logs) 1498 } 1499 for _, event := range events { 1500 switch ev := event.(type) { 1501 case ChainEvent: 1502 bc.chainFeed.Send(ev) 1503 1504 case ChainHeadEvent: 1505 bc.chainHeadFeed.Send(ev) 1506 1507 case ChainSideEvent: 1508 bc.chainSideFeed.Send(ev) 1509 } 1510 } 1511 } 1512 1513 func (bc *BlockChain) update() { 1514 futureTimer := time.NewTicker(5 * time.Second) 1515 defer futureTimer.Stop() 1516 for { 1517 select { 1518 case <-futureTimer.C: 1519 bc.procFutureBlocks() 1520 case <-bc.quit: 1521 return 1522 } 1523 } 1524 } 1525 1526 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1527 func (bc *BlockChain) BadBlocks() []*types.Block { 1528 blocks := make([]*types.Block, 0, bc.badBlocks.Len()) 1529 for _, hash := range bc.badBlocks.Keys() { 1530 if blk, exist := bc.badBlocks.Peek(hash); exist { 1531 block := blk.(*types.Block) 1532 blocks = append(blocks, block) 1533 } 1534 } 1535 return blocks 1536 } 1537 1538 // HasBadBlock returns whether the block with the hash is a bad block. dep: Istanbul 1539 func (bc *BlockChain) HasBadBlock(hash common.Hash) bool { 1540 return bc.badBlocks.Contains(hash) 1541 } 1542 1543 // addBadBlock adds a bad block to the bad-block LRU cache 1544 func (bc *BlockChain) addBadBlock(block *types.Block) { 1545 bc.badBlocks.Add(block.Hash(), block) 1546 } 1547 1548 // reportBlock logs a bad block error. 
1549 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1550 bc.addBadBlock(block) 1551 1552 var receiptString string 1553 for _, receipt := range receipts { 1554 receiptString += fmt.Sprintf("\t%v\n", receipt) 1555 } 1556 log.Error(fmt.Sprintf(` 1557 ########## BAD BLOCK ######### 1558 Chain config: %v 1559 1560 Number: %v 1561 Hash: 0x%x 1562 %v 1563 1564 Error: %v 1565 ############################## 1566 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1567 } 1568 1569 // InsertHeaderChain attempts to insert the given header chain in to the local 1570 // chain, possibly creating a reorg. If an error is returned, it will return the 1571 // index number of the failing header as well an error describing what went wrong. 1572 // 1573 // The verify parameter can be used to fine tune whether nonce verification 1574 // should be done or not. The reason behind the optional check is because some 1575 // of the header retrieval mechanisms already need to verify nonces, as well as 1576 // because nonces can be verified sparsely, not needing to check each. 1577 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1578 start := time.Now() 1579 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1580 return i, err 1581 } 1582 1583 // Make sure only one thread manipulates the chain at once 1584 bc.chainmu.Lock() 1585 defer bc.chainmu.Unlock() 1586 1587 bc.wg.Add(1) 1588 defer bc.wg.Done() 1589 1590 whFunc := func(header *types.Header) error { 1591 bc.mu.Lock() 1592 defer bc.mu.Unlock() 1593 1594 _, err := bc.hc.WriteHeader(header) 1595 return err 1596 } 1597 1598 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1599 } 1600 1601 // writeHeader writes a header into the local chain, given that its parent is 1602 // already known. 
If the total difficulty of the newly inserted header becomes 1603 // greater than the current known TD, the canonical chain is re-routed. 1604 // 1605 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1606 // into the chain, as side effects caused by reorganisations cannot be emulated 1607 // without the real blocks. Hence, writing headers directly should only be done 1608 // in two scenarios: pure-header mode of operation (light clients), or properly 1609 // separated header/block phases (non-archive clients). 1610 func (bc *BlockChain) writeHeader(header *types.Header) error { 1611 bc.wg.Add(1) 1612 defer bc.wg.Done() 1613 1614 bc.mu.Lock() 1615 defer bc.mu.Unlock() 1616 1617 _, err := bc.hc.WriteHeader(header) 1618 return err 1619 } 1620 1621 // CurrentHeader retrieves the current head header of the canonical chain. The 1622 // header is retrieved from the HeaderChain's internal cache. 1623 func (bc *BlockChain) CurrentHeader() *types.Header { 1624 return bc.hc.CurrentHeader() 1625 } 1626 1627 // GetTd retrieves a block's total difficulty in the canonical chain from the 1628 // database by hash and number, caching it if found. 1629 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1630 return bc.hc.GetTd(hash, number) 1631 } 1632 1633 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1634 // database by hash, caching it if found. 1635 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1636 return bc.hc.GetTdByHash(hash) 1637 } 1638 1639 // GetHeader retrieves a block header from the database by hash and number, 1640 // caching it if found. 1641 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1642 return bc.hc.GetHeader(hash, number) 1643 } 1644 1645 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1646 // found. 
1647 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 1648 return bc.hc.GetHeaderByHash(hash) 1649 } 1650 1651 // HasHeader checks if a block header is present in the database or not, caching 1652 // it if present. 1653 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 1654 return bc.hc.HasHeader(hash, number) 1655 } 1656 1657 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 1658 // hash, fetching towards the genesis block. 1659 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 1660 return bc.hc.GetBlockHashesFromHash(hash, max) 1661 } 1662 1663 // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or 1664 // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the 1665 // number of blocks to be individually checked before we reach the canonical chain. 1666 // 1667 // Note: ancestor == 0 returns the same block, 1 returns its parent and so on. 1668 func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { 1669 bc.chainmu.Lock() 1670 defer bc.chainmu.Unlock() 1671 1672 return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) 1673 } 1674 1675 // GetHeaderByNumber retrieves a block header from the database by number, 1676 // caching it (associated with its hash) if found. 1677 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1678 return bc.hc.GetHeaderByNumber(number) 1679 } 1680 1681 // Config retrieves the blockchain's chain configuration. 1682 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1683 1684 // Engine retrieves the blockchain's consensus engine. 1685 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1686 1687 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 
1688 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1689 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1690 } 1691 1692 // SubscribeChainEvent registers a subscription of ChainEvent. 1693 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1694 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1695 } 1696 1697 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1698 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1699 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1700 } 1701 1702 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1703 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1704 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1705 } 1706 1707 // SubscribeLogsEvent registers a subscription of []*types.Log. 1708 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 1709 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 1710 }