// Copyright 2014 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the AquaChain consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/aquanetwork/aquachain/aquadb"
	"github.com/aquanetwork/aquachain/common"
	"github.com/aquanetwork/aquachain/common/mclock"
	"github.com/aquanetwork/aquachain/consensus"
	"github.com/aquanetwork/aquachain/core/state"
	"github.com/aquanetwork/aquachain/core/types"
	"github.com/aquanetwork/aquachain/core/vm"
	"github.com/aquanetwork/aquachain/crypto"
	"github.com/aquanetwork/aquachain/event"
	"github.com/aquanetwork/aquachain/log"
	"github.com/aquanetwork/aquachain/metrics"
	"github.com/aquanetwork/aquachain/params"
	"github.com/aquanetwork/aquachain/rlp"
	"github.com/aquanetwork/aquachain/trie"
	"github.com/hashicorp/golang-lru"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
	// blockInsertTimer records block-insertion durations under the
	// metrics name "chain/inserts".
	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)

	// ErrNoGenesis is returned by NewBlockChain when the database does not
	// contain a genesis block (block number 0).
	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256 // entries kept in bodyCache and bodyRLPCache
	blockCacheLimit     = 256 // entries kept in blockCache
	maxFutureBlocks     = 256 // entries kept in futureBlocks for later processing
	maxTimeFutureBlocks = 30  // bound used when queuing future blocks; presumably seconds of drift — confirm against InsertChain
	badBlockLimit       = 10  // entries kept in the bad-block cache
	triesInMemory       = 128 // recent tries retained in memory before flushing (see Stop and WriteBlockWithState)

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion = 1
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled      bool          // Whether to disable trie write caching (archive node)
	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}

// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     aquadb.Database // Low level persistent database to store final content in
	triegc *prque.Prque    // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration   // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain // Canonical header chain (headers may run ahead of full blocks)
	rmLogsFeed    event.Feed   // Feed for removed-log events
	chainFeed     event.Feed   // Feed for canonical chain events
	chainSideFeed event.Feed   // Feed for side-chain events
	chainHeadFeed event.Feed   // Feed for new-head events
	logsFeed      event.Feed   // Feed for log events
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain (stores *types.Block)
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache   state.Database // State database to reuse between imports (contains state cache)
	bodyCache    *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	blockCache   *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks *lru.Cache // Bad block cache
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default AquaChain Validator and
// Processor.
136 func NewBlockChain(db aquadb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) { 137 if cacheConfig == nil { 138 cacheConfig = &CacheConfig{ 139 TrieNodeLimit: 256 * 1024 * 1024, 140 TrieTimeLimit: 5 * time.Minute, 141 } 142 } 143 bodyCache, _ := lru.New(bodyCacheLimit) 144 bodyRLPCache, _ := lru.New(bodyCacheLimit) 145 blockCache, _ := lru.New(blockCacheLimit) 146 futureBlocks, _ := lru.New(maxFutureBlocks) 147 badBlocks, _ := lru.New(badBlockLimit) 148 149 bc := &BlockChain{ 150 chainConfig: chainConfig, 151 cacheConfig: cacheConfig, 152 db: db, 153 triegc: prque.New(), 154 stateCache: state.NewDatabase(db), 155 quit: make(chan struct{}), 156 bodyCache: bodyCache, 157 bodyRLPCache: bodyRLPCache, 158 blockCache: blockCache, 159 futureBlocks: futureBlocks, 160 engine: engine, 161 vmConfig: vmConfig, 162 badBlocks: badBlocks, 163 } 164 bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) 165 bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) 166 167 var err error 168 bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt) 169 if err != nil { 170 return nil, err 171 } 172 bc.genesisBlock = bc.GetBlockByNumber(0) 173 if bc.genesisBlock == nil { 174 return nil, ErrNoGenesis 175 } 176 if err := bc.loadLastState(); err != nil { 177 return nil, err 178 } 179 // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain 180 for hash := range BadHashes { 181 if header := bc.GetHeaderByHash(hash); header != nil { 182 // get the canonical block corresponding to the offending header's number 183 headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) 184 // make sure the headerByNumber (if present) is in our current canonical chain 185 if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { 186 log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", 
header.ParentHash) 187 bc.SetHead(header.Number.Uint64() - 1) 188 log.Error("Chain rewind was successful, resuming normal operation") 189 } 190 } 191 } 192 // Take ownership of this particular state 193 go bc.update() 194 return bc, nil 195 } 196 197 func (bc *BlockChain) getProcInterrupt() bool { 198 return atomic.LoadInt32(&bc.procInterrupt) == 1 199 } 200 201 // loadLastState loads the last known chain state from the database. This method 202 // assumes that the chain manager mutex is held. 203 func (bc *BlockChain) loadLastState() error { 204 // Restore the last known head block 205 head := GetHeadBlockHash(bc.db) 206 if head == (common.Hash{}) { 207 // Corrupt or empty database, init from scratch 208 log.Warn("Empty database, resetting chain") 209 return bc.Reset() 210 } 211 // Make sure the entire head block is available 212 currentBlock := bc.GetBlockByHash(head) 213 if currentBlock == nil { 214 // Corrupt or empty database, init from scratch 215 log.Warn("Head block missing, resetting chain", "hash", head) 216 return bc.Reset() 217 } 218 // Make sure the state associated with the block is available 219 if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { 220 // Dangling block without a state associated, init from scratch 221 log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) 222 if err := bc.repair(¤tBlock); err != nil { 223 return err 224 } 225 } 226 // Everything seems to be fine, set as the head block 227 bc.currentBlock.Store(currentBlock) 228 229 // Restore the last known head header 230 currentHeader := currentBlock.Header() 231 if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) { 232 if header := bc.GetHeaderByHash(head); header != nil { 233 currentHeader = header 234 } 235 } 236 bc.hc.SetCurrentHeader(currentHeader) 237 238 // Restore the last known head fast block 239 bc.currentFastBlock.Store(currentBlock) 240 if head := GetHeadFastBlockHash(bc.db); head != 
(common.Hash{}) { 241 if block := bc.GetBlockByHash(head); block != nil { 242 bc.currentFastBlock.Store(block) 243 } 244 } 245 246 // Issue a status log for the user 247 currentFastBlock := bc.CurrentFastBlock() 248 249 headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) 250 blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) 251 fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) 252 log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd) 253 log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd) 254 log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd) 255 256 return nil 257 } 258 259 // SetHead rewinds the local chain to a new head. In the case of headers, everything 260 // above the new head will be deleted and the new one set. In the case of blocks 261 // though, the head may be further rewound if block bodies are missing (non-archive 262 // nodes after a fast sync). 
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(hash common.Hash, num uint64) {
		DeleteBody(bc.db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If the rewound head has no associated state (e.g. rolled back before a
	// fast-sync pivot), fall all the way back to genesis.
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	// Persist the (possibly rewound) head markers before reloading state.
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()
	if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
		log.Crit("Failed to reset head full block", "err", err)
	}
	if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
		log.Crit("Failed to reset head fast block", "err", err)
	}
	// Reload the remaining chain state from the database.
	// NOTE(review): loadLastState is called while bc.mu is held here, and its
	// own comment says it assumes the mutex is held — consistent, but confirm
	// no path inside re-locks bc.mu.
	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well at its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
359 func (bc *BlockChain) SetValidator(validator Validator) { 360 bc.procmu.Lock() 361 defer bc.procmu.Unlock() 362 bc.validator = validator 363 } 364 365 // Validator returns the current validator. 366 func (bc *BlockChain) Validator() Validator { 367 bc.procmu.RLock() 368 defer bc.procmu.RUnlock() 369 return bc.validator 370 } 371 372 // Processor returns the current processor. 373 func (bc *BlockChain) Processor() Processor { 374 bc.procmu.RLock() 375 defer bc.procmu.RUnlock() 376 return bc.processor 377 } 378 379 // State returns a new mutable state based on the current HEAD block. 380 func (bc *BlockChain) State() (*state.StateDB, error) { 381 return bc.StateAt(bc.CurrentBlock().Root()) 382 } 383 384 // StateAt returns a new mutable state based on a particular point in time. 385 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { 386 return state.New(root, bc.stateCache) 387 } 388 389 // Reset purges the entire blockchain, restoring it to its genesis state. 390 func (bc *BlockChain) Reset() error { 391 return bc.ResetWithGenesisBlock(bc.genesisBlock) 392 } 393 394 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the 395 // specified genesis state. 
396 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 397 // Dump the entire block chain and purge the caches 398 if err := bc.SetHead(0); err != nil { 399 return err 400 } 401 bc.mu.Lock() 402 defer bc.mu.Unlock() 403 404 // Prepare the genesis block and reinitialise the chain 405 if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { 406 log.Crit("Failed to write genesis block TD", "err", err) 407 } 408 if err := WriteBlock(bc.db, genesis); err != nil { 409 log.Crit("Failed to write genesis block", "err", err) 410 } 411 bc.genesisBlock = genesis 412 bc.insert(bc.genesisBlock) 413 bc.currentBlock.Store(bc.genesisBlock) 414 bc.hc.SetGenesis(bc.genesisBlock.Header()) 415 bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) 416 bc.currentFastBlock.Store(bc.genesisBlock) 417 418 return nil 419 } 420 421 // repair tries to repair the current blockchain by rolling back the current block 422 // until one with associated state is found. This is needed to fix incomplete db 423 // writes caused either by crashes/power outages, or simply non-committed tries. 424 // 425 // This method only rolls back the current block. The current header and current 426 // fast block are left intact. 427 func (bc *BlockChain) repair(head **types.Block) error { 428 for { 429 // Abort if we've rewound to a head block that does have associated state 430 if _, err := state.New((*head).Root(), bc.stateCache); err == nil { 431 log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) 432 return nil 433 } 434 // Otherwise rewind one block and recheck state availability there 435 (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) 436 } 437 } 438 439 // Export writes the active chain to the given writer. 
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	// Stream each canonical block, RLP-encoded, in ascending order.
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}

		if err := block.EncodeRLP(w); err != nil {
			return err
		}
	}

	return nil
}

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
		log.Crit("Failed to insert block number", "err", err)
	}
	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
		log.Crit("Failed to insert head block hash", "err", err)
	}
	bc.currentBlock.Store(block)

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())

		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
			log.Crit("Failed to insert head fast block hash", "err", err)
		}
		bc.currentFastBlock.Store(block)
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash))
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
522 func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { 523 // Short circuit if the body's already in the cache, retrieve otherwise 524 if cached, ok := bc.bodyRLPCache.Get(hash); ok { 525 return cached.(rlp.RawValue) 526 } 527 body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash)) 528 if len(body) == 0 { 529 return nil 530 } 531 // Cache the found body for next time and return 532 bc.bodyRLPCache.Add(hash, body) 533 return body 534 } 535 536 // HasBlock checks if a block is fully present in the database or not. 537 func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { 538 if bc.blockCache.Contains(hash) { 539 return true 540 } 541 ok, _ := bc.db.Has(blockBodyKey(hash, number)) 542 return ok 543 } 544 545 // HasState checks if state trie is fully present in the database or not. 546 func (bc *BlockChain) HasState(hash common.Hash) bool { 547 _, err := bc.stateCache.OpenTrie(hash) 548 return err == nil 549 } 550 551 // HasBlockAndState checks if a block and associated state trie is fully present 552 // in the database or not, caching it if present. 553 func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { 554 // Check first that the block itself is known 555 block := bc.GetBlock(hash, number) 556 if block == nil { 557 return false 558 } 559 return bc.HasState(block.Root()) 560 } 561 562 // GetBlock retrieves a block from the database by hash and number, 563 // caching it if found. 
564 func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { 565 // Short circuit if the block's already in the cache, retrieve otherwise 566 if block, ok := bc.blockCache.Get(hash); ok { 567 return block.(*types.Block) 568 } 569 block := GetBlock(bc.db, hash, number) 570 if block == nil { 571 return nil 572 } 573 // Cache the found block for next time and return 574 bc.blockCache.Add(block.Hash(), block) 575 return block 576 } 577 578 // GetBlockByHash retrieves a block from the database by hash, caching it if found. 579 func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { 580 return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash)) 581 } 582 583 // GetBlockByNumber retrieves a block from the database by number, caching it 584 // (associated with its hash) if found. 585 func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { 586 hash := GetCanonicalHash(bc.db, number) 587 if hash == (common.Hash{}) { 588 return nil 589 } 590 return bc.GetBlock(hash, number) 591 } 592 593 // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 594 func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { 595 return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash)) 596 } 597 598 // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 599 // [deprecated by aqua/62] 600 func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { 601 number := bc.hc.GetBlockNumber(hash) 602 for i := 0; i < n; i++ { 603 block := bc.GetBlock(hash, number) 604 if block == nil { 605 break 606 } 607 blocks = append(blocks, block) 608 hash = block.ParentHash() 609 number-- 610 } 611 return 612 } 613 614 // GetUnclesInChain retrieves all the uncles from a given block backwards until 615 // a specific distance is reached. 
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	// Walk backwards through the ancestry, collecting uncles at each step.
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	// Only the first caller proceeds; subsequent Stop calls are no-ops.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	// Wait for any in-flight processing to observe the interrupt and finish.
	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		// Drop every remaining reference queued for garbage collection.
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
		}
		if size := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

// procFutureBlocks drains the future-block cache and attempts to insert the
// queued blocks in ascending block-number order.
func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus status of write
type WriteStatus byte

const (
	NonStatTy  WriteStatus = iota // not written
	CanonStatTy                   // written as part of the canonical chain
	SideStatTy                    // written as a side-chain block
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Walk the hashes newest-first, stepping each head marker back to its
	// parent whenever it matches a rolled-back hash.
	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			// NOTE(review): GetBlock may return nil if the parent body is
			// missing; the nil would be stored and Hash() would panic later —
			// confirm callers guarantee parent availability.
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0                 // total payload bytes written via the batch
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		SetReceiptsData(bc.chainConfig, block, receipts)
		// Write all the data out into the database
		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
			return i, fmt.Errorf("failed to write block body: %v", err)
		}
		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
			return i, fmt.Errorf("failed to write block receipts: %v", err)
		}
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
		}
		stats.processed++

		// Flush the batch periodically to bound memory usage.
		if batch.ValueSize() >= aquadb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
				log.Crit("Failed to update head fast block hash", "err", err)
			}
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	log.Info("Imported new block receipts",
		"count", stats.processed,
		"elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(),
		"hash", head.Hash(),
		"size", common.StorageSize(bytes),
		"ignored", stats.ignored)
	return 0, nil
}

var lastWrite uint64

// WriteBlockWithoutState
// writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Persist the externally supplied total difficulty first, then the block.
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	if err := WriteBlock(bc.db, block); err != nil {
		return err
	}
	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
//
// The returned status is CanonStatTy when the block extended (or reorganised
// into) the canonical chain, SideStatTy when it was stored as a side fork, and
// NonStatTy on error.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	if err := WriteBlock(batch, block); err != nil {
		return NonStatTy, err
	}
	// Commit the mutated state into the in-memory trie database.
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -float32(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// Find the next state trie we need to commit.
			// NOTE(review): GetHeaderByNumber could return nil (e.g. after a
			// deep rewind), which would panic on the next line — confirm the
			// canonical header for current-triesInMemory always exists here.
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()

			// Only write to disk if we exceeded our memory allowance *and* also have at
			// least a given number of tries gapped.
			var (
				size  = triedb.Size()
				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory {
					switch {
					case size >= 2*limit:
						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
				}
				// If optimum or critical limits reached, write to disk.
				// NOTE(review): the error returned by Commit is discarded
				// here; a failed flush is silently treated as success.
				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention.
			// Priorities are negated block numbers, so -number is the height.
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash), common.Hash{})
			}
		}
	}
	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
		return NonStatTy, err
	}
	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then at random
		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction and receipt lookups
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return NonStatTy, err
		}
		// Write hash preimages
		if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
			return NonStatTy, err
		}
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	n, events, logs, err := bc.insertChain(chain)
	bc.PostChainEvents(events, logs)
	return n, err
}

// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		err := <-results
		if err == nil {
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
			// the chain is discarded and processed at a later time if given.
			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
			if block.Time().Cmp(max) > 0 {
				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
			}
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
			// Parent is itself queued as a future block; queue this one too.
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrPrunedAncestor:
			// Block competing with the canonical chain, store in the db, but don't process
			// until the competitor TD goes above the canonical TD
			currentBlock := bc.CurrentBlock()
			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
			if localTd.Cmp(externTd) > 0 {
				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
					return i, events, coalescedLogs, err
				}
				continue
			}
			// Competitor chain beat canonical, gather all blocks from the common ancestor
			var winner []*types.Block

			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
			for !bc.HasState(parent.Root()) {
				winner = append(winner, parent)
				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
			}
			// Reverse winner in place so blocks are imported oldest-first.
			for j := 0; j < len(winner)/2; j++ {
				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
			}
			// Import all the pruned blocks to make the state available.
			// The chain lock is released around the recursive import to avoid
			// a self-deadlock on chainmu.
			bc.chainmu.Unlock()
			_, evs, logs, err := bc.insertChain(winner)
			bc.chainmu.Lock()
			// NOTE(review): events/coalescedLogs accumulated so far are
			// replaced (not appended to) by the recursive import's results —
			// confirm this overwrite is intended.
			events, coalescedLogs = evs, logs

			if err != nil {
				return i, events, coalescedLogs, err
			}

		case err != nil:
			bc.reportBlock(block, nil, err)
			return i, events, coalescedLogs, err
		}
		// Create a new statedb using the parent block and report an
		// error if it fails.
		var parent *types.Block
		if i == 0 {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		} else {
			parent = chain[i-1]
		}
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		proctime := time.Since(bstart)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))

			coalescedLogs = append(coalescedLogs, logs...)
			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))

			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas
		stats.report(chain, i, bc.stateCache.TrieDB().Size())
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return 0, events, coalescedLogs, nil
}

// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int            // per-window block counters
	usedGas                    uint64         // cumulative gas consumed in the window
	lastIndex                  int            // chain index where the current window starts
	startTime                  mclock.AbsTime // window start, for elapsed/throughput
}

// statsReportLimit is the time limit during import after which we always print
// out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = time.Duration(now) - time.Duration(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		var (
			end = chain[index]
			txs = countTransactions(chain[st.lastIndex : index+1])
		)
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(), "cache", cache,
		}
		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		log.Info("Imported new chain segment", context...)

		// Reset the stats window so the next report starts fresh.
		*st = insertStats{startTime: now, lastIndex: index + 1}
	}
}

// countTransactions sums the transaction counts of all blocks in chain.
func countTransactions(chain []*types.Block) (c int) {
	for _, b := range chain {
		c += len(b.Transactions())
	}
	return c
}

// reorg takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// to be part of the new canonical chain and accumulates potential missing transactions and post an
// event about them
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block
		deletedTxs  types.Transactions
		deletedLogs []*types.Log
		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(h common.Hash) {
			// Coalesce logs and set 'Removed'.
			receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					// Copy the log before flagging so the stored receipt's
					// log is left untouched.
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)

	// first reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

			collectLogs(oldBlock.Hash())
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}

	// Both sides are now at the same height: walk both back in lockstep until
	// the common ancestor is found.
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}

		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())

		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order.
	// newChain was collected newest-first, so iterate it backwards.
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])
		// write lookup entries for hash based transaction/receipt searches
		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
			return err
		}
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		DeleteTxLookupEntry(bc.db, tx.Hash())
	}
	// NOTE(review): the two event sends below run on fresh goroutines with no
	// synchronization against further chain mutation — subscribers may observe
	// them after later inserts; confirm this ordering is acceptable.
	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}

	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

// update periodically retries queued future blocks until the chain shuts down.
func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
1385 type BadBlockArgs struct { 1386 Hash common.Hash `json:"hash"` 1387 Header *types.Header `json:"header"` 1388 } 1389 1390 // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network 1391 func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) { 1392 headers := make([]BadBlockArgs, 0, bc.badBlocks.Len()) 1393 for _, hash := range bc.badBlocks.Keys() { 1394 if hdr, exist := bc.badBlocks.Peek(hash); exist { 1395 header := hdr.(*types.Header) 1396 headers = append(headers, BadBlockArgs{header.Hash(), header}) 1397 } 1398 } 1399 return headers, nil 1400 } 1401 1402 // addBadBlock adds a bad block to the bad-block LRU cache 1403 func (bc *BlockChain) addBadBlock(block *types.Block) { 1404 bc.badBlocks.Add(block.Header().Hash(), block.Header()) 1405 } 1406 1407 // reportBlock logs a bad block error. 1408 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { 1409 bc.addBadBlock(block) 1410 1411 var receiptString string 1412 for _, receipt := range receipts { 1413 receiptString += fmt.Sprintf("\t%v\n", receipt) 1414 } 1415 log.Error(fmt.Sprintf(` 1416 ########## BAD BLOCK ######### 1417 Chain config: %v 1418 1419 Number: %v 1420 Hash: 0x%x 1421 %v 1422 1423 Error: %v 1424 ############################## 1425 `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err)) 1426 } 1427 1428 // InsertHeaderChain attempts to insert the given header chain in to the local 1429 // chain, possibly creating a reorg. If an error is returned, it will return the 1430 // index number of the failing header as well an error describing what went wrong. 1431 // 1432 // The verify parameter can be used to fine tune whether nonce verification 1433 // should be done or not. The reason behind the optional check is because some 1434 // of the header retrieval mechanisms already need to verify nonces, as well as 1435 // because nonces can be verified sparsely, not needing to check each. 
1436 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { 1437 start := time.Now() 1438 if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { 1439 return i, err 1440 } 1441 1442 // Make sure only one thread manipulates the chain at once 1443 bc.chainmu.Lock() 1444 defer bc.chainmu.Unlock() 1445 1446 bc.wg.Add(1) 1447 defer bc.wg.Done() 1448 1449 whFunc := func(header *types.Header) error { 1450 bc.mu.Lock() 1451 defer bc.mu.Unlock() 1452 1453 _, err := bc.hc.WriteHeader(header) 1454 return err 1455 } 1456 1457 return bc.hc.InsertHeaderChain(chain, whFunc, start) 1458 } 1459 1460 // writeHeader writes a header into the local chain, given that its parent is 1461 // already known. If the total difficulty of the newly inserted header becomes 1462 // greater than the current known TD, the canonical chain is re-routed. 1463 // 1464 // Note: This method is not concurrent-safe with inserting blocks simultaneously 1465 // into the chain, as side effects caused by reorganisations cannot be emulated 1466 // without the real blocks. Hence, writing headers directly should only be done 1467 // in two scenarios: pure-header mode of operation (light clients), or properly 1468 // separated header/block phases (non-archive clients). 1469 func (bc *BlockChain) writeHeader(header *types.Header) error { 1470 bc.wg.Add(1) 1471 defer bc.wg.Done() 1472 1473 bc.mu.Lock() 1474 defer bc.mu.Unlock() 1475 1476 _, err := bc.hc.WriteHeader(header) 1477 return err 1478 } 1479 1480 // CurrentHeader retrieves the current head header of the canonical chain. The 1481 // header is retrieved from the HeaderChain's internal cache. 1482 func (bc *BlockChain) CurrentHeader() *types.Header { 1483 return bc.hc.CurrentHeader() 1484 } 1485 1486 // GetTd retrieves a block's total difficulty in the canonical chain from the 1487 // database by hash and number, caching it if found. 
1488 func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int { 1489 return bc.hc.GetTd(hash, number) 1490 } 1491 1492 // GetTdByHash retrieves a block's total difficulty in the canonical chain from the 1493 // database by hash, caching it if found. 1494 func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int { 1495 return bc.hc.GetTdByHash(hash) 1496 } 1497 1498 // GetHeader retrieves a block header from the database by hash and number, 1499 // caching it if found. 1500 func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header { 1501 return bc.hc.GetHeader(hash, number) 1502 } 1503 1504 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 1505 // found. 1506 func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header { 1507 return bc.hc.GetHeaderByHash(hash) 1508 } 1509 1510 // HasHeader checks if a block header is present in the database or not, caching 1511 // it if present. 1512 func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool { 1513 return bc.hc.HasHeader(hash, number) 1514 } 1515 1516 // GetBlockHashesFromHash retrieves a number of block hashes starting at a given 1517 // hash, fetching towards the genesis block. 1518 func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { 1519 return bc.hc.GetBlockHashesFromHash(hash, max) 1520 } 1521 1522 // GetHeaderByNumber retrieves a block header from the database by number, 1523 // caching it (associated with its hash) if found. 1524 func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { 1525 return bc.hc.GetHeaderByNumber(number) 1526 } 1527 1528 // Config retrieves the blockchain's chain configuration. 1529 func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig } 1530 1531 // Engine retrieves the blockchain's consensus engine. 
1532 func (bc *BlockChain) Engine() consensus.Engine { return bc.engine } 1533 1534 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. 1535 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { 1536 return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) 1537 } 1538 1539 // SubscribeChainEvent registers a subscription of ChainEvent. 1540 func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription { 1541 return bc.scope.Track(bc.chainFeed.Subscribe(ch)) 1542 } 1543 1544 // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent. 1545 func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { 1546 return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch)) 1547 } 1548 1549 // SubscribeChainSideEvent registers a subscription of ChainSideEvent. 1550 func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription { 1551 return bc.scope.Track(bc.chainSideFeed.Subscribe(ch)) 1552 } 1553 1554 // SubscribeLogsEvent registers a subscription of []*types.Log. 1555 func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 1556 return bc.scope.Track(bc.logsFeed.Subscribe(ch)) 1557 }