github.com/aquanetwork/aquachain@v1.7.8/core/blockchain.go

// Copyright 2014 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the AquaChain consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hashicorp/golang-lru"
	"gitlab.com/aquachain/aquachain/aqua/event"
	"gitlab.com/aquachain/aquachain/aquadb"
	"gitlab.com/aquachain/aquachain/common"
	"gitlab.com/aquachain/aquachain/common/log"
	"gitlab.com/aquachain/aquachain/common/mclock"
	"gitlab.com/aquachain/aquachain/common/metrics"
	"gitlab.com/aquachain/aquachain/common/prque"
	"gitlab.com/aquachain/aquachain/consensus"
	"gitlab.com/aquachain/aquachain/core/state"
	"gitlab.com/aquachain/aquachain/core/types"
	"gitlab.com/aquachain/aquachain/core/vm"
	"gitlab.com/aquachain/aquachain/crypto"
	"gitlab.com/aquachain/aquachain/params"
	"gitlab.com/aquachain/aquachain/rlp"
	"gitlab.com/aquachain/aquachain/trie"
)

var (
	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)

	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
	triesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion = 1
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled      bool          // Whether to disable trie write caching (archive node)
	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}

// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two-stage Validator. Processing of blocks is done using the
// Processor, which executes the included transactions. Validation of the state
// is done in the second stage of the Validator; a failure aborts the import.
//
// The BlockChain also returns blocks from **any** chain included in the
// database as well as blocks that represent the canonical chain. It is
// important to note that GetBlock can return any block, which need not be part
// of the canonical chain, whereas GetBlockByNumber always refers to the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     aquadb.Database // Low level persistent database to store final content in
	triegc *prque.Prque    // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration   // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache   state.Database // State database to reuse between imports (contains state cache)
	bodyCache    *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	blockCache   *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be accessed atomically
	// procInterrupt must be accessed atomically
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks *lru.Cache // Bad block cache
}
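// Illustrative usage sketch: how a caller might open the chain and shut it
// down again. The db, chainConfig and engine values are assumed to be supplied
// by the surrounding node setup code; only identifiers from this package are
// used.
//
//	bc, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{})
//	if err != nil {
//		log.Crit("failed to open blockchain", "err", err)
//	}
//	defer bc.Stop()
//	head := bc.CurrentBlock()
//	log.Info("chain ready", "number", head.Number(), "hash", head.Hash())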
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default AquaChain Validator and
// Processor.
func NewBlockChain(db aquadb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieNodeLimit: 256 * 1024 * 1024,
			TrieTimeLimit: 5 * time.Minute,
		}
	}
	if chainConfig == nil {
		return nil, fmt.Errorf("nil config")
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:  chainConfig,
		cacheConfig:  cacheConfig,
		db:           db,
		triegc:       prque.New(nil),
		stateCache:   state.NewDatabase(db),
		quit:         make(chan struct{}),
		bodyCache:    bodyCache,
		bodyRLPCache: bodyRLPCache,
		blockCache:   blockCache,
		futureBlocks: futureBlocks,
		engine:       engine,
		vmConfig:     vmConfig,
		badBlocks:    badBlocks,
	}
	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}

func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := GetHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}

	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)

	return nil
}
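// Note on the three heads restored above: after a fast sync they can
// legitimately differ. The header chain typically runs ahead, the fast-sync
// block trails it, and the full block head trails both until state download
// catches up (see the currentFastBlock field comment in BlockChain).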
// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(hash common.Hash, num uint64) {
		DeleteBody(bc.db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either block reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()
	if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
		log.Crit("Failed to reset head full block", "err", err)
	}
	if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
		log.Crit("Failed to reset head fast block", "err", err)
	}
	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// regardless of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exist
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	b := bc.currentBlock.Load().(*types.Block)
	//b.SetVersion(bc.Config().GetBlockVersion(b.Number()))
	return b
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
func (bc *BlockChain) SetValidator(validator Validator) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.validator = validator
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}
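// Illustrative sketch of querying account data at the current head. The addr
// value is assumed to come from the caller, and GetBalance is assumed to be
// the usual accessor exposed by the state package's StateDB:
//
//	statedb, err := bc.State()
//	if err != nil {
//		return err
//	}
//	balance := statedb.GetBalance(addr) // *big.Int, in the smallest unit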
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	if err := WriteBlock(bc.db, genesis); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}

		if err := block.EncodeRLP(w); err != nil {
			return err
		}
	}

	return nil
}

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
		log.Crit("Failed to insert block number", "err", err)
	}
	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
		log.Crit("Failed to insert head block hash", "err", err)
	}
	bc.currentBlock.Store(block)

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())

		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
			log.Crit("Failed to insert head fast block hash", "err", err)
		}
		bc.currentFastBlock.Store(block)
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBlockVersion returns the version byte for the given height.
func (bc *BlockChain) GetBlockVersion(height *big.Int) params.HeaderVersion {
	return bc.Config().GetBlockVersion(height)
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	body := GetBodyNoVersion(bc.db, hash, bc.hc.GetBlockNumber(hash))
	if body == nil {
		return nil
	}

	for i := range body.Uncles {
		body.Uncles[i].Version = bc.GetBlockVersion(body.Uncles[0].Number) // only one version
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	ok, _ := bc.db.Has(blockBodyKey(hash, number))
	return ok
}

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	// block.SetVersion(bc.GetBlockVersion(block.Number()))
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found, adding the correct version.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		block := block.(*types.Block)
		if block.Version() == 0 {
			block.SetVersion(bc.Config().GetBlockVersion(block.Number()))
		}
		return block
	}
	block := GetBlockNoVersion(bc.db, hash, number)
	if block == nil {
		return nil
	}
	hashv := block.SetVersion(bc.Config().GetBlockVersion(block.Number()))
	// Cache the found block for next time and return
	bc.blockCache.Add(hashv, block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := GetCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash))
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by aqua/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				//fmt.Printf("number: %v\n", number-offset)
				recent := bc.GetBlockByNumber(number - offset)
				//fmt.Printf("Recent: %s\n", recent)
				//fmt.Printf("Root: %x\n", recent.Root())
				hash := recent.Hash()
				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", hash, "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
		}
		if size := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status of a block write operation.
type WriteStatus byte
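// Descriptive note on the values below, as they are used by WriteBlockWithState:
// NonStatTy means the block was not written (an error occurred), CanonStatTy
// means it was written as the new canonical head, and SideStatTy means it was
// written to a side (non-canonical) fork.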
const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts.
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
}
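// Worked example of the GasUsed derivation above (illustrative numbers only):
// for a block whose three receipts carry CumulativeGasUsed values of 21000,
// 74000 and 95000, the per-transaction GasUsed comes out as 21000,
// 53000 (74000-21000) and 21000 (95000-74000).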
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		SetReceiptsData(bc.chainConfig, block, receipts)
		// Write all the data out into the database
		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
			return i, fmt.Errorf("failed to write block body: %v", err)
		}
		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
			return i, fmt.Errorf("failed to write block receipts: %v", err)
		}
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
		}
		stats.processed++

		if batch.ValueSize() >= aquadb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
				log.Crit("Failed to update head fast block hash", "err", err)
			}
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	log.Info("Imported new block receipts",
		"count", stats.processed,
		"elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(),
		"hash", head.Hash(),
		"size", common.StorageSize(bytes),
		"ignored", stats.ignored)
	return 0, nil
}

// lastWrite tracks the highest block number whose state trie has been flushed
// to disk by the garbage collector in WriteBlockWithState.
var lastWrite uint64
// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	if err := WriteBlock(bc.db, block); err != nil {
		return err
	}
	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	if hf7 := bc.Config().GetHF(7); hf7 != nil && hf7.Cmp(currentBlock.Number()) == 0 {
		log.Info("Activating Hardfork", "HF", 7, "BlockNumber", hf7)
	}
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrespective of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	if err := WriteBlock(batch, block); err != nil {
		return NonStatTy, err
	}
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// Find the next state trie we need to commit
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()
			// Only write to disk if we exceeded our memory allowance *and* also have at
			// least a given number of tries gapped.
			var (
				size  = triedb.Size()
				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
			)
			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory {
					switch {
					case size >= 2*limit:
						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
				}
				// If optimum or critical limits reached, write to disk
				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash), common.Hash{})
			}
		}
	}
	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
		return NonStatTy, err
	}
	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then at random
		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction and receipt lookups
		if err := WriteTxLookupEntries(batch, block); err != nil {
			return NonStatTy, err
		}
		// Write hash preimages
		if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
			return NonStatTy, err
		}
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	n, events, logs, err := bc.insertChain(chain)
	bc.PostChainEvents(events, logs)
	return n, err
}
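// Illustrative sketch of feeding downloaded blocks into the chain. The blocks
// slice is assumed to come from a sync or import source, already ordered and
// linked:
//
//	if n, err := bc.InsertChain(blocks); err != nil {
//		log.Error("chain import failed", "index", n, "err", err)
//	}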
// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i-1].Version() == 0 {
			chain[i-1].SetVersion(bc.Config().GetBlockVersion(chain[i-1].Number()))
		}
		if chain[i].Version() == 0 {
			chain[i].SetVersion(bc.Config().GetBlockVersion(chain[i].Number()))
		}
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		if headers[i].Version == 0 {
			panic("header version not set")
		}
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Warn("Premature abort during blocks processing")
			return i, events, coalescedLogs, fmt.Errorf("aborted")
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		err := <-results
		if err == nil {
			block.Hash()
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
			// is exceeded the chain is discarded and processed at a later time if given.
			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
			if block.Time().Cmp(max) > 0 {
				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
			}
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
			bc.futureBlocks.Add(block.Hash(), block)
			stats.queued++
			continue

		case err == consensus.ErrPrunedAncestor:
			// Block competing with the canonical chain, store in the db, but don't process
			// until the competitor TD goes above the canonical TD
			currentBlock := bc.CurrentBlock()
			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
			if localTd.Cmp(externTd) > 0 {
				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
					return i, events, coalescedLogs, err
				}
				continue
			}
			// Competitor chain beat canonical, gather all blocks from the common ancestor
			var winner []*types.Block

			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
			for !bc.HasState(parent.Root()) {
				winner = append(winner, parent)
				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
			}
			for j := 0; j < len(winner)/2; j++ {
				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
			}
			// Import all the pruned blocks to make the state available
			bc.chainmu.Unlock()
			_, evs, logs, err := bc.insertChain(winner)
			bc.chainmu.Lock()
			events, coalescedLogs = evs, logs

			if err != nil {
				return i, events, coalescedLogs, err
			}

		case err != nil:
			bc.reportBlock(block, nil, err)
			return i, events, coalescedLogs, err
		}
		// Create a new statedb using the parent block and report an
		// error if it fails.
		var parent *types.Block
		if i == 0 {
			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		} else {
			parent = chain[i-1]
		}
		state, err := state.New(parent.Root(), bc.stateCache)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		// Validate the state using the default validator
		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return i, events, coalescedLogs, err
		}
		proctime := time.Since(bstart)

		// Write the block to the chain and get the status.
		status, err := bc.WriteBlockWithState(block, receipts, state)
		if err != nil {
			return i, events, coalescedLogs, err
		}
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))

			coalescedLogs = append(coalescedLogs, logs...)
			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))

			blockInsertTimer.UpdateSince(bstart)
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas
		stats.report(chain, i, bc.stateCache.TrieDB().Size())
	}
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return 0, events, coalescedLogs, nil
}

// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int
	usedGas                    uint64
	lastIndex                  int
	startTime                  mclock.AbsTime
}

// statsReportLimit is the time limit during import after which we always print
// out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = time.Duration(now) - time.Duration(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		var (
			end = chain[index]
			txs = countTransactions(chain[st.lastIndex : index+1])
		)
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(), "cache", cache,
		}
		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		if st.processed == 1 {
			context = append(context, []interface{}{"miner", chain[0].Coinbase()}...)
		}
		log.Info("Imported new chain segment", context...)

		*st = insertStats{startTime: now, lastIndex: index + 1}
	}
}

func countTransactions(chain []*types.Block) (c int) {
	for _, b := range chain {
		c += len(b.Transactions())
	}
	return c
}

// reorg takes two blocks, an old chain and a new chain, and reconstructs the
// blocks of the new chain to be part of the new canonical chain. It also
// accumulates the transactions and logs that go missing in the process and
// posts events about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block
		deletedTxs  types.Transactions
		deletedLogs []*types.Log
		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(h common.Hash) {
			// Coalesce logs and set 'Removed'.
			receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)

	// first reduce whichever chain is the higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)

			collectLogs(oldBlock.Hash())
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}
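	// Both chains are now at the same height: walk them back in lockstep, one
	// block per iteration, collecting dropped blocks, transactions and logs,
	// until the two hashes meet at the common ancestor.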
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}

		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())

		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 5 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])
		// write lookup entries for hash based transaction/receipt searches
		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
			return err
		}
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	for _, tx := range diff {
		DeleteTxLookupEntry(bc.db, tx.Hash())
	}
	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}

	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
type BadBlockArgs struct {
	Hash   common.Hash   `json:"hash"`
	Header *types.Header `json:"header"`
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network.
func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if hdr, exist := bc.badBlocks.Peek(hash); exist {
			header := hdr.(*types.Header)
			headers = append(headers, BadBlockArgs{header.Hash(), header})
		}
	}
	return headers, nil
}

// addBadBlock adds a bad block to the bad-block LRU cache.
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Header().Hash(), block.Header())
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)

	var receiptString string
	for _, receipt := range receipts {
		receiptString += fmt.Sprintf("\t%v\n", receipt)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v

Number: %v
Hash: 0x%x
Version: %v
%v

Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), block.Version(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well as an error describing what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()

	whFunc := func(header *types.Header) error {
		bc.mu.Lock()
		defer bc.mu.Unlock()

		_, err := bc.hc.WriteHeader(header)
		return err
	}

	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
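// Illustrative sketch of consuming head events from the subscription API above.
// The event payload is assumed to expose the new head block as ev.Block, and
// the subscription is assumed to provide the usual Err/Unsubscribe methods of
// the event package:
//
//	ch := make(chan ChainHeadEvent, 16)
//	sub := bc.SubscribeChainHeadEvent(ch)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case ev := <-ch:
//			log.Info("new head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
//		case err := <-sub.Err():
//			log.Warn("head subscription ended", "err", err)
//			return
//		}
//	}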