github.com/isti4github/eth-ecc@v0.0.0-20201227085832-c337f2d99319/core/blockchain.go (about)

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/Onther-Tech/go-ethereum/common"
    31  	"github.com/Onther-Tech/go-ethereum/common/mclock"
    32  	"github.com/Onther-Tech/go-ethereum/common/prque"
    33  	"github.com/Onther-Tech/go-ethereum/consensus"
    34  	"github.com/Onther-Tech/go-ethereum/core/rawdb"
    35  	"github.com/Onther-Tech/go-ethereum/core/state"
    36  	"github.com/Onther-Tech/go-ethereum/core/types"
    37  	"github.com/Onther-Tech/go-ethereum/core/vm"
    38  	"github.com/Onther-Tech/go-ethereum/ethdb"
    39  	"github.com/Onther-Tech/go-ethereum/event"
    40  	"github.com/Onther-Tech/go-ethereum/log"
    41  	"github.com/Onther-Tech/go-ethereum/metrics"
    42  	"github.com/Onther-Tech/go-ethereum/params"
    43  	"github.com/Onther-Tech/go-ethereum/rlp"
    44  	"github.com/Onther-Tech/go-ethereum/trie"
    45  	"github.com/hashicorp/golang-lru"
    46  )
    47  
    48  var (
    49  	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
    50  	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
    51  	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
    52  
    53  	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
    54  	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
    55  	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
    56  	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
    57  
    58  	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
    59  	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
    60  	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
    61  	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
    62  
    63  	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
    64  	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
    65  	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
    66  	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)
    67  	blockReorgAddMeter   = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
    68  	blockReorgDropMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
    69  
    70  	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
    71  	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
    72  
    73  	errInsertionInterrupted = errors.New("insertion is interrupted")
    74  )
    75  
const (
	bodyCacheLimit      = 256  // Maximum number of block bodies kept in bodyCache/bodyRLPCache
	blockCacheLimit     = 256  // Maximum number of entire blocks kept in blockCache
	receiptsCacheLimit  = 32   // Maximum number of per-block receipt lists kept in receiptsCache
	txLookupCacheLimit  = 1024 // Maximum number of tx lookup entries kept in txLookupCache
	maxFutureBlocks     = 256  // Maximum number of not-yet-processable blocks queued in futureBlocks
	maxTimeFutureBlocks = 30   // Max seconds a block's timestamp may be ahead before it's rejected outright
	badBlockLimit       = 10   // Maximum number of known-bad blocks remembered in badBlocks
	TriesInMemory       = 128  // Number of recent tries kept in memory before flushing/GC kicks in

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//   * the `Bloom` field of receipt is deleted
	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//  The following incompatible database changes were added:
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//      receipts' corresponding block
	// - Version 6
	//  The following incompatible database changes were added:
	//    * Transaction lookup information stores the corresponding block number instead of block hash
	// - Version 7
	//  The following incompatible database changes were added:
	//    * Use freezer as the ancient database to maintain all ancient data
	BlockChainVersion uint64 = 7
)
   108  
   109  // CacheConfig contains the configuration values for the trie caching/pruning
   110  // that's resident in a blockchain.
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain. The zero value disables all caching, so
// NewBlockChain substitutes sensible defaults when nil is passed.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
}
   118  
   119  // BlockChain represents the canonical chain given a database with a genesis
   120  // block. The Blockchain manages chain imports, reverts, chain reorganisations.
   121  //
   122  // Importing blocks in to the block chain happens according to the set of rules
   123  // defined by the two stage Validator. Processing of blocks is done using the
   124  // Processor which processes the included transaction. The validation of the state
   125  // is done in the second part of the Validator. Failing results in aborting of
   126  // the import.
   127  //
   128  // The BlockChain also helps in returning blocks from **any** chain included
   129  // in the database as well as blocks that represents the canonical chain. It's
   130  // important to note that GetBlock can return any block and does not need to be
   131  // included in the canonical one where as GetBlockByNumber always represents the
   132  // canonical chain.
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain // Header-only chain shared with light-sync logic
	rmLogsFeed    event.Feed   // Event feeds for subscribers (logs removed on reorg, new chain, side chain, head, logs, processing)
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope // Tracks all feed subscriptions for bulk teardown
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock

	// Both heads are stored as atomic.Value holding *types.Block so readers
	// never need to take chainmu.
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine     consensus.Engine
	validator  Validator  // Block and state validator interface
	prefetcher Prefetcher // Block state prefetcher interface
	processor  Processor  // Block transaction processor interface
	vmConfig   vm.Config

	badBlocks       *lru.Cache                     // Bad block cache
	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
}
   180  
   181  // NewBlockChain returns a fully initialised block chain using information
   182  // available in the database. It initialises the default Ethereum Validator and
   183  // Processor.
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
//
// shouldPreserve may be nil; it is only consulted during reorgs. The returned
// chain owns a background goroutine (bc.update) that processes future blocks.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	// Fall back to conservative defaults when no cache config is supplied.
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	// lru.New only errors on non-positive sizes; all limits here are constants > 0.
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	txLookupCache, _ := lru.New(txLookupCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		txLookupCache:  txLookupCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	// The genesis block must exist before any other state can be loaded.
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	// Initialize the chain with ancient data if it isn't empty.
	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (ethash cache or clique voting snapshot). Might as well do
	// it in advance.
	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

	// Reconcile the active store with the ancient (freezer) store: if either
	// head lags behind the frozen boundary, roll the chain back to match.
	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
			needRewind = true
			low = fullBlock.NumberU64()
		}
		// In fast sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated, truncate the
		// extra data here.
		fastBlock := bc.CurrentFastBlock()
		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
			needRewind = true
			if fastBlock.NumberU64() < low || low == 0 {
				low = fastBlock.NumberU64()
			}
		}
		if needRewind {
			var hashes []common.Hash
			previous := bc.CurrentHeader().Number.Uint64()
			// Collect the canonical hashes above the rewind point so their
			// associated data can be rolled back.
			for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
				hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
			}
			bc.Rollback(hashes)
			log.Warn("Truncate ancient chain", "from", previous, "to", low)
		}
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}
   293  
   294  func (bc *BlockChain) getProcInterrupt() bool {
   295  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   296  }
   297  
   298  // GetVMConfig returns the block chain VM config.
   299  func (bc *BlockChain) GetVMConfig() *vm.Config {
   300  	return &bc.vmConfig
   301  }
   302  
   303  // empty returns an indicator whether the blockchain is empty.
   304  // Note, it's a special case that we connect a non-empty ancient
   305  // database with an empty node, so that we can plugin the ancient
   306  // into node seamlessly.
   307  func (bc *BlockChain) empty() bool {
   308  	genesis := bc.genesisBlock.Hash()
   309  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   310  		if hash != genesis {
   311  			return false
   312  		}
   313  	}
   314  	return true
   315  }
   316  
   317  // loadLastState loads the last known chain state from the database. This method
   318  // assumes that the chain manager mutex is held.
   319  func (bc *BlockChain) loadLastState() error {
   320  	// Restore the last known head block
   321  	head := rawdb.ReadHeadBlockHash(bc.db)
   322  	if head == (common.Hash{}) {
   323  		// Corrupt or empty database, init from scratch
   324  		log.Warn("Empty database, resetting chain")
   325  		return bc.Reset()
   326  	}
   327  	// Make sure the entire head block is available
   328  	currentBlock := bc.GetBlockByHash(head)
   329  	if currentBlock == nil {
   330  		// Corrupt or empty database, init from scratch
   331  		log.Warn("Head block missing, resetting chain", "hash", head)
   332  		return bc.Reset()
   333  	}
   334  	// Make sure the state associated with the block is available
   335  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   336  		// Dangling block without a state associated, init from scratch
   337  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   338  		if err := bc.repair(&currentBlock); err != nil {
   339  			return err
   340  		}
   341  		rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
   342  	}
   343  	// Everything seems to be fine, set as the head block
   344  	bc.currentBlock.Store(currentBlock)
   345  	headBlockGauge.Update(int64(currentBlock.NumberU64()))
   346  
   347  	// Restore the last known head header
   348  	currentHeader := currentBlock.Header()
   349  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   350  		if header := bc.GetHeaderByHash(head); header != nil {
   351  			currentHeader = header
   352  		}
   353  	}
   354  	bc.hc.SetCurrentHeader(currentHeader)
   355  
   356  	// Restore the last known head fast block
   357  	bc.currentFastBlock.Store(currentBlock)
   358  	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))
   359  
   360  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   361  		if block := bc.GetBlockByHash(head); block != nil {
   362  			bc.currentFastBlock.Store(block)
   363  			headFastBlockGauge.Update(int64(block.NumberU64()))
   364  		}
   365  	}
   366  	// Issue a status log for the user
   367  	currentFastBlock := bc.CurrentFastBlock()
   368  
   369  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   370  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   371  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   372  
   373  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
   374  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
   375  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
   376  
   377  	return nil
   378  }
   379  
   380  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   381  // above the new head will be deleted and the new one set. In the case of blocks
   382  // though, the head may be further rewound if block bodies are missing (non-archive
   383  // nodes after a fast sync).
// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
//
// The actual rewinding is delegated to hc.SetHead, which invokes updateFn to
// reposition the block/fast-block heads and delFn to delete per-block data.
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// updateFn is called by hc.SetHead with the new head header; it lowers the
	// full-block and fast-block heads to match, never raising them.
	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
		// Rewind the block chain, ensuring we don't end up with a stateless head block
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			if newHeadBlock == nil {
				newHeadBlock = bc.genesisBlock
			} else {
				if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
					// Rewound state missing, rolled back to before pivot, reset to genesis
					newHeadBlock = bc.genesisBlock
				}
			}
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
		}

		// Rewind the fast block in a simpleton way to the target head
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If either blocks reached nil, reset to the genesis state
			if newHeadFastBlock == nil {
				newHeadFastBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())
			bc.currentFastBlock.Store(newHeadFastBlock)
			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
		}
	}

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// Truncate all relative data(header, total difficulty, body, receipt
			// and canonical hash) from ancient store.
			if err := bc.db.TruncateAncients(num + 1); err != nil {
				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
			}

			// Remove the hash <-> number mapping from the active store.
			rawdb.DeleteHeaderNumber(db, hash)
		} else {
			// Remove relative body and receipts from the active store.
			// The header, total difficulty and canonical hash will be
			// removed in the hc.SetHead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, bloombits, etc
	}
	bc.hc.SetHead(head, updateFn, delFn)

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	// Reload the (possibly further-rewound) heads from the database.
	return bc.loadLastState()
}
   454  
   455  // FastSyncCommitHead sets the current head block to the one defined by the hash
   456  // irrelevant what the chain contents were prior.
   457  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   458  	// Make sure that both the block as well at its state trie exists
   459  	block := bc.GetBlockByHash(hash)
   460  	if block == nil {
   461  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   462  	}
   463  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
   464  		return err
   465  	}
   466  	// If all checks out, manually set the head block
   467  	bc.chainmu.Lock()
   468  	bc.currentBlock.Store(block)
   469  	headBlockGauge.Update(int64(block.NumberU64()))
   470  	bc.chainmu.Unlock()
   471  
   472  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   473  	return nil
   474  }
   475  
   476  // GasLimit returns the gas limit of the current HEAD block.
   477  func (bc *BlockChain) GasLimit() uint64 {
   478  	return bc.CurrentBlock().GasLimit()
   479  }
   480  
   481  // CurrentBlock retrieves the current head block of the canonical chain. The
   482  // block is retrieved from the blockchain's internal cache.
   483  func (bc *BlockChain) CurrentBlock() *types.Block {
   484  	return bc.currentBlock.Load().(*types.Block)
   485  }
   486  
   487  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   488  // chain. The block is retrieved from the blockchain's internal cache.
   489  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   490  	return bc.currentFastBlock.Load().(*types.Block)
   491  }
   492  
   493  // Validator returns the current validator.
   494  func (bc *BlockChain) Validator() Validator {
   495  	return bc.validator
   496  }
   497  
   498  // Processor returns the current processor.
   499  func (bc *BlockChain) Processor() Processor {
   500  	return bc.processor
   501  }
   502  
   503  // State returns a new mutable state based on the current HEAD block.
   504  func (bc *BlockChain) State() (*state.StateDB, error) {
   505  	return bc.StateAt(bc.CurrentBlock().Root())
   506  }
   507  
   508  // StateAt returns a new mutable state based on a particular point in time.
   509  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   510  	return state.New(root, bc.stateCache)
   511  }
   512  
   513  // StateCache returns the caching database underpinning the blockchain instance.
   514  func (bc *BlockChain) StateCache() state.Database {
   515  	return bc.stateCache
   516  }
   517  
   518  // Reset purges the entire blockchain, restoring it to its genesis state.
   519  func (bc *BlockChain) Reset() error {
   520  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   521  }
   522  
   523  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   524  // specified genesis state.
// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
//
// Note: SetHead(0) takes (and releases) chainmu internally, so the lock is
// only re-acquired afterwards for the reinitialisation writes.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain. The TD must be
	// written before the block is inserted as the new head.
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))

	// Point the header chain and fast-sync head at genesis as well.
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)
	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))

	return nil
}
   551  
   552  // repair tries to repair the current blockchain by rolling back the current block
   553  // until one with associated state is found. This is needed to fix incomplete db
   554  // writes caused either by crashes/power outages, or simply non-committed tries.
   555  //
   556  // This method only rolls back the current block. The current header and current
   557  // fast block are left intact.
   558  func (bc *BlockChain) repair(head **types.Block) error {
   559  	for {
   560  		// Abort if we've rewound to a head block that does have associated state
   561  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   562  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   563  			return nil
   564  		}
   565  		// Otherwise rewind one block and recheck state availability there
   566  		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   567  		if block == nil {
   568  			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
   569  		}
   570  		*head = block
   571  	}
   572  }
   573  
   574  // Export writes the active chain to the given writer.
   575  func (bc *BlockChain) Export(w io.Writer) error {
   576  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   577  }
   578  
   579  // ExportN writes a subset of the active chain to the given writer.
   580  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   581  	bc.chainmu.RLock()
   582  	defer bc.chainmu.RUnlock()
   583  
   584  	if first > last {
   585  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   586  	}
   587  	log.Info("Exporting batch of blocks", "count", last-first+1)
   588  
   589  	start, reported := time.Now(), time.Now()
   590  	for nr := first; nr <= last; nr++ {
   591  		block := bc.GetBlockByNumber(nr)
   592  		if block == nil {
   593  			return fmt.Errorf("export failed on #%d: not found", nr)
   594  		}
   595  		if err := block.EncodeRLP(w); err != nil {
   596  			return err
   597  		}
   598  		if time.Since(reported) >= statsReportLimit {
   599  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   600  			reported = time.Now()
   601  		}
   602  	}
   603  	return nil
   604  }
   605  
   606  // insert injects a new head block into the current block chain. This method
   607  // assumes that the block is indeed a true head. It will also reset the head
   608  // header and the head fast sync block to this very same block if they are older
   609  // or if they are on a different side chain.
   610  //
   611  // Note, this function assumes that the `mu` mutex is held!
   612  func (bc *BlockChain) insert(block *types.Block) {
   613  	// If the block is on a side chain or an unknown one, force other heads onto it too
   614  	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   615  
   616  	// Add the block to the canonical chain number scheme and mark as the head
   617  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
   618  	rawdb.WriteHeadBlockHash(bc.db, block.Hash())
   619  
   620  	bc.currentBlock.Store(block)
   621  	headBlockGauge.Update(int64(block.NumberU64()))
   622  
   623  	// If the block is better than our head or is on a different chain, force update heads
   624  	if updateHeads {
   625  		bc.hc.SetCurrentHeader(block.Header())
   626  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
   627  
   628  		bc.currentFastBlock.Store(block)
   629  		headFastBlockGauge.Update(int64(block.NumberU64()))
   630  	}
   631  }
   632  
   633  // Genesis retrieves the chain's genesis block.
   634  func (bc *BlockChain) Genesis() *types.Block {
   635  	return bc.genesisBlock
   636  }
   637  
   638  // GetBody retrieves a block body (transactions and uncles) from the database by
   639  // hash, caching it if found.
   640  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   641  	// Short circuit if the body's already in the cache, retrieve otherwise
   642  	if cached, ok := bc.bodyCache.Get(hash); ok {
   643  		body := cached.(*types.Body)
   644  		return body
   645  	}
   646  	number := bc.hc.GetBlockNumber(hash)
   647  	if number == nil {
   648  		return nil
   649  	}
   650  	body := rawdb.ReadBody(bc.db, hash, *number)
   651  	if body == nil {
   652  		return nil
   653  	}
   654  	// Cache the found body for next time and return
   655  	bc.bodyCache.Add(hash, body)
   656  	return body
   657  }
   658  
   659  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   660  // caching it if found.
   661  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   662  	// Short circuit if the body's already in the cache, retrieve otherwise
   663  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   664  		return cached.(rlp.RawValue)
   665  	}
   666  	number := bc.hc.GetBlockNumber(hash)
   667  	if number == nil {
   668  		return nil
   669  	}
   670  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   671  	if len(body) == 0 {
   672  		return nil
   673  	}
   674  	// Cache the found body for next time and return
   675  	bc.bodyRLPCache.Add(hash, body)
   676  	return body
   677  }
   678  
   679  // HasBlock checks if a block is fully present in the database or not.
   680  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   681  	if bc.blockCache.Contains(hash) {
   682  		return true
   683  	}
   684  	return rawdb.HasBody(bc.db, hash, number)
   685  }
   686  
   687  // HasFastBlock checks if a fast block is fully present in the database or not.
   688  func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
   689  	if !bc.HasBlock(hash, number) {
   690  		return false
   691  	}
   692  	if bc.receiptsCache.Contains(hash) {
   693  		return true
   694  	}
   695  	return rawdb.HasReceipts(bc.db, hash, number)
   696  }
   697  
   698  // HasState checks if state trie is fully present in the database or not.
   699  func (bc *BlockChain) HasState(hash common.Hash) bool {
   700  	_, err := bc.stateCache.OpenTrie(hash)
   701  	return err == nil
   702  }
   703  
   704  // HasBlockAndState checks if a block and associated state trie is fully present
   705  // in the database or not, caching it if present.
   706  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   707  	// Check first that the block itself is known
   708  	block := bc.GetBlock(hash, number)
   709  	if block == nil {
   710  		return false
   711  	}
   712  	return bc.HasState(block.Root())
   713  }
   714  
   715  // GetBlock retrieves a block from the database by hash and number,
   716  // caching it if found.
   717  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   718  	// Short circuit if the block's already in the cache, retrieve otherwise
   719  	if block, ok := bc.blockCache.Get(hash); ok {
   720  		return block.(*types.Block)
   721  	}
   722  	block := rawdb.ReadBlock(bc.db, hash, number)
   723  	if block == nil {
   724  		return nil
   725  	}
   726  	// Cache the found block for next time and return
   727  	bc.blockCache.Add(block.Hash(), block)
   728  	return block
   729  }
   730  
   731  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   732  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   733  	number := bc.hc.GetBlockNumber(hash)
   734  	if number == nil {
   735  		return nil
   736  	}
   737  	return bc.GetBlock(hash, *number)
   738  }
   739  
   740  // GetBlockByNumber retrieves a block from the database by number, caching it
   741  // (associated with its hash) if found.
   742  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   743  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   744  	if hash == (common.Hash{}) {
   745  		return nil
   746  	}
   747  	return bc.GetBlock(hash, number)
   748  }
   749  
   750  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   751  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   752  	if receipts, ok := bc.receiptsCache.Get(hash); ok {
   753  		return receipts.(types.Receipts)
   754  	}
   755  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   756  	if number == nil {
   757  		return nil
   758  	}
   759  	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
   760  	if receipts == nil {
   761  		return nil
   762  	}
   763  	bc.receiptsCache.Add(hash, receipts)
   764  	return receipts
   765  }
   766  
   767  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   768  // [deprecated by eth/62]
   769  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   770  	number := bc.hc.GetBlockNumber(hash)
   771  	if number == nil {
   772  		return nil
   773  	}
   774  	for i := 0; i < n; i++ {
   775  		block := bc.GetBlock(hash, *number)
   776  		if block == nil {
   777  			break
   778  		}
   779  		blocks = append(blocks, block)
   780  		hash = block.ParentHash()
   781  		*number--
   782  	}
   783  	return
   784  }
   785  
   786  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   787  // a specific distance is reached.
   788  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   789  	uncles := []*types.Header{}
   790  	for i := 0; block != nil && i < length; i++ {
   791  		uncles = append(uncles, block.Uncles()...)
   792  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   793  	}
   794  	return uncles
   795  }
   796  
// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
//
// The lookup is delegated entirely to the state cache's backing trie database.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}
   802  
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	// Only the first caller performs the shutdown; subsequent calls are no-ops.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	// Wait for in-flight imports (tracked via bc.wg) to observe the interrupt
	// and finish before flushing state below.
	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
			// Skip offsets that would reach past the genesis block.
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		// Drop all remaining trie references held for garbage collection.
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		// After a full dereference pass the dirty set should be empty.
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}
   843  
   844  func (bc *BlockChain) procFutureBlocks() {
   845  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   846  	for _, hash := range bc.futureBlocks.Keys() {
   847  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   848  			blocks = append(blocks, block.(*types.Block))
   849  		}
   850  	}
   851  	if len(blocks) > 0 {
   852  		types.BlockBy(types.Number).Sort(blocks)
   853  
   854  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   855  		for i := range blocks {
   856  			bc.InsertChain(blocks[i : i+1])
   857  		}
   858  	}
   859  }
   860  
// WriteStatus is the status resulting from writing a block into the chain.
type WriteStatus byte

const (
	// NonStatTy indicates the block was not written (an error occurred).
	NonStatTy WriteStatus = iota
	// CanonStatTy indicates the block was written as part of the canonical chain.
	CanonStatTy
	// SideStatTy indicates the block was written to a side chain.
	SideStatTy
)
   869  
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Walk the hashes newest-first, unwinding whichever head pointers still
	// reference a block being rolled back.
	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			// Rewind the header-chain head to its parent.
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			// Rewind the fast-sync head to its parent block.
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
			bc.currentFastBlock.Store(newFastBlock)
			headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			// Rewind the full-block head to its parent block.
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
			bc.currentBlock.Store(newBlock)
			headBlockGauge.Update(int64(newBlock.NumberU64()))
		}
	}
	// Truncate ancient data which exceeds the current header.
	//
	// Notably, it can happen that system crashes without truncating the ancient data
	// but the head indicator has been updated in the active store. Regarding this issue,
	// system will self recovery by truncating the extra data during the setup phase.
	if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
		log.Crit("Truncate ancient store failed", "err", err)
	}
}
   905  
// truncateAncient rewinds the blockchain to the specified header and deletes all
// data in the ancient store that exceeds the specified header.
func (bc *BlockChain) truncateAncient(head uint64) error {
	frozen, err := bc.db.Ancients()
	if err != nil {
		return err
	}
	// Short circuit if there is no data to truncate in ancient store.
	if frozen <= head+1 {
		return nil
	}
	// Truncate all the data in the freezer beyond the specified head
	if err := bc.db.TruncateAncients(head + 1); err != nil {
		return err
	}
	// Clear out any stale content from the header chain's caches.
	bc.hc.headerCache.Purge()
	bc.hc.tdCache.Purge()
	bc.hc.numberCache.Purge()

	// Clear out any stale content from the block chain's own caches.
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	log.Info("Rewind ancient data", "number", head)
	return nil
}
   937  
// numberHash is just a container for a number and a hash, to represent a block
// without carrying its full header or body.
type numberHash struct {
	number uint64      // block number
	hash   common.Hash // block hash
}
   943  
   944  // InsertReceiptChain attempts to complete an already existing header chain with
   945  // transaction and receipt data.
   946  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
   947  	// We don't require the chainMu here since we want to maximize the
   948  	// concurrency of header insertion and receipt insertion.
   949  	bc.wg.Add(1)
   950  	defer bc.wg.Done()
   951  
   952  	var (
   953  		ancientBlocks, liveBlocks     types.Blocks
   954  		ancientReceipts, liveReceipts []types.Receipts
   955  	)
   956  	// Do a sanity check that the provided chain is actually ordered and linked
   957  	for i := 0; i < len(blockChain); i++ {
   958  		if i != 0 {
   959  			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   960  				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   961  					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   962  				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   963  					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   964  			}
   965  		}
   966  		if blockChain[i].NumberU64() <= ancientLimit {
   967  			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
   968  		} else {
   969  			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
   970  		}
   971  	}
   972  
   973  	var (
   974  		stats = struct{ processed, ignored int32 }{}
   975  		start = time.Now()
   976  		size  = 0
   977  	)
   978  	// updateHead updates the head fast sync block if the inserted blocks are better
   979  	// and returns a indicator whether the inserted blocks are canonical.
   980  	updateHead := func(head *types.Block) bool {
   981  		bc.chainmu.Lock()
   982  
   983  		// Rewind may have occurred, skip in that case.
   984  		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
   985  			currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
   986  			if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   987  				rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   988  				bc.currentFastBlock.Store(head)
   989  				headFastBlockGauge.Update(int64(head.NumberU64()))
   990  				bc.chainmu.Unlock()
   991  				return true
   992  			}
   993  		}
   994  		bc.chainmu.Unlock()
   995  		return false
   996  	}
   997  	// writeAncient writes blockchain and corresponding receipt chain into ancient store.
   998  	//
   999  	// this function only accepts canonical chain data. All side chain will be reverted
  1000  	// eventually.
  1001  	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1002  		var (
  1003  			previous = bc.CurrentFastBlock()
  1004  			batch    = bc.db.NewBatch()
  1005  		)
  1006  		// If any error occurs before updating the head or we are inserting a side chain,
  1007  		// all the data written this time wll be rolled back.
  1008  		defer func() {
  1009  			if previous != nil {
  1010  				if err := bc.truncateAncient(previous.NumberU64()); err != nil {
  1011  					log.Crit("Truncate ancient store failed", "err", err)
  1012  				}
  1013  			}
  1014  		}()
  1015  		var deleted []*numberHash
  1016  		for i, block := range blockChain {
  1017  			// Short circuit insertion if shutting down or processing failed
  1018  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1019  				return 0, errInsertionInterrupted
  1020  			}
  1021  			// Short circuit insertion if it is required(used in testing only)
  1022  			if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
  1023  				return i, errors.New("insertion is terminated for testing purpose")
  1024  			}
  1025  			// Short circuit if the owner header is unknown
  1026  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1027  				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
  1028  			}
  1029  			var (
  1030  				start  = time.Now()
  1031  				logged = time.Now()
  1032  				count  int
  1033  			)
  1034  			// Migrate all ancient blocks. This can happen if someone upgrades from Geth
  1035  			// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
  1036  			// long term.
  1037  			for {
  1038  				// We can ignore the error here since light client won't hit this code path.
  1039  				frozen, _ := bc.db.Ancients()
  1040  				if frozen >= block.NumberU64() {
  1041  					break
  1042  				}
  1043  				h := rawdb.ReadCanonicalHash(bc.db, frozen)
  1044  				b := rawdb.ReadBlock(bc.db, h, frozen)
  1045  				size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
  1046  				count += 1
  1047  
  1048  				// Always keep genesis block in active database.
  1049  				if b.NumberU64() != 0 {
  1050  					deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
  1051  				}
  1052  				if time.Since(logged) > 8*time.Second {
  1053  					log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
  1054  					logged = time.Now()
  1055  				}
  1056  				// Don't collect too much in-memory, write it out every 100K blocks
  1057  				if len(deleted) > 100000 {
  1058  
  1059  					// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1060  					if err := bc.db.Sync(); err != nil {
  1061  						return 0, err
  1062  					}
  1063  					// Wipe out canonical block data.
  1064  					for _, nh := range deleted {
  1065  						rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
  1066  						rawdb.DeleteCanonicalHash(batch, nh.number)
  1067  					}
  1068  					if err := batch.Write(); err != nil {
  1069  						return 0, err
  1070  					}
  1071  					batch.Reset()
  1072  					// Wipe out side chain too.
  1073  					for _, nh := range deleted {
  1074  						for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
  1075  							rawdb.DeleteBlock(batch, hash, nh.number)
  1076  						}
  1077  					}
  1078  					if err := batch.Write(); err != nil {
  1079  						return 0, err
  1080  					}
  1081  					batch.Reset()
  1082  					deleted = deleted[0:]
  1083  				}
  1084  			}
  1085  			if count > 0 {
  1086  				log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
  1087  			}
  1088  			// Flush data into ancient database.
  1089  			size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
  1090  			rawdb.WriteTxLookupEntries(batch, block)
  1091  
  1092  			stats.processed++
  1093  		}
  1094  		// Flush all tx-lookup index data.
  1095  		size += batch.ValueSize()
  1096  		if err := batch.Write(); err != nil {
  1097  			return 0, err
  1098  		}
  1099  		batch.Reset()
  1100  
  1101  		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1102  		if err := bc.db.Sync(); err != nil {
  1103  			return 0, err
  1104  		}
  1105  		if !updateHead(blockChain[len(blockChain)-1]) {
  1106  			return 0, errors.New("side blocks can't be accepted as the ancient chain data")
  1107  		}
  1108  		previous = nil // disable rollback explicitly
  1109  
  1110  		// Wipe out canonical block data.
  1111  		for _, nh := range deleted {
  1112  			rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
  1113  			rawdb.DeleteCanonicalHash(batch, nh.number)
  1114  		}
  1115  		for _, block := range blockChain {
  1116  			// Always keep genesis block in active database.
  1117  			if block.NumberU64() != 0 {
  1118  				rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
  1119  				rawdb.DeleteCanonicalHash(batch, block.NumberU64())
  1120  			}
  1121  		}
  1122  		if err := batch.Write(); err != nil {
  1123  			return 0, err
  1124  		}
  1125  		batch.Reset()
  1126  
  1127  		// Wipe out side chain too.
  1128  		for _, nh := range deleted {
  1129  			for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
  1130  				rawdb.DeleteBlock(batch, hash, nh.number)
  1131  			}
  1132  		}
  1133  		for _, block := range blockChain {
  1134  			// Always keep genesis block in active database.
  1135  			if block.NumberU64() != 0 {
  1136  				for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
  1137  					rawdb.DeleteBlock(batch, hash, block.NumberU64())
  1138  				}
  1139  			}
  1140  		}
  1141  		if err := batch.Write(); err != nil {
  1142  			return 0, err
  1143  		}
  1144  		return 0, nil
  1145  	}
  1146  	// writeLive writes blockchain and corresponding receipt chain into active store.
  1147  	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1148  		batch := bc.db.NewBatch()
  1149  		for i, block := range blockChain {
  1150  			// Short circuit insertion if shutting down or processing failed
  1151  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1152  				return 0, errInsertionInterrupted
  1153  			}
  1154  			// Short circuit if the owner header is unknown
  1155  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1156  				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
  1157  			}
  1158  			if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1159  				stats.ignored++
  1160  				continue
  1161  			}
  1162  			// Write all the data out into the database
  1163  			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
  1164  			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
  1165  			rawdb.WriteTxLookupEntries(batch, block)
  1166  
  1167  			stats.processed++
  1168  			if batch.ValueSize() >= ethdb.IdealBatchSize {
  1169  				if err := batch.Write(); err != nil {
  1170  					return 0, err
  1171  				}
  1172  				size += batch.ValueSize()
  1173  				batch.Reset()
  1174  			}
  1175  		}
  1176  		if batch.ValueSize() > 0 {
  1177  			size += batch.ValueSize()
  1178  			if err := batch.Write(); err != nil {
  1179  				return 0, err
  1180  			}
  1181  		}
  1182  		updateHead(blockChain[len(blockChain)-1])
  1183  		return 0, nil
  1184  	}
  1185  	// Write downloaded chain data and corresponding receipt chain data.
  1186  	if len(ancientBlocks) > 0 {
  1187  		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
  1188  			if err == errInsertionInterrupted {
  1189  				return 0, nil
  1190  			}
  1191  			return n, err
  1192  		}
  1193  	}
  1194  	if len(liveBlocks) > 0 {
  1195  		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
  1196  			if err == errInsertionInterrupted {
  1197  				return 0, nil
  1198  			}
  1199  			return n, err
  1200  		}
  1201  	}
  1202  
  1203  	head := blockChain[len(blockChain)-1]
  1204  	context := []interface{}{
  1205  		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
  1206  		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
  1207  		"size", common.StorageSize(size),
  1208  	}
  1209  	if stats.ignored > 0 {
  1210  		context = append(context, []interface{}{"ignored", stats.ignored}...)
  1211  	}
  1212  	log.Info("Imported new block receipts", context...)
  1213  
  1214  	return 0, nil
  1215  }
  1216  
  1217  var lastWrite uint64
  1218  
  1219  // writeBlockWithoutState writes only the block and its metadata to the database,
  1220  // but does not write any state. This is used to construct competing side forks
  1221  // up to the point where they exceed the canonical total difficulty.
  1222  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1223  	bc.wg.Add(1)
  1224  	defer bc.wg.Done()
  1225  
  1226  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
  1227  		return err
  1228  	}
  1229  	rawdb.WriteBlock(bc.db, block)
  1230  
  1231  	return nil
  1232  }
  1233  
  1234  // writeKnownBlock updates the head block flag with a known block
  1235  // and introduces chain reorg if necessary.
  1236  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1237  	bc.wg.Add(1)
  1238  	defer bc.wg.Done()
  1239  
  1240  	current := bc.CurrentBlock()
  1241  	if block.ParentHash() != current.Hash() {
  1242  		if err := bc.reorg(current, block); err != nil {
  1243  			return err
  1244  		}
  1245  	}
  1246  	// Write the positional metadata for transaction/receipt lookups.
  1247  	// Preimages here is empty, ignore it.
  1248  	rawdb.WriteTxLookupEntries(bc.db, block)
  1249  
  1250  	bc.insert(block)
  1251  	return nil
  1252  }
  1253  
// WriteBlockWithState writes the block and all associated state to the database.
//
// It holds the chain mutex for the duration of the write and delegates the
// actual work to writeBlockWithState.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.writeBlockWithState(block, receipts, state)
}
  1261  
// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		// The parent's TD is unknown, so the block cannot be ranked yet.
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	// Commit the state mutations into the in-memory trie database.
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.TrieDirtyDisabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		// Negative priority so the oldest (lowest-numbered) root is popped first.
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > TriesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - TriesInMemory

			// If we exceeded out time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
					}
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					// Still within the retention window: push it back and stop.
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			// Tie-break randomly unless one of the blocks is locally preserved.
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	// The block is no longer pending, drop it from the future set (if present).
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}
  1391  
  1392  // addFutureBlock checks if the block is within the max allowed window to get
  1393  // accepted for future processing, and returns an error if the block is too far
  1394  // ahead and was not added.
  1395  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1396  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1397  	if block.Time() > max {
  1398  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1399  	}
  1400  	bc.futureBlocks.Add(block.Hash(), block)
  1401  	return nil
  1402  }
  1403  
  1404  // InsertChain attempts to insert the given batch of blocks in to the canonical
  1405  // chain or, otherwise, create a fork. If an error is returned it will return
  1406  // the index number of the failing block as well an error describing what went
  1407  // wrong.
  1408  //
  1409  // After insertion is done, all accumulated events will be fired.
  1410  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1411  	// Sanity check that we have something meaningful to import
  1412  	if len(chain) == 0 {
  1413  		return 0, nil
  1414  	}
  1415  
  1416  	bc.blockProcFeed.Send(true)
  1417  	defer bc.blockProcFeed.Send(false)
  1418  
  1419  	// Remove already known canon-blocks
  1420  	var (
  1421  		block, prev *types.Block
  1422  	)
  1423  	// Do a sanity check that the provided chain is actually ordered and linked
  1424  	for i := 1; i < len(chain); i++ {
  1425  		block = chain[i]
  1426  		prev = chain[i-1]
  1427  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1428  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1429  			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
  1430  				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())
  1431  
  1432  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
  1433  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1434  		}
  1435  	}
  1436  	// Pre-checks passed, start the full block imports
  1437  	bc.wg.Add(1)
  1438  	bc.chainmu.Lock()
  1439  	n, events, logs, err := bc.insertChain(chain, true)
  1440  	bc.chainmu.Unlock()
  1441  	bc.wg.Done()
  1442  
  1443  	bc.PostChainEvents(events, logs)
  1444  	return n, err
  1445  }
  1446  
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
//
// It returns the index of the first block not fully processed, the accumulated
// chain events, the coalesced logs and any error. Events are NOT fired here;
// the caller is expected to post them after releasing the chain mutex.
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil, nil, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, bc.validator)

	block, err := it.next()

	// Left-trim all the known blocks
	if err == ErrKnownBlock {
		// First block (and state) is known
		//   1. We did a roll-back, and should now do a re-import
		//   2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
		// 	    from the canonical chain, which has not been verified.
		// Skip all known blocks that are behind us
		var (
			current  = bc.CurrentBlock()
			localTd  = bc.GetTd(current.Hash(), current.NumberU64())
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
		)
		// Skip as long as the imported chain's cumulative TD stays at or
		// below our local head's TD.
		for block != nil && err == ErrKnownBlock {
			externTd = new(big.Int).Add(externTd, block.Difficulty())
			if localTd.Cmp(externTd) < 0 {
				break
			}
			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
			stats.ignored++

			block, err = it.next()
		}
		// The remaining blocks are still known blocks, the only scenario here is:
		// During the fast sync, the pivot point is already submitted but rollback
		// happens. Then node resets the head full block to a lower height via `rollback`
		// and leaves a few known blocks in the database.
		//
		// When node runs a fast sync again, it can re-import a batch of known blocks via
		// `insertChain` while a part of them have higher total difficulty than current
		// head full block(new pivot point).
		for block != nil && err == ErrKnownBlock {
			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, nil, nil, err
			}
			lastCanon = block

			block, err = it.next()
		}
		// Falls through to the block import
	}
	switch {
	// First block is pruned, insert as sidechain and reorg only if TD grows enough
	case err == consensus.ErrPrunedAncestor:
		log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
		return bc.insertSideChain(block, it)

	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			block, err = it.next()
		}
		stats.queued += it.processed()
		stats.ignored += it.remaining()

		// If there are any still remaining, mark as ignored
		return it.index, events, coalescedLogs, err

	// Some other error occurred, abort
	case err != nil:
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return it.index, events, coalescedLogs, err
	}
	// No validation errors for the first block (or chain prefix skipped)
	for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, events, coalescedLogs, ErrBlacklistedHash
		}
		// If the block is known (in the middle of the chain), it's a special case for
		// Clique blocks where they can share state among each other, so importing an
		// older block might complete the state of the subsequent one. In this case,
		// just skip the block (we already validated it once fully (and crashed), since
		// its header and body was already in the database).
		if err == ErrKnownBlock {
			logger := log.Debug
			if bc.chainConfig.Clique == nil {
				logger = log.Warn
			}
			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"root", block.Root())

			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, nil, nil, err
			}
			stats.processed++

			// We can assume that logs are empty here, since the only way for consecutive
			// Clique blocks to have the same state is if there are no transactions.
			events = append(events, ChainEvent{block, block.Hash(), nil})
			lastCanon = block

			continue
		}
		// Retrieve the parent block and it's state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
		}
		statedb, err := state.New(parent.Root, bc.stateCache)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// If we have a followup block, run that against the current state to pre-cache
		// transactions and probabilistically some of the account/storage trie nodes.
		// followupInterrupt is set to 1 to abort the prefetcher once this block's
		// import completes or fails.
		var followupInterrupt uint32

		if !bc.cacheConfig.TrieCleanNoPrefetch {
			if followup, err := it.peek(); followup != nil && err == nil {
				go func(start time.Time) {
					// Prefetch into a throwaway state so the real import below
					// is unaffected; only the trie caches get warmed.
					throwaway, _ := state.New(parent.Root, bc.stateCache)
					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)

					blockPrefetchExecuteTimer.Update(time.Since(start))
					if atomic.LoadUint32(&followupInterrupt) == 1 {
						blockPrefetchInterruptMeter.Mark(1)
					}
				}(time.Now())
			}
		}
		// Process block using the parent state as reference point
		substart := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		// Update the metrics touched during block processing
		accountReadTimer.Update(statedb.AccountReads)     // Account reads are complete, we can mark them
		storageReadTimer.Update(statedb.StorageReads)     // Storage reads are complete, we can mark them
		accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
		storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them

		triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
		trieproc := statedb.AccountReads + statedb.AccountUpdates
		trieproc += statedb.StorageReads + statedb.StorageUpdates

		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

		// Validate the state using the default validator
		substart = time.Now()
		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		proctime := time.Since(start)

		// Update the metrics touched during block validation
		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them

		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))

		// Write the block to the chain and get the status.
		substart = time.Now()
		status, err := bc.writeBlockWithState(block, receipts, statedb)
		if err != nil {
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		// Block fully imported; stop any in-flight prefetch for the followup.
		atomic.StoreUint32(&followupInterrupt, 1)

		// Update the metrics touched during block commit
		accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
		storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them

		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
		blockInsertTimer.UpdateSince(start)

		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			coalescedLogs = append(coalescedLogs, logs...)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
			events = append(events, ChainSideEvent{block})

		default:
			// This in theory is impossible, but lets be nice to our future selves and leave
			// a log, instead of trying to track down blocks imports that don't emit logs.
			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
		stats.processed++
		stats.usedGas += usedGas

		dirty, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, dirty)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && err == consensus.ErrFutureBlock {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, events, coalescedLogs, err
		}
		block, err = it.next()

		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()

	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return it.index, events, coalescedLogs, err
}
  1730  
// insertSideChain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
//
// Return values mirror insertChain: iterator index, events, logs, error.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other errors means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.NumberU64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block which has it's state pruned

				// Collect the TD of the block. Since we know it's a canon one,
				// we can get it directly, and not (like further below) use
				// the parent and then add the block on top
				externTd = bc.GetTd(block.Hash(), block.NumberU64())
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exist! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
			}
		}
		// Lazily seed the running TD from the first block's parent, then
		// accumulate each sidechain block's difficulty on top.
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
				return it.index, nil, nil, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to database. Loop ended
	// either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(current.Hash(), current.NumberU64())
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
		return it.index, nil, nil, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	// Walk backwards from the last imported sidechain header until we find an
	// ancestor whose state is still available locally.
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, nil, nil, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here. Batches are capped at 2048 blocks or 64MiB of block data.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
				return 0, nil, nil, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil, nil, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil, nil, nil
}
  1853  
  1854  // reorg takes two blocks, an old chain and a new chain and will reconstruct the
  1855  // blocks and inserts them to be part of the new canonical chain and accumulates
  1856  // potential missing transactions and post an event about them.
  1857  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1858  	var (
  1859  		newChain    types.Blocks
  1860  		oldChain    types.Blocks
  1861  		commonBlock *types.Block
  1862  
  1863  		deletedTxs types.Transactions
  1864  		addedTxs   types.Transactions
  1865  
  1866  		deletedLogs []*types.Log
  1867  		rebirthLogs []*types.Log
  1868  
  1869  		// collectLogs collects the logs that were generated during the
  1870  		// processing of the block that corresponds with the given hash.
  1871  		// These logs are later announced as deleted or reborn
  1872  		collectLogs = func(hash common.Hash, removed bool) {
  1873  			number := bc.hc.GetBlockNumber(hash)
  1874  			if number == nil {
  1875  				return
  1876  			}
  1877  			receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
  1878  			for _, receipt := range receipts {
  1879  				for _, log := range receipt.Logs {
  1880  					l := *log
  1881  					if removed {
  1882  						l.Removed = true
  1883  						deletedLogs = append(deletedLogs, &l)
  1884  					} else {
  1885  						rebirthLogs = append(rebirthLogs, &l)
  1886  					}
  1887  				}
  1888  			}
  1889  		}
  1890  	)
  1891  	// Reduce the longer chain to the same number as the shorter one
  1892  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1893  		// Old chain is longer, gather all transactions and logs as deleted ones
  1894  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1895  			oldChain = append(oldChain, oldBlock)
  1896  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1897  			collectLogs(oldBlock.Hash(), true)
  1898  		}
  1899  	} else {
  1900  		// New chain is longer, stash all blocks away for subsequent insertion
  1901  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1902  			newChain = append(newChain, newBlock)
  1903  		}
  1904  	}
  1905  	if oldBlock == nil {
  1906  		return fmt.Errorf("invalid old chain")
  1907  	}
  1908  	if newBlock == nil {
  1909  		return fmt.Errorf("invalid new chain")
  1910  	}
  1911  	// Both sides of the reorg are at the same number, reduce both until the common
  1912  	// ancestor is found
  1913  	for {
  1914  		// If the common ancestor was found, bail out
  1915  		if oldBlock.Hash() == newBlock.Hash() {
  1916  			commonBlock = oldBlock
  1917  			break
  1918  		}
  1919  		// Remove an old block as well as stash away a new block
  1920  		oldChain = append(oldChain, oldBlock)
  1921  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1922  		collectLogs(oldBlock.Hash(), true)
  1923  
  1924  		newChain = append(newChain, newBlock)
  1925  
  1926  		// Step back with both chains
  1927  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  1928  		if oldBlock == nil {
  1929  			return fmt.Errorf("invalid old chain")
  1930  		}
  1931  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1932  		if newBlock == nil {
  1933  			return fmt.Errorf("invalid new chain")
  1934  		}
  1935  	}
  1936  	// Ensure the user sees large reorgs
  1937  	if len(oldChain) > 0 && len(newChain) > 0 {
  1938  		logFn := log.Info
  1939  		msg := "Chain reorg detected"
  1940  		if len(oldChain) > 63 {
  1941  			msg = "Large chain reorg detected"
  1942  			logFn = log.Warn
  1943  		}
  1944  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1945  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1946  		blockReorgAddMeter.Mark(int64(len(newChain)))
  1947  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  1948  	} else {
  1949  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1950  	}
  1951  	// Insert the new chain(except the head block(reverse order)),
  1952  	// taking care of the proper incremental order.
  1953  	for i := len(newChain) - 1; i >= 1; i-- {
  1954  		// Insert the block in the canonical way, re-writing history
  1955  		bc.insert(newChain[i])
  1956  
  1957  		// Collect reborn logs due to chain reorg
  1958  		collectLogs(newChain[i].Hash(), false)
  1959  
  1960  		// Write lookup entries for hash based transaction/receipt searches
  1961  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1962  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1963  	}
  1964  	// When transactions get deleted from the database, the receipts that were
  1965  	// created in the fork must also be deleted
  1966  	batch := bc.db.NewBatch()
  1967  	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
  1968  		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1969  	}
  1970  	// Delete any canonical number assignments above the new head
  1971  	number := bc.CurrentBlock().NumberU64()
  1972  	for i := number + 1; ; i++ {
  1973  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  1974  		if hash == (common.Hash{}) {
  1975  			break
  1976  		}
  1977  		rawdb.DeleteCanonicalHash(batch, i)
  1978  	}
  1979  	batch.Write()
  1980  	// If any logs need to be fired, do it now. In theory we could avoid creating
  1981  	// this goroutine if there are no events to fire, but realistcally that only
  1982  	// ever happens if we're reorging empty blocks, which will only happen on idle
  1983  	// networks where performance is not an issue either way.
  1984  	//
  1985  	// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
  1986  	// event ordering?
  1987  	go func() {
  1988  		if len(deletedLogs) > 0 {
  1989  			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1990  		}
  1991  		if len(rebirthLogs) > 0 {
  1992  			bc.logsFeed.Send(rebirthLogs)
  1993  		}
  1994  		if len(oldChain) > 0 {
  1995  			for _, block := range oldChain {
  1996  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1997  			}
  1998  		}
  1999  	}()
  2000  	return nil
  2001  }
  2002  
  2003  // PostChainEvents iterates over the events generated by a chain insertion and
  2004  // posts them into the event feed.
  2005  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  2006  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  2007  	// post event logs for further processing
  2008  	if logs != nil {
  2009  		bc.logsFeed.Send(logs)
  2010  	}
  2011  	for _, event := range events {
  2012  		switch ev := event.(type) {
  2013  		case ChainEvent:
  2014  			bc.chainFeed.Send(ev)
  2015  
  2016  		case ChainHeadEvent:
  2017  			bc.chainHeadFeed.Send(ev)
  2018  
  2019  		case ChainSideEvent:
  2020  			bc.chainSideFeed.Send(ev)
  2021  		}
  2022  	}
  2023  }
  2024  
  2025  func (bc *BlockChain) update() {
  2026  	futureTimer := time.NewTicker(5 * time.Second)
  2027  	defer futureTimer.Stop()
  2028  	for {
  2029  		select {
  2030  		case <-futureTimer.C:
  2031  			bc.procFutureBlocks()
  2032  		case <-bc.quit:
  2033  			return
  2034  		}
  2035  	}
  2036  }
  2037  
  2038  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  2039  func (bc *BlockChain) BadBlocks() []*types.Block {
  2040  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  2041  	for _, hash := range bc.badBlocks.Keys() {
  2042  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  2043  			block := blk.(*types.Block)
  2044  			blocks = append(blocks, block)
  2045  		}
  2046  	}
  2047  	return blocks
  2048  }
  2049  
// addBadBlock adds a bad block to the bad-block LRU cache, keyed by its hash.
// Older entries are evicted automatically by the cache.
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}
  2054  
  2055  // reportBlock logs a bad block error.
  2056  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2057  	bc.addBadBlock(block)
  2058  
  2059  	var receiptString string
  2060  	for i, receipt := range receipts {
  2061  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  2062  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2063  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2064  	}
  2065  	log.Error(fmt.Sprintf(`
  2066  ########## BAD BLOCK #########
  2067  Chain config: %v
  2068  
  2069  Number: %v
  2070  Hash: 0x%x
  2071  %v
  2072  
  2073  Error: %v
  2074  ##############################
  2075  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  2076  }
  2077  
  2078  // InsertHeaderChain attempts to insert the given header chain in to the local
  2079  // chain, possibly creating a reorg. If an error is returned, it will return the
  2080  // index number of the failing header as well an error describing what went wrong.
  2081  //
  2082  // The verify parameter can be used to fine tune whether nonce verification
  2083  // should be done or not. The reason behind the optional check is because some
  2084  // of the header retrieval mechanisms already need to verify nonces, as well as
  2085  // because nonces can be verified sparsely, not needing to check each.
  2086  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  2087  	start := time.Now()
  2088  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  2089  		return i, err
  2090  	}
  2091  
  2092  	// Make sure only one thread manipulates the chain at once
  2093  	bc.chainmu.Lock()
  2094  	defer bc.chainmu.Unlock()
  2095  
  2096  	bc.wg.Add(1)
  2097  	defer bc.wg.Done()
  2098  
  2099  	whFunc := func(header *types.Header) error {
  2100  		_, err := bc.hc.WriteHeader(header)
  2101  		return err
  2102  	}
  2103  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  2104  }
  2105  
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
// Thin delegation to the underlying header chain.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}
  2111  
  2112  // GetTd retrieves a block's total difficulty in the canonical chain from the
  2113  // database by hash and number, caching it if found.
  2114  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  2115  	return bc.hc.GetTd(hash, number)
  2116  }
  2117  
  2118  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  2119  // database by hash, caching it if found.
  2120  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  2121  	return bc.hc.GetTdByHash(hash)
  2122  }
  2123  
  2124  // GetHeader retrieves a block header from the database by hash and number,
  2125  // caching it if found.
  2126  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  2127  	return bc.hc.GetHeader(hash, number)
  2128  }
  2129  
  2130  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  2131  // found.
  2132  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  2133  	return bc.hc.GetHeaderByHash(hash)
  2134  }
  2135  
  2136  // HasHeader checks if a block header is present in the database or not, caching
  2137  // it if present.
  2138  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  2139  	return bc.hc.HasHeader(hash, number)
  2140  }
  2141  
  2142  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  2143  // hash, fetching towards the genesis block.
  2144  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  2145  	return bc.hc.GetBlockHashesFromHash(hash, max)
  2146  }
  2147  
  2148  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  2149  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  2150  // number of blocks to be individually checked before we reach the canonical chain.
  2151  //
  2152  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
  2153  func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  2154  	bc.chainmu.RLock()
  2155  	defer bc.chainmu.RUnlock()
  2156  
  2157  	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  2158  }
  2159  
  2160  // GetHeaderByNumber retrieves a block header from the database by number,
  2161  // caching it (associated with its hash) if found.
  2162  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  2163  	return bc.hc.GetHeaderByNumber(number)
  2164  }
  2165  
  2166  // GetTransactionLookup retrieves the lookup associate with the given transaction
  2167  // hash from the cache or database.
  2168  func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
  2169  	// Short circuit if the txlookup already in the cache, retrieve otherwise
  2170  	if lookup, exist := bc.txLookupCache.Get(hash); exist {
  2171  		return lookup.(*rawdb.LegacyTxLookupEntry)
  2172  	}
  2173  	tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
  2174  	if tx == nil {
  2175  		return nil
  2176  	}
  2177  	lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
  2178  	bc.txLookupCache.Add(hash, lookup)
  2179  	return lookup
  2180  }
  2181  
  2182  // Config retrieves the chain's fork configuration.
  2183  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  2184  
  2185  // Engine retrieves the blockchain's consensus engine.
  2186  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  2187  
  2188  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  2189  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  2190  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  2191  }
  2192  
  2193  // SubscribeChainEvent registers a subscription of ChainEvent.
  2194  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  2195  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  2196  }
  2197  
  2198  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  2199  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  2200  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  2201  }
  2202  
  2203  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  2204  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  2205  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  2206  }
  2207  
  2208  // SubscribeLogsEvent registers a subscription of []*types.Log.
  2209  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  2210  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  2211  }
  2212  
  2213  // SubscribeBlockProcessingEvent registers a subscription of bool where true means
  2214  // block processing has started while false means it has stopped.
  2215  func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
  2216  	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
  2217  }