git.pirl.io/community/pirl@v0.0.0-20201111064343-9d3d31ff74be/core/blockchain.go (about)

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sort"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"git.pirl.io/community/pirl/common"
    32  	"git.pirl.io/community/pirl/common/mclock"
    33  	"git.pirl.io/community/pirl/common/prque"
    34  	"git.pirl.io/community/pirl/consensus"
    35  	"git.pirl.io/community/pirl/core/rawdb"
    36  	"git.pirl.io/community/pirl/core/state"
    37  	"git.pirl.io/community/pirl/core/types"
    38  	"git.pirl.io/community/pirl/core/vm"
    39  	"git.pirl.io/community/pirl/ethdb"
    40  	"git.pirl.io/community/pirl/event"
    41  	"git.pirl.io/community/pirl/log"
    42  	"git.pirl.io/community/pirl/metrics"
    43  	"git.pirl.io/community/pirl/params"
    44  	"git.pirl.io/community/pirl/rlp"
    45  	"git.pirl.io/community/pirl/trie"
    46  	lru "github.com/hashicorp/golang-lru"
    47  )
    48  
    49  var (
    50  	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
    51  	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
    52  	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
    53  
    54  	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
    55  	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
    56  	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
    57  	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
    58  
    59  	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
    60  	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
    61  	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
    62  	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
    63  
    64  	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
    65  	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
    66  	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
    67  	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)
    68  	blockReorgAddMeter   = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
    69  	blockReorgDropMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
    70  
    71  	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
    72  	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
    73  
    74  	errInsertionInterrupted = errors.New("insertion is interrupted")
    75  )
    76  
const (
	bodyCacheLimit      = 256  // Size of the LRU cache for block bodies (and their RLP encodings)
	blockCacheLimit     = 256  // Size of the LRU cache for entire blocks
	receiptsCacheLimit  = 32   // Size of the LRU cache for per-block receipts
	txLookupCacheLimit  = 1024 // Size of the LRU cache for transaction lookup entries
	maxFutureBlocks     = 256  // Size of the LRU cache holding blocks queued for future processing
	maxTimeFutureBlocks = 30   // How far ahead a future block may be (presumably seconds — TODO confirm against the future-block queueing code)
	badBlockLimit       = 10   // Size of the LRU cache for bad blocks
	TriesInMemory       = 128  // Number of recent tries kept in memory (used by the pruning logic — confirm against the GC code)

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//   * the `Bloom` field of receipt is deleted
	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//  The following incompatible database changes were added:
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//      receipts' corresponding block
	// - Version 6
	//  The following incompatible database changes were added:
	//    * Transaction lookup information stores the corresponding block number instead of block hash
	// - Version 7
	//  The following incompatible database changes were added:
	//    * Use freezer as the ancient database to maintain all ancient data
	BlockChainVersion uint64 = 7
)
   109  
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain. A nil CacheConfig passed to NewBlockChain
// is replaced with default values there.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
}
   119  
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain // Canonical header chain (shared header-level logic)
	rmLogsFeed    event.Feed   // Event feeds below notify subscribers of chain events
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope // Tracks all subscriptions for bulk teardown
	genesisBlock  *types.Block            // Block number 0, loaded at construction time

	chainmu sync.RWMutex // blockchain insertion lock

	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine     consensus.Engine // Consensus engine used for header/seal verification
	validator  Validator        // Block and state validator interface
	prefetcher Prefetcher       // Block state prefetcher interface
	processor  Processor        // Block transaction processor interface
	vmConfig   vm.Config        // EVM configuration handed to block processing

	badBlocks       *lru.Cache                     // Bad block cache
	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
}
   181  
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor, restores the head markers from disk, reconciles them with the
// ancient (freezer) store and rewinds past any known bad blocks before
// starting the background update goroutine.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	// Fall back to sane cache defaults when the caller did not provide any.
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	// lru.New only fails for non-positive sizes, which the constant limits
	// rule out, so the errors are deliberately ignored.
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	txLookupCache, _ := lru.New(txLookupCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		txLookupCache:  txLookupCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	// The genesis block must already exist in the database.
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}

	// Store typed nil pointers so the type assertions in CurrentBlock and
	// CurrentFastBlock never see an untyped nil from atomic.Value.
	var nilBlock *types.Block
	bc.currentBlock.Store(nilBlock)
	bc.currentFastBlock.Store(nilBlock)

	// Initialize the chain with ancient data if it isn't empty.
	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
	}

	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (ethash cache or clique voting snapshot). Might as well do
	// it in advance.
	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

	// Reconcile the head markers with the ancient store: if the freezer holds
	// data above the current heads, roll the chain back so they agree.
	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
			needRewind = true
			low = fullBlock.NumberU64()
		}
		// In fast sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated, truncate the
		// extra data here.
		fastBlock := bc.CurrentFastBlock()
		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
			needRewind = true
			// NOTE(review): `low == 0` also holds when the full-block rewind
			// target is genuinely block 0 — confirm this fallthrough is intended.
			if fastBlock.NumberU64() < low || low == 0 {
				low = fastBlock.NumberU64()
			}
		}
		if needRewind {
			var hashes []common.Hash
			previous := bc.CurrentHeader().Number.Uint64()
			// Collect every canonical hash above the rewind point so the
			// rollback can unwind them.
			for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
				hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
			}
			bc.Rollback(hashes)
			log.Warn("Truncate ancient chain", "from", previous, "to", low)
		}
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state and start the background
	// update loop.
	go bc.update()
	return bc, nil
}
   300  
   301  func (bc *BlockChain) getProcInterrupt() bool {
   302  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   303  }
   304  
   305  // GetVMConfig returns the block chain VM config.
   306  func (bc *BlockChain) GetVMConfig() *vm.Config {
   307  	return &bc.vmConfig
   308  }
   309  
   310  // empty returns an indicator whether the blockchain is empty.
   311  // Note, it's a special case that we connect a non-empty ancient
   312  // database with an empty node, so that we can plugin the ancient
   313  // into node seamlessly.
   314  func (bc *BlockChain) empty() bool {
   315  	genesis := bc.genesisBlock.Hash()
   316  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   317  		if hash != genesis {
   318  			return false
   319  		}
   320  	}
   321  	return true
   322  }
   323  
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held. It restores, in order: the head
// block (repairing it if its state is missing), the head header and the head
// fast-sync block, falling back to Reset() when the database is empty or
// corrupt.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		// repair rewinds currentBlock (through the pointer) to the most
		// recent ancestor whose state is still present.
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
		rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)
	headBlockGauge.Update(int64(currentBlock.NumberU64()))

	// Restore the last known head header, defaulting to the head block's
	// header when the stored marker is missing or unresolvable.
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block, again defaulting to the head
	// block when the stored marker cannot be resolved.
	bc.currentFastBlock.Store(currentBlock)
	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))

	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
			headFastBlockGauge.Update(int64(block.NumberU64()))
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))

	return nil
}
   386  
// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync). The heavy lifting is delegated to hc.SetHead with
// two callbacks: updateFn degrades the block/fast-block markers, delFn removes
// per-block data (from the ancient store or the active store as appropriate).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// updateFn lowers the head-block and head-fast-block markers (on disk and
	// in memory) whenever the header chain drops below them.
	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
		// Rewind the block chain, ensuring we don't end up with a stateless head block
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			if newHeadBlock == nil {
				newHeadBlock = bc.genesisBlock
			} else {
				if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
					// Rewound state missing, rolled back to before pivot, reset to genesis
					newHeadBlock = bc.genesisBlock
				}
			}
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe the update in-memory markers directly.
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
		}

		// Rewind the fast block in a simpleton way to the target head
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If either blocks reached nil, reset to the genesis state
			if newHeadFastBlock == nil {
				newHeadFastBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe the update in-memory markers directly.
			bc.currentFastBlock.Store(newHeadFastBlock)
			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
		}
	}

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// Truncate all relative data(header, total difficulty, body, receipt
			// and canonical hash) from ancient store.
			if err := bc.db.TruncateAncients(num + 1); err != nil {
				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
			}

			// Remove the hash <-> number mapping from the active store.
			rawdb.DeleteHeaderNumber(db, hash)
		} else {
			// Remove relative body and receipts from the active store.
			// The header, total difficulty and canonical hash will be
			// removed in the hc.SetHead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, bloombits, etc
	}
	bc.hc.SetHead(head, updateFn, delFn)

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	// Reload the in-memory markers from the (now rewound) database state.
	return bc.loadLastState()
}
   471  
   472  // FastSyncCommitHead sets the current head block to the one defined by the hash
   473  // irrelevant what the chain contents were prior.
   474  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   475  	// Make sure that both the block as well at its state trie exists
   476  	block := bc.GetBlockByHash(hash)
   477  	if block == nil {
   478  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   479  	}
   480  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
   481  		return err
   482  	}
   483  	// If all checks out, manually set the head block
   484  	bc.chainmu.Lock()
   485  	bc.currentBlock.Store(block)
   486  	headBlockGauge.Update(int64(block.NumberU64()))
   487  	bc.chainmu.Unlock()
   488  
   489  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   490  	return nil
   491  }
   492  
   493  // GasLimit returns the gas limit of the current HEAD block.
   494  func (bc *BlockChain) GasLimit() uint64 {
   495  	return bc.CurrentBlock().GasLimit()
   496  }
   497  
   498  // CurrentBlock retrieves the current head block of the canonical chain. The
   499  // block is retrieved from the blockchain's internal cache.
   500  func (bc *BlockChain) CurrentBlock() *types.Block {
   501  	return bc.currentBlock.Load().(*types.Block)
   502  }
   503  
   504  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   505  // chain. The block is retrieved from the blockchain's internal cache.
   506  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   507  	return bc.currentFastBlock.Load().(*types.Block)
   508  }
   509  
   510  // Validator returns the current validator.
   511  func (bc *BlockChain) Validator() Validator {
   512  	return bc.validator
   513  }
   514  
   515  // Processor returns the current processor.
   516  func (bc *BlockChain) Processor() Processor {
   517  	return bc.processor
   518  }
   519  
   520  // State returns a new mutable state based on the current HEAD block.
   521  func (bc *BlockChain) State() (*state.StateDB, error) {
   522  	return bc.StateAt(bc.CurrentBlock().Root())
   523  }
   524  
   525  // StateAt returns a new mutable state based on a particular point in time.
   526  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   527  	return state.New(root, bc.stateCache)
   528  }
   529  
   530  // StateCache returns the caching database underpinning the blockchain instance.
   531  func (bc *BlockChain) StateCache() state.Database {
   532  	return bc.stateCache
   533  }
   534  
   535  // Reset purges the entire blockchain, restoring it to its genesis state.
   536  func (bc *BlockChain) Reset() error {
   537  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   538  }
   539  
// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state. The chain is first rewound to block zero via
// SetHead, then the given genesis block is (re)written to the database and
// every in-memory chain marker is pointed at it.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	// SetHead takes and releases chainmu itself, so it must be re-acquired
	// here for the remaining mutations.
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	batch := bc.db.NewBatch()
	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
	rawdb.WriteBlock(batch, genesis)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.writeHeadBlock(genesis)

	// Last update all in-memory chain markers
	bc.genesisBlock = genesis
	bc.currentBlock.Store(bc.genesisBlock)
	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)
	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	return nil
}
   569  
   570  // repair tries to repair the current blockchain by rolling back the current block
   571  // until one with associated state is found. This is needed to fix incomplete db
   572  // writes caused either by crashes/power outages, or simply non-committed tries.
   573  //
   574  // This method only rolls back the current block. The current header and current
   575  // fast block are left intact.
   576  func (bc *BlockChain) repair(head **types.Block) error {
   577  	for {
   578  		// Abort if we've rewound to a head block that does have associated state
   579  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   580  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   581  			return nil
   582  		}
   583  		// Otherwise rewind one block and recheck state availability there
   584  		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   585  		if block == nil {
   586  			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
   587  		}
   588  		*head = block
   589  	}
   590  }
   591  
   592  // Export writes the active chain to the given writer.
   593  func (bc *BlockChain) Export(w io.Writer) error {
   594  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   595  }
   596  
   597  // ExportN writes a subset of the active chain to the given writer.
   598  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   599  	bc.chainmu.RLock()
   600  	defer bc.chainmu.RUnlock()
   601  
   602  	if first > last {
   603  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   604  	}
   605  	log.Info("Exporting batch of blocks", "count", last-first+1)
   606  
   607  	start, reported := time.Now(), time.Now()
   608  	for nr := first; nr <= last; nr++ {
   609  		block := bc.GetBlockByNumber(nr)
   610  		if block == nil {
   611  			return fmt.Errorf("export failed on #%d: not found", nr)
   612  		}
   613  		if err := block.EncodeRLP(w); err != nil {
   614  			return err
   615  		}
   616  		if time.Since(reported) >= statsReportLimit {
   617  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   618  			reported = time.Now()
   619  		}
   620  	}
   621  	return nil
   622  }
   623  
// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head.
	// All marker writes go through one batch so they land atomically.
	batch := bc.db.NewBatch()
	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
	rawdb.WriteTxLookupEntries(batch, block)
	rawdb.WriteHeadBlockHash(batch, block.Hash())

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		rawdb.WriteHeadHeaderHash(batch, block.Hash())
		rawdb.WriteHeadFastBlockHash(batch, block.Hash())
	}
	// Flush the whole batch into the disk, exit the node if failed
	if err := batch.Write(); err != nil {
		log.Crit("Failed to update chain indexes and markers", "err", err)
	}
	// Update all in-memory chain markers in the last step, only after the
	// on-disk state has been committed.
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		bc.currentFastBlock.Store(block)
		headFastBlockGauge.Update(int64(block.NumberU64()))
	}
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
}
   658  
   659  // Genesis retrieves the chain's genesis block.
   660  func (bc *BlockChain) Genesis() *types.Block {
   661  	return bc.genesisBlock
   662  }
   663  
   664  // GetBody retrieves a block body (transactions and uncles) from the database by
   665  // hash, caching it if found.
   666  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   667  	// Short circuit if the body's already in the cache, retrieve otherwise
   668  	if cached, ok := bc.bodyCache.Get(hash); ok {
   669  		body := cached.(*types.Body)
   670  		return body
   671  	}
   672  	number := bc.hc.GetBlockNumber(hash)
   673  	if number == nil {
   674  		return nil
   675  	}
   676  	body := rawdb.ReadBody(bc.db, hash, *number)
   677  	if body == nil {
   678  		return nil
   679  	}
   680  	// Cache the found body for next time and return
   681  	bc.bodyCache.Add(hash, body)
   682  	return body
   683  }
   684  
   685  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   686  // caching it if found.
   687  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   688  	// Short circuit if the body's already in the cache, retrieve otherwise
   689  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   690  		return cached.(rlp.RawValue)
   691  	}
   692  	number := bc.hc.GetBlockNumber(hash)
   693  	if number == nil {
   694  		return nil
   695  	}
   696  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   697  	if len(body) == 0 {
   698  		return nil
   699  	}
   700  	// Cache the found body for next time and return
   701  	bc.bodyRLPCache.Add(hash, body)
   702  	return body
   703  }
   704  
   705  // HasBlock checks if a block is fully present in the database or not.
   706  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   707  	if bc.blockCache.Contains(hash) {
   708  		return true
   709  	}
   710  	return rawdb.HasBody(bc.db, hash, number)
   711  }
   712  
   713  // HasFastBlock checks if a fast block is fully present in the database or not.
   714  func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
   715  	if !bc.HasBlock(hash, number) {
   716  		return false
   717  	}
   718  	if bc.receiptsCache.Contains(hash) {
   719  		return true
   720  	}
   721  	return rawdb.HasReceipts(bc.db, hash, number)
   722  }
   723  
   724  // HasState checks if state trie is fully present in the database or not.
   725  func (bc *BlockChain) HasState(hash common.Hash) bool {
   726  	_, err := bc.stateCache.OpenTrie(hash)
   727  	return err == nil
   728  }
   729  
   730  // HasBlockAndState checks if a block and associated state trie is fully present
   731  // in the database or not, caching it if present.
   732  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   733  	// Check first that the block itself is known
   734  	block := bc.GetBlock(hash, number)
   735  	if block == nil {
   736  		return false
   737  	}
   738  	return bc.HasState(block.Root())
   739  }
   740  
   741  // GetBlock retrieves a block from the database by hash and number,
   742  // caching it if found.
   743  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   744  	// Short circuit if the block's already in the cache, retrieve otherwise
   745  	if block, ok := bc.blockCache.Get(hash); ok {
   746  		return block.(*types.Block)
   747  	}
   748  	block := rawdb.ReadBlock(bc.db, hash, number)
   749  	if block == nil {
   750  		return nil
   751  	}
   752  	// Cache the found block for next time and return
   753  	bc.blockCache.Add(block.Hash(), block)
   754  	return block
   755  }
   756  
   757  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   758  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   759  	number := bc.hc.GetBlockNumber(hash)
   760  	if number == nil {
   761  		return nil
   762  	}
   763  	return bc.GetBlock(hash, *number)
   764  }
   765  
   766  // GetBlockByNumber retrieves a block from the database by number, caching it
   767  // (associated with its hash) if found.
   768  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   769  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   770  	if hash == (common.Hash{}) {
   771  		return nil
   772  	}
   773  	return bc.GetBlock(hash, number)
   774  }
   775  
   776  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   777  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   778  	if receipts, ok := bc.receiptsCache.Get(hash); ok {
   779  		return receipts.(types.Receipts)
   780  	}
   781  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   782  	if number == nil {
   783  		return nil
   784  	}
   785  	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
   786  	if receipts == nil {
   787  		return nil
   788  	}
   789  	bc.receiptsCache.Add(hash, receipts)
   790  	return receipts
   791  }
   792  
   793  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   794  // [deprecated by eth/62]
   795  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   796  	number := bc.hc.GetBlockNumber(hash)
   797  	if number == nil {
   798  		return nil
   799  	}
   800  	for i := 0; i < n; i++ {
   801  		block := bc.GetBlock(hash, *number)
   802  		if block == nil {
   803  			break
   804  		}
   805  		blocks = append(blocks, block)
   806  		hash = block.ParentHash()
   807  		*number--
   808  	}
   809  	return
   810  }
   811  
   812  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   813  // a specific distance is reached.
   814  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   815  	uncles := []*types.Header{}
   816  	for i := 0; block != nil && i < length; i++ {
   817  		uncles = append(uncles, block.Uncles()...)
   818  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   819  	}
   820  	return uncles
   821  }
   822  
   823  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   824  // either from ephemeral in-memory cache, or from persistent storage.
   825  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   826  	return bc.stateCache.TrieDB().Node(hash)
   827  }
   828  
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	// Flip `running` 0 -> 1 exactly once so the shutdown path is idempotent;
	// subsequent callers return immediately.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	// Wait for all in-flight operations (tracked via bc.wg) to drain before
	// touching the trie database below.
	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
			// Skip offsets that would underflow past the genesis block
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		// Drop every remaining metadata reference so the in-memory trie
		// database can release all cached nodes.
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		// After a full dereference pass the dirty set should be empty; any
		// leftovers indicate a reference-counting bug.
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}
   869  
   870  func (bc *BlockChain) procFutureBlocks() {
   871  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   872  	for _, hash := range bc.futureBlocks.Keys() {
   873  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   874  			blocks = append(blocks, block.(*types.Block))
   875  		}
   876  	}
   877  	if len(blocks) > 0 {
   878  		sort.Slice(blocks, func(i, j int) bool {
   879  			return blocks[i].NumberU64() < blocks[j].NumberU64()
   880  		})
   881  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   882  		for i := range blocks {
   883  			bc.InsertChain(blocks[i : i+1])
   884  		}
   885  	}
   886  }
   887  
// WriteStatus status of write
type WriteStatus byte

const (
	// NonStatTy means the block was not written (an error occurred).
	NonStatTy WriteStatus = iota
	// CanonStatTy means the block was written as part of the canonical chain.
	CanonStatTy
	// SideStatTy means the block was written to a side (non-canonical) chain.
	SideStatTy
)
   896  
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Collect all on-disk marker updates in one batch so they land atomically.
	batch := bc.db.NewBatch()
	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		// Degrade the chain markers if they are explicitly reverted.
		// In theory we should update all in-memory markers in the
		// last step, however the direction of rollback is from high
		// to low, so it's safe to update in-memory markers directly.
		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			// The reverted hash is the head header: step it back to its parent
			newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
			rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash)
			bc.hc.SetCurrentHeader(newHeadHeader)
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			// The reverted hash is the fast sync head: step it back to its parent
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash())
			bc.currentFastBlock.Store(newFastBlock)
			headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			// The reverted hash is the full block head: step it back to its parent
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash())
			bc.currentBlock.Store(newBlock)
			headBlockGauge.Update(int64(newBlock.NumberU64()))
		}
	}
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rollback chain markers", "err", err)
	}
	// Truncate ancient data which exceeds the current header.
	//
	// Notably, it can happen that system crashes without truncating the ancient data
	// but the head indicator has been updated in the active store. Regarding this issue,
	// system will self recovery by truncating the extra data during the setup phase.
	if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
		log.Crit("Truncate ancient store failed", "err", err)
	}
}
   942  
   943  // truncateAncient rewinds the blockchain to the specified header and deletes all
   944  // data in the ancient store that exceeds the specified header.
   945  func (bc *BlockChain) truncateAncient(head uint64) error {
   946  	frozen, err := bc.db.Ancients()
   947  	if err != nil {
   948  		return err
   949  	}
   950  	// Short circuit if there is no data to truncate in ancient store.
   951  	if frozen <= head+1 {
   952  		return nil
   953  	}
   954  	// Truncate all the data in the freezer beyond the specified head
   955  	if err := bc.db.TruncateAncients(head + 1); err != nil {
   956  		return err
   957  	}
   958  	// Clear out any stale content from the caches
   959  	bc.hc.headerCache.Purge()
   960  	bc.hc.tdCache.Purge()
   961  	bc.hc.numberCache.Purge()
   962  
   963  	// Clear out any stale content from the caches
   964  	bc.bodyCache.Purge()
   965  	bc.bodyRLPCache.Purge()
   966  	bc.receiptsCache.Purge()
   967  	bc.blockCache.Purge()
   968  	bc.txLookupCache.Purge()
   969  	bc.futureBlocks.Purge()
   970  
   971  	log.Info("Rewind ancient data", "number", head)
   972  	return nil
   973  }
   974  
// numberHash is just a container for a number and a hash, to represent a block
type numberHash struct {
	number uint64      // block number (height)
	hash   common.Hash // block hash
}
   980  
   981  // InsertReceiptChain attempts to complete an already existing header chain with
   982  // transaction and receipt data.
   983  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
   984  	// We don't require the chainMu here since we want to maximize the
   985  	// concurrency of header insertion and receipt insertion.
   986  	bc.wg.Add(1)
   987  	defer bc.wg.Done()
   988  
   989  	var (
   990  		ancientBlocks, liveBlocks     types.Blocks
   991  		ancientReceipts, liveReceipts []types.Receipts
   992  	)
   993  	// Do a sanity check that the provided chain is actually ordered and linked
   994  	for i := 0; i < len(blockChain); i++ {
   995  		if i != 0 {
   996  			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   997  				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   998  					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   999  				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
  1000  					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
  1001  			}
  1002  		}
  1003  		if blockChain[i].NumberU64() <= ancientLimit {
  1004  			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
  1005  		} else {
  1006  			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
  1007  		}
  1008  	}
  1009  
  1010  	var (
  1011  		stats = struct{ processed, ignored int32 }{}
  1012  		start = time.Now()
  1013  		size  = 0
  1014  	)
  1015  	// updateHead updates the head fast sync block if the inserted blocks are better
  1016  	// and returns a indicator whether the inserted blocks are canonical.
  1017  	updateHead := func(head *types.Block) bool {
  1018  		bc.chainmu.Lock()
  1019  
  1020  		// Rewind may have occurred, skip in that case.
  1021  		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
  1022  			currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
  1023  			if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
  1024  				rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
  1025  				bc.currentFastBlock.Store(head)
  1026  				headFastBlockGauge.Update(int64(head.NumberU64()))
  1027  				bc.chainmu.Unlock()
  1028  				return true
  1029  			}
  1030  		}
  1031  		bc.chainmu.Unlock()
  1032  		return false
  1033  	}
  1034  	// writeAncient writes blockchain and corresponding receipt chain into ancient store.
  1035  	//
  1036  	// this function only accepts canonical chain data. All side chain will be reverted
  1037  	// eventually.
  1038  	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1039  		var (
  1040  			previous = bc.CurrentFastBlock()
  1041  			batch    = bc.db.NewBatch()
  1042  		)
  1043  		// If any error occurs before updating the head or we are inserting a side chain,
  1044  		// all the data written this time wll be rolled back.
  1045  		defer func() {
  1046  			if previous != nil {
  1047  				if err := bc.truncateAncient(previous.NumberU64()); err != nil {
  1048  					log.Crit("Truncate ancient store failed", "err", err)
  1049  				}
  1050  			}
  1051  		}()
  1052  		var deleted []*numberHash
  1053  		for i, block := range blockChain {
  1054  			// Short circuit insertion if shutting down or processing failed
  1055  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1056  				return 0, errInsertionInterrupted
  1057  			}
  1058  			// Short circuit insertion if it is required(used in testing only)
  1059  			if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
  1060  				return i, errors.New("insertion is terminated for testing purpose")
  1061  			}
  1062  			// Short circuit if the owner header is unknown
  1063  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1064  				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
  1065  			}
  1066  			var (
  1067  				start  = time.Now()
  1068  				logged = time.Now()
  1069  				count  int
  1070  			)
  1071  			// Migrate all ancient blocks. This can happen if someone upgrades from Geth
  1072  			// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
  1073  			// long term.
  1074  			for {
  1075  				// We can ignore the error here since light client won't hit this code path.
  1076  				frozen, _ := bc.db.Ancients()
  1077  				if frozen >= block.NumberU64() {
  1078  					break
  1079  				}
  1080  				h := rawdb.ReadCanonicalHash(bc.db, frozen)
  1081  				b := rawdb.ReadBlock(bc.db, h, frozen)
  1082  				size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
  1083  				count += 1
  1084  
  1085  				// Always keep genesis block in active database.
  1086  				if b.NumberU64() != 0 {
  1087  					deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
  1088  				}
  1089  				if time.Since(logged) > 8*time.Second {
  1090  					log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
  1091  					logged = time.Now()
  1092  				}
  1093  				// Don't collect too much in-memory, write it out every 100K blocks
  1094  				if len(deleted) > 100000 {
  1095  					// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1096  					if err := bc.db.Sync(); err != nil {
  1097  						return 0, err
  1098  					}
  1099  					// Wipe out canonical block data.
  1100  					for _, nh := range deleted {
  1101  						rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
  1102  						rawdb.DeleteCanonicalHash(batch, nh.number)
  1103  					}
  1104  					if err := batch.Write(); err != nil {
  1105  						return 0, err
  1106  					}
  1107  					batch.Reset()
  1108  					// Wipe out side chain too.
  1109  					for _, nh := range deleted {
  1110  						for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
  1111  							rawdb.DeleteBlock(batch, hash, nh.number)
  1112  						}
  1113  					}
  1114  					if err := batch.Write(); err != nil {
  1115  						return 0, err
  1116  					}
  1117  					batch.Reset()
  1118  					deleted = deleted[0:]
  1119  				}
  1120  			}
  1121  			if count > 0 {
  1122  				log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
  1123  			}
  1124  			// Flush data into ancient database.
  1125  			size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
  1126  			rawdb.WriteTxLookupEntries(batch, block)
  1127  
  1128  			stats.processed++
  1129  		}
  1130  		// Flush all tx-lookup index data.
  1131  		size += batch.ValueSize()
  1132  		if err := batch.Write(); err != nil {
  1133  			return 0, err
  1134  		}
  1135  		batch.Reset()
  1136  
  1137  		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1138  		if err := bc.db.Sync(); err != nil {
  1139  			return 0, err
  1140  		}
  1141  		if !updateHead(blockChain[len(blockChain)-1]) {
  1142  			return 0, errors.New("side blocks can't be accepted as the ancient chain data")
  1143  		}
  1144  		previous = nil // disable rollback explicitly
  1145  
  1146  		// Wipe out canonical block data.
  1147  		for _, nh := range deleted {
  1148  			rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
  1149  			rawdb.DeleteCanonicalHash(batch, nh.number)
  1150  		}
  1151  		for _, block := range blockChain {
  1152  			// Always keep genesis block in active database.
  1153  			if block.NumberU64() != 0 {
  1154  				rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
  1155  				rawdb.DeleteCanonicalHash(batch, block.NumberU64())
  1156  			}
  1157  		}
  1158  		if err := batch.Write(); err != nil {
  1159  			return 0, err
  1160  		}
  1161  		batch.Reset()
  1162  
  1163  		// Wipe out side chain too.
  1164  		for _, nh := range deleted {
  1165  			for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
  1166  				rawdb.DeleteBlock(batch, hash, nh.number)
  1167  			}
  1168  		}
  1169  		for _, block := range blockChain {
  1170  			// Always keep genesis block in active database.
  1171  			if block.NumberU64() != 0 {
  1172  				for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
  1173  					rawdb.DeleteBlock(batch, hash, block.NumberU64())
  1174  				}
  1175  			}
  1176  		}
  1177  		if err := batch.Write(); err != nil {
  1178  			return 0, err
  1179  		}
  1180  		return 0, nil
  1181  	}
  1182  	// writeLive writes blockchain and corresponding receipt chain into active store.
  1183  	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1184  		batch := bc.db.NewBatch()
  1185  		for i, block := range blockChain {
  1186  			// Short circuit insertion if shutting down or processing failed
  1187  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1188  				return 0, errInsertionInterrupted
  1189  			}
  1190  			// Short circuit if the owner header is unknown
  1191  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1192  				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
  1193  			}
  1194  			if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1195  				stats.ignored++
  1196  				continue
  1197  			}
  1198  			// Write all the data out into the database
  1199  			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
  1200  			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
  1201  			rawdb.WriteTxLookupEntries(batch, block)
  1202  
  1203  			// Write everything belongs to the blocks into the database. So that
  1204  			// we can ensure all components of body is completed(body, receipts,
  1205  			// tx indexes)
  1206  			if batch.ValueSize() >= ethdb.IdealBatchSize {
  1207  				if err := batch.Write(); err != nil {
  1208  					return 0, err
  1209  				}
  1210  				size += batch.ValueSize()
  1211  				batch.Reset()
  1212  			}
  1213  			stats.processed++
  1214  		}
  1215  		// Write everything belongs to the blocks into the database. So that
  1216  		// we can ensure all components of body is completed(body, receipts,
  1217  		// tx indexes)
  1218  		if batch.ValueSize() > 0 {
  1219  			size += batch.ValueSize()
  1220  			if err := batch.Write(); err != nil {
  1221  				return 0, err
  1222  			}
  1223  		}
  1224  		updateHead(blockChain[len(blockChain)-1])
  1225  		return 0, nil
  1226  	}
  1227  	// Write downloaded chain data and corresponding receipt chain data.
  1228  	if len(ancientBlocks) > 0 {
  1229  		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
  1230  			if err == errInsertionInterrupted {
  1231  				return 0, nil
  1232  			}
  1233  			return n, err
  1234  		}
  1235  	}
  1236  	if len(liveBlocks) > 0 {
  1237  		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
  1238  			if err == errInsertionInterrupted {
  1239  				return 0, nil
  1240  			}
  1241  			return n, err
  1242  		}
  1243  	}
  1244  
  1245  	head := blockChain[len(blockChain)-1]
  1246  	context := []interface{}{
  1247  		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
  1248  		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
  1249  		"size", common.StorageSize(size),
  1250  	}
  1251  	if stats.ignored > 0 {
  1252  		context = append(context, []interface{}{"ignored", stats.ignored}...)
  1253  	}
  1254  	log.Info("Imported new block receipts", context...)
  1255  
  1256  	return 0, nil
  1257  }
  1258  
  1259  var lastWrite uint64
  1260  
  1261  // writeBlockWithoutState writes only the block and its metadata to the database,
  1262  // but does not write any state. This is used to construct competing side forks
  1263  // up to the point where they exceed the canonical total difficulty.
  1264  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1265  	bc.wg.Add(1)
  1266  	defer bc.wg.Done()
  1267  
  1268  	batch := bc.db.NewBatch()
  1269  	rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
  1270  	rawdb.WriteBlock(batch, block)
  1271  	if err := batch.Write(); err != nil {
  1272  		log.Crit("Failed to write block into disk", "err", err)
  1273  	}
  1274  	return nil
  1275  }
  1276  
  1277  // writeKnownBlock updates the head block flag with a known block
  1278  // and introduces chain reorg if necessary.
  1279  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1280  	bc.wg.Add(1)
  1281  	defer bc.wg.Done()
  1282  
  1283  	current := bc.CurrentBlock()
  1284  	if block.ParentHash() != current.Hash() {
  1285  		if err := bc.reorg(current, block); err != nil {
  1286  			return err
  1287  		}
  1288  	}
  1289  	bc.writeHeadBlock(block)
  1290  	return nil
  1291  }
  1292  
  1293  // WriteBlockWithState writes the block and all associated state to the database.
  1294  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1295  	bc.chainmu.Lock()
  1296  	defer bc.chainmu.Unlock()
  1297  
  1298  	return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
  1299  }
  1300  
// writeBlockWithState writes the block and all associated state to the database,
// but expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database.
	//
	// Note all the components of block(td, hash->number map, header, body, receipts)
	// should be written atomically. BlockBatch is used for containing all components.
	blockBatch := bc.db.NewBatch()
	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
	rawdb.WriteBlock(blockBatch, block)
	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
	rawdb.WritePreimages(blockBatch, state.Preimages())
	if err := blockBatch.Write(); err != nil {
		log.Crit("Failed to write block into disk", "err", err)
	}
	// Commit all cached state changes into underlying memory database.
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.TrieDirtyDisabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		// Push with negative priority so the queue pops lowest block numbers first
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > TriesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - TriesInMemory

			// If we exceeded out time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
					}
					// Flush an entire trie and restart the counters
					// NOTE(review): the Commit error is discarded here — presumably a
					// best-effort flush, but worth confirming upstream intent.
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}
	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			// Tie-break randomly unless the preservation predicate decides
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	// Set new head.
	if status == CanonStatTy {
		bc.writeHeadBlock(block)
	}
	bc.futureBlocks.Remove(block.Hash())

	if status == CanonStatTy {
		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
		if len(logs) > 0 {
			bc.logsFeed.Send(logs)
		}
		// In theory we should fire a ChainHeadEvent when we inject
		// a canonical block, but sometimes we can insert a batch of
		// canonicial blocks. Avoid firing too much ChainHeadEvents,
		// we will fire an accumulated ChainHeadEvent and disable fire
		// event here.
		if emitHeadEvent {
			bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
		}
	} else {
		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
	}
	return status, nil
}
  1441  
  1442  // addFutureBlock checks if the block is within the max allowed window to get
  1443  // accepted for future processing, and returns an error if the block is too far
  1444  // ahead and was not added.
  1445  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1446  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1447  	if block.Time() > max {
  1448  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1449  	}
  1450  	bc.futureBlocks.Add(block.Hash(), block)
  1451  	return nil
  1452  }
  1453  
  1454  // InsertChain attempts to insert the given batch of blocks in to the canonical
  1455  // chain or, otherwise, create a fork. If an error is returned it will return
  1456  // the index number of the failing block as well an error describing what went
  1457  // wrong.
  1458  //
  1459  // After insertion is done, all accumulated events will be fired.
  1460  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1461  	// Sanity check that we have something meaningful to import
  1462  	if len(chain) == 0 {
  1463  		return 0, nil
  1464  	}
  1465  
  1466  	bc.blockProcFeed.Send(true)
  1467  	defer bc.blockProcFeed.Send(false)
  1468  
  1469  	// Remove already known canon-blocks
  1470  	var (
  1471  		block, prev *types.Block
  1472  	)
  1473  	// Do a sanity check that the provided chain is actually ordered and linked
  1474  	for i := 1; i < len(chain); i++ {
  1475  		block = chain[i]
  1476  		prev = chain[i-1]
  1477  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1478  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1479  			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
  1480  				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())
  1481  
  1482  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
  1483  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1484  		}
  1485  	}
  1486  	// Pre-checks passed, start the full block imports
  1487  	bc.wg.Add(1)
  1488  	bc.chainmu.Lock()
  1489  	n, err := bc.insertChain(chain, true)
  1490  	bc.chainmu.Unlock()
  1491  	bc.wg.Done()
  1492  
  1493  	return n, err
  1494  }
  1495  
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	var (
		stats     = insertStats{startTime: mclock.Now()}
		lastCanon *types.Block
	)
	// Fire a single chain head event if we've progressed the chain
	defer func() {
		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
			bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
		}
	}()
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)
	// pirlguard: Pirl-specific scan of the whole batch for a chain attack.
	// The result is only consulted in the ErrDelayTooHigh case of the switch
	// below; a nil errChain leaves the normal import flow untouched.
	errChain := bc.checkChainForAttack(chain)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, bc.validator)

	block, err := it.next()

	// Left-trim all the known blocks
	if err == ErrKnownBlock {
		// First block (and state) is known
		//   1. We did a roll-back, and should now do a re-import
		//   2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
		// 	    from the canonical chain, which has not been verified.
		// Skip all known blocks that are behind us
		var (
			current  = bc.CurrentBlock()
			localTd  = bc.GetTd(current.Hash(), current.NumberU64())
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
		)
		// Skip known blocks only while they don't push the external TD past
		// our local head; the moment they would, fall through to re-import.
		for block != nil && err == ErrKnownBlock {
			externTd = new(big.Int).Add(externTd, block.Difficulty())
			if localTd.Cmp(externTd) < 0 {
				break
			}
			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
			stats.ignored++

			block, err = it.next()
		}
		// The remaining blocks are still known blocks, the only scenario here is:
		// During the fast sync, the pivot point is already submitted but rollback
		// happens. Then node resets the head full block to a lower height via `rollback`
		// and leaves a few known blocks in the database.
		//
		// When node runs a fast sync again, it can re-import a batch of known blocks via
		// `insertChain` while a part of them have higher total difficulty than current
		// head full block(new pivot point).
		for block != nil && err == ErrKnownBlock {
			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, err
			}
			lastCanon = block

			block, err = it.next()
		}
		// Falls through to the block import
	}
	switch {
	// First block is pruned, insert as sidechain and reorg only if TD grows enough
	case err == consensus.ErrPrunedAncestor:
		log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
		return bc.insertSideChain(block, it)

	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, err
			}
			block, err = it.next()
		}
		stats.queued += it.processed()
		stats.ignored += it.remaining()

		// If there are any still remaining, mark as ignored
		return it.index, err

	//Pirlguard check: abort the whole batch if the attack detector flagged it.
	// NOTE(review): this reports the first non-known block of the batch, which
	// is not necessarily the block that triggered the delay check — confirm
	// against checkChainForAttack's semantics.
	case errChain == ErrDelayTooHigh:
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, errChain)
		return it.index, errChain


	// Some other error occurred, abort
	case err != nil:
		bc.futureBlocks.Remove(block.Hash())
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return it.index, err
	}
	// No validation errors for the first block (or chain prefix skipped)
	// Note Go operator precedence: the condition parses as
	// (block != nil && err == nil) || err == ErrKnownBlock.
	for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, ErrBlacklistedHash
		}
		// If the block is known (in the middle of the chain), it's a special case for
		// Clique blocks where they can share state among each other, so importing an
		// older block might complete the state of the subsequent one. In this case,
		// just skip the block (we already validated it once fully (and crashed), since
		// its header and body was already in the database).
		if err == ErrKnownBlock {
			logger := log.Debug
			if bc.chainConfig.Clique == nil {
				logger = log.Warn
			}
			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"root", block.Root())

			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, err
			}
			stats.processed++

			// We can assume that logs are empty here, since the only way for consecutive
			// Clique blocks to have the same state is if there are no transactions.
			lastCanon = block
			continue
		}
		// Retrieve the parent block and it's state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
		}
		statedb, err := state.New(parent.Root, bc.stateCache)
		if err != nil {
			return it.index, err
		}
		// If we have a followup block, run that against the current state to pre-cache
		// transactions and probabilistically some of the account/storage trie nodes.
		// followupInterrupt is set to 1 (below) to tell the prefetcher goroutine
		// to stop once this block's import has finished or failed.
		var followupInterrupt uint32
		if !bc.cacheConfig.TrieCleanNoPrefetch {
			if followup, err := it.peek(); followup != nil && err == nil {
				throwaway, _ := state.New(parent.Root, bc.stateCache)
				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, interrupt)

					blockPrefetchExecuteTimer.Update(time.Since(start))
					if atomic.LoadUint32(interrupt) == 1 {
						blockPrefetchInterruptMeter.Mark(1)
					}
				}(time.Now(), followup, throwaway, &followupInterrupt)
			}
		}
		// Process block using the parent state as reference point
		substart := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, err
		}
		// Update the metrics touched during block processing
		accountReadTimer.Update(statedb.AccountReads)     // Account reads are complete, we can mark them
		storageReadTimer.Update(statedb.StorageReads)     // Storage reads are complete, we can mark them
		accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
		storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them

		triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
		trieproc := statedb.AccountReads + statedb.AccountUpdates
		trieproc += statedb.StorageReads + statedb.StorageUpdates

		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

		// Validate the state using the default validator
		substart = time.Now()
		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, err
		}
		proctime := time.Since(start)

		// Update the metrics touched during block validation
		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them

		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))

		// Write the block to the chain and get the status.
		substart = time.Now()
		status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
		if err != nil {
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, err
		}
		atomic.StoreUint32(&followupInterrupt, 1)

		// Update the metrics touched during block commit
		accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
		storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them

		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
		blockInsertTimer.UpdateSince(start)

		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())

		default:
			// This in theory is impossible, but lets be nice to our future selves and leave
			// a log, instead of trying to track down blocks imports that don't emit logs.
			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
		stats.processed++
		stats.usedGas += usedGas

		dirty, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, dirty)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && err == consensus.ErrFutureBlock {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, err
		}
		block, err = it.next()

		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()

	return it.index, err
}
  1780  
// insertSideChain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other errors means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.NumberU64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block which has it's state pruned

				// Collect the TD of the block. Since we know it's a canon one,
				// we can get it directly, and not (like further below) use
				// the parent and then add the block on top
				externTd = bc.GetTd(block.Hash(), block.NumberU64())
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exist! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, errors.New("sidechain ghost-state attack")
			}
		}
		// externTd accumulates the running total difficulty of the sidechain:
		// seeded from the first block's parent, then one difficulty per lap.
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
				return it.index, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to database. Loop ended
	// either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(current.Hash(), current.NumberU64())
	// Note: only a strictly higher local TD bails out here; an equal TD falls
	// through to the re-import path below.
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
		return it.index, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	// Walk headers backwards from the last imported sidechain block until an
	// ancestor with locally available state is found.
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, err := bc.insertChain(blocks, false); err != nil {
				return 0, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil
}
  1903  
  1904  // reorg takes two blocks, an old chain and a new chain and will reconstruct the
  1905  // blocks and inserts them to be part of the new canonical chain and accumulates
  1906  // potential missing transactions and post an event about them.
  1907  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1908  	var (
  1909  		newChain    types.Blocks
  1910  		oldChain    types.Blocks
  1911  		commonBlock *types.Block
  1912  
  1913  		deletedTxs types.Transactions
  1914  		addedTxs   types.Transactions
  1915  
  1916  		deletedLogs [][]*types.Log
  1917  		rebirthLogs [][]*types.Log
  1918  
  1919  		// collectLogs collects the logs that were generated or removed during
  1920  		// the processing of the block that corresponds with the given hash.
  1921  		// These logs are later announced as deleted or reborn
  1922  		collectLogs = func(hash common.Hash, removed bool) {
  1923  			number := bc.hc.GetBlockNumber(hash)
  1924  			if number == nil {
  1925  				return
  1926  			}
  1927  			receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
  1928  
  1929  			var logs []*types.Log
  1930  			for _, receipt := range receipts {
  1931  				for _, log := range receipt.Logs {
  1932  					l := *log
  1933  					if removed {
  1934  						l.Removed = true
  1935  					} else {
  1936  					}
  1937  					logs = append(logs, &l)
  1938  				}
  1939  			}
  1940  			if len(logs) > 0 {
  1941  				if removed {
  1942  					deletedLogs = append(deletedLogs, logs)
  1943  				} else {
  1944  					rebirthLogs = append(rebirthLogs, logs)
  1945  				}
  1946  			}
  1947  		}
  1948  		// mergeLogs returns a merged log slice with specified sort order.
  1949  		mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
  1950  			var ret []*types.Log
  1951  			if reverse {
  1952  				for i := len(logs) - 1; i >= 0; i-- {
  1953  					ret = append(ret, logs[i]...)
  1954  				}
  1955  			} else {
  1956  				for i := 0; i < len(logs); i++ {
  1957  					ret = append(ret, logs[i]...)
  1958  				}
  1959  			}
  1960  			return ret
  1961  		}
  1962  	)
  1963  	// Reduce the longer chain to the same number as the shorter one
  1964  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1965  		// Old chain is longer, gather all transactions and logs as deleted ones
  1966  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1967  			oldChain = append(oldChain, oldBlock)
  1968  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1969  			collectLogs(oldBlock.Hash(), true)
  1970  		}
  1971  	} else {
  1972  		// New chain is longer, stash all blocks away for subsequent insertion
  1973  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1974  			newChain = append(newChain, newBlock)
  1975  		}
  1976  	}
  1977  	if oldBlock == nil {
  1978  		return fmt.Errorf("invalid old chain")
  1979  	}
  1980  	if newBlock == nil {
  1981  		return fmt.Errorf("invalid new chain")
  1982  	}
  1983  	// Both sides of the reorg are at the same number, reduce both until the common
  1984  	// ancestor is found
  1985  	for {
  1986  		// If the common ancestor was found, bail out
  1987  		if oldBlock.Hash() == newBlock.Hash() {
  1988  			commonBlock = oldBlock
  1989  			break
  1990  		}
  1991  		// Remove an old block as well as stash away a new block
  1992  		oldChain = append(oldChain, oldBlock)
  1993  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1994  		collectLogs(oldBlock.Hash(), true)
  1995  
  1996  		newChain = append(newChain, newBlock)
  1997  
  1998  		// Step back with both chains
  1999  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  2000  		if oldBlock == nil {
  2001  			return fmt.Errorf("invalid old chain")
  2002  		}
  2003  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  2004  		if newBlock == nil {
  2005  			return fmt.Errorf("invalid new chain")
  2006  		}
  2007  	}
  2008  	// Ensure the user sees large reorgs
  2009  	if len(oldChain) > 0 && len(newChain) > 0 {
  2010  		logFn := log.Info
  2011  		msg := "Chain reorg detected"
  2012  		if len(oldChain) > 63 {
  2013  			msg = "Large chain reorg detected"
  2014  			logFn = log.Warn
  2015  		}
  2016  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  2017  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  2018  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2019  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  2020  	} else {
  2021  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  2022  	}
  2023  	// Insert the new chain(except the head block(reverse order)),
  2024  	// taking care of the proper incremental order.
  2025  	for i := len(newChain) - 1; i >= 1; i-- {
  2026  		// Insert the block in the canonical way, re-writing history
  2027  		bc.writeHeadBlock(newChain[i])
  2028  
  2029  		// Collect reborn logs due to chain reorg
  2030  		collectLogs(newChain[i].Hash(), false)
  2031  
  2032  		// Collect the new added transactions.
  2033  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  2034  	}
  2035  	// Delete useless indexes right now which includes the non-canonical
  2036  	// transaction indexes, canonical chain indexes which above the head.
  2037  	indexesBatch := bc.db.NewBatch()
  2038  	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
  2039  		rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
  2040  	}
  2041  	// Delete any canonical number assignments above the new head
  2042  	number := bc.CurrentBlock().NumberU64()
  2043  	for i := number + 1; ; i++ {
  2044  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  2045  		if hash == (common.Hash{}) {
  2046  			break
  2047  		}
  2048  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  2049  	}
  2050  	if err := indexesBatch.Write(); err != nil {
  2051  		log.Crit("Failed to delete useless indexes", "err", err)
  2052  	}
  2053  	// If any logs need to be fired, do it now. In theory we could avoid creating
  2054  	// this goroutine if there are no events to fire, but realistcally that only
  2055  	// ever happens if we're reorging empty blocks, which will only happen on idle
  2056  	// networks where performance is not an issue either way.
  2057  	if len(deletedLogs) > 0 {
  2058  		bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
  2059  	}
  2060  	if len(rebirthLogs) > 0 {
  2061  		bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
  2062  	}
  2063  	if len(oldChain) > 0 {
  2064  		for i := len(oldChain) - 1; i >= 0; i-- {
  2065  			bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  2066  		}
  2067  	}
  2068  	return nil
  2069  }
  2070  
  2071  func (bc *BlockChain) update() {
  2072  	futureTimer := time.NewTicker(5 * time.Second)
  2073  	defer futureTimer.Stop()
  2074  	for {
  2075  		select {
  2076  		case <-futureTimer.C:
  2077  			bc.procFutureBlocks()
  2078  		case <-bc.quit:
  2079  			return
  2080  		}
  2081  	}
  2082  }
  2083  
  2084  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  2085  func (bc *BlockChain) BadBlocks() []*types.Block {
  2086  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  2087  	for _, hash := range bc.badBlocks.Keys() {
  2088  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  2089  			block := blk.(*types.Block)
  2090  			blocks = append(blocks, block)
  2091  		}
  2092  	}
  2093  	return blocks
  2094  }
  2095  
  2096  // addBadBlock adds a bad block to the bad-block LRU cache
  2097  func (bc *BlockChain) addBadBlock(block *types.Block) {
  2098  	bc.badBlocks.Add(block.Hash(), block)
  2099  }
  2100  
  2101  // reportBlock logs a bad block error.
  2102  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2103  	bc.addBadBlock(block)
  2104  
  2105  	var receiptString string
  2106  	for i, receipt := range receipts {
  2107  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  2108  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2109  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2110  	}
  2111  	log.Error(fmt.Sprintf(`
  2112  ########## BAD BLOCK #########
  2113  Chain config: %v
  2114  
  2115  Number: %v
  2116  Hash: 0x%x
  2117  %v
  2118  
  2119  Error: %v
  2120  ##############################
  2121  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  2122  }
  2123  
  2124  // InsertHeaderChain attempts to insert the given header chain in to the local
  2125  // chain, possibly creating a reorg. If an error is returned, it will return the
  2126  // index number of the failing header as well an error describing what went wrong.
  2127  //
  2128  // The verify parameter can be used to fine tune whether nonce verification
  2129  // should be done or not. The reason behind the optional check is because some
  2130  // of the header retrieval mechanisms already need to verify nonces, as well as
  2131  // because nonces can be verified sparsely, not needing to check each.
  2132  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  2133  	start := time.Now()
  2134  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  2135  		return i, err
  2136  	}
  2137  
  2138  	// Make sure only one thread manipulates the chain at once
  2139  	bc.chainmu.Lock()
  2140  	defer bc.chainmu.Unlock()
  2141  
  2142  	bc.wg.Add(1)
  2143  	defer bc.wg.Done()
  2144  
  2145  	whFunc := func(header *types.Header) error {
  2146  		_, err := bc.hc.WriteHeader(header)
  2147  		return err
  2148  	}
  2149  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  2150  }
  2151  
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	// Pure delegation to the embedded header chain.
	return bc.hc.CurrentHeader()
}
  2157  
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	// Delegates lookup (and caching) to the underlying HeaderChain.
	return bc.hc.GetTd(hash, number)
}
  2163  
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	// Delegates lookup (and caching) to the underlying HeaderChain.
	return bc.hc.GetTdByHash(hash)
}
  2169  
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	// Delegates lookup (and caching) to the underlying HeaderChain.
	return bc.hc.GetHeader(hash, number)
}
  2175  
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	// Delegates lookup (and caching) to the underlying HeaderChain.
	return bc.hc.GetHeaderByHash(hash)
}
  2181  
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	// Delegates the existence check to the underlying HeaderChain.
	return bc.hc.HasHeader(hash, number)
}
  2187  
// GetCanonicalHash returns the canonical hash for a given block number.
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
	// Delegates lookup to the underlying HeaderChain.
	return bc.hc.GetCanonicalHash(number)
}
  2192  
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	// Delegates traversal to the underlying HeaderChain.
	return bc.hc.GetBlockHashesFromHash(hash, max)
}
  2198  
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	// Delegates ancestor resolution to the underlying HeaderChain.
	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
  2207  
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	// Delegates lookup (and caching) to the underlying HeaderChain.
	return bc.hc.GetHeaderByNumber(number)
}
  2213  
  2214  // GetTransactionLookup retrieves the lookup associate with the given transaction
  2215  // hash from the cache or database.
  2216  func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
  2217  	// Short circuit if the txlookup already in the cache, retrieve otherwise
  2218  	if lookup, exist := bc.txLookupCache.Get(hash); exist {
  2219  		return lookup.(*rawdb.LegacyTxLookupEntry)
  2220  	}
  2221  	tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
  2222  	if tx == nil {
  2223  		return nil
  2224  	}
  2225  	lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
  2226  	bc.txLookupCache.Add(hash, lookup)
  2227  	return lookup
  2228  }
  2229  
// Config retrieves the chain's fork configuration (the chainConfig the
// blockchain was initialised with).
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  2232  
// Engine retrieves the blockchain's consensus engine (the engine the
// blockchain was initialised with).
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  2235  
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	// Tracked in bc.scope so it can be unsubscribed in bulk.
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}
  2240  
// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	// Tracked in bc.scope so it can be unsubscribed in bulk.
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}
  2245  
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	// Tracked in bc.scope so it can be unsubscribed in bulk.
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
  2250  
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	// Tracked in bc.scope so it can be unsubscribed in bulk.
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}
  2255  
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	// Tracked in bc.scope so it can be unsubscribed in bulk.
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
  2260  
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
	// Tracked in bc.scope so it can be unsubscribed in bulk.
	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}