github.com/newbtp/btp@v0.0.0-20190709081714-e4aafa07224e/core/blockchain.go (about)

     1  // Copyright 2014 The go-btpereum Authors
     2  // This file is part of the go-btpereum library.
     3  //
     4  // The go-btpereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-btpereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-btpereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the btpereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/btpereum/go-btpereum/common"
    31  	"github.com/btpereum/go-btpereum/common/mclock"
    32  	"github.com/btpereum/go-btpereum/common/prque"
    33  	"github.com/btpereum/go-btpereum/consensus"
    34  	"github.com/btpereum/go-btpereum/core/rawdb"
    35  	"github.com/btpereum/go-btpereum/core/state"
    36  	"github.com/btpereum/go-btpereum/core/types"
    37  	"github.com/btpereum/go-btpereum/core/vm"
    38  	"github.com/btpereum/go-btpereum/btpdb"
    39  	"github.com/btpereum/go-btpereum/event"
    40  	"github.com/btpereum/go-btpereum/log"
    41  	"github.com/btpereum/go-btpereum/metrics"
    42  	"github.com/btpereum/go-btpereum/params"
    43  	"github.com/btpereum/go-btpereum/rlp"
    44  	"github.com/btpereum/go-btpereum/trie"
    45  	"github.com/hashicorp/golang-lru"
    46  )
    47  
var (
	// Gauges tracking the current chain head height for each sync mode.
	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)

	// Timers measuring account trie operations during block processing.
	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	// Timers measuring storage trie operations during block processing.
	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	// Timers covering the main phases of a block import.
	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	// Metrics for the speculative state-prefetcher run alongside imports.
	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)

	// errInsertionInterrupted is returned when a chain insertion is aborted
	// because the blockchain is shutting down.
	errInsertionInterrupted = errors.New("insertion is interrupted")
)
    73  
const (
	bodyCacheLimit      = 256 // Maximum number of block bodies held in bodyCache/bodyRLPCache
	blockCacheLimit     = 256 // Maximum number of entire blocks held in blockCache
	receiptsCacheLimit  = 32  // Maximum number of per-block receipt lists held in receiptsCache
	maxFutureBlocks     = 256 // Maximum number of not-yet-processable blocks queued for later import
	maxTimeFutureBlocks = 30  // Maximum number of seconds ahead of local time a future block may be
	badBlockLimit       = 10  // Maximum number of recently failed blocks remembered for reporting
	TriesInMemory       = 128 // Number of recent tries kept in memory before being garbage collected

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//   * the `Bloom` field of receipt is deleted
	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//  The following incompatible database changes were added:
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//      receipts' corresponding block
	// - Version 6
	//  The following incompatible database changes were added:
	//    * Transaction lookup information stores the corresponding block number instead of block hash
	// - Version 7
	//  The following incompatible database changes were added:
	//    * Use freezer as the ancient database to maintain all ancient data
	BlockChainVersion uint64 = 7
)
   105  
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
}
   115  
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     btpdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	// Event feeds notifying subscribers of chain events; all are managed
	// through the shared subscription scope below.
	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock

	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine     consensus.Engine
	validator  Validator  // Block and state validator interface
	prefetcher Prefetcher // Block state prefetcher interface
	processor  Processor  // Block transaction processor interface
	vmConfig   vm.Config

	badBlocks       *lru.Cache                     // Bad block cache
	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
}
   176  
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default btpereum Validator and
// Processor.
func NewBlockChain(db btpdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	// Fall back to sane trie-cache defaults when the caller supplies none.
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	// lru.New only errors on non-positive sizes; the constants above are fixed,
	// so the errors are deliberately ignored.
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	// The chain refuses to start without a genesis block already in the database.
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	// Initialize the chain with ancient data if it isn't empty.
	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (btpash cache or clique voting snapshot). Might as well do
	// it in advance.
	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

	// If an ancient (freezer) store exists, make sure the active key-value heads
	// have not fallen behind it; if they have, rewind the chain to realign.
	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
			needRewind = true
			low = fullBlock.NumberU64()
		}
		// In fast sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated, truncate the
		// extra data here.
		fastBlock := bc.CurrentFastBlock()
		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
			needRewind = true
			// Keep the lower of the two rewind targets (low == 0 means unset).
			if fastBlock.NumberU64() < low || low == 0 {
				low = fastBlock.NumberU64()
			}
		}
		if needRewind {
			// Collect the canonical hashes above the rewind target so their
			// associated data can be rolled back, then truncate.
			var hashes []common.Hash
			previous := bc.CurrentHeader().Number.Uint64()
			for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
				hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
			}
			bc.Rollback(hashes)
			log.Warn("Truncate ancient chain", "from", previous, "to", low)
		}
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GbtpeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GbtpeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.Sbtpead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}
   287  
   288  func (bc *BlockChain) getProcInterrupt() bool {
   289  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   290  }
   291  
   292  // GetVMConfig returns the block chain VM config.
   293  func (bc *BlockChain) GetVMConfig() *vm.Config {
   294  	return &bc.vmConfig
   295  }
   296  
   297  // empty returns an indicator whbtper the blockchain is empty.
   298  // Note, it's a special case that we connect a non-empty ancient
   299  // database with an empty node, so that we can plugin the ancient
   300  // into node seamlessly.
   301  func (bc *BlockChain) empty() bool {
   302  	genesis := bc.genesisBlock.Hash()
   303  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   304  		if hash != genesis {
   305  			return false
   306  		}
   307  	}
   308  	return true
   309  }
   310  
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch.
		// repair rewinds currentBlock to the most recent block with state.
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
		// Persist the repaired head so the next startup sees it directly.
		rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)
	headBlockGauge.Update(int64(currentBlock.NumberU64()))

	// Restore the last known head header, falling back to the head block's
	// own header if the stored header hash is missing or unresolvable.
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GbtpeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block, defaulting to the full head
	// block until a dedicated fast head can be resolved below.
	bc.currentFastBlock.Store(currentBlock)
	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))

	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
			headFastBlockGauge.Update(int64(block.NumberU64()))
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))

	return nil
}
   373  
// Sbtpead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) Sbtpead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// updateFn is invoked by the header chain with the new head header; it
	// realigns the full-block and fast-sync heads below that header.
	updateFn := func(db btpdb.KeyValueWriter, header *types.Header) {
		// Rewind the block chain, ensuring we don't end up with a stateless head block
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			if newHeadBlock == nil {
				newHeadBlock = bc.genesisBlock
			} else {
				if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
					// Rewound state missing, rolled back to before pivot, reset to genesis
					newHeadBlock = bc.genesisBlock
				}
			}
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
		}

		// Rewind the fast block in a simpleton way to the target head
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If either blocks reached nil, reset to the genesis state
			if newHeadFastBlock == nil {
				newHeadFastBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())
			bc.currentFastBlock.Store(newHeadFastBlock)
			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
		}
	}

	// delFn is invoked per deleted header; blocks stored in the ancient
	// (freezer) store are truncated wholesale, active-store blocks have
	// their body and receipts removed individually.
	delFn := func(db btpdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// Truncate all relative data(header, total difficulty, body, receipt
			// and canonical hash) from ancient store.
			if err := bc.db.TruncateAncients(num + 1); err != nil {
				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
			}

			// Remove the hash <-> number mapping from the active store.
			rawdb.DeleteHeaderNumber(db, hash)
		} else {
			// Remove relative body and receipts from the active store.
			// The header, total difficulty and canonical hash will be
			// removed in the hc.Sbtpead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, bloombits, etc
	}
	bc.hc.Sbtpead(head, updateFn, delFn)

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Reload the memory heads from whatever the database now contains.
	return bc.loadLastState()
}
   447  
   448  // FastSyncCommitHead sets the current head block to the one defined by the hash
   449  // irrelevant what the chain contents were prior.
   450  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   451  	// Make sure that both the block as well at its state trie exists
   452  	block := bc.GetBlockByHash(hash)
   453  	if block == nil {
   454  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   455  	}
   456  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
   457  		return err
   458  	}
   459  	// If all checks out, manually set the head block
   460  	bc.chainmu.Lock()
   461  	bc.currentBlock.Store(block)
   462  	headBlockGauge.Update(int64(block.NumberU64()))
   463  	bc.chainmu.Unlock()
   464  
   465  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   466  	return nil
   467  }
   468  
   469  // GasLimit returns the gas limit of the current HEAD block.
   470  func (bc *BlockChain) GasLimit() uint64 {
   471  	return bc.CurrentBlock().GasLimit()
   472  }
   473  
   474  // CurrentBlock retrieves the current head block of the canonical chain. The
   475  // block is retrieved from the blockchain's internal cache.
   476  func (bc *BlockChain) CurrentBlock() *types.Block {
   477  	return bc.currentBlock.Load().(*types.Block)
   478  }
   479  
   480  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   481  // chain. The block is retrieved from the blockchain's internal cache.
   482  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   483  	return bc.currentFastBlock.Load().(*types.Block)
   484  }
   485  
   486  // Validator returns the current validator.
   487  func (bc *BlockChain) Validator() Validator {
   488  	return bc.validator
   489  }
   490  
   491  // Processor returns the current processor.
   492  func (bc *BlockChain) Processor() Processor {
   493  	return bc.processor
   494  }
   495  
   496  // State returns a new mutable state based on the current HEAD block.
   497  func (bc *BlockChain) State() (*state.StateDB, error) {
   498  	return bc.StateAt(bc.CurrentBlock().Root())
   499  }
   500  
   501  // StateAt returns a new mutable state based on a particular point in time.
   502  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   503  	return state.New(root, bc.stateCache)
   504  }
   505  
   506  // StateCache returns the caching database underpinning the blockchain instance.
   507  func (bc *BlockChain) StateCache() state.Database {
   508  	return bc.stateCache
   509  }
   510  
   511  // Reset purges the entire blockchain, restoring it to its genesis state.
   512  func (bc *BlockChain) Reset() error {
   513  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   514  }
   515  
// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.Sbtpead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	// Point every in-memory head (full block, header, fast block) back at the
	// genesis block and persist the canonical markers.
	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))

	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)
	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))

	return nil
}
   544  
   545  // repair tries to repair the current blockchain by rolling back the current block
   546  // until one with associated state is found. This is needed to fix incomplete db
   547  // writes caused either by crashes/power outages, or simply non-committed tries.
   548  //
   549  // This mbtpod only rolls back the current block. The current header and current
   550  // fast block are left intact.
   551  func (bc *BlockChain) repair(head **types.Block) error {
   552  	for {
   553  		// Abort if we've rewound to a head block that does have associated state
   554  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   555  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   556  			return nil
   557  		}
   558  		// Otherwise rewind one block and recheck state availability there
   559  		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   560  		if block == nil {
   561  			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
   562  		}
   563  		*head = block
   564  	}
   565  }
   566  
   567  // Export writes the active chain to the given writer.
   568  func (bc *BlockChain) Export(w io.Writer) error {
   569  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   570  }
   571  
   572  // ExportN writes a subset of the active chain to the given writer.
   573  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   574  	bc.chainmu.RLock()
   575  	defer bc.chainmu.RUnlock()
   576  
   577  	if first > last {
   578  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   579  	}
   580  	log.Info("Exporting batch of blocks", "count", last-first+1)
   581  
   582  	start, reported := time.Now(), time.Now()
   583  	for nr := first; nr <= last; nr++ {
   584  		block := bc.GetBlockByNumber(nr)
   585  		if block == nil {
   586  			return fmt.Errorf("export failed on #%d: not found", nr)
   587  		}
   588  		if err := block.EncodeRLP(w); err != nil {
   589  			return err
   590  		}
   591  		if time.Since(reported) >= statsReportLimit {
   592  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   593  			reported = time.Now()
   594  		}
   595  	}
   596  	return nil
   597  }
   598  
// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
	rawdb.WriteHeadBlockHash(bc.db, block.Hash())

	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())

		bc.currentFastBlock.Store(block)
		headFastBlockGauge.Update(int64(block.NumberU64()))
	}
}
   625  
   626  // Genesis retrieves the chain's genesis block.
   627  func (bc *BlockChain) Genesis() *types.Block {
   628  	return bc.genesisBlock
   629  }
   630  
   631  // GetBody retrieves a block body (transactions and uncles) from the database by
   632  // hash, caching it if found.
   633  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   634  	// Short circuit if the body's already in the cache, retrieve otherwise
   635  	if cached, ok := bc.bodyCache.Get(hash); ok {
   636  		body := cached.(*types.Body)
   637  		return body
   638  	}
   639  	number := bc.hc.GetBlockNumber(hash)
   640  	if number == nil {
   641  		return nil
   642  	}
   643  	body := rawdb.ReadBody(bc.db, hash, *number)
   644  	if body == nil {
   645  		return nil
   646  	}
   647  	// Cache the found body for next time and return
   648  	bc.bodyCache.Add(hash, body)
   649  	return body
   650  }
   651  
   652  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   653  // caching it if found.
   654  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   655  	// Short circuit if the body's already in the cache, retrieve otherwise
   656  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   657  		return cached.(rlp.RawValue)
   658  	}
   659  	number := bc.hc.GetBlockNumber(hash)
   660  	if number == nil {
   661  		return nil
   662  	}
   663  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   664  	if len(body) == 0 {
   665  		return nil
   666  	}
   667  	// Cache the found body for next time and return
   668  	bc.bodyRLPCache.Add(hash, body)
   669  	return body
   670  }
   671  
   672  // HasBlock checks if a block is fully present in the database or not.
   673  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   674  	if bc.blockCache.Contains(hash) {
   675  		return true
   676  	}
   677  	return rawdb.HasBody(bc.db, hash, number)
   678  }
   679  
   680  // HasFastBlock checks if a fast block is fully present in the database or not.
   681  func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
   682  	if !bc.HasBlock(hash, number) {
   683  		return false
   684  	}
   685  	if bc.receiptsCache.Contains(hash) {
   686  		return true
   687  	}
   688  	return rawdb.HasReceipts(bc.db, hash, number)
   689  }
   690  
   691  // HasState checks if state trie is fully present in the database or not.
   692  func (bc *BlockChain) HasState(hash common.Hash) bool {
   693  	_, err := bc.stateCache.OpenTrie(hash)
   694  	return err == nil
   695  }
   696  
   697  // HasBlockAndState checks if a block and associated state trie is fully present
   698  // in the database or not, caching it if present.
   699  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   700  	// Check first that the block itself is known
   701  	block := bc.GetBlock(hash, number)
   702  	if block == nil {
   703  		return false
   704  	}
   705  	return bc.HasState(block.Root())
   706  }
   707  
   708  // GetBlock retrieves a block from the database by hash and number,
   709  // caching it if found.
   710  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   711  	// Short circuit if the block's already in the cache, retrieve otherwise
   712  	if block, ok := bc.blockCache.Get(hash); ok {
   713  		return block.(*types.Block)
   714  	}
   715  	block := rawdb.ReadBlock(bc.db, hash, number)
   716  	if block == nil {
   717  		return nil
   718  	}
   719  	// Cache the found block for next time and return
   720  	bc.blockCache.Add(block.Hash(), block)
   721  	return block
   722  }
   723  
   724  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   725  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   726  	number := bc.hc.GetBlockNumber(hash)
   727  	if number == nil {
   728  		return nil
   729  	}
   730  	return bc.GetBlock(hash, *number)
   731  }
   732  
   733  // GetBlockByNumber retrieves a block from the database by number, caching it
   734  // (associated with its hash) if found.
   735  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   736  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   737  	if hash == (common.Hash{}) {
   738  		return nil
   739  	}
   740  	return bc.GetBlock(hash, number)
   741  }
   742  
   743  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   744  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   745  	if receipts, ok := bc.receiptsCache.Get(hash); ok {
   746  		return receipts.(types.Receipts)
   747  	}
   748  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   749  	if number == nil {
   750  		return nil
   751  	}
   752  	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
   753  	if receipts == nil {
   754  		return nil
   755  	}
   756  	bc.receiptsCache.Add(hash, receipts)
   757  	return receipts
   758  }
   759  
   760  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   761  // [deprecated by btp/62]
   762  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   763  	number := bc.hc.GetBlockNumber(hash)
   764  	if number == nil {
   765  		return nil
   766  	}
   767  	for i := 0; i < n; i++ {
   768  		block := bc.GetBlock(hash, *number)
   769  		if block == nil {
   770  			break
   771  		}
   772  		blocks = append(blocks, block)
   773  		hash = block.ParentHash()
   774  		*number--
   775  	}
   776  	return
   777  }
   778  
   779  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   780  // a specific distance is reached.
   781  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   782  	uncles := []*types.Header{}
   783  	for i := 0; block != nil && i < length; i++ {
   784  		uncles = append(uncles, block.Uncles()...)
   785  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   786  	}
   787  	return uncles
   788  }
   789  
// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
//
// It simply delegates to the trie database backing the state cache.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}
   795  
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
//
// The shutdown sequence is order-sensitive: the running flag is flipped first
// (making Stop idempotent), then event subscribers are detached, the quit
// channel is closed and procInterrupt is raised so in-flight operations bail
// out, and only after wg.Wait has drained them is the trie cache flushed.
func (bc *BlockChain) Stop() {
	// Only run the shutdown sequence once; subsequent calls are no-ops.
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	// Wait until every tracked import/write has observed the interrupt.
	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
			// Skip offsets that would reach below the genesis block.
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		// Release all remaining in-memory trie references; anything left after
		// this drain indicates a reference-counting bug.
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}
   836  
   837  func (bc *BlockChain) procFutureBlocks() {
   838  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   839  	for _, hash := range bc.futureBlocks.Keys() {
   840  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   841  			blocks = append(blocks, block.(*types.Block))
   842  		}
   843  	}
   844  	if len(blocks) > 0 {
   845  		types.BlockBy(types.Number).Sort(blocks)
   846  
   847  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   848  		for i := range blocks {
   849  			bc.InsertChain(blocks[i : i+1])
   850  		}
   851  	}
   852  }
   853  
// WriteStatus is the outcome reported when writing a block into the chain.
type WriteStatus byte

const (
	// NonStatTy indicates the write did not take effect (an error occurred).
	NonStatTy WriteStatus = iota
	// CanonStatTy indicates the block was written as the new canonical head.
	CanonStatTy
	// SideStatTy indicates the block was written to a side chain.
	SideStatTy
)
   862  
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
//
// The hashes are processed newest-first; whenever one of them is currently a
// head (header, fast block or full block), that head pointer is stepped back
// to its parent, persisting the new head hash where applicable.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		// Step the header head back if it points at the unwound hash.
		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.Gbtpeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		// Step the fast-sync head back likewise, persisting the new head hash.
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
			bc.currentFastBlock.Store(newFastBlock)
			headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
		}
		// Step the full block head back likewise.
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
			bc.currentBlock.Store(newBlock)
			headBlockGauge.Update(int64(newBlock.NumberU64()))
		}
	}
	// Truncate ancient data which exceeds the current header.
	//
	// Notably, it can happen that system crashes without truncating the ancient data
	// but the head indicator has been updated in the active store. Regarding this issue,
	// system will self recovery by truncating the extra data during the setup phase.
	if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
		log.Crit("Truncate ancient store failed", "err", err)
	}
}
   898  
   899  // truncateAncient rewinds the blockchain to the specified header and deletes all
   900  // data in the ancient store that exceeds the specified header.
   901  func (bc *BlockChain) truncateAncient(head uint64) error {
   902  	frozen, err := bc.db.Ancients()
   903  	if err != nil {
   904  		return err
   905  	}
   906  	// Short circuit if there is no data to truncate in ancient store.
   907  	if frozen <= head+1 {
   908  		return nil
   909  	}
   910  	// Truncate all the data in the freezer beyond the specified head
   911  	if err := bc.db.TruncateAncients(head + 1); err != nil {
   912  		return err
   913  	}
   914  	// Clear out any stale content from the caches
   915  	bc.hc.headerCache.Purge()
   916  	bc.hc.tdCache.Purge()
   917  	bc.hc.numberCache.Purge()
   918  
   919  	// Clear out any stale content from the caches
   920  	bc.bodyCache.Purge()
   921  	bc.bodyRLPCache.Purge()
   922  	bc.receiptsCache.Purge()
   923  	bc.blockCache.Purge()
   924  	bc.futureBlocks.Purge()
   925  
   926  	log.Info("Rewind ancient data", "number", head)
   927  	return nil
   928  }
   929  
// numberHash is just a container for a number and a hash, to represent a block
// without retaining the full block body in memory.
type numberHash struct {
	number uint64      // block number
	hash   common.Hash // block hash
}
   935  
   936  // InsertReceiptChain attempts to complete an already existing header chain with
   937  // transaction and receipt data.
   938  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
   939  	// We don't require the chainMu here since we want to maximize the
   940  	// concurrency of header insertion and receipt insertion.
   941  	bc.wg.Add(1)
   942  	defer bc.wg.Done()
   943  
   944  	var (
   945  		ancientBlocks, liveBlocks     types.Blocks
   946  		ancientReceipts, liveReceipts []types.Receipts
   947  	)
   948  	// Do a sanity check that the provided chain is actually ordered and linked
   949  	for i := 0; i < len(blockChain); i++ {
   950  		if i != 0 {
   951  			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   952  				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   953  					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   954  				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   955  					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   956  			}
   957  		}
   958  		if blockChain[i].NumberU64() <= ancientLimit {
   959  			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
   960  		} else {
   961  			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
   962  		}
   963  	}
   964  
   965  	var (
   966  		stats = struct{ processed, ignored int32 }{}
   967  		start = time.Now()
   968  		size  = 0
   969  	)
   970  	// updateHead updates the head fast sync block if the inserted blocks are better
   971  	// and returns a indicator whbtper the inserted blocks are canonical.
   972  	updateHead := func(head *types.Block) bool {
   973  		bc.chainmu.Lock()
   974  
   975  		// Rewind may have occurred, skip in that case.
   976  		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
   977  			currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
   978  			if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   979  				rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   980  				bc.currentFastBlock.Store(head)
   981  				headFastBlockGauge.Update(int64(head.NumberU64()))
   982  				bc.chainmu.Unlock()
   983  				return true
   984  			}
   985  		}
   986  		bc.chainmu.Unlock()
   987  		return false
   988  	}
   989  	// writeAncient writes blockchain and corresponding receipt chain into ancient store.
   990  	//
   991  	// this function only accepts canonical chain data. All side chain will be reverted
   992  	// eventually.
   993  	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   994  		var (
   995  			previous = bc.CurrentFastBlock()
   996  			batch    = bc.db.NewBatch()
   997  		)
   998  		// If any error occurs before updating the head or we are inserting a side chain,
   999  		// all the data written this time wll be rolled back.
  1000  		defer func() {
  1001  			if previous != nil {
  1002  				if err := bc.truncateAncient(previous.NumberU64()); err != nil {
  1003  					log.Crit("Truncate ancient store failed", "err", err)
  1004  				}
  1005  			}
  1006  		}()
  1007  		var deleted []*numberHash
  1008  		for i, block := range blockChain {
  1009  			// Short circuit insertion if shutting down or processing failed
  1010  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1011  				return 0, errInsertionInterrupted
  1012  			}
  1013  			// Short circuit insertion if it is required(used in testing only)
  1014  			if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
  1015  				return i, errors.New("insertion is terminated for testing purpose")
  1016  			}
  1017  			// Short circuit if the owner header is unknown
  1018  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1019  				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
  1020  			}
  1021  			var (
  1022  				start  = time.Now()
  1023  				logged = time.Now()
  1024  				count  int
  1025  			)
  1026  			// Migrate all ancient blocks. This can happen if someone upgrades from Gbtp
  1027  			// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
  1028  			// long term.
  1029  			for {
  1030  				// We can ignore the error here since light client won't hit this code path.
  1031  				frozen, _ := bc.db.Ancients()
  1032  				if frozen >= block.NumberU64() {
  1033  					break
  1034  				}
  1035  				h := rawdb.ReadCanonicalHash(bc.db, frozen)
  1036  				b := rawdb.ReadBlock(bc.db, h, frozen)
  1037  				size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
  1038  				count += 1
  1039  
  1040  				// Always keep genesis block in active database.
  1041  				if b.NumberU64() != 0 {
  1042  					deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
  1043  				}
  1044  				if time.Since(logged) > 8*time.Second {
  1045  					log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
  1046  					logged = time.Now()
  1047  				}
  1048  				// Don't collect too much in-memory, write it out every 100K blocks
  1049  				if len(deleted) > 100000 {
  1050  
  1051  					// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1052  					if err := bc.db.Sync(); err != nil {
  1053  						return 0, err
  1054  					}
  1055  					// Wipe out canonical block data.
  1056  					for _, nh := range deleted {
  1057  						rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
  1058  						rawdb.DeleteCanonicalHash(batch, nh.number)
  1059  					}
  1060  					if err := batch.Write(); err != nil {
  1061  						return 0, err
  1062  					}
  1063  					batch.Reset()
  1064  					// Wipe out side chain too.
  1065  					for _, nh := range deleted {
  1066  						for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
  1067  							rawdb.DeleteBlock(batch, hash, nh.number)
  1068  						}
  1069  					}
  1070  					if err := batch.Write(); err != nil {
  1071  						return 0, err
  1072  					}
  1073  					batch.Reset()
  1074  					deleted = deleted[0:]
  1075  				}
  1076  			}
  1077  			if count > 0 {
  1078  				log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
  1079  			}
  1080  			// Flush data into ancient database.
  1081  			size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
  1082  			rawdb.WriteTxLookupEntries(batch, block)
  1083  
  1084  			stats.processed++
  1085  		}
  1086  		// Flush all tx-lookup index data.
  1087  		size += batch.ValueSize()
  1088  		if err := batch.Write(); err != nil {
  1089  			return 0, err
  1090  		}
  1091  		batch.Reset()
  1092  
  1093  		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1094  		if err := bc.db.Sync(); err != nil {
  1095  			return 0, err
  1096  		}
  1097  		if !updateHead(blockChain[len(blockChain)-1]) {
  1098  			return 0, errors.New("side blocks can't be accepted as the ancient chain data")
  1099  		}
  1100  		previous = nil // disable rollback explicitly
  1101  
  1102  		// Wipe out canonical block data.
  1103  		for _, nh := range deleted {
  1104  			rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
  1105  			rawdb.DeleteCanonicalHash(batch, nh.number)
  1106  		}
  1107  		for _, block := range blockChain {
  1108  			// Always keep genesis block in active database.
  1109  			if block.NumberU64() != 0 {
  1110  				rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
  1111  				rawdb.DeleteCanonicalHash(batch, block.NumberU64())
  1112  			}
  1113  		}
  1114  		if err := batch.Write(); err != nil {
  1115  			return 0, err
  1116  		}
  1117  		batch.Reset()
  1118  
  1119  		// Wipe out side chain too.
  1120  		for _, nh := range deleted {
  1121  			for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
  1122  				rawdb.DeleteBlock(batch, hash, nh.number)
  1123  			}
  1124  		}
  1125  		for _, block := range blockChain {
  1126  			// Always keep genesis block in active database.
  1127  			if block.NumberU64() != 0 {
  1128  				for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
  1129  					rawdb.DeleteBlock(batch, hash, block.NumberU64())
  1130  				}
  1131  			}
  1132  		}
  1133  		if err := batch.Write(); err != nil {
  1134  			return 0, err
  1135  		}
  1136  		return 0, nil
  1137  	}
  1138  	// writeLive writes blockchain and corresponding receipt chain into active store.
  1139  	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1140  		batch := bc.db.NewBatch()
  1141  		for i, block := range blockChain {
  1142  			// Short circuit insertion if shutting down or processing failed
  1143  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1144  				return 0, errInsertionInterrupted
  1145  			}
  1146  			// Short circuit if the owner header is unknown
  1147  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1148  				return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
  1149  			}
  1150  			if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1151  				stats.ignored++
  1152  				continue
  1153  			}
  1154  			// Write all the data out into the database
  1155  			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
  1156  			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
  1157  			rawdb.WriteTxLookupEntries(batch, block)
  1158  
  1159  			stats.processed++
  1160  			if batch.ValueSize() >= btpdb.IdealBatchSize {
  1161  				if err := batch.Write(); err != nil {
  1162  					return 0, err
  1163  				}
  1164  				size += batch.ValueSize()
  1165  				batch.Reset()
  1166  			}
  1167  		}
  1168  		if batch.ValueSize() > 0 {
  1169  			size += batch.ValueSize()
  1170  			if err := batch.Write(); err != nil {
  1171  				return 0, err
  1172  			}
  1173  		}
  1174  		updateHead(blockChain[len(blockChain)-1])
  1175  		return 0, nil
  1176  	}
  1177  	// Write downloaded chain data and corresponding receipt chain data.
  1178  	if len(ancientBlocks) > 0 {
  1179  		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
  1180  			if err == errInsertionInterrupted {
  1181  				return 0, nil
  1182  			}
  1183  			return n, err
  1184  		}
  1185  	}
  1186  	if len(liveBlocks) > 0 {
  1187  		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
  1188  			if err == errInsertionInterrupted {
  1189  				return 0, nil
  1190  			}
  1191  			return n, err
  1192  		}
  1193  	}
  1194  
  1195  	head := blockChain[len(blockChain)-1]
  1196  	context := []interface{}{
  1197  		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
  1198  		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
  1199  		"size", common.StorageSize(size),
  1200  	}
  1201  	if stats.ignored > 0 {
  1202  		context = append(context, []interface{}{"ignored", stats.ignored}...)
  1203  	}
  1204  	log.Info("Imported new block receipts", context...)
  1205  
  1206  	return 0, nil
  1207  }
  1208  
// lastWrite records the block number at which writeBlockWithState last flushed
// a full trie to disk, used to pace subsequent garbage-collected commits.
var lastWrite uint64
  1210  
  1211  // writeBlockWithoutState writes only the block and its metadata to the database,
  1212  // but does not write any state. This is used to construct competing side forks
  1213  // up to the point where they exceed the canonical total difficulty.
  1214  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1215  	bc.wg.Add(1)
  1216  	defer bc.wg.Done()
  1217  
  1218  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
  1219  		return err
  1220  	}
  1221  	rawdb.WriteBlock(bc.db, block)
  1222  
  1223  	return nil
  1224  }
  1225  
  1226  // writeKnownBlock updates the head block flag with a known block
  1227  // and introduces chain reorg if necessary.
  1228  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1229  	bc.wg.Add(1)
  1230  	defer bc.wg.Done()
  1231  
  1232  	current := bc.CurrentBlock()
  1233  	if block.ParentHash() != current.Hash() {
  1234  		if err := bc.reorg(current, block); err != nil {
  1235  			return err
  1236  		}
  1237  	}
  1238  	// Write the positional metadata for transaction/receipt lookups.
  1239  	// Preimages here is empty, ignore it.
  1240  	rawdb.WriteTxLookupEntries(bc.db, block)
  1241  
  1242  	bc.insert(block)
  1243  	return nil
  1244  }
  1245  
// WriteBlockWithState writes the block and all associated state to the database.
//
// It is the exported, self-locking counterpart of writeBlockWithState: the
// chain mutex is taken here (and released via defer, even on panic) before
// delegating.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.writeBlockWithState(block, receipts, state)
}
  1253  
// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
//
// The returned status is CanonStatTy when the block became the new canonical
// head, SideStatTy when it was stored on a side chain, and NonStatTy on error.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	// Commit the mutated state; the deletion-handling flag follows EIP-158.
	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.TrieDirtyDisabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		// The priority queue is keyed on negated block number, so the oldest
		// trie pops first during garbage collection below.
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > TriesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - btpdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - TriesInMemory

			// If we exceeded out time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GbtpeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
					}
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					// Too recent to collect; push back and stop.
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			// Coin-flip tie break when neither side is preferred by the miner.
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}
  1383  
  1384  // addFutureBlock checks if the block is within the max allowed window to get
  1385  // accepted for future processing, and returns an error if the block is too far
  1386  // ahead and was not added.
  1387  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1388  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1389  	if block.Time() > max {
  1390  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1391  	}
  1392  	bc.futureBlocks.Add(block.Hash(), block)
  1393  	return nil
  1394  }
  1395  
// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have sombtping meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}

	// Signal block processing to subscribers for the duration of the import.
	bc.blockProcFeed.Send(true)
	defer bc.blockProcFeed.Send(false)

	// Scratch variables for the ancestry sanity check below.
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports.
	// Note: wg.Add must precede chainmu.Lock so Stop's wg.Wait cannot complete
	// while an import still holds (or is about to take) the chain mutex.
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, events, logs, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	// Fire the accumulated events/logs outside the chain mutex.
	bc.PostChainEvents(events, logs)
	return n, err
}
  1438  
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
//
// It returns the index of the last processed block, the chain events and logs
// accumulated for posting, and the error that terminated the import (if any).
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil, nil, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, bc.validator)

	block, err := it.next()

	// Left-trim all the known blocks
	if err == ErrKnownBlock {
		// First block (and state) is known
		//   1. We did a roll-back, and should now do a re-import
		//   2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
		// 	    from the canonical chain, which has not been verified.
		// Skip all known blocks that are behind us
		var (
			current  = bc.CurrentBlock()
			localTd  = bc.GetTd(current.Hash(), current.NumberU64())
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
		)
		for block != nil && err == ErrKnownBlock {
			externTd = new(big.Int).Add(externTd, block.Difficulty())
			if localTd.Cmp(externTd) < 0 {
				break
			}
			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
			stats.ignored++

			block, err = it.next()
		}
		// The remaining blocks are still known blocks, the only scenario here is:
		// During the fast sync, the pivot point is already submitted but rollback
		// happens. Then node resets the head full block to a lower height via `rollback`
		// and leaves a few known blocks in the database.
		//
		// When node runs a fast sync again, it can re-import a batch of known blocks via
		// `insertChain` while a part of them have higher total difficulty than current
		// head full block(new pivot point).
		for block != nil && err == ErrKnownBlock {
			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, nil, nil, err
			}
			lastCanon = block

			block, err = it.next()
		}
		// Falls through to the block import
	}
	switch {
	// First block is pruned, insert as sidechain and reorg only if TD grows enough
	case err == consensus.ErrPrunedAncestor:
		log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
		return bc.insertSideChain(block, it)

	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			block, err = it.next()
		}
		stats.queued += it.processed()
		stats.ignored += it.remaining()

		// If there are any still remaining, mark as ignored
		return it.index, events, coalescedLogs, err

	// Some other error occurred, abort
	case err != nil:
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return it.index, events, coalescedLogs, err
	}
	// No validation errors for the first block (or chain prefix skipped)
	for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, events, coalescedLogs, ErrBlacklistedHash
		}
		// If the block is known (in the middle of the chain), it's a special case for
		// Clique blocks where they can share state among each other, so importing an
		// older block might complete the state of the subsequent one. In this case,
		// just skip the block (we already validated it once fully (and crashed), since
		// its header and body was already in the database).
		if err == ErrKnownBlock {
			logger := log.Debug
			if bc.chainConfig.Clique == nil {
				logger = log.Warn
			}
			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"root", block.Root())

			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, nil, nil, err
			}
			stats.processed++

			// We can assume that logs are empty here, since the only way for consecutive
			// Clique blocks to have the same state is if there are no transactions.
			events = append(events, ChainEvent{block, block.Hash(), nil})
			lastCanon = block

			continue
		}
		// Retrieve the parent block and it's state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.Gbtpeader(block.ParentHash(), block.NumberU64()-1)
		}
		statedb, err := state.New(parent.Root, bc.stateCache)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// If we have a followup block, run that against the current state to pre-cache
		// transactions and probabilistically some of the account/storage trie nodes.
		var followupInterrupt uint32

		if !bc.cacheConfig.TrieCleanNoPrefetch {
			if followup, err := it.peek(); followup != nil && err == nil {
				go func(start time.Time) {
					// Prefetch on a throwaway state so the real statedb is untouched.
					throwaway, _ := state.New(parent.Root, bc.stateCache)
					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)

					blockPrefetchExecuteTimer.Update(time.Since(start))
					if atomic.LoadUint32(&followupInterrupt) == 1 {
						blockPrefetchInterruptMeter.Mark(1)
					}
				}(time.Now())
			}
		}
		// Process block using the parent state as reference point
		substart := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		// Update the metrics touched during block processing
		accountReadTimer.Update(statedb.AccountReads)     // Account reads are complete, we can mark them
		storageReadTimer.Update(statedb.StorageReads)     // Storage reads are complete, we can mark them
		accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
		storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them

		triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
		trieproc := statedb.AccountReads + statedb.AccountUpdates
		trieproc += statedb.StorageReads + statedb.StorageUpdates

		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

		// Validate the state using the default validator
		substart = time.Now()
		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		proctime := time.Since(start)

		// Update the metrics touched during block validation
		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them

		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))

		// Write the block to the chain and get the status.
		substart = time.Now()
		status, err := bc.writeBlockWithState(block, receipts, statedb)
		if err != nil {
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		// Stop the prefetcher now that this block is committed.
		atomic.StoreUint32(&followupInterrupt, 1)

		// Update the metrics touched during block commit
		accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
		storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them

		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
		blockInsertTimer.UpdateSince(start)

		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			coalescedLogs = append(coalescedLogs, logs...)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
			events = append(events, ChainSideEvent{block})

		default:
			// This in theory is impossible, but lets be nice to our future selves and leave
			// a log, instead of trying to track down blocks imports that don't emit logs.
			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
		stats.processed++
		stats.usedGas += usedGas

		dirty, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, dirty)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && err == consensus.ErrFutureBlock {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, events, coalescedLogs, err
		}
		block, err = it.next()

		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()

	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return it.index, events, coalescedLogs, err
}
  1722  
// insertSideChain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other errors means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.NumberU64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block which has it's state pruned
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exist! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
			}
		}
		// Lazily resolve the sidechain's starting TD from the first parent.
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
				return it.index, nil, nil, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to database. Loop ended
	// either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(current.Hash(), current.NumberU64())
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
		return it.index, nil, nil, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	// Walk backwards from the sidechain head until a block with available state.
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.Gbtpeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, nil, nil, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
				return 0, nil, nil, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil, nil, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil, nil, nil
}
  1840  
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block

		deletedTxs types.Transactions
		addedTxs   types.Transactions

		deletedLogs []*types.Log
		rebirthLogs []*types.Log

		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted or reborn
		collectLogs = func(hash common.Hash, removed bool) {
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					// Copy the log so the Removed flag can be set without
					// mutating the cached receipt.
					l := *log
					if removed {
						l.Removed = true
						deletedLogs = append(deletedLogs, &l)
					} else {
						rebirthLogs = append(rebirthLogs, &l)
					}
				}
			}
		}
	)
	// Reduce the longer chain to the same number as the shorter one
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// Old chain is longer, gather all transactions and logs as deleted ones
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash(), true)
		}
	} else {
		// New chain is longer, stash all blocks away for subsequent insertion
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("invalid new chain")
	}
	// Both sides of the reorg are at the same number, reduce both until the common
	// ancestor is found
	for {
		// If the common ancestor was found, bail out
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		// Remove an old block as well as stash away a new block
		oldChain = append(oldChain, oldBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash(), true)

		newChain = append(newChain, newBlock)

		// Step back with both chains
		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("invalid old chain")
		}
		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if newBlock == nil {
			return fmt.Errorf("invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain(except the head block(reverse order)),
	// taking care of the proper incremental order.
	for i := len(newChain) - 1; i >= 1; i-- {
		// Insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])

		// Collect reborn logs due to chain reorg
		collectLogs(newChain[i].Hash(), false)

		// Write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// When transactions get deleted from the database, the receipts that were
	// created in the fork must also be deleted
	batch := bc.db.NewBatch()
	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	// Delete any canonical number assignments above the new head
	number := bc.CurrentBlock().NumberU64()
	for i := number + 1; ; i++ {
		hash := rawdb.ReadCanonicalHash(bc.db, i)
		if hash == (common.Hash{}) {
			break
		}
		rawdb.DeleteCanonicalHash(batch, i)
	}
	batch.Write()
	// If any logs need to be fired, do it now. In theory we could avoid creating
	// this goroutine if there are no events to fire, but realistically that only
	// ever happens if we're reorging empty blocks, which will only happen on idle
	// networks where performance is not an issue either way.
	//
	// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
	// event ordering?
	go func() {
		if len(deletedLogs) > 0 {
			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
		}
		if len(rebirthLogs) > 0 {
			bc.logsFeed.Send(rebirthLogs)
		}
		if len(oldChain) > 0 {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}
	}()
	return nil
}
  1985  
  1986  // PostChainEvents iterates over the events generated by a chain insertion and
  1987  // posts them into the event feed.
  1988  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1989  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1990  	// post event logs for further processing
  1991  	if logs != nil {
  1992  		bc.logsFeed.Send(logs)
  1993  	}
  1994  	for _, event := range events {
  1995  		switch ev := event.(type) {
  1996  		case ChainEvent:
  1997  			bc.chainFeed.Send(ev)
  1998  
  1999  		case ChainHeadEvent:
  2000  			bc.chainHeadFeed.Send(ev)
  2001  
  2002  		case ChainSideEvent:
  2003  			bc.chainSideFeed.Send(ev)
  2004  		}
  2005  	}
  2006  }
  2007  
  2008  func (bc *BlockChain) update() {
  2009  	futureTimer := time.NewTicker(5 * time.Second)
  2010  	defer futureTimer.Stop()
  2011  	for {
  2012  		select {
  2013  		case <-futureTimer.C:
  2014  			bc.procFutureBlocks()
  2015  		case <-bc.quit:
  2016  			return
  2017  		}
  2018  	}
  2019  }
  2020  
  2021  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  2022  func (bc *BlockChain) BadBlocks() []*types.Block {
  2023  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  2024  	for _, hash := range bc.badBlocks.Keys() {
  2025  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  2026  			block := blk.(*types.Block)
  2027  			blocks = append(blocks, block)
  2028  		}
  2029  	}
  2030  	return blocks
  2031  }
  2032  
// addBadBlock adds a bad block to the bad-block LRU cache, keyed by its hash.
// Older entries are evicted automatically by the cache.
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}
  2037  
  2038  // reportBlock logs a bad block error.
  2039  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2040  	bc.addBadBlock(block)
  2041  
  2042  	var receiptString string
  2043  	for i, receipt := range receipts {
  2044  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  2045  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2046  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2047  	}
  2048  	log.Error(fmt.Sprintf(`
  2049  ########## BAD BLOCK #########
  2050  Chain config: %v
  2051  
  2052  Number: %v
  2053  Hash: 0x%x
  2054  %v
  2055  
  2056  Error: %v
  2057  ##############################
  2058  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  2059  }
  2060  
  2061  // InsertHeaderChain attempts to insert the given header chain in to the local
  2062  // chain, possibly creating a reorg. If an error is returned, it will return the
  2063  // index number of the failing header as well an error describing what went wrong.
  2064  //
  2065  // The verify parameter can be used to fine tune whbtper nonce verification
  2066  // should be done or not. The reason behind the optional check is because some
  2067  // of the header retrieval mechanisms already need to verify nonces, as well as
  2068  // because nonces can be verified sparsely, not needing to check each.
  2069  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  2070  	start := time.Now()
  2071  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  2072  		return i, err
  2073  	}
  2074  
  2075  	// Make sure only one thread manipulates the chain at once
  2076  	bc.chainmu.Lock()
  2077  	defer bc.chainmu.Unlock()
  2078  
  2079  	bc.wg.Add(1)
  2080  	defer bc.wg.Done()
  2081  
  2082  	whFunc := func(header *types.Header) error {
  2083  		_, err := bc.hc.WriteHeader(header)
  2084  		return err
  2085  	}
  2086  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  2087  }
  2088  
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}
  2094  
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found. Delegates to the header chain.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}
  2100  
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found. Delegates to the header chain.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}
  2106  
// Gbtpeader retrieves a block header from the database by hash and number,
// caching it if found. Delegates to the header chain.
func (bc *BlockChain) Gbtpeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.Gbtpeader(hash, number)
}
  2112  
  2113  // GbtpeaderByHash retrieves a block header from the database by hash, caching it if
  2114  // found.
  2115  func (bc *BlockChain) GbtpeaderByHash(hash common.Hash) *types.Header {
  2116  	return bc.hc.GbtpeaderByHash(hash)
  2117  }
  2118  
  2119  // HasHeader checks if a block header is present in the database or not, caching
  2120  // it if present.
  2121  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  2122  	return bc.hc.HasHeader(hash, number)
  2123  }
  2124  
  2125  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  2126  // hash, fetching towards the genesis block.
  2127  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  2128  	return bc.hc.GetBlockHashesFromHash(hash, max)
  2129  }
  2130  
  2131  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  2132  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  2133  // number of blocks to be individually checked before we reach the canonical chain.
  2134  //
  2135  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
  2136  func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  2137  	bc.chainmu.RLock()
  2138  	defer bc.chainmu.RUnlock()
  2139  
  2140  	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  2141  }
  2142  
  2143  // GbtpeaderByNumber retrieves a block header from the database by number,
  2144  // caching it (associated with its hash) if found.
  2145  func (bc *BlockChain) GbtpeaderByNumber(number uint64) *types.Header {
  2146  	return bc.hc.GbtpeaderByNumber(number)
  2147  }
  2148  
  2149  // Config retrieves the chain's fork configuration.
  2150  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  2151  
  2152  // Engine retrieves the blockchain's consensus engine.
  2153  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  2154  
  2155  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  2156  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  2157  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  2158  }
  2159  
  2160  // SubscribeChainEvent registers a subscription of ChainEvent.
  2161  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  2162  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  2163  }
  2164  
  2165  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  2166  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  2167  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  2168  }
  2169  
  2170  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  2171  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  2172  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  2173  }
  2174  
  2175  // SubscribeLogsEvent registers a subscription of []*types.Log.
  2176  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  2177  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  2178  }
  2179  
  2180  // SubscribeBlockProcessingEvent registers a subscription of bool where true means
  2181  // block processing has started while false means it has stopped.
  2182  func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
  2183  	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
  2184  }