github.com/tacshi/go-ethereum@v0.0.0-20230616113857-84a434e20921/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math"
    25  	"math/big"
    26  	"runtime"
    27  	"sort"
    28  	"strings"
    29  	"sync"
    30  	"sync/atomic"
    31  	"time"
    32  
    33  	"github.com/tacshi/go-ethereum/common"
    34  	"github.com/tacshi/go-ethereum/common/lru"
    35  	"github.com/tacshi/go-ethereum/common/mclock"
    36  	"github.com/tacshi/go-ethereum/common/prque"
    37  	"github.com/tacshi/go-ethereum/consensus"
    38  	"github.com/tacshi/go-ethereum/core/rawdb"
    39  	"github.com/tacshi/go-ethereum/core/state"
    40  	"github.com/tacshi/go-ethereum/core/state/snapshot"
    41  	"github.com/tacshi/go-ethereum/core/types"
    42  	"github.com/tacshi/go-ethereum/core/vm"
    43  	"github.com/tacshi/go-ethereum/ethdb"
    44  	"github.com/tacshi/go-ethereum/event"
    45  	"github.com/tacshi/go-ethereum/internal/syncx"
    46  	"github.com/tacshi/go-ethereum/internal/version"
    47  	"github.com/tacshi/go-ethereum/log"
    48  	"github.com/tacshi/go-ethereum/metrics"
    49  	"github.com/tacshi/go-ethereum/params"
    50  	"github.com/tacshi/go-ethereum/rlp"
    51  	"github.com/tacshi/go-ethereum/trie"
    52  )
    53  
    54  var (
    55  	headBlockGauge          = metrics.NewRegisteredGauge("chain/head/block", nil)
    56  	headHeaderGauge         = metrics.NewRegisteredGauge("chain/head/header", nil)
    57  	headFastBlockGauge      = metrics.NewRegisteredGauge("chain/head/receipt", nil)
    58  	headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil)
    59  	headSafeBlockGauge      = metrics.NewRegisteredGauge("chain/head/safe", nil)
    60  
    61  	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
    62  	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
    63  	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
    64  	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
    65  
    66  	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
    67  	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
    68  	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
    69  	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
    70  
    71  	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
    72  	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
    73  	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
    74  
    75  	triedbCommitTimer = metrics.NewRegisteredTimer("chain/triedb/commits", nil)
    76  
    77  	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
    78  	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
    79  	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
    80  	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)
    81  
    82  	blockReorgMeter     = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
    83  	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
    84  	blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
    85  
    86  	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
    87  	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
    88  
    89  	errInsertionInterrupted = errors.New("insertion is interrupted")
    90  	errChainStopped         = errors.New("blockchain is stopped")
    91  )
    92  
    93  const (
    94  	bodyCacheLimit       = 256
    95  	blockCacheLimit      = 256
    96  	receiptsCacheLimit   = 32
    97  	txLookupCacheLimit   = 1024
    98  	maxFutureBlocks      = 256
    99  	maxTimeFutureBlocks  = 30
   100  	DefaultTriesInMemory = 128
   101  
   102  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
   103  	//
   104  	// Changelog:
   105  	//
   106  	// - Version 4
   107  	//   The following incompatible database changes were added:
   108  	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
   109  	//   * the `Bloom` field of receipt is deleted
   110  	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
    111  	// - Version 5
    112  	//   The following incompatible database changes were added:
    113  	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
    114  	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
    115  	//     receipts' corresponding block
    116  	// - Version 6
    117  	//   The following incompatible database changes were added:
    118  	//   * Transaction lookup information stores the corresponding block number instead of block hash
    119  	// - Version 7
    120  	//   The following incompatible database changes were added:
    121  	//   * Use freezer as the ancient database to maintain all ancient data
    122  	// - Version 8
    123  	//   The following incompatible database changes were added:
    124  	//   * New scheme for contract code in order to separate the codes and trie nodes
   125  	BlockChainVersion uint64 = 8
   126  )
   127  
   128  // CacheConfig contains the configuration values for the trie database
   129  // that's resident in a blockchain.
   130  type CacheConfig struct {
   131  	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
   132  	TrieCleanJournal    string        // Disk journal for saving clean cache entries.
   133  	TrieCleanRejournal  time.Duration // Time interval to dump clean cache to disk periodically
   134  	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
   135  	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
   136  	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
   137  	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
   138  	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory
   139  	Preimages           bool          // Whether to store preimage of trie key to the disk
   140  
    141  	SnapshotRestoreMaxGas uint64 // Roll back up to this much gas to restore the snapshot (otherwise the snapshot is recalculated from scratch)
   142  
   143  	// Arbitrum: configure GC window
   144  	TriesInMemory uint64        // Height difference before which a trie may not be garbage-collected
   145  	TrieRetention time.Duration // Time limit before which a trie may not be garbage-collected
   146  
    147  	SnapshotNoBuild bool // Whether to disable background snapshot generation
   148  	SnapshotWait    bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
   149  }
   150  
    151  // defaultCacheConfig is the default caching configuration used when none is
    152  // specified by the user (also used during testing).
   153  var defaultCacheConfig = &CacheConfig{
   154  
   155  	// Arbitrum Config Options
   156  	TriesInMemory: DefaultTriesInMemory,
   157  	TrieRetention: 30 * time.Minute,
   158  
   159  	TrieCleanLimit: 256,
   160  	TrieDirtyLimit: 256,
   161  	TrieTimeLimit:  5 * time.Minute,
   162  	SnapshotLimit:  256,
   163  	SnapshotWait:   true,
   164  }
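
// A minimal sketch of deriving a custom config from these defaults, e.g. for an
// archive-style node that never garbage-collects tries (the field values here
// are illustrative assumptions, not recommendations):
//
//	cfg := *defaultCacheConfig
//	cfg.TrieDirtyDisabled = true // archive mode: disable trie write caching and GC
//	cfg.TrieCleanLimit = 1024    // MB of clean trie-node cache
//	cfg.TriesInMemory = 512      // widen the Arbitrum GC window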
   165  
   166  // BlockChain represents the canonical chain given a database with a genesis
   167  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
   168  //
   169  // Importing blocks into the block chain happens according to the set of rules
   170  // defined by the two-stage Validator. Processing of blocks is done using the
   171  // Processor, which processes the included transactions. The validation of the
   172  // state is done in the second part of the Validator. Failing results in the
   173  // import being aborted.
   174  //
   175  // The BlockChain also helps in returning blocks from **any** chain included
   176  // in the database as well as blocks that represent the canonical chain. It's
   177  // important to note that GetBlock can return any block, which need not be
   178  // included in the canonical chain, whereas GetBlockByNumber always represents
   179  // the canonical chain.
   180  type BlockChain struct {
   181  	chainConfig *params.ChainConfig // Chain & network configuration
   182  	cacheConfig *CacheConfig        // Cache configuration for pruning
   183  
   184  	db            ethdb.Database                   // Low level persistent database to store final content in
   185  	snaps         *snapshot.Tree                   // Snapshot tree for fast trie leaf access
   186  	triegc        *prque.Prque[int64, trieGcEntry] // Priority queue mapping block numbers to tries to gc
   187  	gcproc        time.Duration                    // Accumulates canonical block processing for trie dumping
   188  	lastWrite     uint64                           // Last block when the state was flushed
   189  	flushInterval int64                            // Time interval (processing time) after which to flush a state
   190  	triedb        *trie.Database                   // The database handler for maintaining trie nodes.
   191  	stateCache    state.Database                   // State database to reuse between imports (contains state cache)
   192  
   193  	// txLookupLimit is the maximum number of blocks from head whose tx indices
   194  	// are reserved:
   195  	//  * 0:   means no limit and regenerate any missing indexes
   196  	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
   197  	//  * nil: disable tx reindexer/deleter, but still index new blocks
   198  	txLookupLimit uint64
   199  
   200  	hc            *HeaderChain
   201  	rmLogsFeed    event.Feed
   202  	chainFeed     event.Feed
   203  	chainSideFeed event.Feed
   204  	chainHeadFeed event.Feed
   205  	logsFeed      event.Feed
   206  	blockProcFeed event.Feed
   207  	scope         event.SubscriptionScope
   208  	genesisBlock  *types.Block
   209  
   210  	// This mutex synchronizes chain write operations.
   211  	// Readers don't need to take it, they can just read the database.
   212  	chainmu *syncx.ClosableMutex
   213  
   214  	currentBlock      atomic.Pointer[types.Header] // Current head of the chain
   215  	currentSnapBlock  atomic.Pointer[types.Header] // Current head of snap-sync
   216  	currentFinalBlock atomic.Pointer[types.Header] // Latest (consensus) finalized block
   217  	currentSafeBlock  atomic.Pointer[types.Header] // Latest (consensus) safe block
   218  
   219  	bodyCache     *lru.Cache[common.Hash, *types.Body]
   220  	bodyRLPCache  *lru.Cache[common.Hash, rlp.RawValue]
   221  	receiptsCache *lru.Cache[common.Hash, []*types.Receipt]
   222  	blockCache    *lru.Cache[common.Hash, *types.Block]
   223  	txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry]
   224  
   225  	// future blocks are blocks added for later processing
   226  	futureBlocks *lru.Cache[common.Hash, *types.Block]
   227  
    228  	wg            sync.WaitGroup // tracks background goroutines, awaited on shutdown
   229  	quit          chan struct{}  // shutdown signal, closed in Stop.
   230  	running       int32          // 0 if chain is running, 1 when stopped
   231  	procInterrupt int32          // interrupt signaler for block processing
   232  
   233  	engine     consensus.Engine
   234  	validator  Validator // Block and state validator interface
   235  	prefetcher Prefetcher
   236  	processor  Processor // Block transaction processor interface
   237  	forker     *ForkChoice
   238  	vmConfig   vm.Config
   239  }
   240  
   241  type trieGcEntry struct {
   242  	Root      common.Hash
   243  	Timestamp uint64
   244  }
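
// Entries are pushed onto bc.triegc keyed by the negative block number, so the
// max-priority queue surfaces the oldest retained trie first. A rough sketch of
// the convention (assumed from its use in Stop below):
//
//	bc.triegc.Push(trieGcEntry{root, header.Time}, -int64(blockNumber))
//	_, latest := bc.triegc.Peek()
//	oldest := uint64(-latest) // block number of the oldest queued trie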
   245  
   246  // NewBlockChain returns a fully initialised block chain using information
   247  // available in the database. It initialises the default Ethereum Validator
   248  // and Processor.
   249  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) {
   250  	if cacheConfig == nil {
   251  		cacheConfig = defaultCacheConfig
   252  	}
   253  
   254  	// Open trie database with provided config
   255  	triedb := trie.NewDatabaseWithConfig(db, &trie.Config{
   256  		Cache:     cacheConfig.TrieCleanLimit,
   257  		Journal:   cacheConfig.TrieCleanJournal,
   258  		Preimages: cacheConfig.Preimages,
   259  	})
   260  
   261  	var genesisHash common.Hash
   262  	var genesisErr error
   263  
   264  	if chainConfig != nil && chainConfig.IsArbitrum() {
   265  		genesisHash = rawdb.ReadCanonicalHash(db, chainConfig.ArbitrumChainParams.GenesisBlockNum)
   266  		if genesisHash == (common.Hash{}) {
   267  			return nil, ErrNoGenesis
   268  		}
   269  	} else {
   270  		// Setup the genesis block, commit the provided genesis specification
   271  		// to database if the genesis block is not present yet, or load the
   272  		// stored one from database.
   273  		chainConfig, genesisHash, genesisErr = SetupGenesisBlockWithOverride(db, triedb, genesis, overrides)
   274  		if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
   275  			return nil, genesisErr
   276  		}
   277  	}
   278  	log.Info("")
   279  	log.Info(strings.Repeat("-", 153))
   280  	for _, line := range strings.Split(chainConfig.Description(), "\n") {
   281  		log.Info(line)
   282  	}
   283  	log.Info(strings.Repeat("-", 153))
   284  	log.Info("")
   285  
   286  	bc := &BlockChain{
   287  		chainConfig:   chainConfig,
   288  		cacheConfig:   cacheConfig,
   289  		db:            db,
   290  		triedb:        triedb,
   291  		flushInterval: int64(cacheConfig.TrieTimeLimit),
   292  		triegc:        prque.New[int64, trieGcEntry](nil),
   293  		quit:          make(chan struct{}),
   294  		chainmu:       syncx.NewClosableMutex(),
   295  		bodyCache:     lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
   296  		bodyRLPCache:  lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
   297  		receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
   298  		blockCache:    lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
   299  		txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit),
   300  		futureBlocks:  lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
   301  		engine:        engine,
   302  		vmConfig:      vmConfig,
   303  	}
   304  	bc.forker = NewForkChoice(bc, shouldPreserve)
   305  	bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
   306  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   307  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   308  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   309  
   310  	var err error
   311  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
   312  	if err != nil {
   313  		return nil, err
   314  	}
   315  	if chainConfig.IsArbitrum() {
   316  		bc.genesisBlock = bc.GetBlockByNumber(chainConfig.ArbitrumChainParams.GenesisBlockNum)
   317  	} else {
   318  		bc.genesisBlock = bc.GetBlockByNumber(0)
   319  	}
   320  	if bc.genesisBlock == nil {
   321  		return nil, ErrNoGenesis
   322  	}
   323  
   324  	bc.currentBlock.Store(nil)
   325  	bc.currentSnapBlock.Store(nil)
   326  	bc.currentFinalBlock.Store(nil)
   327  	bc.currentSafeBlock.Store(nil)
   328  
    329  	// If Geth is initialized with an external ancient store, re-initialize the
    330  	// missing chain indexes and chain flags. This procedure can survive a crash
    331  	// and can be resumed on the next restart, since chain flags are updated in the last step.
   332  	if bc.empty() {
   333  		rawdb.InitDatabaseFromFreezer(bc.db)
   334  	}
   335  	// Load blockchain states from disk
   336  	if err := bc.loadLastState(); err != nil {
   337  		return nil, err
   338  	}
   339  	// Make sure the state associated with the block is available
   340  	head := bc.CurrentBlock()
   341  	if !bc.HasState(head.Root) {
    342  		// Head state is missing; before the state recovery, find out the
    343  		// disk layer point of the snapshot (if it's enabled). Make sure the
    344  		// rewound point is lower than the disk layer.
   345  		var diskRoot common.Hash
   346  		if bc.cacheConfig.SnapshotLimit > 0 {
   347  			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
   348  		}
   349  		if diskRoot != (common.Hash{}) {
   350  			log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
   351  
   352  			snapDisk, diskRootFound, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true, bc.cacheConfig.SnapshotRestoreMaxGas)
   353  			if err != nil {
   354  				return nil, err
   355  			}
   356  			// Chain rewound, persist old snapshot number to indicate recovery procedure
   357  			if diskRootFound {
   358  				rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
   359  			} else {
   360  				log.Warn("Snapshot root not found or too far back. Recreating snapshot from scratch.")
   361  				rawdb.DeleteSnapshotRecoveryNumber(bc.db)
   362  			}
   363  		} else {
   364  			log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
   365  			if _, _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true, 0); err != nil {
   366  				return nil, err
   367  			}
   368  		}
   369  	}
   370  	// Ensure that a previous crash in SetHead doesn't leave extra ancients
   371  	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
   372  		var (
   373  			needRewind bool
   374  			low        uint64
   375  		)
   376  		// The head full block may be rolled back to a very low height due to
   377  		// blockchain repair. If the head full block is even lower than the ancient
   378  		// chain, truncate the ancient store.
   379  		fullBlock := bc.CurrentBlock()
   380  		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 {
   381  			needRewind = true
   382  			low = fullBlock.Number.Uint64()
   383  		}
    384  		// In fast sync, it may happen that ancient data has been written to the
    385  		// ancient store, but the LastFastBlock has not been updated; truncate the
    386  		// extra data here.
   387  		snapBlock := bc.CurrentSnapBlock()
   388  		if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 {
   389  			needRewind = true
   390  			if snapBlock.Number.Uint64() < low || low == 0 {
   391  				low = snapBlock.Number.Uint64()
   392  			}
   393  		}
   394  		if needRewind {
   395  			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
   396  			if err := bc.SetHead(low); err != nil {
   397  				return nil, err
   398  			}
   399  		}
   400  	}
   401  	// The first thing the node will do is reconstruct the verification data for
   402  	// the head block (ethash cache or clique voting snapshot). Might as well do
   403  	// it in advance.
   404  	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
   405  
   406  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   407  	for hash := range BadHashes {
   408  		if header := bc.GetHeaderByHash(hash); header != nil {
   409  			// get the canonical block corresponding to the offending header's number
   410  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   411  			// make sure the headerByNumber (if present) is in our current canonical chain
   412  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   413  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   414  				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
   415  					return nil, err
   416  				}
   417  				log.Error("Chain rewind was successful, resuming normal operation")
   418  			}
   419  		}
   420  	}
   421  
   422  	// Load any existing snapshot, regenerating it if loading failed
   423  	if bc.cacheConfig.SnapshotLimit > 0 {
   424  		// If the chain was rewound past the snapshot persistent layer (causing
   425  		// a recovery block number to be persisted to disk), check if we're still
   426  		// in recovery mode and in that case, don't invalidate the snapshot on a
   427  		// head mismatch.
   428  		var recover bool
   429  
   430  		head := bc.CurrentBlock()
   431  		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() {
   432  			log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer)
   433  			recover = true
   434  		}
   435  		snapconfig := snapshot.Config{
   436  			CacheSize:  bc.cacheConfig.SnapshotLimit,
   437  			Recovery:   recover,
   438  			NoBuild:    bc.cacheConfig.SnapshotNoBuild,
   439  			AsyncBuild: !bc.cacheConfig.SnapshotWait,
   440  		}
   441  		bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
   442  	}
   443  
   444  	// Start future block processor.
   445  	bc.wg.Add(1)
   446  	go bc.updateFutureBlocks()
   447  
   448  	// If periodic cache journal is required, spin it up.
   449  	if bc.cacheConfig.TrieCleanRejournal > 0 {
   450  		if bc.cacheConfig.TrieCleanRejournal < time.Minute {
   451  			log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
   452  			bc.cacheConfig.TrieCleanRejournal = time.Minute
   453  		}
   454  		bc.wg.Add(1)
   455  		go func() {
   456  			defer bc.wg.Done()
   457  			bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
   458  		}()
   459  	}
   460  	// Rewind the chain in case of an incompatible config upgrade.
   461  	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
   462  		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
   463  		if compat.RewindToTime > 0 {
   464  			bc.SetHeadWithTimestamp(compat.RewindToTime)
   465  		} else {
   466  			bc.SetHead(compat.RewindToBlock)
   467  		}
   468  		rawdb.WriteChainConfig(db, genesisHash, chainConfig)
   469  	}
   470  	// Start tx indexer/unindexer if required.
   471  	if txLookupLimit != nil {
   472  		bc.txLookupLimit = *txLookupLimit
   473  
   474  		bc.wg.Add(1)
   475  		go bc.maintainTxIndex()
   476  	}
   477  	return bc, nil
   478  }
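
// A minimal usage sketch, assuming an open ethdb.Database, a chain config and a
// consensus engine are already in hand (error handling elided):
//
//	chain, err := NewBlockChain(db, nil, chainConfig, nil, nil, engine, vm.Config{}, nil, nil)
//	if err != nil {
//		log.Crit("Failed to create blockchain", "err", err)
//	}
//	defer chain.Stop()
//	if _, err := chain.InsertChain(blocks); err != nil { // import a contiguous batch
//		log.Error("Import failed", "err", err)
//	}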
   479  
    480  // empty returns an indicator whether the blockchain is empty.
    481  // Note: it's a special case that we connect a non-empty ancient
    482  // database with an empty node, so that we can plug the ancient
    483  // store into the node seamlessly.
   484  func (bc *BlockChain) empty() bool {
   485  	genesis := bc.genesisBlock.Hash()
   486  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   487  		if hash != genesis {
   488  			return false
   489  		}
   490  	}
   491  	return true
   492  }
   493  
   494  // loadLastState loads the last known chain state from the database. This method
   495  // assumes that the chain manager mutex is held.
   496  func (bc *BlockChain) loadLastState() error {
   497  	// Restore the last known head block
   498  	head := rawdb.ReadHeadBlockHash(bc.db)
   499  	if head == (common.Hash{}) {
   500  		// Corrupt or empty database, init from scratch
   501  		log.Warn("Empty database, resetting chain")
   502  		return bc.Reset()
   503  	}
   504  	// Make sure the entire head block is available
   505  	headBlock := bc.GetBlockByHash(head)
   506  	if headBlock == nil {
   507  		// Corrupt or empty database, init from scratch
   508  		log.Warn("Head block missing, resetting chain", "hash", head)
   509  		return bc.Reset()
   510  	}
   511  	// Everything seems to be fine, set as the head block
   512  	bc.currentBlock.Store(headBlock.Header())
   513  	headBlockGauge.Update(int64(headBlock.NumberU64()))
   514  
   515  	// Restore the last known head header
   516  	headHeader := headBlock.Header()
   517  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   518  		if header := bc.GetHeaderByHash(head); header != nil {
   519  			headHeader = header
   520  		}
   521  	}
   522  	bc.hc.SetCurrentHeader(headHeader)
   523  
   524  	// Restore the last known head fast block
   525  	bc.currentSnapBlock.Store(headBlock.Header())
   526  	headFastBlockGauge.Update(int64(headBlock.NumberU64()))
   527  
   528  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   529  		if block := bc.GetBlockByHash(head); block != nil {
   530  			bc.currentSnapBlock.Store(block.Header())
   531  			headFastBlockGauge.Update(int64(block.NumberU64()))
   532  		}
   533  	}
   534  
   535  	// Restore the last known finalized block and safe block
   536  	// Note: the safe block is not stored on disk and it is set to the last
   537  	// known finalized block on startup
   538  	if head := rawdb.ReadFinalizedBlockHash(bc.db); head != (common.Hash{}) {
   539  		if block := bc.GetBlockByHash(head); block != nil {
   540  			bc.currentFinalBlock.Store(block.Header())
   541  			headFinalizedBlockGauge.Update(int64(block.NumberU64()))
   542  			bc.currentSafeBlock.Store(block.Header())
   543  			headSafeBlockGauge.Update(int64(block.NumberU64()))
   544  		}
   545  	}
   546  	// Issue a status log for the user
   547  	var (
   548  		currentSnapBlock  = bc.CurrentSnapBlock()
   549  		currentFinalBlock = bc.CurrentFinalBlock()
   550  
   551  		headerTd = bc.GetTd(headHeader.Hash(), headHeader.Number.Uint64())
   552  		blockTd  = bc.GetTd(headBlock.Hash(), headBlock.NumberU64())
   553  	)
   554  	if headHeader.Hash() != headBlock.Hash() {
   555  		log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
   556  	}
   557  	log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
   558  	if headBlock.Hash() != currentSnapBlock.Hash() {
   559  		fastTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64())
   560  		log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
   561  	}
   562  	if currentFinalBlock != nil {
   563  		finalTd := bc.GetTd(currentFinalBlock.Hash(), currentFinalBlock.Number.Uint64())
   564  		log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0)))
   565  	}
   566  	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
   567  		log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
   568  	}
   569  	return nil
   570  }
   571  
   572  // SetHead rewinds the local chain to a new head. Depending on whether the node
   573  // was fast synced or full synced and in which state, the method will try to
   574  // delete minimal data from disk whilst retaining chain consistency.
   575  func (bc *BlockChain) SetHead(head uint64) error {
   576  	if _, _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false, 0); err != nil {
   577  		return err
   578  	}
   579  	// Send chain head event to update the transaction pool
   580  	header := bc.CurrentBlock()
   581  	block := bc.GetBlock(header.Hash(), header.Number.Uint64())
   582  	if block == nil {
    583  		// This should never happen. In practice, previously currentBlock
    584  		// contained the entire block whereas now it is only a "marker", so there
    585  		// is an ever so slight chance for a race we should handle.
   586  		log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   587  		return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   588  	}
   589  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   590  	return nil
   591  }
   592  
   593  // SetHeadWithTimestamp rewinds the local chain to a new head that has at max
   594  // the given timestamp. Depending on whether the node was fast synced or full
   595  // synced and in which state, the method will try to delete minimal data from
   596  // disk whilst retaining chain consistency.
   597  func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
   598  	if _, _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false, 0); err != nil {
   599  		return err
   600  	}
   601  	// Send chain head event to update the transaction pool
   602  	header := bc.CurrentBlock()
   603  	block := bc.GetBlock(header.Hash(), header.Number.Uint64())
   604  	if block == nil {
    605  		// This should never happen. In practice, previously currentBlock
    606  		// contained the entire block whereas now it is only a "marker", so there
    607  		// is an ever so slight chance for a race we should handle.
   608  		log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   609  		return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   610  	}
   611  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   612  	return nil
   613  }
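
// For example, an operator-driven rollback might use either form (the values
// here are illustrative only):
//
//	_ = bc.SetHead(1_000_000)               // rewind to block number 1,000,000
//	_ = bc.SetHeadWithTimestamp(1672531200) // rewind to the last block at or before this unix time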
   614  
   615  // SetFinalized sets the finalized block.
   616  func (bc *BlockChain) SetFinalized(header *types.Header) {
   617  	bc.currentFinalBlock.Store(header)
   618  	if header != nil {
   619  		rawdb.WriteFinalizedBlockHash(bc.db, header.Hash())
   620  		headFinalizedBlockGauge.Update(int64(header.Number.Uint64()))
   621  	} else {
   622  		rawdb.WriteFinalizedBlockHash(bc.db, common.Hash{})
   623  		headFinalizedBlockGauge.Update(0)
   624  	}
   625  }
   626  
   627  // SetSafe sets the safe block.
   628  func (bc *BlockChain) SetSafe(header *types.Header) {
   629  	bc.currentSafeBlock.Store(header)
   630  	if header != nil {
   631  		headSafeBlockGauge.Update(int64(header.Number.Uint64()))
   632  	} else {
   633  		headSafeBlockGauge.Update(0)
   634  	}
   635  }
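
// Both markers are typically driven by the consensus layer, e.g. in response to
// an engine-API forkchoice update (sketch; headers assumed non-nil and already
// validated):
//
//	bc.SetFinalized(finalizedHeader)
//	bc.SetSafe(safeHeader)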
   636  
    637  // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
    638  // that the rewind must pass the specified state root. The extra condition is
    639  // ignored if it causes rolling back more than rewindLimit gas (0 meaning infinite).
    640  // If the limit was hit, rewind to the last block with state. This method is meant
    641  // to be used when rewinding with snapshots enabled to ensure that we go back
    642  // further than the persistent disk layer. Depending on whether the node was fast
    643  // synced or full synced, and in which state, the method will try to delete
    644  // minimal data from disk whilst retaining chain consistency.
    645  //
    646  // The method also works in timestamp mode if `head == 0` but `time != 0`. In that
    647  // case blocks are rolled back until the new head becomes older or equal to the
    648  // requested time. If both `head` and `time` are 0, the chain is rewound to genesis.
    649  //
    650  // The method returns the block number where the requested root cap was found.
   651  func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool, rewindLimit uint64) (uint64, bool, error) {
   652  	if !bc.chainmu.TryLock() {
   653  		return 0, false, errChainStopped
   654  	}
   655  	defer bc.chainmu.Unlock()
   656  
   657  	// Track the block number of the requested root hash
   658  	var blockNumber uint64 // (no root == always 0)
   659  	var rootFound bool
   660  
    661  	// Retrieve the last pivot block to short-circuit rollbacks beyond it, and the
    662  	// current freezer limit to start nuking if underflowed
   663  	pivot := rawdb.ReadLastPivotNumber(bc.db)
   664  	frozen, _ := bc.db.Ancients()
   665  
   666  	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
   667  		// Rewind the blockchain, ensuring we don't end up with a stateless head
   668  		// block. Note, depth equality is permitted to allow using SetHead as a
   669  		// chain reparation mechanism without deleting any data!
   670  		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.Number.Uint64() {
   671  			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
   672  			if newHeadBlock == nil {
   673  				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
   674  				newHeadBlock = bc.genesisBlock
   675  			} else {
    676  				// Block exists; keep rewinding until we find one with state,
    677  				// continuing until we exceed the optional threshold
    678  				// root hash
   679  				rootFound = (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
   680  				lastFullBlock := uint64(0)
   681  				lastFullBlockHash := common.Hash{}
   682  				gasRolledBack := uint64(0)
   683  
   684  				for {
   685  					if rewindLimit > 0 && lastFullBlock != 0 {
   686  						// Arbitrum: track the amount of gas rolled back and stop the rollback early if necessary
   687  						gasUsedInBlock := newHeadBlock.GasUsed()
   688  						if bc.chainConfig.IsArbitrum() {
   689  							receipts := bc.GetReceiptsByHash(newHeadBlock.Hash())
   690  							for _, receipt := range receipts {
   691  								gasUsedInBlock -= receipt.GasUsedForL1
   692  							}
   693  						}
   694  						gasRolledBack += gasUsedInBlock
   695  						if gasRolledBack >= rewindLimit {
   696  							blockNumber = lastFullBlock
   697  							newHeadBlock = bc.GetBlock(lastFullBlockHash, lastFullBlock)
   698  							log.Debug("Rewound to block with state but not snapshot", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   699  							break
   700  						}
   701  					}
   702  
   703  					// If a root threshold was requested but not yet crossed, check
   704  					if root != (common.Hash{}) && !rootFound && newHeadBlock.Root() == root {
   705  						rootFound, blockNumber = true, newHeadBlock.NumberU64()
   706  					}
   707  					if !bc.HasState(newHeadBlock.Root()) {
   708  						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   709  						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
   710  							parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
   711  							if parent != nil {
   712  								newHeadBlock = parent
   713  								continue
   714  							}
   715  							log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
   716  							newHeadBlock = bc.genesisBlock
   717  						} else {
   718  							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
   719  							newHeadBlock = bc.genesisBlock
   720  						}
   721  					} else if lastFullBlock == 0 {
   722  						lastFullBlock = newHeadBlock.NumberU64()
   723  						lastFullBlockHash = newHeadBlock.Hash()
   724  					}
   725  
   726  					if rootFound || newHeadBlock.NumberU64() <= bc.genesisBlock.NumberU64() {
   727  						if newHeadBlock.NumberU64() <= bc.genesisBlock.NumberU64() {
    728  						// Recommit the genesis state to disk in case the rewinding destination
    729  						// is the genesis block and the relevant state is gone. In the future this
    730  						// rewinding destination can be the earliest block stored in the chain
    731  						// if historical chain pruning is enabled. In that case the logic
    732  						// needs to be improved here.
   733  							if !bc.HasState(bc.genesisBlock.Root()) {
   734  								// Arbitrum: we have a later block with state; use that instead.
   735  								if lastFullBlock != 0 {
   736  									blockNumber = lastFullBlock
   737  									newHeadBlock = bc.GetBlock(lastFullBlockHash, lastFullBlock)
   738  									log.Debug("Rewound to block with state but not snapshot", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   739  									break
   740  								}
   741  								if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
   742  									log.Crit("Failed to commit genesis state", "err", err)
   743  								}
   744  								log.Debug("Recommitted genesis state to disk")
   745  							}
   746  							newHeadBlock = bc.genesisBlock
   747  						}
   748  						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   749  						break
   750  					}
   751  					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
   752  					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
   753  				}
   754  			}
   755  			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
   756  
   757  			// Degrade the chain markers if they are explicitly reverted.
   758  			// In theory we should update all in-memory markers in the
   759  			// last step, however the direction of SetHead is from high
   760  			// to low, so it's safe to update in-memory markers directly.
   761  			bc.currentBlock.Store(newHeadBlock.Header())
   762  			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
   763  		}
   764  		// Rewind the fast block in a simpleton way to the target head
   765  		if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() {
   766  			newHeadSnapBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
    767  			// If the rewound block is nil, reset to the genesis state
   768  			if newHeadSnapBlock == nil {
   769  				newHeadSnapBlock = bc.genesisBlock
   770  			}
   771  			rawdb.WriteHeadFastBlockHash(db, newHeadSnapBlock.Hash())
   772  
   773  			// Degrade the chain markers if they are explicitly reverted.
   774  			// In theory we should update all in-memory markers in the
   775  			// last step, however the direction of SetHead is from high
    776  			// to low, so it's safe to update in-memory markers directly.
   777  			bc.currentSnapBlock.Store(newHeadSnapBlock.Header())
   778  			headFastBlockGauge.Update(int64(newHeadSnapBlock.NumberU64()))
   779  		}
   780  		var (
   781  			headHeader = bc.CurrentBlock()
   782  			headNumber = headHeader.Number.Uint64()
   783  		)
    784  		// If setHead underflowed the freezer threshold and the block processing
    785  		// intent afterwards is full block importing, delete the chain segment
    786  		// between the stateful block and the sethead target.
   787  		var wipe bool
   788  		if headNumber+1 < frozen {
   789  			wipe = pivot == nil || headNumber >= *pivot
   790  		}
   791  		return headHeader, wipe // Only force wipe if full synced
   792  	}
   793  	// Rewind the header chain, deleting all block bodies until then
   794  	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
   795  		// Ignore the error here since light client won't hit this path
   796  		frozen, _ := bc.db.Ancients()
   797  		if num+1 <= frozen {
    798  			// Truncate all related data (header, total difficulty, body, receipt
    799  			// and canonical hash) from the ancient store.
   800  			if err := bc.db.TruncateHead(num); err != nil {
   801  				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
   802  			}
   803  			// Remove the hash <-> number mapping from the active store.
   804  			rawdb.DeleteHeaderNumber(db, hash)
   805  		} else {
    806  			// Remove the related body and receipts from the active store.
   807  			// The header, total difficulty and canonical hash will be
   808  			// removed in the hc.SetHead function.
   809  			rawdb.DeleteBody(db, hash, num)
   810  			rawdb.DeleteReceipts(db, hash, num)
   811  		}
   812  		// Todo(rjl493456442) txlookup, bloombits, etc
   813  	}
   814  	// If SetHead was only called as a chain reparation method, try to skip
   815  	// touching the header chain altogether, unless the freezer is broken
   816  	if repair {
   817  		if target, force := updateFn(bc.db, bc.CurrentBlock()); force {
   818  			bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
   819  		}
   820  	} else {
   821  		// Rewind the chain to the requested head and keep going backwards until a
   822  		// block with a state is found or fast sync pivot is passed
   823  		if time > 0 {
   824  			log.Warn("Rewinding blockchain to timestamp", "target", time)
   825  			bc.hc.SetHeadWithTimestamp(time, updateFn, delFn)
   826  		} else {
   827  			log.Warn("Rewinding blockchain to block", "target", head)
   828  			bc.hc.SetHead(head, updateFn, delFn)
   829  		}
   830  	}
   831  	// Clear out any stale content from the caches
   832  	bc.bodyCache.Purge()
   833  	bc.bodyRLPCache.Purge()
   834  	bc.receiptsCache.Purge()
   835  	bc.blockCache.Purge()
   836  	bc.txLookupCache.Purge()
   837  	bc.futureBlocks.Purge()
   838  
   839  	// Clear safe block, finalized block if needed
   840  	if safe := bc.CurrentSafeBlock(); safe != nil && head < safe.Number.Uint64() {
   841  		log.Warn("SetHead invalidated safe block")
   842  		bc.SetSafe(nil)
   843  	}
   844  	if finalized := bc.CurrentFinalBlock(); finalized != nil && head < finalized.Number.Uint64() {
   845  		log.Error("SetHead invalidated finalized block")
   846  		bc.SetFinalized(nil)
   847  	}
   848  
   849  	return blockNumber, rootFound, bc.loadLastState()
   850  }
   851  
    852  // SnapSyncCommitHead sets the current head block to the one defined by the hash,
    853  // irrespective of what the chain contents were prior.
   854  func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
    855  	// Make sure that both the block and its state trie exist
   856  	block := bc.GetBlockByHash(hash)
   857  	if block == nil {
   858  		return fmt.Errorf("non existent block [%x..]", hash[:4])
   859  	}
   860  	root := block.Root()
   861  	if !bc.HasState(root) {
   862  		return fmt.Errorf("non existent state [%x..]", root[:4])
   863  	}
   864  	// If all checks out, manually set the head block.
   865  	if !bc.chainmu.TryLock() {
   866  		return errChainStopped
   867  	}
   868  	bc.currentBlock.Store(block.Header())
   869  	headBlockGauge.Update(int64(block.NumberU64()))
   870  	bc.chainmu.Unlock()
   871  
   872  	// Destroy any existing state snapshot and regenerate it in the background,
   873  	// also resuming the normal maintenance of any previously paused snapshot.
   874  	if bc.snaps != nil {
   875  		bc.snaps.Rebuild(root)
   876  	}
   877  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   878  	return nil
   879  }
   880  
   881  // Reset purges the entire blockchain, restoring it to its genesis state.
   882  func (bc *BlockChain) Reset() error {
   883  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   884  }
   885  
   886  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   887  // specified genesis state.
   888  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   889  	// Dump the entire block chain and purge the caches
   890  	if err := bc.SetHead(0); err != nil {
   891  		return err
   892  	}
   893  	if !bc.chainmu.TryLock() {
   894  		return errChainStopped
   895  	}
   896  	defer bc.chainmu.Unlock()
   897  
   898  	// Prepare the genesis block and reinitialise the chain
   899  	batch := bc.db.NewBatch()
   900  	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
   901  	rawdb.WriteBlock(batch, genesis)
   902  	if err := batch.Write(); err != nil {
   903  		log.Crit("Failed to write genesis block", "err", err)
   904  	}
   905  	bc.writeHeadBlock(genesis)
   906  
   907  	// Last update all in-memory chain markers
   908  	bc.genesisBlock = genesis
   909  	bc.currentBlock.Store(bc.genesisBlock.Header())
   910  	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
   911  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   912  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   913  	bc.currentSnapBlock.Store(bc.genesisBlock.Header())
   914  	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
   915  	return nil
   916  }
   917  
   918  // Export writes the active chain to the given writer.
   919  func (bc *BlockChain) Export(w io.Writer) error {
   920  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().Number.Uint64())
   921  }
   922  
   923  // ExportN writes a subset of the active chain to the given writer.
   924  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   925  	if first > last {
   926  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   927  	}
   928  	log.Info("Exporting batch of blocks", "count", last-first+1)
   929  
   930  	var (
   931  		parentHash common.Hash
   932  		start      = time.Now()
   933  		reported   = time.Now()
   934  	)
   935  	for nr := first; nr <= last; nr++ {
   936  		block := bc.GetBlockByNumber(nr)
   937  		if block == nil {
   938  			return fmt.Errorf("export failed on #%d: not found", nr)
   939  		}
   940  		if nr > first && block.ParentHash() != parentHash {
   941  			return fmt.Errorf("export failed: chain reorg during export")
   942  		}
   943  		parentHash = block.Hash()
   944  		if err := block.EncodeRLP(w); err != nil {
   945  			return err
   946  		}
   947  		if time.Since(reported) >= statsReportLimit {
   948  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   949  			reported = time.Now()
   950  		}
   951  	}
   952  	return nil
   953  }
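
// A short sketch of dumping the whole chain to a gzip-compressed RLP file
// (hypothetical file name, error handling elided):
//
//	f, _ := os.Create("chain.rlp.gz")
//	defer f.Close()
//	zw := gzip.NewWriter(f)
//	defer zw.Close()
//	_ = bc.Export(zw) // or bc.ExportN(zw, first, last) for a sub-range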
   954  
   955  // writeHeadBlock injects a new head block into the current block chain. This method
   956  // assumes that the block is indeed a true head. It will also reset the head
   957  // header and the head fast sync block to this very same block if they are older
   958  // or if they are on a different side chain.
   959  //
    960  // Note, this function assumes that the `chainmu` mutex is held!
   961  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
   962  	// Add the block to the canonical chain number scheme and mark as the head
   963  	batch := bc.db.NewBatch()
   964  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
   965  	rawdb.WriteHeadFastBlockHash(batch, block.Hash())
   966  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
   967  	rawdb.WriteTxLookupEntriesByBlock(batch, block)
   968  	rawdb.WriteHeadBlockHash(batch, block.Hash())
   969  
   970  	// Flush the whole batch into the disk, exit the node if failed
   971  	if err := batch.Write(); err != nil {
   972  		log.Crit("Failed to update chain indexes and markers", "err", err)
   973  	}
   974  	// Update all in-memory chain markers in the last step
   975  	bc.hc.SetCurrentHeader(block.Header())
   976  
   977  	bc.currentSnapBlock.Store(block.Header())
   978  	headFastBlockGauge.Update(int64(block.NumberU64()))
   979  
   980  	bc.currentBlock.Store(block.Header())
   981  	headBlockGauge.Update(int64(block.NumberU64()))
   982  }
   983  
    984  // stopWithoutSaving stops the blockchain service. If any imports are currently in
    985  // progress it will abort them using the procInterrupt. This method stops all
    986  // running goroutines, but does not do all the post-stop work of persisting data.
    987  // OBS! It is generally recommended to use the Stop method!
    988  // This method has been exposed to allow tests to stop the blockchain while simulating
    989  // a crash.
   990  func (bc *BlockChain) stopWithoutSaving() {
   991  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   992  		return
   993  	}
   994  
   995  	// Unsubscribe all subscriptions registered from blockchain.
   996  	bc.scope.Close()
   997  
   998  	// Signal shutdown to all goroutines.
   999  	close(bc.quit)
  1000  	bc.StopInsert()
  1001  
  1002  	// Now wait for all chain modifications to end and persistent goroutines to exit.
  1003  	//
  1004  	// Note: Close waits for the mutex to become available, i.e. any running chain
  1005  	// modification will have exited when Close returns. Since we also called StopInsert,
  1006  	// the mutex should become available quickly. It cannot be taken again after Close has
  1007  	// returned.
  1008  	bc.chainmu.Close()
  1009  	bc.wg.Wait()
  1010  }
  1011  
  1012  // Stop stops the blockchain service. If any imports are currently in progress
  1013  // it will abort them using the procInterrupt.
  1014  func (bc *BlockChain) Stop() {
  1015  	bc.stopWithoutSaving()
  1016  
  1017  	// Ensure that the entirety of the state snapshot is journalled to disk.
  1018  	var snapBase common.Hash
  1019  	if bc.snaps != nil {
  1020  		var err error
  1021  		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root); err != nil {
  1022  			log.Error("Failed to journal state snapshot", "err", err)
  1023  		}
  1024  	}
  1025  
  1026  	// Ensure the state of a recent block is also stored to disk before exiting.
  1027  	// We're writing three different states to catch different restart scenarios:
  1028  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
  1029  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   1030  	//  - HEAD-(TriesInMemory-1): So we have a hard limit on the number of blocks reexecuted (127 by default)
  1031  	if !bc.cacheConfig.TrieDirtyDisabled {
  1032  		triedb := bc.triedb
  1033  
   1034  		for _, offset := range []uint64{0, 1, bc.cacheConfig.TriesInMemory - 1, math.MaxUint64} {
   1035  			if number := bc.CurrentBlock().Number.Uint64(); number > offset {
   1036  				var recent *types.Block
   1037  				if offset == math.MaxUint64 {
   1038  					_, latest := bc.triegc.Peek() // Arbitrum: oldest block still referenced by the trie GC queue
   1039  					recent = bc.GetBlockByNumber(uint64(-latest))
   1040  				} else {
   1041  					recent = bc.GetBlockByNumber(number - offset)
   1042  				}
   1043  				if recent == nil || recent.Root() == (common.Hash{}) {
   1044  					continue
   1045  				}
  1046  
  1047  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
  1048  				if err := triedb.Commit(recent.Root(), true); err != nil {
  1049  					log.Error("Failed to commit recent state trie", "err", err)
  1050  				}
  1051  			}
  1052  		}
  1053  		if snapBase != (common.Hash{}) {
  1054  			log.Info("Writing snapshot state to disk", "root", snapBase)
  1055  			if err := triedb.Commit(snapBase, true); err != nil {
  1056  				log.Error("Failed to commit recent state trie", "err", err)
  1057  			}
  1058  		}
  1059  		for !bc.triegc.Empty() {
  1060  			triedb.Dereference(bc.triegc.PopItem().Root)
  1061  		}
  1062  		if size, _ := triedb.Size(); size != 0 {
  1063  			log.Error("Dangling trie nodes after full cleanup")
  1064  		}
  1065  	}
  1066  	// Flush the collected preimages to disk
  1067  	if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil {
  1068  		log.Error("Failed to commit trie preimages", "err", err)
  1069  	}
   1070  	// Ensure all live cached entries are saved to disk, so that we can skip
   1071  	// cache warmup when the node restarts.
  1072  	if bc.cacheConfig.TrieCleanJournal != "" {
  1073  		bc.triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
  1074  	}
  1075  	log.Info("Blockchain stopped")
  1076  }
  1077  
  1078  // StopInsert interrupts all insertion methods, causing them to return
  1079  // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
  1080  // calling this method.
  1081  func (bc *BlockChain) StopInsert() {
  1082  	atomic.StoreInt32(&bc.procInterrupt, 1)
  1083  }
  1084  
  1085  // insertStopped returns true after StopInsert has been called.
  1086  func (bc *BlockChain) insertStopped() bool {
  1087  	return atomic.LoadInt32(&bc.procInterrupt) == 1
  1088  }
  1089  
  1090  func (bc *BlockChain) procFutureBlocks() {
  1091  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
  1092  	for _, hash := range bc.futureBlocks.Keys() {
  1093  		if block, exist := bc.futureBlocks.Peek(hash); exist {
  1094  			blocks = append(blocks, block)
  1095  		}
  1096  	}
  1097  	if len(blocks) > 0 {
  1098  		sort.Slice(blocks, func(i, j int) bool {
  1099  			return blocks[i].NumberU64() < blocks[j].NumberU64()
  1100  		})
  1101  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
  1102  		for i := range blocks {
  1103  			bc.InsertChain(blocks[i : i+1])
  1104  		}
  1105  	}
  1106  }
  1107  
   1108  // WriteStatus is the status of a block write operation.
  1109  type WriteStatus byte
  1110  
  1111  const (
   1112  	NonStatTy   WriteStatus = iota // Block was not written (e.g. the write failed)
   1113  	CanonStatTy                    // Block was written as part of the canonical chain
   1114  	SideStatTy                     // Block was written to a side chain
  1115  )
  1116  
  1117  // InsertReceiptChain attempts to complete an already existing header chain with
  1118  // transaction and receipt data.
  1119  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
   1120  	// We don't require chainmu here since we want to maximize the
   1121  	// concurrency of header insertion and receipt insertion.
  1122  	bc.wg.Add(1)
  1123  	defer bc.wg.Done()
  1124  
  1125  	var (
  1126  		ancientBlocks, liveBlocks     types.Blocks
  1127  		ancientReceipts, liveReceipts []types.Receipts
  1128  	)
  1129  	// Do a sanity check that the provided chain is actually ordered and linked
  1130  	for i := 0; i < len(blockChain); i++ {
  1131  		if i != 0 {
  1132  			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
  1133  				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
  1134  					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
  1135  				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(),
  1136  					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
  1137  			}
  1138  		}
  1139  		if blockChain[i].NumberU64() <= ancientLimit {
  1140  			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
  1141  		} else {
  1142  			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
  1143  		}
  1144  	}
  1145  
  1146  	var (
  1147  		stats = struct{ processed, ignored int32 }{}
  1148  		start = time.Now()
  1149  		size  = int64(0)
  1150  	)
  1151  
  1152  	// updateHead updates the head snap-sync block if the inserted blocks are better,
  1153  	// and returns a flag indicating whether the inserted blocks are canonical.
  1154  	updateHead := func(head *types.Block) bool {
  1155  		if !bc.chainmu.TryLock() {
  1156  			return false
  1157  		}
  1158  		defer bc.chainmu.Unlock()
  1159  
  1160  		// Rewind may have occurred, skip in that case.
  1161  		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
  1162  			reorg, err := bc.forker.ReorgNeeded(bc.CurrentSnapBlock(), head.Header())
  1163  			if err != nil {
  1164  				log.Warn("Reorg failed", "err", err)
  1165  				return false
  1166  			} else if !reorg {
  1167  				return false
  1168  			}
  1169  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
  1170  			bc.currentSnapBlock.Store(head.Header())
  1171  			headFastBlockGauge.Update(int64(head.NumberU64()))
  1172  			return true
  1173  		}
  1174  		return false
  1175  	}
  1176  	// writeAncient writes blockchain and corresponding receipt chain into ancient store.
  1177  	//
  1178  	// This function only accepts canonical chain data. All side-chain data will
  1179  	// be reverted eventually.
  1180  	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1181  		first := blockChain[0]
  1182  		last := blockChain[len(blockChain)-1]
  1183  
  1184  		// Ensure genesis is in ancients.
  1185  		if first.NumberU64() == 1 {
  1186  			if frozen, _ := bc.db.Ancients(); frozen == 0 {
  1187  				b := bc.genesisBlock
  1188  				td := bc.genesisBlock.Difficulty()
  1189  				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td)
  1190  				size += writeSize
  1191  				if err != nil {
  1192  					log.Error("Error writing genesis to ancients", "err", err)
  1193  					return 0, err
  1194  				}
  1195  				log.Info("Wrote genesis to ancients")
  1196  			}
  1197  		}
  1198  		// Before writing the blocks to the ancients, we need to ensure that
  1199  		// they correspond to what the headerchain 'expects'.
  1200  		// We only check the last block/header, since it's a contiguous chain.
  1201  		if !bc.HasHeader(last.Hash(), last.NumberU64()) {
  1202  			return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
  1203  		}
  1204  
  1205  		// Write all chain data to ancients.
  1206  		td := bc.GetTd(first.Hash(), first.NumberU64())
  1207  		writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
  1208  		size += writeSize
  1209  		if err != nil {
  1210  			log.Error("Error importing chain data to ancients", "err", err)
  1211  			return 0, err
  1212  		}
  1213  
  1214  		// Write tx indices if any of the following conditions is satisfied:
  1215  		// * the user wants to retain all tx indices (txlookuplimit=0)
  1216  		// * all ancient tx indices need to be retained (txlookuplimit is higher than ancientlimit)
  1217  		// * the block number is large enough to be regarded as a recent block
  1218  		// This means blocks below ancientLimit-txlookupLimit won't be indexed.
  1219  		//
  1220  		// But if `TxIndexTail` is not nil, e.g. Geth was initialized with an
  1221  		// external ancient database, then during setup the blockchain starts a
  1222  		// background routine to re-index all indices in the [ancients - txlookupLimit, ancients)
  1223  		// range. In this case, all tx indices of newly imported blocks should be
  1224  		// generated.
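        		//
        		// Worked example of the rule above (illustrative numbers): with
        		// ancientLimit = 90000 and txLookupLimit = 10000, only blocks
        		// numbered 80000 and above receive lookup entries here; with
        		// txLookupLimit = 0, every block is indexed.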
  1225  		var batch = bc.db.NewBatch()
  1226  		for i, block := range blockChain {
  1227  			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
  1228  				rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1229  			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
  1230  				rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1231  			}
  1232  			stats.processed++
  1233  
  1234  			if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
  1235  				size += int64(batch.ValueSize())
  1236  				if err = batch.Write(); err != nil {
  1237  					snapBlock := bc.CurrentSnapBlock().Number.Uint64()
  1238  					if err := bc.db.TruncateHead(snapBlock + 1); err != nil {
  1239  						log.Error("Can't truncate ancient store after failed insert", "err", err)
  1240  					}
  1241  					return 0, err
  1242  				}
  1243  				batch.Reset()
  1244  			}
  1245  		}
  1246  
  1247  		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1248  		if err := bc.db.Sync(); err != nil {
  1249  			return 0, err
  1250  		}
  1251  		// Update the current snap block because all block data is now present in DB.
  1252  		previousSnapBlock := bc.CurrentSnapBlock().Number.Uint64()
  1253  		if !updateHead(blockChain[len(blockChain)-1]) {
  1254  			// We end up here if the header chain has reorg'ed, and the blocks/receipts
  1255  			// don't match the canonical chain.
  1256  			if err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil {
  1257  				log.Error("Can't truncate ancient store after failed insert", "err", err)
  1258  			}
  1259  			return 0, errSideChainReceipts
  1260  		}
  1261  
  1262  		// Delete block data from the main database.
  1263  		batch.Reset()
  1264  		canonHashes := make(map[common.Hash]struct{})
  1265  		for _, block := range blockChain {
  1266  			canonHashes[block.Hash()] = struct{}{}
  1267  			if block.NumberU64() == 0 {
  1268  				continue
  1269  			}
  1270  			rawdb.DeleteCanonicalHash(batch, block.NumberU64())
  1271  			rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
  1272  		}
  1273  		// Delete side chain hash-to-number mappings.
  1274  		for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
  1275  			if _, canon := canonHashes[nh.Hash]; !canon {
  1276  				rawdb.DeleteHeader(batch, nh.Hash, nh.Number)
  1277  			}
  1278  		}
  1279  		if err := batch.Write(); err != nil {
  1280  			return 0, err
  1281  		}
  1282  		return 0, nil
  1283  	}
  1284  
  1285  	// writeLive writes blockchain and corresponding receipt chain into active store.
  1286  	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1287  		skipPresenceCheck := false
  1288  		batch := bc.db.NewBatch()
  1289  		for i, block := range blockChain {
  1290  			// Short circuit insertion if shutting down or processing failed
  1291  			if bc.insertStopped() {
  1292  				return 0, errInsertionInterrupted
  1293  			}
  1294  			// Short circuit if the owner header is unknown
  1295  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1296  				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
  1297  			}
  1298  			if !skipPresenceCheck {
  1299  				// Ignore if the entire data is already known
  1300  				if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1301  					stats.ignored++
  1302  					continue
  1303  				} else {
  1304  					// If block N is not present, neither are the later blocks.
  1305  					// This should be true, but if we are mistaken, the shortcut
  1306  					// here will only cause overwriting of some existing data
  1307  					skipPresenceCheck = true
  1308  				}
  1309  			}
  1310  			// Write all the data out into the database
  1311  			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
  1312  			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
  1313  			rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed
  1314  
  1315  			// Write everything that belongs to the blocks into the database, so that
  1316  			// we can ensure all components of the body are complete (body, receipts,
  1317  			// tx indexes).
  1318  			if batch.ValueSize() >= ethdb.IdealBatchSize {
  1319  				if err := batch.Write(); err != nil {
  1320  					return 0, err
  1321  				}
  1322  				size += int64(batch.ValueSize())
  1323  				batch.Reset()
  1324  			}
  1325  			stats.processed++
  1326  		}
  1327  		// Write anything that remains in the batch into the database, so that
  1328  		// we can ensure all components of the body are complete (body, receipts,
  1329  		// tx indexes).
  1330  		if batch.ValueSize() > 0 {
  1331  			size += int64(batch.ValueSize())
  1332  			if err := batch.Write(); err != nil {
  1333  				return 0, err
  1334  			}
  1335  		}
  1336  		updateHead(blockChain[len(blockChain)-1])
  1337  		return 0, nil
  1338  	}
  1339  
  1340  	// Write downloaded chain data and corresponding receipt chain data
  1341  	if len(ancientBlocks) > 0 {
  1342  		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
  1343  			if err == errInsertionInterrupted {
  1344  				return 0, nil
  1345  			}
  1346  			return n, err
  1347  		}
  1348  	}
  1349  	// Write the tx index tail (block number from which we index) before writing any live blocks
  1350  	if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
  1351  		// The tx index tail can only be one of the following two options:
  1352  		// * 0: all ancient blocks have been indexed
  1353  		// * ancient-limit: the indices of blocks before ancient-limit are ignored
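        		// For example (illustrative numbers): with ancientLimit = 90000 and
        		// txLookupLimit = 10000 the tail is initialized to 80000, while with
        		// txLookupLimit = 0 it is initialized to 0.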
  1354  		if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
  1355  			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
  1356  				rawdb.WriteTxIndexTail(bc.db, 0)
  1357  			} else {
  1358  				rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
  1359  			}
  1360  		}
  1361  	}
  1362  	if len(liveBlocks) > 0 {
  1363  		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
  1364  			if err == errInsertionInterrupted {
  1365  				return 0, nil
  1366  			}
  1367  			return n, err
  1368  		}
  1369  	}
  1370  
  1371  	head := blockChain[len(blockChain)-1]
  1372  	context := []interface{}{
  1373  		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
  1374  		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
  1375  		"size", common.StorageSize(size),
  1376  	}
  1377  	if stats.ignored > 0 {
  1378  		context = append(context, []interface{}{"ignored", stats.ignored}...)
  1379  	}
  1380  	log.Debug("Imported new block receipts", context...)
  1381  
  1382  	return 0, nil
  1383  }
  1384  
  1385  // writeBlockWithoutState writes only the block and its metadata to the database,
  1386  // but does not write any state. This is used to construct competing side forks
  1387  // up to the point where they exceed the canonical total difficulty.
  1388  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1389  	if bc.insertStopped() {
  1390  		return errInsertionInterrupted
  1391  	}
  1392  
  1393  	batch := bc.db.NewBatch()
  1394  	rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
  1395  	rawdb.WriteBlock(batch, block)
  1396  	if err := batch.Write(); err != nil {
  1397  		log.Crit("Failed to write block into disk", "err", err)
  1398  	}
  1399  	return nil
  1400  }
  1401  
  1402  // writeKnownBlock updates the head block flag with a known block
  1403  // and triggers a chain reorg if necessary.
  1404  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1405  	current := bc.CurrentBlock()
  1406  	if block.ParentHash() != current.Hash() {
  1407  		if err := bc.reorg(current, block); err != nil {
  1408  			return err
  1409  		}
  1410  	}
  1411  	bc.writeHeadBlock(block)
  1412  	return nil
  1413  }
  1414  
  1415  // writeBlockWithState writes block, metadata and corresponding state data to the
  1416  // database.
  1417  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
  1418  	// Calculate the total difficulty of the block
  1419  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1420  	if ptd == nil {
  1421  		return consensus.ErrUnknownAncestor
  1422  	}
  1423  	// Make sure no inconsistent state is leaked during insertion
  1424  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
  1425  
  1426  	// Regardless of the canonical status, write the block itself to the database.
  1427  	//
  1428  	// Note that all components of the block (td, hash->number map, header, body,
  1429  	// receipts) should be written atomically; blockBatch is used to contain them all.
  1430  	blockBatch := bc.db.NewBatch()
  1431  	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
  1432  	rawdb.WriteBlock(blockBatch, block)
  1433  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
  1434  	rawdb.WritePreimages(blockBatch, state.Preimages())
  1435  	if err := blockBatch.Write(); err != nil {
  1436  		log.Crit("Failed to write block into disk", "err", err)
  1437  	}
  1438  	// Commit all cached state changes into underlying memory database.
  1439  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
  1440  	if err != nil {
  1441  		return err
  1442  	}
  1443  	// If we're running an archive node, always flush
  1444  	if bc.cacheConfig.TrieDirtyDisabled {
  1445  		return bc.triedb.Commit(root, false)
  1446  	}
  1447  
  1448  	// Full but not archive node, do proper garbage collection
  1449  	bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
  1450  	bc.triegc.Push(trieGcEntry{root, block.Header().Time}, -int64(block.NumberU64()))
  1451  
  1452  	blockLimit := int64(block.NumberU64()) - int64(bc.cacheConfig.TriesInMemory)   // only cleared if below that
  1453  	timeLimit := time.Now().Unix() - int64(bc.cacheConfig.TrieRetention.Seconds()) // only cleared if less than that
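        	// For illustration: with TriesInMemory = 128 at block 1000, blockLimit is
        	// 872, so only trie entries for blocks at or below 872 (and with timestamps
        	// at or before timeLimit) are eligible for dereferencing below.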
  1454  
  1455  	if blockLimit > 0 && timeLimit > 0 {
  1456  		// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1457  		var (
  1458  			nodes, imgs = bc.triedb.Size()
  1459  			limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
  1460  		)
  1461  		if nodes > limit || imgs > 4*1024*1024 {
  1462  			bc.triedb.Cap(limit - ethdb.IdealBatchSize)
  1463  		}
  1464  		var prevEntry *trieGcEntry
  1465  		var prevNum uint64
  1466  		// Garbage collect anything below our required write retention
  1467  		for !bc.triegc.Empty() {
  1468  			triegcEntry, number := bc.triegc.Pop()
  1469  			if uint64(-number) > uint64(blockLimit) || triegcEntry.Timestamp > uint64(timeLimit) {
  1470  				bc.triegc.Push(triegcEntry, number)
  1471  				break
  1472  			}
  1473  			if prevEntry != nil {
  1474  				bc.triedb.Dereference(prevEntry.Root)
  1475  			}
  1476  			prevEntry = &triegcEntry
  1477  			prevNum = uint64(-number)
  1478  		}
  1479  		flushInterval := time.Duration(atomic.LoadInt64(&bc.flushInterval))
  1480  		// If we exceeded our time allowance, flush an entire trie to disk
  1481  		if bc.gcproc > flushInterval && prevEntry != nil {
  1482  			// If the header is missing (canonical chain behind), we're reorging a low
  1483  			// diff sidechain. Suspend committing until this operation is completed.
  1484  			header := bc.GetHeaderByNumber(prevNum)
  1485  			if header == nil {
  1486  				log.Warn("Reorg in progress, trie commit postponed")
  1487  			} else {
  1488  				// If we're exceeding limits but haven't reached a large enough memory gap,
  1489  				// warn the user that the system is becoming unstable.
  1490  				if blockLimit < int64(bc.lastWrite+bc.cacheConfig.TriesInMemory) && bc.gcproc >= 2*flushInterval {
  1491  					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(prevNum-bc.lastWrite)/float64(bc.cacheConfig.TriesInMemory))
  1492  				}
  1493  				// Flush an entire trie and restart the counters
  1494  				bc.triedb.Commit(header.Root, true)
  1495  				bc.lastWrite = prevNum
  1496  				bc.gcproc = 0
  1497  			}
  1498  		}
  1499  		if prevEntry != nil {
  1500  			bc.triedb.Dereference(prevEntry.Root)
  1501  		}
  1502  	}
  1503  	return nil
  1504  }
  1505  
  1506  // WriteBlockAndSetHead writes the given block and all associated state to the database,
  1507  // and applies the block as the new chain head.
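        //
        // A minimal usage sketch (block, receipts, logs and statedb are placeholders
        // produced by block processing; passing emitHeadEvent=true fires a
        // ChainHeadEvent immediately):
        //
        //	status, err := bc.WriteBlockAndSetHead(block, receipts, logs, statedb, true)
        //	if err == nil && status == CanonStatTy {
        //		// the block is now the canonical head
        //	}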
  1508  func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1509  	if !bc.chainmu.TryLock() {
  1510  		return NonStatTy, errChainStopped
  1511  	}
  1512  	defer bc.chainmu.Unlock()
  1513  
  1514  	return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent)
  1515  }
  1516  
  1517  // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
  1518  // This function expects the chain mutex to be held.
  1519  func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1520  	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
  1521  		return NonStatTy, err
  1522  	}
  1523  	currentBlock := bc.CurrentBlock()
  1524  	reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
  1525  	if err != nil {
  1526  		return NonStatTy, err
  1527  	}
  1528  	if reorg {
  1529  		// Reorganise the chain if the parent is not the head block
  1530  		if block.ParentHash() != currentBlock.Hash() {
  1531  			if err := bc.reorg(currentBlock, block); err != nil {
  1532  				return NonStatTy, err
  1533  			}
  1534  		}
  1535  		status = CanonStatTy
  1536  	} else {
  1537  		status = SideStatTy
  1538  	}
  1539  	// Set new head.
  1540  	if status == CanonStatTy {
  1541  		bc.writeHeadBlock(block)
  1542  	}
  1543  	bc.futureBlocks.Remove(block.Hash())
  1544  
  1545  	if status == CanonStatTy {
  1546  		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
  1547  		if len(logs) > 0 {
  1548  			bc.logsFeed.Send(logs)
  1549  		}
  1550  		// In theory, we should fire a ChainHeadEvent when we inject
  1551  		// a canonical block, but sometimes we can insert a batch of
  1552  		// canonical blocks. To avoid firing too many ChainHeadEvents,
  1553  		// we fire one accumulated ChainHeadEvent afterwards and disable
  1554  		// firing the event here.
  1555  		if emitHeadEvent {
  1556  			bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1557  		}
  1558  	} else {
  1559  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1560  	}
  1561  	return status, nil
  1562  }
  1563  
  1564  // addFutureBlock checks if the block is within the max allowed window to get
  1565  // accepted for future processing, and returns an error if the block is too far
  1566  // ahead and was not added.
  1567  //
  1568  // TODO: after the transition, future blocks shouldn't be kept, because
  1569  // they are no longer checked on the Geth side.
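        //
        // For example: a block whose timestamp is within maxTimeFutureBlocks seconds
        // of local time is queued for later processing, while one further ahead is
        // rejected with an error (PoS blocks, with zero difficulty, are never queued).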
  1570  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1571  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1572  	if block.Time() > max {
  1573  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1574  	}
  1575  	if block.Difficulty().Cmp(common.Big0) == 0 {
  1576  		// Never add PoS blocks into the future queue
  1577  		return nil
  1578  	}
  1579  	bc.futureBlocks.Add(block.Hash(), block)
  1580  	return nil
  1581  }
  1582  
  1583  // InsertChain attempts to insert the given batch of blocks into the canonical
  1584  // chain or, otherwise, create a fork. If an error is returned, it will return
  1585  // the index number of the failing block as well as an error describing what
  1586  // went wrong. After insertion is done, all accumulated events will be fired.
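        //
        // A hedged usage sketch (blocks is a placeholder slice, which must already be
        // ordered and linked or the call fails fast):
        //
        //	if index, err := bc.InsertChain(blocks); err != nil {
        //		log.Error("Chain import failed", "index", index, "err", err)
        //	}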
  1587  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1588  	// Sanity check that we have something meaningful to import
  1589  	if len(chain) == 0 {
  1590  		return 0, nil
  1591  	}
  1592  	bc.blockProcFeed.Send(true)
  1593  	defer bc.blockProcFeed.Send(false)
  1594  
  1595  	// Do a sanity check that the provided chain is actually ordered and linked.
  1596  	for i := 1; i < len(chain); i++ {
  1597  		block, prev := chain[i], chain[i-1]
  1598  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1599  			log.Error("Non contiguous block insert",
  1600  				"number", block.Number(),
  1601  				"hash", block.Hash(),
  1602  				"parent", block.ParentHash(),
  1603  				"prevnumber", prev.Number(),
  1604  				"prevhash", prev.Hash(),
  1605  			)
  1606  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(),
  1607  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1608  		}
  1609  	}
  1610  	// Pre-checks passed, start the full block imports
  1611  	if !bc.chainmu.TryLock() {
  1612  		return 0, errChainStopped
  1613  	}
  1614  	defer bc.chainmu.Unlock()
  1615  	return bc.insertChain(chain, true, true)
  1616  }
  1617  
  1618  // insertChain is the internal implementation of InsertChain, which assumes that
  1619  // 1) chains are contiguous, and 2) The chain mutex is held.
  1620  //
  1621  // This method is split out so that import batches that require re-injecting
  1622  // historical blocks can do so without releasing the lock, which could lead to
  1623  // racy behaviour. If a sidechain import is in progress, and the historic state
  1624  // is imported, but then a new canon-head is added before the actual sidechain
  1625  // completes, then the historic state could be pruned again.
  1626  func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) (int, error) {
  1627  	// If the chain is terminating, don't even bother starting up.
  1628  	if bc.insertStopped() {
  1629  		return 0, nil
  1630  	}
  1631  
  1632  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1633  	SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1634  
  1635  	var (
  1636  		stats     = insertStats{startTime: mclock.Now()}
  1637  		lastCanon *types.Block
  1638  	)
  1639  	// Fire a single chain head event if we've progressed the chain
  1640  	defer func() {
  1641  		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1642  			bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
  1643  		}
  1644  	}()
  1645  	// Start the parallel header verifier
  1646  	headers := make([]*types.Header, len(chain))
  1647  	seals := make([]bool, len(chain))
  1648  
  1649  	for i, block := range chain {
  1650  		headers[i] = block.Header()
  1651  		seals[i] = verifySeals
  1652  	}
  1653  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1654  	defer close(abort)
  1655  
  1656  	// Peek the error for the first block to decide which import path to take
  1657  	it := newInsertIterator(chain, results, bc.validator)
  1658  	block, err := it.next()
  1659  
  1660  	// Left-trim all the known blocks that don't need to build snapshot
  1661  	if bc.skipBlock(err, it) {
  1662  		// First block (and state) is known
  1663  		//   1. We did a roll-back, and should now do a re-import
  1664  		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
  1665  		//      from the canonical chain, which has not been verified.
  1666  		// Skip all known blocks that are behind us.
  1667  		var (
  1668  			reorg   bool
  1669  			current = bc.CurrentBlock()
  1670  		)
  1671  		for block != nil && bc.skipBlock(err, it) {
  1672  			reorg, err = bc.forker.ReorgNeeded(current, block.Header())
  1673  			if err != nil {
  1674  				return it.index, err
  1675  			}
  1676  			if reorg {
  1677  				// Switch to import mode if the forker says the reorg is necessary
  1678  				// and also the block is not on the canonical chain.
  1679  				// In eth2 the forker always returns true for reorg decision (blindly trusting
  1680  				// the external consensus engine), but in order to prevent the unnecessary
  1681  				// reorgs when importing known blocks, the special case is handled here.
  1682  				if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
  1683  					break
  1684  				}
  1685  			}
  1686  			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
  1687  			stats.ignored++
  1688  
  1689  			block, err = it.next()
  1690  		}
  1691  		// The remaining blocks are still known blocks, and the only scenario here is:
  1692  		// during a fast sync, the pivot point was already submitted but a rollback
  1693  		// happened. The node then reset the head full block to a lower height via `rollback`
  1694  		// and left a few known blocks in the database.
  1695  		//
  1696  		// When the node runs a fast sync again, it can re-import a batch of known blocks via
  1697  		// `insertChain` while some of them have a higher total difficulty than the current
  1698  		// head full block (the new pivot point).
  1699  		for block != nil && bc.skipBlock(err, it) {
  1700  			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
  1701  			if err := bc.writeKnownBlock(block); err != nil {
  1702  				return it.index, err
  1703  			}
  1704  			lastCanon = block
  1705  
  1706  			block, err = it.next()
  1707  		}
  1708  		// Falls through to the block import
  1709  	}
  1710  	switch {
  1711  	// First block is pruned
  1712  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1713  		if setHead {
  1714  			// First block is pruned, insert as sidechain and reorg only if TD grows enough
  1715  			log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
  1716  			return bc.insertSideChain(block, it)
  1717  		} else {
  1718  			// We're post-merge and the parent is pruned, try to recover the parent state
  1719  			log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
  1720  			_, err := bc.recoverAncestors(block)
  1721  			return it.index, err
  1722  		}
  1723  	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
  1724  	case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
  1725  		for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
  1726  			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
  1727  			if err := bc.addFutureBlock(block); err != nil {
  1728  				return it.index, err
  1729  			}
  1730  			block, err = it.next()
  1731  		}
  1732  		stats.queued += it.processed()
  1733  		stats.ignored += it.remaining()
  1734  
  1735  		// If there are any still remaining, mark as ignored
  1736  		return it.index, err
  1737  
  1738  	// Some other error (except ErrKnownBlock) occurred, abort.
  1739  	// ErrKnownBlock is allowed here since some known blocks
  1740  	// still need re-execution to generate snapshots that are missing
  1741  	case err != nil && !errors.Is(err, ErrKnownBlock):
  1742  		bc.futureBlocks.Remove(block.Hash())
  1743  		stats.ignored += len(it.chain)
  1744  		bc.reportBlock(block, nil, err)
  1745  		return it.index, err
  1746  	}
  1747  	// No validation errors for the first block (or chain prefix skipped)
  1748  	var activeState *state.StateDB
  1749  	defer func() {
  1750  		// The chain importer is starting and stopping trie prefetchers. If a bad
  1751  		// block or other error is hit however, an early return may not properly
  1752  		// terminate the background threads. This defer ensures that we clean up
  1753  		// any dangling prefetcher, without deferring each one and holding on to live refs.
  1754  		if activeState != nil {
  1755  			activeState.StopPrefetcher()
  1756  		}
  1757  	}()
  1758  
  1759  	for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
  1760  		// If the chain is terminating, stop processing blocks
  1761  		if bc.insertStopped() {
  1762  			log.Debug("Abort during block processing")
  1763  			break
  1764  		}
  1765  		// If the header is a banned one, straight out abort
  1766  		if BadHashes[block.Hash()] {
  1767  			bc.reportBlock(block, nil, ErrBannedHash)
  1768  			return it.index, ErrBannedHash
  1769  		}
  1770  		// If the block is known (in the middle of the chain), it's a special case for
  1771  		// Clique blocks where they can share state among each other, so importing an
  1772  		// older block might complete the state of the subsequent one. In this case,
  1773  		// just skip the block (we already validated it once fully (and crashed), since
  1774  		// its header and body were already in the database). But if the corresponding
  1775  		// snapshot layer is missing, forcibly rerun the execution to build it.
  1776  		if bc.skipBlock(err, it) {
  1777  			logger := log.Debug
  1778  			if bc.chainConfig.Clique == nil {
  1779  				logger = log.Warn
  1780  			}
  1781  			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
  1782  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1783  				"root", block.Root())
  1784  
  1785  			// Special case. Commit the empty receipt slice if we meet the known
  1786  			// block in the middle. It can only happen in the clique chain. Whenever
  1787  			// we insert blocks via `insertSideChain`, we only commit `td`, `header`
  1788  			// and `body` if they are non-existent. Since we don't have receipts without
  1789  			// re-execution, there is nothing to commit. But if the sidechain is eventually
  1790  			// adopted as the canonical chain, it needs to be re-executed for the missing
  1791  			// state; in this special case (skipped re-execution) we would lose
  1792  			// the empty receipt entry.
  1793  			if len(block.Transactions()) == 0 {
  1794  				rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
  1795  			} else {
  1796  				log.Error("Please file an issue, skip known block execution without receipt",
  1797  					"hash", block.Hash(), "number", block.NumberU64())
  1798  			}
  1799  			if err := bc.writeKnownBlock(block); err != nil {
  1800  				return it.index, err
  1801  			}
  1802  			stats.processed++
  1803  
  1804  			// We can assume that logs are empty here, since the only way for consecutive
  1805  			// Clique blocks to have the same state is if there are no transactions.
  1806  			lastCanon = block
  1807  			continue
  1808  		}
  1809  
  1810  		// Retrieve the parent block and its state to execute on top
  1811  		start := time.Now()
  1812  		parent := it.previous()
  1813  		if parent == nil {
  1814  			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1815  		}
  1816  		statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
  1817  		if err != nil {
  1818  			return it.index, err
  1819  		}
  1820  
  1821  		// Enable prefetching to pull in trie node paths while processing transactions
  1822  		statedb.StartPrefetcher("chain")
  1823  		activeState = statedb
  1824  
  1825  		// If we have a followup block, run that against the current state to pre-cache
  1826  		// transactions and probabilistically some of the account/storage trie nodes.
  1827  		var followupInterrupt uint32
  1828  		if !bc.cacheConfig.TrieCleanNoPrefetch {
  1829  			if followup, err := it.peek(); followup != nil && err == nil {
  1830  				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
  1831  
  1832  				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
  1833  					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
  1834  
  1835  					blockPrefetchExecuteTimer.Update(time.Since(start))
  1836  					if atomic.LoadUint32(interrupt) == 1 {
  1837  						blockPrefetchInterruptMeter.Mark(1)
  1838  					}
  1839  				}(time.Now(), followup, throwaway, &followupInterrupt)
  1840  			}
  1841  		}
  1842  
  1843  		// Process block using the parent state as reference point
  1844  		substart := time.Now()
  1845  		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
  1846  		if err != nil {
  1847  			bc.reportBlock(block, receipts, err)
  1848  			atomic.StoreUint32(&followupInterrupt, 1)
  1849  			return it.index, err
  1850  		}
  1851  
  1852  		// Update the metrics touched during block processing
  1853  		accountReadTimer.Update(statedb.AccountReads)                 // Account reads are complete, we can mark them
  1854  		storageReadTimer.Update(statedb.StorageReads)                 // Storage reads are complete, we can mark them
  1855  		accountUpdateTimer.Update(statedb.AccountUpdates)             // Account updates are complete, we can mark them
  1856  		storageUpdateTimer.Update(statedb.StorageUpdates)             // Storage updates are complete, we can mark them
  1857  		snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
  1858  		snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
  1859  		triehash := statedb.AccountHashes + statedb.StorageHashes     // Save to not double count in validation
  1860  		trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
  1861  		trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
  1862  
  1863  		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
  1864  
  1865  		// Validate the state using the default validator
  1866  		substart = time.Now()
  1867  		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1868  			bc.reportBlock(block, receipts, err)
  1869  			atomic.StoreUint32(&followupInterrupt, 1)
  1870  			return it.index, err
  1871  		}
  1872  		proctime := time.Since(start)
  1873  
  1874  		// Update the metrics touched during block validation
  1875  		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
  1876  		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
  1877  		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
  1878  
  1879  		// Write the block to the chain and get the status.
  1880  		substart = time.Now()
  1881  		var status WriteStatus
  1882  		if !setHead {
  1883  			// Don't set the head, only insert the block
  1884  			err = bc.writeBlockWithState(block, receipts, statedb)
  1885  		} else {
  1886  			status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
  1887  		}
  1888  		atomic.StoreUint32(&followupInterrupt, 1)
  1889  		if err != nil {
  1890  			return it.index, err
  1891  		}
  1892  		// Update the metrics touched during block commit
  1893  		accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
  1894  		storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
  1895  		snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
  1896  		triedbCommitTimer.Update(statedb.TrieDBCommits)     // Triedb commits are complete, we can mark them
  1897  
  1898  		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
  1899  		blockInsertTimer.UpdateSince(start)
  1900  
  1901  		// Report the import stats before returning the various results
  1902  		stats.processed++
  1903  		stats.usedGas += usedGas
  1904  
  1905  		dirty, _ := bc.triedb.Size()
  1906  		stats.report(chain, it.index, dirty, setHead)
  1907  
  1908  		if !setHead {
  1909  			// After merge we expect few side chains. Simply count
  1910  			// all blocks the CL gives us for GC processing time
  1911  			bc.gcproc += proctime
  1912  
  1913  			return it.index, nil // Direct block insertion of a single block
  1914  		}
  1915  		switch status {
  1916  		case CanonStatTy:
  1917  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1918  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1919  				"elapsed", common.PrettyDuration(time.Since(start)),
  1920  				"root", block.Root())
  1921  
  1922  			lastCanon = block
  1923  
  1924  			// Only count canonical blocks for GC processing time
  1925  			bc.gcproc += proctime
  1926  
  1927  		case SideStatTy:
  1928  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1929  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1930  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1931  				"root", block.Root())
  1932  
  1933  		default:
  1934  			// This in theory is impossible, but let's be nice to our future selves and leave
  1935  			// a log, instead of trying to track down block imports that don't emit logs.
  1936  			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
  1937  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1938  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1939  				"root", block.Root())
  1940  		}
  1941  	}
  1942  
  1943  	// Any blocks remaining here? The only ones we care about are the future ones
  1944  	if block != nil && errors.Is(err, consensus.ErrFutureBlock) {
  1945  		if err := bc.addFutureBlock(block); err != nil {
  1946  			return it.index, err
  1947  		}
  1948  		block, err = it.next()
  1949  
  1950  		for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() {
  1951  			if err := bc.addFutureBlock(block); err != nil {
  1952  				return it.index, err
  1953  			}
  1954  			stats.queued++
  1955  		}
  1956  	}
  1957  	stats.ignored += it.remaining()
  1958  
  1959  	return it.index, err
  1960  }
  1961  
  1962  // insertSideChain is called when an import batch hits upon a pruned ancestor
  1963  // error, which happens when a sidechain with a sufficiently old fork-block is
  1964  // found.
  1965  //
  1966  // The method writes all (header-and-body-valid) blocks to disk, then tries to
  1967  // switch over to the new chain if the TD exceeded the current chain.
  1968  // insertSideChain is only used pre-merge.
  1969  func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
  1970  	var (
  1971  		externTd  *big.Int
  1972  		lastBlock = block
  1973  		current   = bc.CurrentBlock()
  1974  	)
  1975  	// The first sidechain block error is already verified to be ErrPrunedAncestor.
  1976  	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
  1977  	// ones. Any other error means that the block is invalid, and should not be written
  1978  	// to disk.
  1979  	err := consensus.ErrPrunedAncestor
  1980  	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
  1981  		// Check the canonical state root for that number
  1982  		if number := block.NumberU64(); current.Number.Uint64() >= number {
  1983  			canonical := bc.GetBlockByNumber(number)
  1984  			if canonical != nil && canonical.Hash() == block.Hash() {
  1985  				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
  1986  
  1987  				// Collect the TD of the block. Since we know it's a canon one,
  1988  				// we can get it directly, and not (like further below) use
  1989  				// the parent and then add the block on top
  1990  				externTd = bc.GetTd(block.Hash(), block.NumberU64())
  1991  				continue
  1992  			}
  1993  			if canonical != nil && canonical.Root() == block.Root() {
  1994  				// This is most likely a shadow-state attack. When a fork is imported into the
  1995  				// database, and it eventually reaches a block height which is not pruned, we
  1996  				// just found that the state already exists! This means that the sidechain block
  1997  				// refers to a state which already exists in our canon chain.
  1998  				//
  1999  				// If left unchecked, we would now proceed importing the blocks, without actually
  2000  				// having verified the state of the previous blocks.
  2001  				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  2002  
  2003  				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
  2004  				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
  2005  				// mechanism.
  2006  				return it.index, errors.New("sidechain ghost-state attack")
  2007  			}
  2008  		}
  2009  		if externTd == nil {
  2010  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  2011  		}
  2012  		externTd = new(big.Int).Add(externTd, block.Difficulty())
  2013  
  2014  		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  2015  			start := time.Now()
  2016  			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
  2017  				return it.index, err
  2018  			}
  2019  			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  2020  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  2021  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  2022  				"root", block.Root())
  2023  		}
  2024  		lastBlock = block
  2025  	}
  2026  	// At this point, we've written all sidechain blocks to database. Loop ended
  2027  	// either on some other error or all were processed. If there was some other
  2028  	// error, we can ignore the rest of those blocks.
  2029  	//
  2030  	// If the externTd was larger than our local TD, we now need to reimport the previous
  2031  	// blocks to regenerate the required state
  2032  	reorg, err := bc.forker.ReorgNeeded(current, lastBlock.Header())
  2033  	if err != nil {
  2034  		return it.index, err
  2035  	}
  2036  	if !reorg {
  2037  		localTd := bc.GetTd(current.Hash(), current.Number.Uint64())
  2038  		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  2039  		return it.index, err
  2040  	}
  2041  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2042  	var (
  2043  		hashes  []common.Hash
  2044  		numbers []uint64
  2045  	)
  2046  	parent := it.previous()
  2047  	for parent != nil && !bc.HasState(parent.Root) {
  2048  		hashes = append(hashes, parent.Hash())
  2049  		numbers = append(numbers, parent.Number.Uint64())
  2050  
  2051  		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  2052  	}
  2053  	if parent == nil {
  2054  		return it.index, errors.New("missing parent")
  2055  	}
  2056  	// Import all the pruned blocks to make the state available
  2057  	var (
  2058  		blocks []*types.Block
  2059  		memory uint64
  2060  	)
  2061  	for i := len(hashes) - 1; i >= 0; i-- {
  2062  		// Append the next block to our batch
  2063  		block := bc.GetBlock(hashes[i], numbers[i])
  2064  
  2065  		blocks = append(blocks, block)
  2066  		memory += block.Size()
  2067  
  2068  		// If memory use grew too large, import and continue. Sadly we need to discard
  2069  		// all raised events and logs from notifications since we're too heavy on the
  2070  		// memory here.
  2071  		if len(blocks) >= 2048 || memory > 64*1024*1024 {
  2072  			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  2073  			if _, err := bc.insertChain(blocks, false, true); err != nil {
  2074  				return 0, err
  2075  			}
  2076  			blocks, memory = blocks[:0], 0
  2077  
  2078  			// If the chain is terminating, stop processing blocks
  2079  			if bc.insertStopped() {
  2080  				log.Debug("Abort during blocks processing")
  2081  				return 0, nil
  2082  			}
  2083  		}
  2084  	}
  2085  	if len(blocks) > 0 {
  2086  		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  2087  		return bc.insertChain(blocks, false, true)
  2088  	}
  2089  	return 0, nil
  2090  }
  2091  
  2092  // recoverAncestors finds the closest ancestor with available state and re-executes
  2093  // all the ancestor blocks since that one.
  2094  // recoverAncestors is only used post-merge.
  2095  // We return the hash of the latest block that we could correctly validate.
  2096  func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) {
  2097  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2098  	var (
  2099  		hashes  []common.Hash
  2100  		numbers []uint64
  2101  		parent  = block
  2102  	)
  2103  	for parent != nil && !bc.HasState(parent.Root()) {
  2104  		hashes = append(hashes, parent.Hash())
  2105  		numbers = append(numbers, parent.NumberU64())
  2106  		parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  2107  
  2108  		// If the chain is terminating, stop iteration
  2109  		if bc.insertStopped() {
  2110  			log.Debug("Abort during blocks iteration")
  2111  			return common.Hash{}, errInsertionInterrupted
  2112  		}
  2113  	}
  2114  	if parent == nil {
  2115  		return common.Hash{}, errors.New("missing parent")
  2116  	}
  2117  	// Import all the pruned blocks to make the state available
  2118  	for i := len(hashes) - 1; i >= 0; i-- {
  2119  		// If the chain is terminating, stop processing blocks
  2120  		if bc.insertStopped() {
  2121  			log.Debug("Abort during blocks processing")
  2122  			return common.Hash{}, errInsertionInterrupted
  2123  		}
  2124  		var b *types.Block
  2125  		if i == 0 {
  2126  			b = block
  2127  		} else {
  2128  			b = bc.GetBlock(hashes[i], numbers[i])
  2129  		}
  2130  		if _, err := bc.insertChain(types.Blocks{b}, false, false); err != nil {
  2131  			return b.ParentHash(), err
  2132  		}
  2133  	}
  2134  	return block.Hash(), nil
  2135  }
  2136  
  2137  // collectLogs collects the logs that were generated or removed during
  2138  // the processing of a block. These logs are later announced as deleted or reborn.
  2139  func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
  2140  	receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
  2141  	receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.BaseFee(), b.Transactions())
  2142  
  2143  	var logs []*types.Log
  2144  	for _, receipt := range receipts {
  2145  		for _, log := range receipt.Logs {
  2146  			l := *log
  2147  			if removed {
  2148  				l.Removed = true
  2149  			}
  2150  			logs = append(logs, &l)
  2151  		}
  2152  	}
  2153  	return logs
  2154  }
  2155  
  2156  // reorg takes two blocks, an old chain and a new chain, and reconstructs the
  2157  // blocks, inserting them to be part of the new canonical chain while accumulating
  2158  // potentially missing transactions and posting events about them.
  2159  // Note the new head block won't be processed here; callers need to handle it
  2160  // externally.
  2161  func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
  2162  	var (
  2163  		newChain    types.Blocks
  2164  		oldChain    types.Blocks
  2165  		commonBlock *types.Block
  2166  
  2167  		deletedTxs []common.Hash
  2168  		addedTxs   []common.Hash
  2169  	)
  2170  	oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  2171  	if oldBlock == nil {
  2172  		return errors.New("current head block missing")
  2173  	}
  2174  	newBlock := newHead
  2175  
  2176  	// Reduce the longer chain to the same number as the shorter one
  2177  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  2178  		// Old chain is longer, gather all transactions and logs as deleted ones
  2179  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  2180  			oldChain = append(oldChain, oldBlock)
  2181  			for _, tx := range oldBlock.Transactions() {
  2182  				deletedTxs = append(deletedTxs, tx.Hash())
  2183  			}
  2184  		}
  2185  	} else {
  2186  		// New chain is longer, stash all blocks away for subsequent insertion
  2187  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  2188  			newChain = append(newChain, newBlock)
  2189  		}
  2190  	}
  2191  	if oldBlock == nil {
  2192  		return errors.New("invalid old chain")
  2193  	}
  2194  	if newBlock == nil {
  2195  		return errors.New("invalid new chain")
  2196  	}
  2197  	// Both sides of the reorg are at the same number, reduce both until the common
  2198  	// ancestor is found
  2199  	for {
  2200  		// If the common ancestor was found, bail out
  2201  		if oldBlock.Hash() == newBlock.Hash() {
  2202  			commonBlock = oldBlock
  2203  			break
  2204  		}
  2205  		// Remove an old block as well as stash away a new block
  2206  		oldChain = append(oldChain, oldBlock)
  2207  		for _, tx := range oldBlock.Transactions() {
  2208  			deletedTxs = append(deletedTxs, tx.Hash())
  2209  		}
  2210  		newChain = append(newChain, newBlock)
  2211  
  2212  		// Step back with both chains
  2213  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  2214  		if oldBlock == nil {
  2215  			return fmt.Errorf("invalid old chain")
  2216  		}
  2217  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  2218  		if newBlock == nil {
  2219  			return fmt.Errorf("invalid new chain")
  2220  		}
  2221  	}
  2222  
  2223  	// Ensure the user sees large reorgs
  2224  	if len(oldChain) > 0 {
  2225  		logFn := log.Info
  2226  		msg := "Chain reorg detected"
  2227  		if len(oldChain) > 63 {
  2228  			msg = "Large chain reorg detected"
  2229  			logFn = log.Warn
  2230  		}
  2231  		var addFromHash common.Hash
  2232  		if len(newChain) > 0 {
  2233  			addFromHash = newChain[0].Hash()
  2234  		}
  2235  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  2236  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", addFromHash)
  2237  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2238  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  2239  		blockReorgMeter.Mark(1)
  2240  	} else if len(newChain) > 0 {
  2241  		// A special case happens in the post-merge stage when the current head is
  2242  		// an ancestor of the new head while the two blocks are not consecutive.
  2243  		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number(), "hash", newChain[0].Hash())
  2244  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2245  	} else {
  2246  		// len(newChain) == 0 && len(oldChain) > 0
  2247  		// rewind the canonical chain to a lower point.
  2248  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
  2249  	}
  2250  	// Insert the new chain (except the head block) in reverse order,
  2251  	// taking care of the proper incremental order.
  2252  	for i := len(newChain) - 1; i >= 1; i-- {
  2253  		// Insert the block in the canonical way, re-writing history
  2254  		bc.writeHeadBlock(newChain[i])
  2255  
  2256  		// Collect the new added transactions.
  2257  		for _, tx := range newChain[i].Transactions() {
  2258  			addedTxs = append(addedTxs, tx.Hash())
  2259  		}
  2260  	}
  2261  
  2262  	// Delete useless indexes right now, which include the non-canonical
  2263  	// transaction indexes and canonical chain indexes above the head.
  2264  	indexesBatch := bc.db.NewBatch()
  2265  	for _, tx := range types.HashDifference(deletedTxs, addedTxs) {
  2266  		rawdb.DeleteTxLookupEntry(indexesBatch, tx)
  2267  	}
  2268  
  2269  	// Delete all hash markers that are not part of the new canonical chain.
  2270  	// Because the reorg function does not handle the new chain head, all hash
  2271  	// markers greater than or equal to the new chain head should be deleted.
  2272  	number := commonBlock.NumberU64()
  2273  	if len(newChain) > 1 {
  2274  		number = newChain[1].NumberU64()
  2275  	}
  2276  	for i := number + 1; ; i++ {
  2277  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  2278  		if hash == (common.Hash{}) {
  2279  			break
  2280  		}
  2281  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  2282  	}
  2283  	if err := indexesBatch.Write(); err != nil {
  2284  		log.Crit("Failed to delete useless indexes", "err", err)
  2285  	}
  2286  
  2287  	// Send out events for logs from the old canon chain, and 'reborn'
  2288  	// logs from the new canon chain. The number of logs can be very
  2289  	// high, so the events are sent in batches of size around 512.
  2290  
  2291  	// Deleted logs + blocks:
  2292  	var deletedLogs []*types.Log
  2293  	for i := len(oldChain) - 1; i >= 0; i-- {
  2294  		// Also send event for blocks removed from the canon chain.
  2295  		bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  2296  
  2297  		// Collect deleted logs for notification
  2298  		if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 {
  2299  			deletedLogs = append(deletedLogs, logs...)
  2300  		}
  2301  		if len(deletedLogs) > 512 {
  2302  			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2303  			deletedLogs = nil
  2304  		}
  2305  	}
  2306  	if len(deletedLogs) > 0 {
  2307  		bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2308  	}
  2309  
  2310  	// New logs:
  2311  	var rebirthLogs []*types.Log
  2312  	for i := len(newChain) - 1; i >= 1; i-- {
  2313  		if logs := bc.collectLogs(newChain[i], false); len(logs) > 0 {
  2314  			rebirthLogs = append(rebirthLogs, logs...)
  2315  		}
  2316  		if len(rebirthLogs) > 512 {
  2317  			bc.logsFeed.Send(rebirthLogs)
  2318  			rebirthLogs = nil
  2319  		}
  2320  	}
  2321  	if len(rebirthLogs) > 0 {
  2322  		bc.logsFeed.Send(rebirthLogs)
  2323  	}
  2324  	return nil
  2325  }
  2326  
  2327  // InsertBlockWithoutSetHead executes the block, runs the necessary verification
  2328  // upon it and then persists the block and the associated state into the database.
  2329  // The key difference from InsertChain is that it won't update the canonical
  2330  // chain. It relies on an additional SetCanonical call to finalize the entire
  2331  // procedure.
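        //
        // A sketch of the intended two-step flow (block is a placeholder; this mirrors
        // how an external consensus driver would combine it with SetCanonical):
        //
        //	if err := bc.InsertBlockWithoutSetHead(block); err != nil {
        //		return err
        //	}
        //	// ... later, once the block is deemed canonical:
        //	if _, err := bc.SetCanonical(block); err != nil {
        //		return err
        //	}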
  2332  func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
  2333  	if !bc.chainmu.TryLock() {
  2334  		return errChainStopped
  2335  	}
  2336  	defer bc.chainmu.Unlock()
  2337  
  2338  	_, err := bc.insertChain(types.Blocks{block}, true, false)
  2339  	return err
  2340  }
  2341  
  2342  // SetCanonical rewinds the chain to set the specified block as the new head
  2343  // block. It's possible that the state of the new head is missing, and it will
  2344  // be recovered in this function as well.
  2345  func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
  2346  	if !bc.chainmu.TryLock() {
  2347  		return common.Hash{}, errChainStopped
  2348  	}
  2349  	defer bc.chainmu.Unlock()
  2350  
  2351  	// Re-execute the reorged chain in case the head state is missing.
  2352  	if !bc.HasState(head.Root()) {
  2353  		if latestValidHash, err := bc.recoverAncestors(head); err != nil {
  2354  			return latestValidHash, err
  2355  		}
  2356  		log.Info("Recovered head state", "number", head.Number(), "hash", head.Hash())
  2357  	}
  2358  	// Run the reorg if necessary and set the given block as new head.
  2359  	start := time.Now()
  2360  	if head.ParentHash() != bc.CurrentBlock().Hash() {
  2361  		if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
  2362  			return common.Hash{}, err
  2363  		}
  2364  	}
  2365  	bc.writeHeadBlock(head)
  2366  
  2367  	// Emit events
  2368  	logs := bc.collectLogs(head, false)
  2369  	bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
  2370  	if len(logs) > 0 {
  2371  		bc.logsFeed.Send(logs)
  2372  	}
  2373  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
  2374  
  2375  	context := []interface{}{
  2376  		"number", head.Number(),
  2377  		"hash", head.Hash(),
  2378  		"root", head.Root(),
  2379  		"elapsed", time.Since(start),
  2380  	}
  2381  	if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
  2382  		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
  2383  	}
  2384  	log.Info("Chain head was updated", context...)
  2385  	return head.Hash(), nil
  2386  }
  2387  
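// updateFutureBlocks runs until the quit channel is closed, attempting every
// five seconds to import any queued "future" blocks whose timestamps may have
// become valid in the meantime.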
  2388  func (bc *BlockChain) updateFutureBlocks() {
  2389  	futureTimer := time.NewTicker(5 * time.Second)
  2390  	defer futureTimer.Stop()
  2391  	defer bc.wg.Done()
  2392  	for {
  2393  		select {
  2394  		case <-futureTimer.C:
  2395  			bc.procFutureBlocks()
  2396  		case <-bc.quit:
  2397  			return
  2398  		}
  2399  	}
  2400  }
  2401  
  2402  // skipBlock returns 'true' if the block being imported can be skipped over, meaning
  2403  // that the block does not need to be processed but can be considered already fully 'done'.
  2404  func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
  2405  	// We can only ever bypass processing if the only error returned by the validator
  2406  	// is ErrKnownBlock, which means all checks passed, but we already have the block
  2407  	// and state.
  2408  	if !errors.Is(err, ErrKnownBlock) {
  2409  		return false
  2410  	}
  2411  	// If we're not using snapshots, we can skip this, since we have both the
  2412  	// block and the (trie-)state.
  2413  	if bc.snaps == nil {
  2414  		return true
  2415  	}
  2416  	var (
  2417  		header     = it.current() // header can't be nil
  2418  		parentRoot common.Hash
  2419  	)
  2420  	// If we also have the snapshot-state, we can skip the processing.
  2421  	if bc.snaps.Snapshot(header.Root) != nil {
  2422  		return true
  2423  	}
  2424  	// In this case, we have the trie-state but not the snapshot-state. If the
  2425  	// parent snapshot-state exists, we need to process this block in order not
  2426  	// to create a gap in the snapshot layers.
  2427  	// Resolve the parent block.
  2428  	if parent := it.previous(); parent != nil {
  2429  		parentRoot = parent.Root
  2430  	} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
  2431  		parentRoot = parent.Root
  2432  	}
  2433  	if parentRoot == (common.Hash{}) {
  2434  		return false // Theoretically impossible case
  2435  	}
  2436  	// The parent is also missing its snapshot: we can skip this. Otherwise, process.
  2437  	if bc.snaps.Snapshot(parentRoot) == nil {
  2438  		return true
  2439  	}
  2440  	return false
  2441  }
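
// skipBlock decision summary (derived from the logic above):
//
//	validator error     snapshots   block snapshot   parent snapshot   skip?
//	not ErrKnownBlock   -           -                -                 no
//	ErrKnownBlock       disabled    -                -                 yes
//	ErrKnownBlock       enabled     present          -                 yes
//	ErrKnownBlock       enabled     missing          missing           yes
//	ErrKnownBlock       enabled     missing          present           no (process to close the snapshot gap)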
  2442  
  2443  // indexBlocks reindexes or unindexes transactions depending on user configuration.
  2444  func (bc *BlockChain) indexBlocks(tail *uint64, head uint64, done chan struct{}) {
  2445  	defer func() { close(done) }()
  2446  
  2447  	// If the tail flag does not exist, the node was just initialized and no
  2448  	// blocks (possibly including ancient-store blocks) have been indexed yet.
  2449  	if tail == nil {
  2450  		from := uint64(0)
  2451  		if bc.txLookupLimit != 0 && head >= bc.txLookupLimit {
  2452  			from = head - bc.txLookupLimit + 1
  2453  		}
  2454  		rawdb.IndexTransactions(bc.db, from, head+1, bc.quit)
  2455  		return
  2456  	}
  2457  	// The tail flag exists, but the whole chain needs to be indexed.
  2458  	if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
  2459  		if *tail > 0 {
  2460  			// This can happen when the chain is rewound to a historical point
  2461  			// even lower than the index tail; cap the indexing target at the
  2462  			// new head to avoid reading non-existent block bodies.
  2463  			end := *tail
  2464  			if end > head+1 {
  2465  				end = head + 1
  2466  			}
  2467  			rawdb.IndexTransactions(bc.db, 0, end, bc.quit)
  2468  		}
  2469  		return
  2470  	}
  2471  	// Update the transaction index to the new chain state
  2472  	if head-bc.txLookupLimit+1 < *tail {
  2473  		// Reindex the missing portion of the indices and rewind the index tail to HEAD-limit
  2474  		rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit)
  2475  	} else {
  2476  		// Unindex the stale portion of the indices and move the index tail forward to HEAD-limit
  2477  		rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit)
  2478  	}
  2479  }
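
// Worked example (illustrative numbers): with bc.txLookupLimit = 100 and
// head = 1000, only blocks [901, 1000] should keep their tx indices, so:
//
//	tail == nil               -> IndexTransactions(db, 901, 1001, quit)  // fresh node, index the recent range
//	*tail == 950 (above 901)  -> IndexTransactions(db, 901, 950, quit)   // backfill the missing [901, 950)
//	*tail == 800 (below 901)  -> UnindexTransactions(db, 800, 901, quit) // drop the stale [800, 901)
//
// With txLookupLimit = 0 and *tail == 500, the whole chain is wanted, so the
// index is extended down to genesis: IndexTransactions(db, 0, 500, quit).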
  2480  
  2481  // maintainTxIndex is responsible for the construction and deletion of the
  2482  // transaction index.
  2483  //
  2484  // The user can use the `txlookuplimit` flag to specify a "recentness" threshold,
  2485  // below which ancient tx indices get deleted. If `txlookuplimit` is 0, all
  2486  // tx indices will be retained.
  2487  //
  2488  // The user can adjust the txlookuplimit value for each launch; after sync,
  2489  // Geth will automatically construct the missing indices or delete the extra
  2490  // ones.
  2491  func (bc *BlockChain) maintainTxIndex() {
  2492  	defer bc.wg.Done()
  2493  
  2494  	// Listen to chain events and manipulate the transaction indexes accordingly.
  2495  	var (
  2496  		done   chan struct{}                  // Non-nil if background unindexing or reindexing routine is active.
  2497  		headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
  2498  	)
  2499  	sub := bc.SubscribeChainHeadEvent(headCh)
  2500  	if sub == nil {
  2501  		return
  2502  	}
  2503  	defer sub.Unsubscribe()
  2504  
  2505  	for {
  2506  		select {
  2507  		case head := <-headCh:
  2508  			if done == nil {
  2509  				done = make(chan struct{})
  2510  				go bc.indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
  2511  			}
  2512  		case <-done:
  2513  			done = nil
  2514  		case <-bc.quit:
  2515  			if done != nil {
  2516  				log.Info("Waiting for background transaction indexer to exit")
  2517  				<-done
  2518  			}
  2519  			return
  2520  		}
  2521  	}
  2522  }
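
// The loop above is a "single-flight" scheduler: while done is non-nil an
// indexBlocks run is in flight, newer head events are received but ignored,
// and shutdown waits for the active run to finish. A standalone sketch of
// the same pattern (illustrative, not part of the original file):
func exampleSingleFlight(events <-chan struct{}, quit <-chan struct{}, job func(done chan struct{})) {
	var done chan struct{} // non-nil while a job is running
	for {
		select {
		case <-events:
			if done == nil {
				done = make(chan struct{})
				go job(done) // job must close(done) when finished
			}
		case <-done: // a nil channel blocks, so this only fires while a job runs
			done = nil
		case <-quit:
			if done != nil {
				<-done // wait for the in-flight job before exiting
			}
			return
		}
	}
}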
  2523  
  2524  // reportBlock logs a bad block error.
  2525  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2526  	rawdb.WriteBadBlock(bc.db, block)
  2527  	log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
  2528  }
  2529  
  2530  // summarizeBadBlock returns a string summarizing the bad block and other
  2531  // relevant information.
  2532  func summarizeBadBlock(block *types.Block, receipts []*types.Receipt, config *params.ChainConfig, err error) string {
  2533  	var receiptString string
  2534  	for i, receipt := range receipts {
  2535  		receiptString += fmt.Sprintf("\n  %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x",
  2536  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2537  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2538  	}
  2539  	version, vcs := version.Info()
  2540  	platform := fmt.Sprintf("%s %s %s %s", version, runtime.Version(), runtime.GOARCH, runtime.GOOS)
  2541  	if vcs != "" {
  2542  		vcs = fmt.Sprintf("\nVCS: %s", vcs)
  2543  	}
  2544  	return fmt.Sprintf(`
  2545  ########## BAD BLOCK #########
  2546  Block: %v (%#x)
  2547  Error: %v
  2548  Platform: %v%v
  2549  Chain config: %#v
  2550  Receipts: %v
  2551  ##############################
  2552  `, block.Number(), block.Hash(), err, platform, vcs, config, receiptString)
  2553  }
  2554  
  2555  // InsertHeaderChain attempts to insert the given header chain into the local
  2556  // chain, possibly creating a reorg. If an error is returned, it will return the
  2557  // index number of the failing header as well as an error describing what went wrong.
  2558  //
  2559  // The checkFreq parameter can be used to fine-tune whether nonce verification
  2560  // should be done or not. The reason behind the optional check is that some
  2561  // of the header retrieval mechanisms already need to verify nonces, and
  2562  // nonces can be verified sparsely, without needing to check each one.
  2563  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  2564  	if len(chain) == 0 {
  2565  		return 0, nil
  2566  	}
  2567  	start := time.Now()
  2568  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  2569  		return i, err
  2570  	}
  2571  
  2572  	if !bc.chainmu.TryLock() {
  2573  		return 0, errChainStopped
  2574  	}
  2575  	defer bc.chainmu.Unlock()
  2576  	_, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
  2577  	return 0, err
  2578  }
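
// Example (illustrative): importing a header batch while spot-checking seals.
// The exact sampling driven by checkFreq is delegated to the underlying header
// chain; 100 here is an arbitrary illustrative value, and with 0 only the
// final header's seal is typically verified.
func exampleImportHeaders(bc *BlockChain, headers []*types.Header) {
	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
		log.Warn("Header import aborted", "failing-index", n, "err", err)
	}
}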
  2579  
  2580  // SetBlockValidatorAndProcessorForTesting sets the current validator and processor.
  2581  // This method can be used to force an invalid blockchain to be verified for tests.
  2582  // This method is unsafe and should only be used before block import starts.
  2583  func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Processor) {
  2584  	bc.validator = v
  2585  	bc.processor = p
  2586  }
  2587  
  2588  // SetTrieFlushInterval configures how often in-memory tries are persisted to disk.
  2589  // The interval is in terms of block processing time, not wall clock.
  2590  // It is thread-safe and can be called repeatedly without side effects.
  2591  func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
  2592  	atomic.StoreInt64(&bc.flushInterval, int64(interval))
  2593  }
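
// Example (illustrative): persisting in-memory tries after every half hour of
// accumulated block processing time. Since the setter is atomic, it is safe to
// call while imports are in progress.
func exampleTuneTrieFlushInterval(bc *BlockChain) {
	bc.SetTrieFlushInterval(30 * time.Minute)
}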