github.com/ethereum/go-ethereum@v1.14.3/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	"runtime"
    26  	"strings"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/common/lru"
    33  	"github.com/ethereum/go-ethereum/common/mclock"
    34  	"github.com/ethereum/go-ethereum/common/prque"
    35  	"github.com/ethereum/go-ethereum/consensus"
    36  	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
    37  	"github.com/ethereum/go-ethereum/core/rawdb"
    38  	"github.com/ethereum/go-ethereum/core/state"
    39  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    40  	"github.com/ethereum/go-ethereum/core/tracing"
    41  	"github.com/ethereum/go-ethereum/core/types"
    42  	"github.com/ethereum/go-ethereum/core/vm"
    43  	"github.com/ethereum/go-ethereum/ethdb"
    44  	"github.com/ethereum/go-ethereum/event"
    45  	"github.com/ethereum/go-ethereum/internal/syncx"
    46  	"github.com/ethereum/go-ethereum/internal/version"
    47  	"github.com/ethereum/go-ethereum/log"
    48  	"github.com/ethereum/go-ethereum/metrics"
    49  	"github.com/ethereum/go-ethereum/params"
    50  	"github.com/ethereum/go-ethereum/rlp"
    51  	"github.com/ethereum/go-ethereum/triedb"
    52  	"github.com/ethereum/go-ethereum/triedb/hashdb"
    53  	"github.com/ethereum/go-ethereum/triedb/pathdb"
    54  )
    55  
    56  var (
    57  	headBlockGauge          = metrics.NewRegisteredGauge("chain/head/block", nil)
    58  	headHeaderGauge         = metrics.NewRegisteredGauge("chain/head/header", nil)
    59  	headFastBlockGauge      = metrics.NewRegisteredGauge("chain/head/receipt", nil)
    60  	headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil)
    61  	headSafeBlockGauge      = metrics.NewRegisteredGauge("chain/head/safe", nil)
    62  
    63  	chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil)
    64  
    65  	accountReadTimer   = metrics.NewRegisteredResettingTimer("chain/account/reads", nil)
    66  	accountHashTimer   = metrics.NewRegisteredResettingTimer("chain/account/hashes", nil)
    67  	accountUpdateTimer = metrics.NewRegisteredResettingTimer("chain/account/updates", nil)
    68  	accountCommitTimer = metrics.NewRegisteredResettingTimer("chain/account/commits", nil)
    69  
    70  	storageReadTimer   = metrics.NewRegisteredResettingTimer("chain/storage/reads", nil)
    71  	storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil)
    72  	storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil)
    73  
    74  	snapshotAccountReadTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/account/reads", nil)
    75  	snapshotStorageReadTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/storage/reads", nil)
    76  	snapshotCommitTimer      = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
    77  
    78  	triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)
    79  
    80  	blockInsertTimer     = metrics.NewRegisteredResettingTimer("chain/inserts", nil)
    81  	blockValidationTimer = metrics.NewRegisteredResettingTimer("chain/validation", nil)
    82  	blockExecutionTimer  = metrics.NewRegisteredResettingTimer("chain/execution", nil)
    83  	blockWriteTimer      = metrics.NewRegisteredResettingTimer("chain/write", nil)
    84  
    85  	blockReorgMeter     = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
    86  	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
    87  	blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
    88  
    89  	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
    90  	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
    91  
    92  	errInsertionInterrupted = errors.New("insertion is interrupted")
    93  	errChainStopped         = errors.New("blockchain is stopped")
    94  	errInvalidOldChain      = errors.New("invalid old chain")
    95  	errInvalidNewChain      = errors.New("invalid new chain")
    96  )
    97  
    98  const (
    99  	bodyCacheLimit     = 256
   100  	blockCacheLimit    = 256
   101  	receiptsCacheLimit = 32
   102  	txLookupCacheLimit = 1024
   103  
   104  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
   105  	//
   106  	// Changelog:
   107  	//
   108  	// - Version 4
   109  	//   The following incompatible database changes were added:
   110  	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
   111  	//   * the `Bloom` field of receipt is deleted
   112  	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
   113  	// - Version 5
   114  	//  The following incompatible database changes were added:
   115  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
   116  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
   117  	//      receipts' corresponding block
   118  	// - Version 6
   119  	//  The following incompatible database changes were added:
   120  	//    * Transaction lookup information stores the corresponding block number instead of block hash
   121  	// - Version 7
   122  	//  The following incompatible database changes were added:
   123  	//    * Use freezer as the ancient database to maintain all ancient data
   124  	// - Version 8
   125  	//  The following incompatible database changes were added:
   126  	//    * New scheme for contract code in order to separate the codes and trie nodes
   127  	BlockChainVersion uint64 = 8
   128  )
   129  
   130  // CacheConfig contains the configuration values for the trie database
    131  // and state snapshot that are resident in a blockchain.
   132  type CacheConfig struct {
   133  	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
   134  	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
   135  	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
   136  	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
   137  	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
   138  	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory
   139  	Preimages           bool          // Whether to store preimage of trie key to the disk
   140  	StateHistory        uint64        // Number of blocks from head whose state histories are reserved.
   141  	StateScheme         string        // Scheme used to store ethereum states and merkle tree nodes on top
   142  
    143  	SnapshotNoBuild bool // Whether to disable background snapshot generation
   144  	SnapshotWait    bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
   145  }
   146  
    147  // triedbConfig derives the config for the trie database.
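         // Only the sub-config matching the configured StateScheme (hash or path)
         // is populated.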
   148  func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config {
   149  	config := &triedb.Config{
   150  		Preimages: c.Preimages,
   151  		IsVerkle:  isVerkle,
   152  	}
   153  	if c.StateScheme == rawdb.HashScheme {
   154  		config.HashDB = &hashdb.Config{
   155  			CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
   156  		}
   157  	}
   158  	if c.StateScheme == rawdb.PathScheme {
   159  		config.PathDB = &pathdb.Config{
   160  			StateHistory:   c.StateHistory,
   161  			CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
   162  			DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
   163  		}
   164  	}
   165  	return config
   166  }
   167  
    168  // defaultCacheConfig holds the default caching values if none are specified by the
   169  // user (also used during testing).
   170  var defaultCacheConfig = &CacheConfig{
   171  	TrieCleanLimit: 256,
   172  	TrieDirtyLimit: 256,
   173  	TrieTimeLimit:  5 * time.Minute,
   174  	SnapshotLimit:  256,
   175  	SnapshotWait:   true,
   176  	StateScheme:    rawdb.HashScheme,
   177  }
   178  
   179  // DefaultCacheConfigWithScheme returns a deep copied default cache config with
   180  // a provided trie node scheme.
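         // For example, DefaultCacheConfigWithScheme(rawdb.PathScheme) returns the
         // defaults with the path-based state scheme selected.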
   181  func DefaultCacheConfigWithScheme(scheme string) *CacheConfig {
   182  	config := *defaultCacheConfig
   183  	config.StateScheme = scheme
   184  	return &config
   185  }
   186  
    187  // txLookup is a wrapper around a transaction lookup entry along with the
    188  // corresponding transaction object.
   189  type txLookup struct {
   190  	lookup      *rawdb.LegacyTxLookupEntry
   191  	transaction *types.Transaction
   192  }
   193  
   194  // BlockChain represents the canonical chain given a database with a genesis
    195  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
    196  //
    197  // Importing blocks into the blockchain happens according to the set of rules
    198  // defined by the two-stage Validator. Processing of blocks is done using the
    199  // Processor, which processes the included transactions. The validation of the state
    200  // is done in the second part of the Validator. A failure results in aborting
    201  // the import.
    202  //
    203  // The BlockChain also helps in returning blocks from **any** chain included
    204  // in the database as well as blocks that represent the canonical chain. It's
    205  // important to note that GetBlock can return any block, which need not be
    206  // included in the canonical chain, whereas GetBlockByNumber always represents the
    207  // canonical chain.
   208  type BlockChain struct {
   209  	chainConfig *params.ChainConfig // Chain & network configuration
   210  	cacheConfig *CacheConfig        // Cache configuration for pruning
   211  
   212  	db            ethdb.Database                   // Low level persistent database to store final content in
   213  	snaps         *snapshot.Tree                   // Snapshot tree for fast trie leaf access
   214  	triegc        *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
   215  	gcproc        time.Duration                    // Accumulates canonical block processing for trie dumping
   216  	lastWrite     uint64                           // Last block when the state was flushed
   217  	flushInterval atomic.Int64                     // Time interval (processing time) after which to flush a state
   218  	triedb        *triedb.Database                 // The database handler for maintaining trie nodes.
   219  	stateCache    state.Database                   // State database to reuse between imports (contains state cache)
   220  	txIndexer     *txIndexer                       // Transaction indexer, might be nil if not enabled
   221  
   222  	hc            *HeaderChain
   223  	rmLogsFeed    event.Feed
   224  	chainFeed     event.Feed
   225  	chainSideFeed event.Feed
   226  	chainHeadFeed event.Feed
   227  	logsFeed      event.Feed
   228  	blockProcFeed event.Feed
   229  	scope         event.SubscriptionScope
   230  	genesisBlock  *types.Block
   231  
   232  	// This mutex synchronizes chain write operations.
   233  	// Readers don't need to take it, they can just read the database.
   234  	chainmu *syncx.ClosableMutex
   235  
   236  	currentBlock      atomic.Pointer[types.Header] // Current head of the chain
   237  	currentSnapBlock  atomic.Pointer[types.Header] // Current head of snap-sync
   238  	currentFinalBlock atomic.Pointer[types.Header] // Latest (consensus) finalized block
   239  	currentSafeBlock  atomic.Pointer[types.Header] // Latest (consensus) safe block
   240  
   241  	bodyCache     *lru.Cache[common.Hash, *types.Body]
   242  	bodyRLPCache  *lru.Cache[common.Hash, rlp.RawValue]
   243  	receiptsCache *lru.Cache[common.Hash, []*types.Receipt]
   244  	blockCache    *lru.Cache[common.Hash, *types.Block]
   245  
   246  	txLookupLock  sync.RWMutex
   247  	txLookupCache *lru.Cache[common.Hash, txLookup]
   248  
   249  	wg            sync.WaitGroup
   250  	quit          chan struct{} // shutdown signal, closed in Stop.
   251  	stopping      atomic.Bool   // false if chain is running, true when stopped
   252  	procInterrupt atomic.Bool   // interrupt signaler for block processing
   253  
   254  	engine     consensus.Engine
   255  	validator  Validator // Block and state validator interface
   256  	prefetcher Prefetcher
   257  	processor  Processor // Block transaction processor interface
   258  	forker     *ForkChoice
   259  	vmConfig   vm.Config
   260  	logger     *tracing.Hooks
   261  }
   262  
   263  // NewBlockChain returns a fully initialised block chain using information
   264  // available in the database. It initialises the default Ethereum Validator
   265  // and Processor.
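         //
         // A minimal construction sketch (assumptions: an in-memory database and a
         // faked consensus engine as commonly used in tests; gspec stands in for a
         // hypothetical *Genesis value):
         //
         //	db := rawdb.NewMemoryDatabase()
         //	chain, err := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
         //	if err != nil {
         //		// handle the initialisation error
         //	}
         //	defer chain.Stop()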
   266  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) {
   267  	if cacheConfig == nil {
   268  		cacheConfig = defaultCacheConfig
   269  	}
   270  	// Open trie database with provided config
   271  	triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(genesis != nil && genesis.IsVerkle()))
   272  
   273  	// Setup the genesis block, commit the provided genesis specification
   274  	// to database if the genesis block is not present yet, or load the
   275  	// stored one from database.
   276  	chainConfig, genesisHash, genesisErr := SetupGenesisBlockWithOverride(db, triedb, genesis, overrides)
   277  	if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
   278  		return nil, genesisErr
   279  	}
   280  	log.Info("")
   281  	log.Info(strings.Repeat("-", 153))
   282  	for _, line := range strings.Split(chainConfig.Description(), "\n") {
   283  		log.Info(line)
   284  	}
   285  	log.Info(strings.Repeat("-", 153))
   286  	log.Info("")
   287  
   288  	bc := &BlockChain{
   289  		chainConfig:   chainConfig,
   290  		cacheConfig:   cacheConfig,
   291  		db:            db,
   292  		triedb:        triedb,
   293  		triegc:        prque.New[int64, common.Hash](nil),
   294  		quit:          make(chan struct{}),
   295  		chainmu:       syncx.NewClosableMutex(),
   296  		bodyCache:     lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
   297  		bodyRLPCache:  lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
   298  		receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
   299  		blockCache:    lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
   300  		txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
   301  		engine:        engine,
   302  		vmConfig:      vmConfig,
   303  		logger:        vmConfig.Tracer,
   304  	}
   305  	bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
   306  	bc.forker = NewForkChoice(bc, shouldPreserve)
   307  	bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
   308  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   309  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   310  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   311  
   312  	var err error
   313  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
   314  	if err != nil {
   315  		return nil, err
   316  	}
   317  	bc.genesisBlock = bc.GetBlockByNumber(0)
   318  	if bc.genesisBlock == nil {
   319  		return nil, ErrNoGenesis
   320  	}
   321  
   322  	bc.currentBlock.Store(nil)
   323  	bc.currentSnapBlock.Store(nil)
   324  	bc.currentFinalBlock.Store(nil)
   325  	bc.currentSafeBlock.Store(nil)
   326  
   327  	// Update chain info data metrics
   328  	chainInfoGauge.Update(metrics.GaugeInfoValue{"chain_id": bc.chainConfig.ChainID.String()})
   329  
   330  	// If Geth is initialized with an external ancient store, re-initialize the
    331  	// missing chain indexes and chain flags. This procedure can survive a crash
    332  	// and can be resumed on the next restart since the chain flags are updated in the last step.
   333  	if bc.empty() {
   334  		rawdb.InitDatabaseFromFreezer(bc.db)
   335  	}
   336  	// Load blockchain states from disk
   337  	if err := bc.loadLastState(); err != nil {
   338  		return nil, err
   339  	}
    340  	// Make sure the state associated with the block is available, or log a
    341  	// message if there is no available state, waiting for state sync.
   342  	head := bc.CurrentBlock()
   343  	if !bc.HasState(head.Root) {
   344  		if head.Number.Uint64() == 0 {
   345  			// The genesis state is missing, which is only possible in the path-based
   346  			// scheme. This situation occurs when the initial state sync is not finished
   347  			// yet, or the chain head is rewound below the pivot point. In both scenarios,
   348  			// there is no possible recovery approach except for rerunning a snap sync.
   349  			// Do nothing here until the state syncer picks it up.
   350  			log.Info("Genesis state is missing, wait state sync")
   351  		} else {
    352  			// The head state is missing. Before the state recovery, find out the
    353  			// disk layer point of the snapshot (if it's enabled). Make sure the
    354  			// rewound point is lower than the disk layer.
   355  			var diskRoot common.Hash
   356  			if bc.cacheConfig.SnapshotLimit > 0 {
   357  				diskRoot = rawdb.ReadSnapshotRoot(bc.db)
   358  			}
   359  			if diskRoot != (common.Hash{}) {
   360  				log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
   361  
   362  				snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
   363  				if err != nil {
   364  					return nil, err
   365  				}
   366  				// Chain rewound, persist old snapshot number to indicate recovery procedure
   367  				if snapDisk != 0 {
   368  					rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
   369  				}
   370  			} else {
   371  				log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
   372  				if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
   373  					return nil, err
   374  				}
   375  			}
   376  		}
   377  	}
   378  	// Ensure that a previous crash in SetHead doesn't leave extra ancients
   379  	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
   380  		var (
   381  			needRewind bool
   382  			low        uint64
   383  		)
   384  		// The head full block may be rolled back to a very low height due to
   385  		// blockchain repair. If the head full block is even lower than the ancient
   386  		// chain, truncate the ancient store.
   387  		fullBlock := bc.CurrentBlock()
   388  		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 {
   389  			needRewind = true
   390  			low = fullBlock.Number.Uint64()
   391  		}
   392  		// In snap sync, it may happen that ancient data has been written to the
    393  		// ancient store, but the LastFastBlock has not been updated; truncate the
    394  		// extra data here.
   395  		snapBlock := bc.CurrentSnapBlock()
   396  		if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 {
   397  			needRewind = true
   398  			if snapBlock.Number.Uint64() < low || low == 0 {
   399  				low = snapBlock.Number.Uint64()
   400  			}
   401  		}
   402  		if needRewind {
   403  			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
   404  			if err := bc.SetHead(low); err != nil {
   405  				return nil, err
   406  			}
   407  		}
   408  	}
   409  	// The first thing the node will do is reconstruct the verification data for
   410  	// the head block (ethash cache or clique voting snapshot). Might as well do
   411  	// it in advance.
   412  	bc.engine.VerifyHeader(bc, bc.CurrentHeader())
   413  
   414  	if bc.logger != nil && bc.logger.OnBlockchainInit != nil {
   415  		bc.logger.OnBlockchainInit(chainConfig)
   416  	}
   417  	if bc.logger != nil && bc.logger.OnGenesisBlock != nil {
   418  		if block := bc.CurrentBlock(); block.Number.Uint64() == 0 {
   419  			alloc, err := getGenesisState(bc.db, block.Hash())
   420  			if err != nil {
   421  				return nil, fmt.Errorf("failed to get genesis state: %w", err)
   422  			}
   423  			if alloc == nil {
   424  				return nil, errors.New("live blockchain tracer requires genesis alloc to be set")
   425  			}
   426  			bc.logger.OnGenesisBlock(bc.genesisBlock, alloc)
   427  		}
   428  	}
   429  
   430  	// Load any existing snapshot, regenerating it if loading failed
   431  	if bc.cacheConfig.SnapshotLimit > 0 {
   432  		// If the chain was rewound past the snapshot persistent layer (causing
   433  		// a recovery block number to be persisted to disk), check if we're still
   434  		// in recovery mode and in that case, don't invalidate the snapshot on a
   435  		// head mismatch.
   436  		var recover bool
   437  
   438  		head := bc.CurrentBlock()
   439  		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() {
   440  			log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer)
   441  			recover = true
   442  		}
   443  		snapconfig := snapshot.Config{
   444  			CacheSize:  bc.cacheConfig.SnapshotLimit,
   445  			Recovery:   recover,
   446  			NoBuild:    bc.cacheConfig.SnapshotNoBuild,
   447  			AsyncBuild: !bc.cacheConfig.SnapshotWait,
   448  		}
   449  		bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
   450  	}
   451  	// Rewind the chain in case of an incompatible config upgrade.
   452  	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
   453  		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
   454  		if compat.RewindToTime > 0 {
   455  			bc.SetHeadWithTimestamp(compat.RewindToTime)
   456  		} else {
   457  			bc.SetHead(compat.RewindToBlock)
   458  		}
   459  		rawdb.WriteChainConfig(db, genesisHash, chainConfig)
   460  	}
   461  
   462  	// Start tx indexer if it's enabled.
   463  	if txLookupLimit != nil {
   464  		bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
   465  	}
   466  	return bc, nil
   467  }
   468  
    469  // empty reports whether the blockchain is empty.
    470  // Note, it's a special case that we connect a non-empty ancient
    471  // database with an empty node, so that we can plug the ancient store
    472  // into the node seamlessly.
   473  func (bc *BlockChain) empty() bool {
   474  	genesis := bc.genesisBlock.Hash()
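         	// The chain counts as empty only if the head block, head header and head
         	// snap-sync markers all still point at the genesis hash.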
   475  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   476  		if hash != genesis {
   477  			return false
   478  		}
   479  	}
   480  	return true
   481  }
   482  
   483  // loadLastState loads the last known chain state from the database. This method
   484  // assumes that the chain manager mutex is held.
   485  func (bc *BlockChain) loadLastState() error {
   486  	// Restore the last known head block
   487  	head := rawdb.ReadHeadBlockHash(bc.db)
   488  	if head == (common.Hash{}) {
   489  		// Corrupt or empty database, init from scratch
   490  		log.Warn("Empty database, resetting chain")
   491  		return bc.Reset()
   492  	}
   493  	// Make sure the entire head block is available
   494  	headBlock := bc.GetBlockByHash(head)
   495  	if headBlock == nil {
   496  		// Corrupt or empty database, init from scratch
   497  		log.Warn("Head block missing, resetting chain", "hash", head)
   498  		return bc.Reset()
   499  	}
   500  	// Everything seems to be fine, set as the head block
   501  	bc.currentBlock.Store(headBlock.Header())
   502  	headBlockGauge.Update(int64(headBlock.NumberU64()))
   503  
   504  	// Restore the last known head header
   505  	headHeader := headBlock.Header()
   506  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   507  		if header := bc.GetHeaderByHash(head); header != nil {
   508  			headHeader = header
   509  		}
   510  	}
   511  	bc.hc.SetCurrentHeader(headHeader)
   512  
   513  	// Restore the last known head snap block
   514  	bc.currentSnapBlock.Store(headBlock.Header())
   515  	headFastBlockGauge.Update(int64(headBlock.NumberU64()))
   516  
   517  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   518  		if block := bc.GetBlockByHash(head); block != nil {
   519  			bc.currentSnapBlock.Store(block.Header())
   520  			headFastBlockGauge.Update(int64(block.NumberU64()))
   521  		}
   522  	}
   523  
   524  	// Restore the last known finalized block and safe block
   525  	// Note: the safe block is not stored on disk and it is set to the last
   526  	// known finalized block on startup
   527  	if head := rawdb.ReadFinalizedBlockHash(bc.db); head != (common.Hash{}) {
   528  		if block := bc.GetBlockByHash(head); block != nil {
   529  			bc.currentFinalBlock.Store(block.Header())
   530  			headFinalizedBlockGauge.Update(int64(block.NumberU64()))
   531  			bc.currentSafeBlock.Store(block.Header())
   532  			headSafeBlockGauge.Update(int64(block.NumberU64()))
   533  		}
   534  	}
   535  	// Issue a status log for the user
   536  	var (
   537  		currentSnapBlock  = bc.CurrentSnapBlock()
   538  		currentFinalBlock = bc.CurrentFinalBlock()
   539  
   540  		headerTd = bc.GetTd(headHeader.Hash(), headHeader.Number.Uint64())
   541  		blockTd  = bc.GetTd(headBlock.Hash(), headBlock.NumberU64())
   542  	)
   543  	if headHeader.Hash() != headBlock.Hash() {
   544  		log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
   545  	}
   546  	log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
   547  	if headBlock.Hash() != currentSnapBlock.Hash() {
   548  		snapTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64())
   549  		log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
   550  	}
   551  	if currentFinalBlock != nil {
   552  		finalTd := bc.GetTd(currentFinalBlock.Hash(), currentFinalBlock.Number.Uint64())
   553  		log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0)))
   554  	}
   555  	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
   556  		log.Info("Loaded last snap-sync pivot marker", "number", *pivot)
   557  	}
   558  	return nil
   559  }
   560  
   561  // SetHead rewinds the local chain to a new head. Depending on whether the node
   562  // was snap synced or full synced and in which state, the method will try to
   563  // delete minimal data from disk whilst retaining chain consistency.
   564  func (bc *BlockChain) SetHead(head uint64) error {
   565  	if _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false); err != nil {
   566  		return err
   567  	}
   568  	// Send chain head event to update the transaction pool
   569  	header := bc.CurrentBlock()
   570  	block := bc.GetBlock(header.Hash(), header.Number.Uint64())
   571  	if block == nil {
    572  		// This should never happen. In practice, currentBlock previously
    573  		// contained the entire block, whereas now it holds only a "marker", so there
    574  		// is an ever-so-slight chance of a race we should handle.
   575  		log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   576  		return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   577  	}
   578  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   579  	return nil
   580  }
   581  
   582  // SetHeadWithTimestamp rewinds the local chain to a new head that has at max
   583  // the given timestamp. Depending on whether the node was snap synced or full
   584  // synced and in which state, the method will try to delete minimal data from
   585  // disk whilst retaining chain consistency.
   586  func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
   587  	if _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false); err != nil {
   588  		return err
   589  	}
   590  	// Send chain head event to update the transaction pool
   591  	header := bc.CurrentBlock()
   592  	block := bc.GetBlock(header.Hash(), header.Number.Uint64())
   593  	if block == nil {
    594  		// This should never happen. In practice, currentBlock previously
    595  		// contained the entire block, whereas now it holds only a "marker", so there
    596  		// is an ever-so-slight chance of a race we should handle.
   597  		log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   598  		return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   599  	}
   600  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   601  	return nil
   602  }
   603  
   604  // SetFinalized sets the finalized block.
   605  func (bc *BlockChain) SetFinalized(header *types.Header) {
   606  	bc.currentFinalBlock.Store(header)
   607  	if header != nil {
   608  		rawdb.WriteFinalizedBlockHash(bc.db, header.Hash())
   609  		headFinalizedBlockGauge.Update(int64(header.Number.Uint64()))
   610  	} else {
   611  		rawdb.WriteFinalizedBlockHash(bc.db, common.Hash{})
   612  		headFinalizedBlockGauge.Update(0)
   613  	}
   614  }
   615  
   616  // SetSafe sets the safe block.
   617  func (bc *BlockChain) SetSafe(header *types.Header) {
   618  	bc.currentSafeBlock.Store(header)
   619  	if header != nil {
   620  		headSafeBlockGauge.Update(int64(header.Number.Uint64()))
   621  	} else {
   622  		headSafeBlockGauge.Update(0)
   623  	}
   624  }
   625  
   626  // rewindHashHead implements the logic of rewindHead in the context of hash scheme.
   627  func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
   628  	var (
   629  		limit      uint64                             // The oldest block that will be searched for this rewinding
   630  		beyondRoot = root == common.Hash{}            // Flag whether we're beyond the requested root (no root, always true)
   631  		pivot      = rawdb.ReadLastPivotNumber(bc.db) // Associated block number of pivot point state
   632  		rootNumber uint64                             // Associated block number of requested root
   633  
    634  		start  = time.Now() // Timestamp when the rewinding started
   635  		logged = time.Now() // Timestamp last progress log was printed
   636  	)
   637  	// The oldest block to be searched is determined by the pivot block or a constant
   638  	// searching threshold. The rationale behind this is as follows:
   639  	//
   640  	// - Snap sync is selected if the pivot block is available. The earliest available
   641  	//   state is the pivot block itself, so there is no sense in going further back.
   642  	//
   643  	// - Full sync is selected if the pivot block does not exist. The hash database
   644  	//   periodically flushes the state to disk, and the used searching threshold is
   645  	//   considered sufficient to find a persistent state, even for the testnet. It
    646  	//   might not be enough for a chain that is nearly empty. In the worst case,
   647  	//   the entire chain is reset to genesis, and snap sync is re-enabled on top,
   648  	//   which is still acceptable.
   649  	if pivot != nil {
   650  		limit = *pivot
   651  	} else if head.Number.Uint64() > params.FullImmutabilityThreshold {
   652  		limit = head.Number.Uint64() - params.FullImmutabilityThreshold
   653  	}
   654  	for {
   655  		logger := log.Trace
   656  		if time.Since(logged) > time.Second*8 {
   657  			logged = time.Now()
   658  			logger = log.Info
   659  		}
   660  		logger("Block state missing, rewinding further", "number", head.Number, "hash", head.Hash(), "elapsed", common.PrettyDuration(time.Since(start)))
   661  
   662  		// If a root threshold was requested but not yet crossed, check
   663  		if !beyondRoot && head.Root == root {
   664  			beyondRoot, rootNumber = true, head.Number.Uint64()
   665  		}
   666  		// If search limit is reached, return the genesis block as the
   667  		// new chain head.
   668  		if head.Number.Uint64() < limit {
   669  			log.Info("Rewinding limit reached, resetting to genesis", "number", head.Number, "hash", head.Hash(), "limit", limit)
   670  			return bc.genesisBlock.Header(), rootNumber
   671  		}
   672  		// If the associated state is not reachable, continue searching
   673  		// backwards until an available state is found.
   674  		if !bc.HasState(head.Root) {
   675  			// If the chain is gapped in the middle, return the genesis
   676  			// block as the new chain head.
   677  			parent := bc.GetHeader(head.ParentHash, head.Number.Uint64()-1)
   678  			if parent == nil {
   679  				log.Error("Missing block in the middle, resetting to genesis", "number", head.Number.Uint64()-1, "hash", head.ParentHash)
   680  				return bc.genesisBlock.Header(), rootNumber
   681  			}
   682  			head = parent
   683  
   684  			// If the genesis block is reached, stop searching.
   685  			if head.Number.Uint64() == 0 {
   686  				log.Info("Genesis block reached", "number", head.Number, "hash", head.Hash())
   687  				return head, rootNumber
   688  			}
   689  			continue // keep rewinding
   690  		}
   691  		// Once the available state is found, ensure that the requested root
   692  		// has already been crossed. If not, continue rewinding.
   693  		if beyondRoot || head.Number.Uint64() == 0 {
   694  			log.Info("Rewound to block with state", "number", head.Number, "hash", head.Hash())
   695  			return head, rootNumber
   696  		}
   697  		log.Debug("Skipping block with threshold state", "number", head.Number, "hash", head.Hash(), "root", head.Root)
   698  		head = bc.GetHeader(head.ParentHash, head.Number.Uint64()-1) // Keep rewinding
   699  	}
   700  }
   701  
   702  // rewindPathHead implements the logic of rewindHead in the context of path scheme.
   703  func (bc *BlockChain) rewindPathHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
   704  	var (
   705  		pivot      = rawdb.ReadLastPivotNumber(bc.db) // Associated block number of pivot block
   706  		rootNumber uint64                             // Associated block number of requested root
   707  
   708  		// BeyondRoot represents whether the requested root is already
   709  		// crossed. The flag value is set to true if the root is empty.
   710  		beyondRoot = root == common.Hash{}
   711  
    712  		// noState represents whether the target state requested for search
    713  		// is unavailable and impossible to recover.
   714  		noState = !bc.HasState(root) && !bc.stateRecoverable(root)
   715  
    716  		start  = time.Now() // Timestamp when the rewinding started
   717  		logged = time.Now() // Timestamp last progress log was printed
   718  	)
   719  	// Rewind the head block tag until an available state is found.
   720  	for {
   721  		logger := log.Trace
   722  		if time.Since(logged) > time.Second*8 {
   723  			logged = time.Now()
   724  			logger = log.Info
   725  		}
   726  		logger("Block state missing, rewinding further", "number", head.Number, "hash", head.Hash(), "elapsed", common.PrettyDuration(time.Since(start)))
   727  
   728  		// If a root threshold was requested but not yet crossed, check
   729  		if !beyondRoot && head.Root == root {
   730  			beyondRoot, rootNumber = true, head.Number.Uint64()
   731  		}
    732  		// If the root threshold hasn't been crossed but an available
    733  		// state has been reached, and the requested root is known to be
    734  		// unreachable, stop searching for it.
   735  		if !beyondRoot && noState && bc.HasState(head.Root) {
   736  			beyondRoot = true
   737  			log.Info("Disable the search for unattainable state", "root", root)
   738  		}
   739  		// Check if the associated state is available or recoverable if
   740  		// the requested root has already been crossed.
   741  		if beyondRoot && (bc.HasState(head.Root) || bc.stateRecoverable(head.Root)) {
   742  			break
   743  		}
    744  		// If the pivot block is reached, return the genesis block as the
    745  		// new chain head. Theoretically there must be a persistent
    746  		// state before or at the pivot block, so prevent endless rewinding
    747  		// towards the genesis just in case.
   748  		if pivot != nil && *pivot >= head.Number.Uint64() {
   749  			log.Info("Pivot block reached, resetting to genesis", "number", head.Number, "hash", head.Hash())
   750  			return bc.genesisBlock.Header(), rootNumber
   751  		}
   752  		// If the chain is gapped in the middle, return the genesis
   753  		// block as the new chain head
   754  		parent := bc.GetHeader(head.ParentHash, head.Number.Uint64()-1) // Keep rewinding
   755  		if parent == nil {
   756  			log.Error("Missing block in the middle, resetting to genesis", "number", head.Number.Uint64()-1, "hash", head.ParentHash)
   757  			return bc.genesisBlock.Header(), rootNumber
   758  		}
   759  		head = parent
   760  
   761  		// If the genesis block is reached, stop searching.
   762  		if head.Number.Uint64() == 0 {
   763  			log.Info("Genesis block reached", "number", head.Number, "hash", head.Hash())
   764  			return head, rootNumber
   765  		}
   766  	}
    767  	// Recover the target state if it's not available yet.
   768  	if !bc.HasState(head.Root) {
   769  		if err := bc.triedb.Recover(head.Root); err != nil {
   770  			log.Crit("Failed to rollback state", "err", err)
   771  		}
   772  	}
   773  	log.Info("Rewound to block with state", "number", head.Number, "hash", head.Hash())
   774  	return head, rootNumber
   775  }
   776  
   777  // rewindHead searches the available states in the database and returns the associated
   778  // block as the new head block.
   779  //
   780  // If the given root is not empty, then the rewind should attempt to pass the specified
   781  // state root and return the associated block number as well. If the root, typically
   782  // representing the state corresponding to snapshot disk layer, is deemed impassable,
   783  // then block number zero is returned, indicating that snapshot recovery is disabled
   784  // and the whole snapshot should be auto-generated in case of head mismatch.
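         //
         // The actual search is delegated to rewindHashHead or rewindPathHead,
         // depending on the state scheme in use.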
   785  func (bc *BlockChain) rewindHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
   786  	if bc.triedb.Scheme() == rawdb.PathScheme {
   787  		return bc.rewindPathHead(head, root)
   788  	}
   789  	return bc.rewindHashHead(head, root)
   790  }
   791  
   792  // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
   793  // that the rewind must pass the specified state root. This method is meant to be
   794  // used when rewinding with snapshots enabled to ensure that we go back further than
   795  // persistent disk layer. Depending on whether the node was snap synced or full, and
    796  // the persistent disk layer. Depending on whether the node was snap synced or full, and
   797  // retaining chain consistency.
   798  //
   799  // The method also works in timestamp mode if `head == 0` but `time != 0`. In that
   800  // case blocks are rolled back until the new head becomes older or equal to the
   801  // requested time. If both `head` and `time` is 0, the chain is rewound to genesis.
    802  // requested time. If both `head` and `time` are 0, the chain is rewound to genesis.
   803  // The method returns the block number where the requested root cap was found.
   804  func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool) (uint64, error) {
   805  	if !bc.chainmu.TryLock() {
   806  		return 0, errChainStopped
   807  	}
   808  	defer bc.chainmu.Unlock()
   809  
   810  	var (
   811  		// Track the block number of the requested root hash
   812  		rootNumber uint64 // (no root == always 0)
   813  
    814  		// Retrieve the last pivot block to short-circuit rollbacks beyond it,
    815  		// and the current freezer limit to start nuking if it's underflown.
   816  		pivot = rawdb.ReadLastPivotNumber(bc.db)
   817  	)
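         	// updateFn rewinds the head block (and the head snap-sync block) down to
         	// the given target header, returning the new head header and a flag
         	// indicating whether the ancient chain segment beyond it should be wiped.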
   818  	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
   819  		// Rewind the blockchain, ensuring we don't end up with a stateless head
   820  		// block. Note, depth equality is permitted to allow using SetHead as a
   821  		// chain reparation mechanism without deleting any data!
   822  		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.Number.Uint64() {
   823  			var newHeadBlock *types.Header
   824  			newHeadBlock, rootNumber = bc.rewindHead(header, root)
   825  			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
   826  
   827  			// Degrade the chain markers if they are explicitly reverted.
   828  			// In theory we should update all in-memory markers in the
   829  			// last step, however the direction of SetHead is from high
   830  			// to low, so it's safe to update in-memory markers directly.
   831  			bc.currentBlock.Store(newHeadBlock)
   832  			headBlockGauge.Update(int64(newHeadBlock.Number.Uint64()))
   833  
   834  			// The head state is missing, which is only possible in the path-based
   835  			// scheme. This situation occurs when the chain head is rewound below
   836  			// the pivot point. In this scenario, there is no possible recovery
   837  			// approach except for rerunning a snap sync. Do nothing here until the
   838  			// state syncer picks it up.
   839  			if !bc.HasState(newHeadBlock.Root) {
   840  				if newHeadBlock.Number.Uint64() != 0 {
   841  					log.Crit("Chain is stateless at a non-genesis block")
   842  				}
   843  				log.Info("Chain is stateless, wait state sync", "number", newHeadBlock.Number, "hash", newHeadBlock.Hash())
   844  			}
   845  		}
    846  		// Rewind the snap block in a simplistic way to the target head
   847  		if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() {
   848  			newHeadSnapBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
    849  			// If the target block is missing, reset to the genesis state
   850  			if newHeadSnapBlock == nil {
   851  				newHeadSnapBlock = bc.genesisBlock
   852  			}
   853  			rawdb.WriteHeadFastBlockHash(db, newHeadSnapBlock.Hash())
   854  
   855  			// Degrade the chain markers if they are explicitly reverted.
   856  			// In theory we should update all in-memory markers in the
   857  			// last step, however the direction of SetHead is from high
    858  			// to low, so it's safe to update the in-memory markers directly.
   859  			bc.currentSnapBlock.Store(newHeadSnapBlock.Header())
   860  			headFastBlockGauge.Update(int64(newHeadSnapBlock.NumberU64()))
   861  		}
   862  		var (
   863  			headHeader = bc.CurrentBlock()
   864  			headNumber = headHeader.Number.Uint64()
   865  		)
    866  		// If setHead underflowed the freezer threshold and the block processing
    867  		// intent afterwards is full block importing, delete the chain segment
    868  		// between the stateful block and the sethead target.
   869  		var wipe bool
   870  		frozen, _ := bc.db.Ancients()
   871  		if headNumber+1 < frozen {
   872  			wipe = pivot == nil || headNumber >= *pivot
   873  		}
   874  		return headHeader, wipe // Only force wipe if full synced
   875  	}
   876  	// Rewind the header chain, deleting all block bodies until then
   877  	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
   878  		// Ignore the error here since light client won't hit this path
   879  		frozen, _ := bc.db.Ancients()
   880  		if num+1 <= frozen {
    881  			// Truncate all related data (header, total difficulty, body, receipt
    882  			// and canonical hash) from the ancient store.
   883  			if _, err := bc.db.TruncateHead(num); err != nil {
   884  				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
   885  			}
   886  			// Remove the hash <-> number mapping from the active store.
   887  			rawdb.DeleteHeaderNumber(db, hash)
   888  		} else {
    889  			// Remove the related body and receipts from the active store.
   890  			// The header, total difficulty and canonical hash will be
   891  			// removed in the hc.SetHead function.
   892  			rawdb.DeleteBody(db, hash, num)
   893  			rawdb.DeleteReceipts(db, hash, num)
   894  		}
   895  		// Todo(rjl493456442) txlookup, bloombits, etc
   896  	}
   897  	// If SetHead was only called as a chain reparation method, try to skip
   898  	// touching the header chain altogether, unless the freezer is broken
   899  	if repair {
   900  		if target, force := updateFn(bc.db, bc.CurrentBlock()); force {
   901  			bc.hc.SetHead(target.Number.Uint64(), nil, delFn)
   902  		}
   903  	} else {
   904  		// Rewind the chain to the requested head and keep going backwards until a
   905  		// block with a state is found or snap sync pivot is passed
   906  		if time > 0 {
   907  			log.Warn("Rewinding blockchain to timestamp", "target", time)
   908  			bc.hc.SetHeadWithTimestamp(time, updateFn, delFn)
   909  		} else {
   910  			log.Warn("Rewinding blockchain to block", "target", head)
   911  			bc.hc.SetHead(head, updateFn, delFn)
   912  		}
   913  	}
   914  	// Clear out any stale content from the caches
   915  	bc.bodyCache.Purge()
   916  	bc.bodyRLPCache.Purge()
   917  	bc.receiptsCache.Purge()
   918  	bc.blockCache.Purge()
   919  	bc.txLookupCache.Purge()
   920  
   921  	// Clear safe block, finalized block if needed
   922  	if safe := bc.CurrentSafeBlock(); safe != nil && head < safe.Number.Uint64() {
   923  		log.Warn("SetHead invalidated safe block")
   924  		bc.SetSafe(nil)
   925  	}
   926  	if finalized := bc.CurrentFinalBlock(); finalized != nil && head < finalized.Number.Uint64() {
   927  		log.Error("SetHead invalidated finalized block")
   928  		bc.SetFinalized(nil)
   929  	}
   930  	return rootNumber, bc.loadLastState()
   931  }
   932  
    933  // SnapSyncCommitHead sets the current head block to the one defined by the hash,
    934  // irrespective of what the chain contents were prior.
   935  func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
    936  	// Make sure that both the block as well as its state trie exist
   937  	block := bc.GetBlockByHash(hash)
   938  	if block == nil {
   939  		return fmt.Errorf("non existent block [%x..]", hash[:4])
   940  	}
   941  	// Reset the trie database with the fresh snap synced state.
   942  	root := block.Root()
   943  	if bc.triedb.Scheme() == rawdb.PathScheme {
   944  		if err := bc.triedb.Enable(root); err != nil {
   945  			return err
   946  		}
   947  	}
   948  	if !bc.HasState(root) {
   949  		return fmt.Errorf("non existent state [%x..]", root[:4])
   950  	}
   951  	// If all checks out, manually set the head block.
   952  	if !bc.chainmu.TryLock() {
   953  		return errChainStopped
   954  	}
   955  	bc.currentBlock.Store(block.Header())
   956  	headBlockGauge.Update(int64(block.NumberU64()))
   957  	bc.chainmu.Unlock()
   958  
   959  	// Destroy any existing state snapshot and regenerate it in the background,
   960  	// also resuming the normal maintenance of any previously paused snapshot.
   961  	if bc.snaps != nil {
   962  		bc.snaps.Rebuild(root)
   963  	}
   964  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   965  	return nil
   966  }
   967  
   968  // Reset purges the entire blockchain, restoring it to its genesis state.
   969  func (bc *BlockChain) Reset() error {
   970  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   971  }
   972  
   973  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   974  // specified genesis state.
   975  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   976  	// Dump the entire block chain and purge the caches
   977  	if err := bc.SetHead(0); err != nil {
   978  		return err
   979  	}
   980  	if !bc.chainmu.TryLock() {
   981  		return errChainStopped
   982  	}
   983  	defer bc.chainmu.Unlock()
   984  
   985  	// Prepare the genesis block and reinitialise the chain
   986  	batch := bc.db.NewBatch()
   987  	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
   988  	rawdb.WriteBlock(batch, genesis)
   989  	if err := batch.Write(); err != nil {
   990  		log.Crit("Failed to write genesis block", "err", err)
   991  	}
   992  	bc.writeHeadBlock(genesis)
   993  
    994  	// Lastly, update all in-memory chain markers
   995  	bc.genesisBlock = genesis
   996  	bc.currentBlock.Store(bc.genesisBlock.Header())
   997  	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
   998  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   999  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
  1000  	bc.currentSnapBlock.Store(bc.genesisBlock.Header())
  1001  	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
  1002  	return nil
  1003  }
  1004  
  1005  // Export writes the active chain to the given writer.
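         //
         // A usage sketch (the output file name is hypothetical):
         //
         //	f, err := os.Create("chain.rlp")
         //	if err != nil {
         //		// handle the error
         //	}
         //	defer f.Close()
         //	if err := bc.Export(f); err != nil {
         //		// handle the export error
         //	}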
  1006  func (bc *BlockChain) Export(w io.Writer) error {
  1007  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().Number.Uint64())
  1008  }
  1009  
  1010  // ExportN writes a subset of the active chain to the given writer.
  1011  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
  1012  	if first > last {
  1013  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
  1014  	}
  1015  	log.Info("Exporting batch of blocks", "count", last-first+1)
  1016  
  1017  	var (
  1018  		parentHash common.Hash
  1019  		start      = time.Now()
  1020  		reported   = time.Now()
  1021  	)
  1022  	for nr := first; nr <= last; nr++ {
  1023  		block := bc.GetBlockByNumber(nr)
  1024  		if block == nil {
  1025  			return fmt.Errorf("export failed on #%d: not found", nr)
  1026  		}
  1027  		if nr > first && block.ParentHash() != parentHash {
  1028  			return errors.New("export failed: chain reorg during export")
  1029  		}
  1030  		parentHash = block.Hash()
  1031  		if err := block.EncodeRLP(w); err != nil {
  1032  			return err
  1033  		}
  1034  		if time.Since(reported) >= statsReportLimit {
  1035  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
  1036  			reported = time.Now()
  1037  		}
  1038  	}
  1039  	return nil
  1040  }
  1041  
  1042  // writeHeadBlock injects a new head block into the current block chain. This method
  1043  // assumes that the block is indeed a true head. It will also reset the head
  1044  // header and the head snap sync block to this very same block if they are older
  1045  // or if they are on a different side chain.
  1046  //
   1047  // Note, this function assumes that the `chainmu` mutex is held!
  1048  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
  1049  	// Add the block to the canonical chain number scheme and mark as the head
  1050  	batch := bc.db.NewBatch()
  1051  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
  1052  	rawdb.WriteHeadFastBlockHash(batch, block.Hash())
  1053  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
  1054  	rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1055  	rawdb.WriteHeadBlockHash(batch, block.Hash())
  1056  
  1057  	// Flush the whole batch into the disk, exit the node if failed
  1058  	if err := batch.Write(); err != nil {
  1059  		log.Crit("Failed to update chain indexes and markers", "err", err)
  1060  	}
  1061  	// Update all in-memory chain markers in the last step
  1062  	bc.hc.SetCurrentHeader(block.Header())
  1063  
  1064  	bc.currentSnapBlock.Store(block.Header())
  1065  	headFastBlockGauge.Update(int64(block.NumberU64()))
  1066  
  1067  	bc.currentBlock.Store(block.Header())
  1068  	headBlockGauge.Update(int64(block.NumberU64()))
  1069  }
  1070  
  1071  // stopWithoutSaving stops the blockchain service. If any imports are currently in progress
  1072  // it will abort them using the procInterrupt. This method stops all running
  1073  // goroutines, but does not do all the post-stop work of persisting data.
  1074  // OBS! It is generally recommended to use the Stop method!
  1075  // This method has been exposed to allow tests to stop the blockchain while simulating
  1076  // a crash.
  1077  func (bc *BlockChain) stopWithoutSaving() {
  1078  	if !bc.stopping.CompareAndSwap(false, true) {
  1079  		return
  1080  	}
   1081  	// Signal shutdown to the tx indexer.
  1082  	if bc.txIndexer != nil {
  1083  		bc.txIndexer.close()
  1084  	}
  1085  	// Unsubscribe all subscriptions registered from blockchain.
  1086  	bc.scope.Close()
  1087  
  1088  	// Signal shutdown to all goroutines.
  1089  	close(bc.quit)
  1090  	bc.StopInsert()
  1091  
  1092  	// Now wait for all chain modifications to end and persistent goroutines to exit.
  1093  	//
  1094  	// Note: Close waits for the mutex to become available, i.e. any running chain
  1095  	// modification will have exited when Close returns. Since we also called StopInsert,
  1096  	// the mutex should become available quickly. It cannot be taken again after Close has
  1097  	// returned.
  1098  	bc.chainmu.Close()
  1099  	bc.wg.Wait()
  1100  }
  1101  
  1102  // Stop stops the blockchain service. If any imports are currently in progress
  1103  // it will abort them using the procInterrupt.
  1104  func (bc *BlockChain) Stop() {
  1105  	bc.stopWithoutSaving()
  1106  
  1107  	// Ensure that the entirety of the state snapshot is journaled to disk.
  1108  	var snapBase common.Hash
  1109  	if bc.snaps != nil {
  1110  		var err error
  1111  		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root); err != nil {
  1112  			log.Error("Failed to journal state snapshot", "err", err)
  1113  		}
  1114  		bc.snaps.Release()
  1115  	}
  1116  	if bc.triedb.Scheme() == rawdb.PathScheme {
  1117  		// Ensure that the in-memory trie nodes are journaled to disk properly.
  1118  		if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil {
  1119  			log.Info("Failed to journal in-memory trie nodes", "err", err)
  1120  		}
  1121  	} else {
  1122  		// Ensure the state of a recent block is also stored to disk before exiting.
  1123  		// We're writing three different states to catch different restart scenarios:
  1124  		//  - HEAD:     So we don't need to reprocess any blocks in the general case
  1125  		//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
  1126  		//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
  1127  		if !bc.cacheConfig.TrieDirtyDisabled {
  1128  			triedb := bc.triedb
  1129  
  1130  			for _, offset := range []uint64{0, 1, state.TriesInMemory - 1} {
  1131  				if number := bc.CurrentBlock().Number.Uint64(); number > offset {
  1132  					recent := bc.GetBlockByNumber(number - offset)
  1133  
  1134  					log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
  1135  					if err := triedb.Commit(recent.Root(), true); err != nil {
  1136  						log.Error("Failed to commit recent state trie", "err", err)
  1137  					}
  1138  				}
  1139  			}
  1140  			if snapBase != (common.Hash{}) {
  1141  				log.Info("Writing snapshot state to disk", "root", snapBase)
  1142  				if err := triedb.Commit(snapBase, true); err != nil {
  1143  					log.Error("Failed to commit recent state trie", "err", err)
  1144  				}
  1145  			}
  1146  			for !bc.triegc.Empty() {
  1147  				triedb.Dereference(bc.triegc.PopItem())
  1148  			}
  1149  			if _, nodes, _ := triedb.Size(); nodes != 0 { // all memory is contained within the nodes returned for hashdb
  1150  				log.Error("Dangling trie nodes after full cleanup")
  1151  			}
  1152  		}
  1153  	}
  1154  	// Allow tracers to clean-up and release resources.
  1155  	if bc.logger != nil && bc.logger.OnClose != nil {
  1156  		bc.logger.OnClose()
  1157  	}
  1158  	// Close the trie database, release all the held resources as the last step.
  1159  	if err := bc.triedb.Close(); err != nil {
  1160  		log.Error("Failed to close trie database", "err", err)
  1161  	}
  1162  	log.Info("Blockchain stopped")
  1163  }
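
// A worked example (not part of the original file) of the hashdb shutdown
// commits above, assuming the default state.TriesInMemory of 128 so that the
// offsets are 0, 1 and 127: at head block 10000 the states persisted are those
// of blocks 10000, 9999 and 9873. The function name is hypothetical.
//
//	func shutdownCommitTargets(head uint64) []uint64 {
//		var numbers []uint64
//		for _, offset := range []uint64{0, 1, state.TriesInMemory - 1} {
//			if head > offset {
//				numbers = append(numbers, head-offset) // e.g. 10000, 9999, 9873
//			}
//		}
//		return numbers
//	}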
  1164  
  1165  // StopInsert interrupts all insertion methods, causing them to return
  1166  // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
  1167  // calling this method.
  1168  func (bc *BlockChain) StopInsert() {
  1169  	bc.procInterrupt.Store(true)
  1170  }
  1171  
  1172  // insertStopped returns true after StopInsert has been called.
  1173  func (bc *BlockChain) insertStopped() bool {
  1174  	return bc.procInterrupt.Load()
  1175  }
  1176  
  1177  // WriteStatus is the status of a block write operation.
  1178  type WriteStatus byte
  1179  
  1180  const (
  1181  	NonStatTy WriteStatus = iota
  1182  	CanonStatTy
  1183  	SideStatTy
  1184  )
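
// A minimal sketch (not part of the original file) of how a caller might act
// on a WriteStatus value; the function name reportWriteStatus is hypothetical.
//
//	func reportWriteStatus(status WriteStatus, block *types.Block) {
//		switch status {
//		case CanonStatTy:
//			log.Debug("Block became part of the canonical chain", "number", block.Number())
//		case SideStatTy:
//			log.Debug("Block stored as a side chain block", "number", block.Number())
//		case NonStatTy:
//			log.Debug("Block was not written", "number", block.Number())
//		}
//	}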
  1185  
  1186  // InsertReceiptChain attempts to complete an already existing header chain with
  1187  // transaction and receipt data.
  1188  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
  1189  	// We don't require the chainMu here since we want to maximize the
  1190  	// concurrency of header insertion and receipt insertion.
  1191  	bc.wg.Add(1)
  1192  	defer bc.wg.Done()
  1193  
  1194  	var (
  1195  		ancientBlocks, liveBlocks     types.Blocks
  1196  		ancientReceipts, liveReceipts []types.Receipts
  1197  	)
  1198  	// Do a sanity check that the provided chain is actually ordered and linked
  1199  	for i, block := range blockChain {
  1200  		if i != 0 {
  1201  			prev := blockChain[i-1]
  1202  			if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1203  				log.Error("Non contiguous receipt insert",
  1204  					"number", block.Number(), "hash", block.Hash(), "parent", block.ParentHash(),
  1205  					"prevnumber", prev.Number(), "prevhash", prev.Hash())
  1206  				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])",
  1207  					i-1, prev.NumberU64(), prev.Hash().Bytes()[:4],
  1208  					i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1209  			}
  1210  		}
  1211  		if block.NumberU64() <= ancientLimit {
  1212  			ancientBlocks, ancientReceipts = append(ancientBlocks, block), append(ancientReceipts, receiptChain[i])
  1213  		} else {
  1214  			liveBlocks, liveReceipts = append(liveBlocks, block), append(liveReceipts, receiptChain[i])
  1215  		}
  1216  
  1217  		// Here we also validate that blob transactions in the block do not contain a sidecar.
  1218  		// While the sidecar does not affect the block hash / tx hash, sending blobs within a block is not allowed.
  1219  		for txIndex, tx := range block.Transactions() {
  1220  			if tx.Type() == types.BlobTxType && tx.BlobTxSidecar() != nil {
  1221  				return 0, fmt.Errorf("block #%d contains unexpected blob sidecar in tx at index %d", block.NumberU64(), txIndex)
  1222  			}
  1223  		}
  1224  	}
  1225  
  1226  	var (
  1227  		stats = struct{ processed, ignored int32 }{}
  1228  		start = time.Now()
  1229  		size  = int64(0)
  1230  	)
  1231  
  1232  	// updateHead updates the head snap sync block if the inserted blocks are better
  1233  	// and returns an indicator of whether the inserted blocks are canonical.
  1234  	updateHead := func(head *types.Block) bool {
  1235  		if !bc.chainmu.TryLock() {
  1236  			return false
  1237  		}
  1238  		defer bc.chainmu.Unlock()
  1239  
  1240  		// Rewind may have occurred, skip in that case.
  1241  		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
  1242  			reorg, err := bc.forker.ReorgNeeded(bc.CurrentSnapBlock(), head.Header())
  1243  			if err != nil {
  1244  				log.Warn("Reorg failed", "err", err)
  1245  				return false
  1246  			} else if !reorg {
  1247  				return false
  1248  			}
  1249  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
  1250  			bc.currentSnapBlock.Store(head.Header())
  1251  			headFastBlockGauge.Update(int64(head.NumberU64()))
  1252  			return true
  1253  		}
  1254  		return false
  1255  	}
  1256  	// writeAncient writes the block chain and the corresponding receipt chain into the ancient store.
  1257  	//
  1258  	// This function only accepts canonical chain data; all side chains will be reverted
  1259  	// eventually.
  1260  	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1261  		first := blockChain[0]
  1262  		last := blockChain[len(blockChain)-1]
  1263  
  1264  		// Ensure genesis is in ancients.
  1265  		if first.NumberU64() == 1 {
  1266  			if frozen, _ := bc.db.Ancients(); frozen == 0 {
  1267  				td := bc.genesisBlock.Difficulty()
  1268  				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
  1269  				if err != nil {
  1270  					log.Error("Error writing genesis to ancients", "err", err)
  1271  					return 0, err
  1272  				}
  1273  				size += writeSize
  1274  				log.Info("Wrote genesis to ancients")
  1275  			}
  1276  		}
  1277  		// Before writing the blocks to the ancients, we need to ensure that
  1278  		// they correspond to what the headerchain 'expects'.
  1279  		// We only check the last block/header, since it's a contiguous chain.
  1280  		if !bc.HasHeader(last.Hash(), last.NumberU64()) {
  1281  			return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
  1282  		}
  1283  
  1284  		// Write all chain data to ancients.
  1285  		td := bc.GetTd(first.Hash(), first.NumberU64())
  1286  		writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
  1287  		if err != nil {
  1288  			log.Error("Error importing chain data to ancients", "err", err)
  1289  			return 0, err
  1290  		}
  1291  		size += writeSize
  1292  
  1293  		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1294  		if err := bc.db.Sync(); err != nil {
  1295  			return 0, err
  1296  		}
  1297  		// Update the current snap block because all block data is now present in DB.
  1298  		previousSnapBlock := bc.CurrentSnapBlock().Number.Uint64()
  1299  		if !updateHead(blockChain[len(blockChain)-1]) {
  1300  			// We end up here if the header chain has reorg'ed, and the blocks/receipts
  1301  			// don't match the canonical chain.
  1302  			if _, err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil {
  1303  				log.Error("Can't truncate ancient store after failed insert", "err", err)
  1304  			}
  1305  			return 0, errSideChainReceipts
  1306  		}
  1307  
  1308  		// Delete block data from the main database.
  1309  		var (
  1310  			batch       = bc.db.NewBatch()
  1311  			canonHashes = make(map[common.Hash]struct{}, len(blockChain))
  1312  		)
  1313  		for _, block := range blockChain {
  1314  			canonHashes[block.Hash()] = struct{}{}
  1315  			if block.NumberU64() == 0 {
  1316  				continue
  1317  			}
  1318  			rawdb.DeleteCanonicalHash(batch, block.NumberU64())
  1319  			rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
  1320  		}
  1321  		// Delete side chain hash-to-number mappings.
  1322  		for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
  1323  			if _, canon := canonHashes[nh.Hash]; !canon {
  1324  				rawdb.DeleteHeader(batch, nh.Hash, nh.Number)
  1325  			}
  1326  		}
  1327  		if err := batch.Write(); err != nil {
  1328  			return 0, err
  1329  		}
  1330  		stats.processed += int32(len(blockChain))
  1331  		return 0, nil
  1332  	}
  1333  
  1334  	// writeLive writes the block chain and the corresponding receipt chain into the active store.
  1335  	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1336  		var (
  1337  			skipPresenceCheck = false
  1338  			batch             = bc.db.NewBatch()
  1339  		)
  1340  		for i, block := range blockChain {
  1341  			// Short circuit insertion if shutting down or processing failed
  1342  			if bc.insertStopped() {
  1343  				return 0, errInsertionInterrupted
  1344  			}
  1345  			// Short circuit if the owner header is unknown
  1346  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1347  				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
  1348  			}
  1349  			if !skipPresenceCheck {
  1350  				// Ignore if the entire data is already known
  1351  				if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1352  					stats.ignored++
  1353  					continue
  1354  				} else {
  1355  					// If block N is not present, neither are the later blocks.
  1356  					// This should be true, but if we are mistaken, the shortcut
  1357  					// here will only cause overwriting of some existing data
  1358  					skipPresenceCheck = true
  1359  				}
  1360  			}
  1361  			// Write all the data out into the database
  1362  			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
  1363  			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
  1364  
  1365  			// Write everything that belongs to the blocks into the database, so that
  1366  			// all components of the body (body, receipts) are complete, except for the
  1367  			// transaction indexes (which will be created once sync is finished).
  1368  			if batch.ValueSize() >= ethdb.IdealBatchSize {
  1369  				if err := batch.Write(); err != nil {
  1370  					return 0, err
  1371  				}
  1372  				size += int64(batch.ValueSize())
  1373  				batch.Reset()
  1374  			}
  1375  			stats.processed++
  1376  		}
  1377  		// Write everything that belongs to the blocks into the database, so that
  1378  		// all components of the body (body, receipts, tx indexes) are
  1379  		// complete.
  1380  		if batch.ValueSize() > 0 {
  1381  			size += int64(batch.ValueSize())
  1382  			if err := batch.Write(); err != nil {
  1383  				return 0, err
  1384  			}
  1385  		}
  1386  		updateHead(blockChain[len(blockChain)-1])
  1387  		return 0, nil
  1388  	}
  1389  
  1390  	// Write downloaded chain data and corresponding receipt chain data
  1391  	if len(ancientBlocks) > 0 {
  1392  		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
  1393  			if err == errInsertionInterrupted {
  1394  				return 0, nil
  1395  			}
  1396  			return n, err
  1397  		}
  1398  	}
  1399  	if len(liveBlocks) > 0 {
  1400  		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
  1401  			if err == errInsertionInterrupted {
  1402  				return 0, nil
  1403  			}
  1404  			return n, err
  1405  		}
  1406  	}
  1407  	var (
  1408  		head    = blockChain[len(blockChain)-1]
  1409  		context = []interface{}{
  1410  			"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
  1411  			"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
  1412  			"size", common.StorageSize(size),
  1413  		}
  1414  	)
  1415  	if stats.ignored > 0 {
  1416  		context = append(context, []interface{}{"ignored", stats.ignored}...)
  1417  	}
  1418  	log.Debug("Imported new block receipts", context...)
  1419  
  1420  	return 0, nil
  1421  }
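
// A hedged caller-side sketch (not part of the original file): during snap
// sync, bodies and receipts are back-filled onto an already imported header
// chain via InsertReceiptChain, with everything at or below ancientLimit going
// straight into the freezer. The helper name backfillReceipts is hypothetical.
//
//	func backfillReceipts(bc *BlockChain, blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) error {
//		// blocks must be contiguous and their headers already present in the chain.
//		if n, err := bc.InsertReceiptChain(blocks, receipts, ancientLimit); err != nil {
//			return fmt.Errorf("receipt import failed at index %d: %w", n, err)
//		}
//		return nil
//	}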
  1422  
  1423  // writeBlockWithoutState writes only the block and its metadata to the database,
  1424  // but does not write any state. This is used to construct competing side forks
  1425  // up to the point where they exceed the canonical total difficulty.
  1426  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1427  	if bc.insertStopped() {
  1428  		return errInsertionInterrupted
  1429  	}
  1430  	batch := bc.db.NewBatch()
  1431  	rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
  1432  	rawdb.WriteBlock(batch, block)
  1433  	if err := batch.Write(); err != nil {
  1434  		log.Crit("Failed to write block into disk", "err", err)
  1435  	}
  1436  	return nil
  1437  }
  1438  
  1439  // writeKnownBlock updates the head block flag with a known block
  1440  // and introduces a chain reorg if necessary.
  1441  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1442  	current := bc.CurrentBlock()
  1443  	if block.ParentHash() != current.Hash() {
  1444  		if err := bc.reorg(current, block); err != nil {
  1445  			return err
  1446  		}
  1447  	}
  1448  	bc.writeHeadBlock(block)
  1449  	return nil
  1450  }
  1451  
  1452  // writeBlockWithState writes block, metadata and corresponding state data to the
  1453  // database.
  1454  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error {
  1455  	// Calculate the total difficulty of the block
  1456  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1457  	if ptd == nil {
  1458  		return consensus.ErrUnknownAncestor
  1459  	}
  1460  	// Make sure no inconsistent state is leaked during insertion
  1461  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
  1462  
  1463  	// Irrespective of the canonical status, write the block itself to the database.
  1464  	//
  1465  	// Note that all components of the block (td, hash->number map, header, body, receipts)
  1466  	// should be written atomically; blockBatch is used to contain all of them.
  1467  	blockBatch := bc.db.NewBatch()
  1468  	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
  1469  	rawdb.WriteBlock(blockBatch, block)
  1470  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
  1471  	rawdb.WritePreimages(blockBatch, statedb.Preimages())
  1472  	if err := blockBatch.Write(); err != nil {
  1473  		log.Crit("Failed to write block into disk", "err", err)
  1474  	}
  1475  	// Commit all cached state changes into underlying memory database.
  1476  	root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
  1477  	if err != nil {
  1478  		return err
  1479  	}
  1480  	// If the node is running in path mode, skip the explicit gc operation,
  1481  	// which is unnecessary in this mode.
  1482  	if bc.triedb.Scheme() == rawdb.PathScheme {
  1483  		return nil
  1484  	}
  1485  	// If we're running an archive node, always flush
  1486  	if bc.cacheConfig.TrieDirtyDisabled {
  1487  		return bc.triedb.Commit(root, false)
  1488  	}
  1489  	// Full but not archive node, do proper garbage collection
  1490  	bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
  1491  	bc.triegc.Push(root, -int64(block.NumberU64()))
  1492  
  1493  	// Flush limits are not considered for the first TriesInMemory blocks.
  1494  	current := block.NumberU64()
  1495  	if current <= state.TriesInMemory {
  1496  		return nil
  1497  	}
  1498  	// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1499  	var (
  1500  		_, nodes, imgs = bc.triedb.Size() // all memory is contained within the nodes returned for hashdb
  1501  		limit          = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
  1502  	)
  1503  	if nodes > limit || imgs > 4*1024*1024 {
  1504  		bc.triedb.Cap(limit - ethdb.IdealBatchSize)
  1505  	}
  1506  	// Find the next state trie we need to commit
  1507  	chosen := current - state.TriesInMemory
  1508  	flushInterval := time.Duration(bc.flushInterval.Load())
  1509  	// If we exceeded time allowance, flush an entire trie to disk
  1510  	if bc.gcproc > flushInterval {
  1511  		// If the header is missing (canonical chain behind), we're reorging a low
  1512  		// diff sidechain. Suspend committing until this operation is completed.
  1513  		header := bc.GetHeaderByNumber(chosen)
  1514  		if header == nil {
  1515  			log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
  1516  		} else {
  1517  			// If we're exceeding limits but haven't reached a large enough memory gap,
  1518  			// warn the user that the system is becoming unstable.
  1519  			if chosen < bc.lastWrite+state.TriesInMemory && bc.gcproc >= 2*flushInterval {
  1520  				log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/state.TriesInMemory)
  1521  			}
  1522  			// Flush an entire trie and restart the counters
  1523  			bc.triedb.Commit(header.Root, true)
  1524  			bc.lastWrite = chosen
  1525  			bc.gcproc = 0
  1526  		}
  1527  	}
  1528  	// Garbage collect anything below our required write retention
  1529  	for !bc.triegc.Empty() {
  1530  		root, number := bc.triegc.Pop()
  1531  		if uint64(-number) > chosen {
  1532  			bc.triegc.Push(root, number)
  1533  			break
  1534  		}
  1535  		bc.triedb.Dereference(root)
  1536  	}
  1537  	return nil
  1538  }
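
// A worked example (not part of the original file) of the flush bookkeeping
// above, assuming the default TrieDirtyLimit of 256 (MB) and TriesInMemory of
// 128: the in-memory dirty-node cap is 256 MiB minus one ideal batch, and a
// node at block 1000 would next consider committing the trie of block 872.
// The function name flushTargets is hypothetical.
//
//	func flushTargets(current uint64, dirtyLimitMB int) (chosen uint64, nodeCap common.StorageSize) {
//		nodeCap = common.StorageSize(dirtyLimitMB)*1024*1024 - ethdb.IdealBatchSize
//		chosen = current - state.TriesInMemory // e.g. 1000 - 128 = 872
//		return chosen, nodeCap
//	}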
  1539  
  1540  // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
  1541  // This function expects the chain mutex to be held.
  1542  func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1543  	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
  1544  		return NonStatTy, err
  1545  	}
  1546  	currentBlock := bc.CurrentBlock()
  1547  	reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
  1548  	if err != nil {
  1549  		return NonStatTy, err
  1550  	}
  1551  	if reorg {
  1552  		// Reorganise the chain if the parent is not the head block
  1553  		if block.ParentHash() != currentBlock.Hash() {
  1554  			if err := bc.reorg(currentBlock, block); err != nil {
  1555  				return NonStatTy, err
  1556  			}
  1557  		}
  1558  		status = CanonStatTy
  1559  	} else {
  1560  		status = SideStatTy
  1561  	}
  1562  	// Set new head.
  1563  	if status == CanonStatTy {
  1564  		bc.writeHeadBlock(block)
  1565  	}
  1566  	if status == CanonStatTy {
  1567  		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
  1568  		if len(logs) > 0 {
  1569  			bc.logsFeed.Send(logs)
  1570  		}
  1571  		// In theory, we should fire a ChainHeadEvent when we inject
  1572  		// a canonical block, but sometimes we insert a whole batch of
  1573  		// canonical blocks. To avoid firing too many ChainHeadEvents,
  1574  		// we fire a single accumulated ChainHeadEvent afterwards and
  1575  		// suppress the per-block event here.
  1576  		if emitHeadEvent {
  1577  			bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1578  		}
  1579  	} else {
  1580  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1581  	}
  1582  	return status, nil
  1583  }
  1584  
  1585  // InsertChain attempts to insert the given batch of blocks into the canonical
  1586  // chain or, otherwise, create a fork. If an error is returned, it will return
  1587  // the index number of the failing block as well as an error describing what went
  1588  // wrong. After insertion is done, all accumulated events will be fired.
  1589  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1590  	// Sanity check that we have something meaningful to import
  1591  	if len(chain) == 0 {
  1592  		return 0, nil
  1593  	}
  1594  	bc.blockProcFeed.Send(true)
  1595  	defer bc.blockProcFeed.Send(false)
  1596  
  1597  	// Do a sanity check that the provided chain is actually ordered and linked.
  1598  	for i := 1; i < len(chain); i++ {
  1599  		block, prev := chain[i], chain[i-1]
  1600  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1601  			log.Error("Non contiguous block insert",
  1602  				"number", block.Number(),
  1603  				"hash", block.Hash(),
  1604  				"parent", block.ParentHash(),
  1605  				"prevnumber", prev.Number(),
  1606  				"prevhash", prev.Hash(),
  1607  			)
  1608  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(),
  1609  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1610  		}
  1611  	}
  1612  	// Pre-checks passed, start the full block imports
  1613  	if !bc.chainmu.TryLock() {
  1614  		return 0, errChainStopped
  1615  	}
  1616  	defer bc.chainmu.Unlock()
  1617  	return bc.insertChain(chain, true)
  1618  }
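
// A hedged caller-side sketch (not part of the original file): importing a
// contiguous batch and using the returned index to report the offending block.
// The helper name importBatch is hypothetical.
//
//	func importBatch(bc *BlockChain, blocks types.Blocks) error {
//		i, err := bc.InsertChain(blocks)
//		if err != nil && i < len(blocks) {
//			return fmt.Errorf("import failed at block #%d: %w", blocks[i].NumberU64(), err)
//		}
//		return err
//	}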
  1619  
  1620  // insertChain is the internal implementation of InsertChain, which assumes that
  1621  // 1) chains are contiguous, and 2) the chain mutex is held.
  1622  //
  1623  // This method is split out so that import batches that require re-injecting
  1624  // historical blocks can do so without releasing the lock, which could lead to
  1625  // racy behaviour. If a sidechain import is in progress, and the historic state
  1626  // is imported, but then a new canonical head is added before the actual sidechain
  1627  // completes, then the historic state could be pruned again.
  1628  func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) {
  1629  	// If the chain is terminating, don't even bother starting up.
  1630  	if bc.insertStopped() {
  1631  		return 0, nil
  1632  	}
  1633  
  1634  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1635  	SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
  1636  
  1637  	var (
  1638  		stats     = insertStats{startTime: mclock.Now()}
  1639  		lastCanon *types.Block
  1640  	)
  1641  	// Fire a single chain head event if we've progressed the chain
  1642  	defer func() {
  1643  		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1644  			bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
  1645  		}
  1646  	}()
  1647  	// Start the parallel header verifier
  1648  	headers := make([]*types.Header, len(chain))
  1649  	for i, block := range chain {
  1650  		headers[i] = block.Header()
  1651  	}
  1652  	abort, results := bc.engine.VerifyHeaders(bc, headers)
  1653  	defer close(abort)
  1654  
  1655  	// Peek the error for the first block to decide the direction of the import logic
  1656  	it := newInsertIterator(chain, results, bc.validator)
  1657  	block, err := it.next()
  1658  
  1659  	// Left-trim all the known blocks that don't need to build snapshot
  1660  	if bc.skipBlock(err, it) {
  1661  		// First block (and state) is known
  1662  		//   1. We did a roll-back, and should now do a re-import
  1663  		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
  1664  		//      from the canonical chain, which has not been verified.
  1665  		// Skip all known blocks that are behind us.
  1666  		var (
  1667  			reorg   bool
  1668  			current = bc.CurrentBlock()
  1669  		)
  1670  		for block != nil && bc.skipBlock(err, it) {
  1671  			reorg, err = bc.forker.ReorgNeeded(current, block.Header())
  1672  			if err != nil {
  1673  				return it.index, err
  1674  			}
  1675  			if reorg {
  1676  				// Switch to import mode if the forker says the reorg is necessary
  1677  				// and also the block is not on the canonical chain.
  1678  				// In eth2 the forker always returns true for reorg decision (blindly trusting
  1679  				// the external consensus engine), but in order to prevent the unnecessary
  1680  				// reorgs when importing known blocks, the special case is handled here.
  1681  				if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
  1682  					break
  1683  				}
  1684  			}
  1685  			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
  1686  			stats.ignored++
  1687  
  1688  			block, err = it.next()
  1689  		}
  1690  		// The remaining blocks are still known blocks; the only scenario here is:
  1691  		// during snap sync, the pivot point was already submitted but a rollback
  1692  		// happened. The node then reset the head full block to a lower height via `rollback`
  1693  		// and left a few known blocks in the database.
  1694  		//
  1695  		// When the node runs snap sync again, it can re-import a batch of known blocks via
  1696  		// `insertChain` while some of them have a higher total difficulty than the current
  1697  		// head full block (the new pivot point).
  1698  		for block != nil && bc.skipBlock(err, it) {
  1699  			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
  1700  			if err := bc.writeKnownBlock(block); err != nil {
  1701  				return it.index, err
  1702  			}
  1703  			lastCanon = block
  1704  
  1705  			block, err = it.next()
  1706  		}
  1707  		// Falls through to the block import
  1708  	}
  1709  	switch {
  1710  	// First block is pruned
  1711  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1712  		if setHead {
  1713  			// First block is pruned, insert as sidechain and reorg only if TD grows enough
  1714  			log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
  1715  			return bc.insertSideChain(block, it)
  1716  		} else {
  1717  			// We're post-merge and the parent is pruned, try to recover the parent state
  1718  			log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
  1719  			_, err := bc.recoverAncestors(block)
  1720  			return it.index, err
  1721  		}
  1722  	// Some other error (except ErrKnownBlock) occurred, abort.
  1723  	// ErrKnownBlock is allowed here since some known blocks
  1724  	// still need re-execution to generate snapshots that are missing.
  1725  	case err != nil && !errors.Is(err, ErrKnownBlock):
  1726  		stats.ignored += len(it.chain)
  1727  		bc.reportBlock(block, nil, err)
  1728  		return it.index, err
  1729  	}
  1730  	// No validation errors for the first block (or chain prefix skipped)
  1731  	var activeState *state.StateDB
  1732  	defer func() {
  1733  		// The chain importer is starting and stopping trie prefetchers. If a bad
  1734  		// block or other error is hit however, an early return may not properly
  1735  		// terminate the background threads. This defer ensures that we clean up
  1736  		// any dangling prefetcher, without deferring each one and holding on to live refs.
  1737  		if activeState != nil {
  1738  			activeState.StopPrefetcher()
  1739  		}
  1740  	}()
  1741  
  1742  	for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
  1743  		// If the chain is terminating, stop processing blocks
  1744  		if bc.insertStopped() {
  1745  			log.Debug("Abort during block processing")
  1746  			break
  1747  		}
  1748  		// If the block is known (in the middle of the chain), it's a special case for
  1749  		// Clique blocks where they can share state among each other, so importing an
  1750  		// older block might complete the state of the subsequent one. In this case,
  1751  		// just skip the block (we already validated it once fully (and crashed), since
  1752  		// its header and body were already in the database). But if the corresponding
  1753  		// snapshot layer is missing, forcibly rerun the execution to build it.
  1754  		if bc.skipBlock(err, it) {
  1755  			logger := log.Debug
  1756  			if bc.chainConfig.Clique == nil {
  1757  				logger = log.Warn
  1758  			}
  1759  			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
  1760  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1761  				"root", block.Root())
  1762  
  1763  			// Special case: commit the empty receipt slice if we meet a known
  1764  			// block in the middle. This can only happen on a clique chain. Whenever
  1765  			// we insert blocks via `insertSideChain`, we only commit `td`, `header`
  1766  			// and `body` if they are non-existent. Since we don't have receipts without
  1767  			// re-execution, there is nothing to commit. But if the sidechain is eventually
  1768  			// adopted as the canonical chain, it needs to be re-executed for the missing
  1769  			// state; in this special case (re-execution skipped) we would otherwise lose
  1770  			// the empty receipt entry.
  1771  			if len(block.Transactions()) == 0 {
  1772  				rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
  1773  			} else {
  1774  				log.Error("Please file an issue, skip known block execution without receipt",
  1775  					"hash", block.Hash(), "number", block.NumberU64())
  1776  			}
  1777  			if err := bc.writeKnownBlock(block); err != nil {
  1778  				return it.index, err
  1779  			}
  1780  			stats.processed++
  1781  			if bc.logger != nil && bc.logger.OnSkippedBlock != nil {
  1782  				bc.logger.OnSkippedBlock(tracing.BlockEvent{
  1783  					Block:     block,
  1784  					TD:        bc.GetTd(block.ParentHash(), block.NumberU64()-1),
  1785  					Finalized: bc.CurrentFinalBlock(),
  1786  					Safe:      bc.CurrentSafeBlock(),
  1787  				})
  1788  			}
  1789  
  1790  			// We can assume that logs are empty here, since the only way for consecutive
  1791  			// Clique blocks to have the same state is if there are no transactions.
  1792  			lastCanon = block
  1793  			continue
  1794  		}
  1795  
  1796  		// Retrieve the parent block and its state to execute on top
  1797  		start := time.Now()
  1798  		parent := it.previous()
  1799  		if parent == nil {
  1800  			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1801  		}
  1802  		statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
  1803  		if err != nil {
  1804  			return it.index, err
  1805  		}
  1806  		statedb.SetLogger(bc.logger)
  1807  
  1808  		// Enable prefetching to pull in trie node paths while processing transactions
  1809  		statedb.StartPrefetcher("chain")
  1810  		activeState = statedb
  1811  
  1812  		// If we have a followup block, run that against the current state to pre-cache
  1813  		// transactions and probabilistically some of the account/storage trie nodes.
  1814  		var followupInterrupt atomic.Bool
  1815  		if !bc.cacheConfig.TrieCleanNoPrefetch {
  1816  			if followup, err := it.peek(); followup != nil && err == nil {
  1817  				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
  1818  
  1819  				go func(start time.Time, followup *types.Block, throwaway *state.StateDB) {
  1820  					// Disable tracing for prefetcher executions.
  1821  					vmCfg := bc.vmConfig
  1822  					vmCfg.Tracer = nil
  1823  					bc.prefetcher.Prefetch(followup, throwaway, vmCfg, &followupInterrupt)
  1824  
  1825  					blockPrefetchExecuteTimer.Update(time.Since(start))
  1826  					if followupInterrupt.Load() {
  1827  						blockPrefetchInterruptMeter.Mark(1)
  1828  					}
  1829  				}(time.Now(), followup, throwaway)
  1830  			}
  1831  		}
  1832  
  1833  		// The traced section of block import.
  1834  		res, err := bc.processBlock(block, statedb, start, setHead)
  1835  		followupInterrupt.Store(true)
  1836  		if err != nil {
  1837  			return it.index, err
  1838  		}
  1839  		// Report the import stats before returning the various results
  1840  		stats.processed++
  1841  		stats.usedGas += res.usedGas
  1842  
  1843  		var snapDiffItems, snapBufItems common.StorageSize
  1844  		if bc.snaps != nil {
  1845  			snapDiffItems, snapBufItems = bc.snaps.Size()
  1846  		}
  1847  		trieDiffNodes, trieBufNodes, _ := bc.triedb.Size()
  1848  		stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, setHead)
  1849  
  1850  		if !setHead {
  1851  			// After merge we expect few side chains. Simply count
  1852  			// all blocks the CL gives us for GC processing time
  1853  			bc.gcproc += res.procTime
  1854  			return it.index, nil // Direct block insertion of a single block
  1855  		}
  1856  		switch res.status {
  1857  		case CanonStatTy:
  1858  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1859  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1860  				"elapsed", common.PrettyDuration(time.Since(start)),
  1861  				"root", block.Root())
  1862  
  1863  			lastCanon = block
  1864  
  1865  			// Only count canonical blocks for GC processing time
  1866  			bc.gcproc += res.procTime
  1867  
  1868  		case SideStatTy:
  1869  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1870  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1871  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1872  				"root", block.Root())
  1873  
  1874  		default:
  1875  			// This in theory is impossible, but let's be nice to our future selves and leave
  1876  			// a log, instead of trying to track down block imports that don't emit logs.
  1877  			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
  1878  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1879  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1880  				"root", block.Root())
  1881  		}
  1882  	}
  1883  	stats.ignored += it.remaining()
  1884  	return it.index, err
  1885  }
  1886  
  1887  // blockProcessingResult is a summary of block processing
  1888  // used for updating the stats.
  1889  type blockProcessingResult struct {
  1890  	usedGas  uint64
  1891  	procTime time.Duration
  1892  	status   WriteStatus
  1893  }
  1894  
  1895  // processBlock executes and validates the given block. If there was no error
  1896  // it writes the block and associated state to database.
  1897  func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, start time.Time, setHead bool) (_ *blockProcessingResult, blockEndErr error) {
  1898  	if bc.logger != nil && bc.logger.OnBlockStart != nil {
  1899  		td := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1900  		bc.logger.OnBlockStart(tracing.BlockEvent{
  1901  			Block:     block,
  1902  			TD:        td,
  1903  			Finalized: bc.CurrentFinalBlock(),
  1904  			Safe:      bc.CurrentSafeBlock(),
  1905  		})
  1906  	}
  1907  	if bc.logger != nil && bc.logger.OnBlockEnd != nil {
  1908  		defer func() {
  1909  			bc.logger.OnBlockEnd(blockEndErr)
  1910  		}()
  1911  	}
  1912  
  1913  	// Process block using the parent state as reference point
  1914  	pstart := time.Now()
  1915  	receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
  1916  	if err != nil {
  1917  		bc.reportBlock(block, receipts, err)
  1918  		return nil, err
  1919  	}
  1920  	ptime := time.Since(pstart)
  1921  
  1922  	vstart := time.Now()
  1923  	if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1924  		bc.reportBlock(block, receipts, err)
  1925  		return nil, err
  1926  	}
  1927  	vtime := time.Since(vstart)
  1928  	proctime := time.Since(start) // processing + validation
  1929  
  1930  	// Update the metrics touched during block processing and validation
  1931  	accountReadTimer.Update(statedb.AccountReads)                   // Account reads are complete(in processing)
  1932  	storageReadTimer.Update(statedb.StorageReads)                   // Storage reads are complete(in processing)
  1933  	snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads)   // Account reads are complete(in processing)
  1934  	snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads)   // Storage reads are complete(in processing)
  1935  	accountUpdateTimer.Update(statedb.AccountUpdates)               // Account updates are complete(in validation)
  1936  	storageUpdateTimer.Update(statedb.StorageUpdates)               // Storage updates are complete(in validation)
  1937  	accountHashTimer.Update(statedb.AccountHashes)                  // Account hashes are complete(in validation)
  1938  	triehash := statedb.AccountHashes                               // The time spent on tries hashing
  1939  	trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates   // The time spent on tries update
  1940  	trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
  1941  	trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
  1942  	blockExecutionTimer.Update(ptime - trieRead)                    // The time spent on EVM processing
  1943  	blockValidationTimer.Update(vtime - (triehash + trieUpdate))    // The time spent on block validation
  1944  
  1945  	// Write the block to the chain and get the status.
  1946  	var (
  1947  		wstart = time.Now()
  1948  		status WriteStatus
  1949  	)
  1950  	if !setHead {
  1951  		// Don't set the head, only insert the block
  1952  		err = bc.writeBlockWithState(block, receipts, statedb)
  1953  	} else {
  1954  		status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
  1955  	}
  1956  	if err != nil {
  1957  		return nil, err
  1958  	}
  1959  	// Update the metrics touched during block commit
  1960  	accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
  1961  	storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
  1962  	snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
  1963  	triedbCommitTimer.Update(statedb.TrieDBCommits)     // Trie database commits are complete, we can mark them
  1964  
  1965  	blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
  1966  	blockInsertTimer.UpdateSince(start)
  1967  
  1968  	return &blockProcessingResult{usedGas: usedGas, procTime: proctime, status: status}, nil
  1969  }
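
// A worked example (illustrative numbers only, not part of the original file)
// of the metric split above: if processing took 300ms of which 120ms was trie
// and snapshot reads, and validation took 80ms of which 50ms was hashing plus
// trie updates, then the recorded EVM execution time is 180ms and the recorded
// validation time is 30ms. The function name splitTimers is hypothetical.
//
//	func splitTimers(ptime, trieRead, vtime, triehash, trieUpdate time.Duration) (exec, validate time.Duration) {
//		exec = ptime - trieRead                    // e.g. 300ms - 120ms = 180ms
//		validate = vtime - (triehash + trieUpdate) // e.g. 80ms - 50ms = 30ms
//		return exec, validate
//	}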
  1970  
  1971  // insertSideChain is called when an import batch hits upon a pruned ancestor
  1972  // error, which happens when a sidechain with a sufficiently old fork-block is
  1973  // found.
  1974  //
  1975  // The method writes all (header-and-body-valid) blocks to disk, then tries to
  1976  // switch over to the new chain if the TD exceeded the current chain.
  1977  // insertSideChain is only used pre-merge.
  1978  func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
  1979  	var (
  1980  		externTd  *big.Int
  1981  		lastBlock = block
  1982  		current   = bc.CurrentBlock()
  1983  	)
  1984  	// The first sidechain block error is already verified to be ErrPrunedAncestor.
  1985  	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
  1986  	// ones. Any other errors means that the block is invalid, and should not be written
  1987  	// to disk.
  1988  	err := consensus.ErrPrunedAncestor
  1989  	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
  1990  		// Check the canonical state root for that number
  1991  		if number := block.NumberU64(); current.Number.Uint64() >= number {
  1992  			canonical := bc.GetBlockByNumber(number)
  1993  			if canonical != nil && canonical.Hash() == block.Hash() {
  1994  				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
  1995  
  1996  				// Collect the TD of the block. Since we know it's a canon one,
  1997  				// we can get it directly, and not (like further below) use
  1998  				// the parent and then add the block on top
  1999  				externTd = bc.GetTd(block.Hash(), block.NumberU64())
  2000  				continue
  2001  			}
  2002  			if canonical != nil && canonical.Root() == block.Root() {
  2003  				// This is most likely a shadow-state attack. When a fork is imported into the
  2004  				// database, and it eventually reaches a block height which is not pruned, we
  2005  				// just found that the state already exists! This means that the sidechain block
  2006  				// refers to a state which already exists in our canon chain.
  2007  				//
  2008  				// If left unchecked, we would now proceed importing the blocks, without actually
  2009  				// having verified the state of the previous blocks.
  2010  				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  2011  
  2012  				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
  2013  				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
  2014  				// mechanism.
  2015  				return it.index, errors.New("sidechain ghost-state attack")
  2016  			}
  2017  		}
  2018  		if externTd == nil {
  2019  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  2020  		}
  2021  		externTd = new(big.Int).Add(externTd, block.Difficulty())
  2022  
  2023  		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  2024  			start := time.Now()
  2025  			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
  2026  				return it.index, err
  2027  			}
  2028  			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  2029  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  2030  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  2031  				"root", block.Root())
  2032  		}
  2033  		lastBlock = block
  2034  	}
  2035  	// At this point, we've written all sidechain blocks to database. Loop ended
  2036  	// either on some other error or all were processed. If there was some other
  2037  	// error, we can ignore the rest of those blocks.
  2038  	//
  2039  	// If the externTd was larger than our local TD, we now need to reimport the previous
  2040  	// blocks to regenerate the required state
  2041  	reorg, err := bc.forker.ReorgNeeded(current, lastBlock.Header())
  2042  	if err != nil {
  2043  		return it.index, err
  2044  	}
  2045  	if !reorg {
  2046  		localTd := bc.GetTd(current.Hash(), current.Number.Uint64())
  2047  		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  2048  		return it.index, err
  2049  	}
  2050  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2051  	var (
  2052  		hashes  []common.Hash
  2053  		numbers []uint64
  2054  	)
  2055  	parent := it.previous()
  2056  	for parent != nil && !bc.HasState(parent.Root) {
  2057  		if bc.stateRecoverable(parent.Root) {
  2058  			if err := bc.triedb.Recover(parent.Root); err != nil {
  2059  				return 0, err
  2060  			}
  2061  			break
  2062  		}
  2063  		hashes = append(hashes, parent.Hash())
  2064  		numbers = append(numbers, parent.Number.Uint64())
  2065  
  2066  		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  2067  	}
  2068  	if parent == nil {
  2069  		return it.index, errors.New("missing parent")
  2070  	}
  2071  	// Import all the pruned blocks to make the state available
  2072  	var (
  2073  		blocks []*types.Block
  2074  		memory uint64
  2075  	)
  2076  	for i := len(hashes) - 1; i >= 0; i-- {
  2077  		// Append the next block to our batch
  2078  		block := bc.GetBlock(hashes[i], numbers[i])
  2079  
  2080  		blocks = append(blocks, block)
  2081  		memory += block.Size()
  2082  
  2083  		// If memory use grew too large, import and continue. Sadly we need to discard
  2084  		// all raised events and logs from notifications since we're too heavy on the
  2085  		// memory here.
  2086  		if len(blocks) >= 2048 || memory > 64*1024*1024 {
  2087  			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  2088  			if _, err := bc.insertChain(blocks, true); err != nil {
  2089  				return 0, err
  2090  			}
  2091  			blocks, memory = blocks[:0], 0
  2092  
  2093  			// If the chain is terminating, stop processing blocks
  2094  			if bc.insertStopped() {
  2095  				log.Debug("Abort during blocks processing")
  2096  				return 0, nil
  2097  			}
  2098  		}
  2099  	}
  2100  	if len(blocks) > 0 {
  2101  		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  2102  		return bc.insertChain(blocks, true)
  2103  	}
  2104  	return 0, nil
  2105  }
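
// A worked example (not part of the original file) of the batching rule above:
// a pruned segment is re-imported through insertChain once it accumulates 2048
// blocks or roughly 64 MiB of block data, whichever happens first. The helper
// name shouldFlushSegment is hypothetical.
//
//	func shouldFlushSegment(blocks int, memory uint64) bool {
//		return blocks >= 2048 || memory > 64*1024*1024 // ~64 MiB
//	}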
  2106  
  2107  // recoverAncestors finds the closest ancestor with available state and re-executes
  2108  // all the ancestor blocks after it.
  2109  // recoverAncestors is only used post-merge.
  2110  // We return the hash of the latest block that we could correctly validate.
  2111  func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) {
  2112  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2113  	var (
  2114  		hashes  []common.Hash
  2115  		numbers []uint64
  2116  		parent  = block
  2117  	)
  2118  	for parent != nil && !bc.HasState(parent.Root()) {
  2119  		if bc.stateRecoverable(parent.Root()) {
  2120  			if err := bc.triedb.Recover(parent.Root()); err != nil {
  2121  				return common.Hash{}, err
  2122  			}
  2123  			break
  2124  		}
  2125  		hashes = append(hashes, parent.Hash())
  2126  		numbers = append(numbers, parent.NumberU64())
  2127  		parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  2128  
  2129  		// If the chain is terminating, stop iteration
  2130  		if bc.insertStopped() {
  2131  			log.Debug("Abort during blocks iteration")
  2132  			return common.Hash{}, errInsertionInterrupted
  2133  		}
  2134  	}
  2135  	if parent == nil {
  2136  		return common.Hash{}, errors.New("missing parent")
  2137  	}
  2138  	// Import all the pruned blocks to make the state available
  2139  	for i := len(hashes) - 1; i >= 0; i-- {
  2140  		// If the chain is terminating, stop processing blocks
  2141  		if bc.insertStopped() {
  2142  			log.Debug("Abort during blocks processing")
  2143  			return common.Hash{}, errInsertionInterrupted
  2144  		}
  2145  		var b *types.Block
  2146  		if i == 0 {
  2147  			b = block
  2148  		} else {
  2149  			b = bc.GetBlock(hashes[i], numbers[i])
  2150  		}
  2151  		if _, err := bc.insertChain(types.Blocks{b}, false); err != nil {
  2152  			return b.ParentHash(), err
  2153  		}
  2154  	}
  2155  	return block.Hash(), nil
  2156  }
  2157  
  2158  // collectLogs collects the logs that were generated or removed during
  2159  // the processing of a block. These logs are later announced as deleted or reborn.
  2160  func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
  2161  	var blobGasPrice *big.Int
  2162  	excessBlobGas := b.ExcessBlobGas()
  2163  	if excessBlobGas != nil {
  2164  		blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
  2165  	}
  2166  	receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
  2167  	if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil {
  2168  		log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err)
  2169  	}
  2170  	var logs []*types.Log
  2171  	for _, receipt := range receipts {
  2172  		for _, log := range receipt.Logs {
  2173  			if removed {
  2174  				log.Removed = true
  2175  			}
  2176  			logs = append(logs, log)
  2177  		}
  2178  	}
  2179  	return logs
  2180  }
  2181  
  2182  // reorg takes the heads of an old chain and a new chain, reconstructs the
  2183  // blocks in between, inserts them as part of the new canonical chain, accumulates
  2184  // potentially missing transactions and posts events about them.
  2185  // Note the new head block won't be processed here; callers need to handle it
  2186  // externally.
  2187  func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
  2188  	var (
  2189  		newChain    types.Blocks
  2190  		oldChain    types.Blocks
  2191  		commonBlock *types.Block
  2192  
  2193  		deletedTxs []common.Hash
  2194  		addedTxs   []common.Hash
  2195  	)
  2196  	oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  2197  	if oldBlock == nil {
  2198  		return errors.New("current head block missing")
  2199  	}
  2200  	newBlock := newHead
  2201  
  2202  	// Reduce the longer chain to the same number as the shorter one
  2203  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  2204  		// Old chain is longer, gather all transactions and logs as deleted ones
  2205  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  2206  			oldChain = append(oldChain, oldBlock)
  2207  			for _, tx := range oldBlock.Transactions() {
  2208  				deletedTxs = append(deletedTxs, tx.Hash())
  2209  			}
  2210  		}
  2211  	} else {
  2212  		// New chain is longer, stash all blocks away for subsequent insertion
  2213  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  2214  			newChain = append(newChain, newBlock)
  2215  		}
  2216  	}
  2217  	if oldBlock == nil {
  2218  		return errInvalidOldChain
  2219  	}
  2220  	if newBlock == nil {
  2221  		return errInvalidNewChain
  2222  	}
  2223  	// Both sides of the reorg are at the same number, reduce both until the common
  2224  	// ancestor is found
  2225  	for {
  2226  		// If the common ancestor was found, bail out
  2227  		if oldBlock.Hash() == newBlock.Hash() {
  2228  			commonBlock = oldBlock
  2229  			break
  2230  		}
  2231  		// Remove an old block as well as stash away a new block
  2232  		oldChain = append(oldChain, oldBlock)
  2233  		for _, tx := range oldBlock.Transactions() {
  2234  			deletedTxs = append(deletedTxs, tx.Hash())
  2235  		}
  2236  		newChain = append(newChain, newBlock)
  2237  
  2238  		// Step back with both chains
  2239  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  2240  		if oldBlock == nil {
  2241  			return errInvalidOldChain
  2242  		}
  2243  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  2244  		if newBlock == nil {
  2245  			return errInvalidNewChain
  2246  		}
  2247  	}
  2248  
  2249  	// Ensure the user sees large reorgs
  2250  	if len(oldChain) > 0 && len(newChain) > 0 {
  2251  		logFn := log.Info
  2252  		msg := "Chain reorg detected"
  2253  		if len(oldChain) > 63 {
  2254  			msg = "Large chain reorg detected"
  2255  			logFn = log.Warn
  2256  		}
  2257  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  2258  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  2259  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2260  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  2261  		blockReorgMeter.Mark(1)
  2262  	} else if len(newChain) > 0 {
  2263  		// Special case: in the post-merge stage, the current head can be an
  2264  		// ancestor of the new head while the two blocks are not consecutive.
  2265  		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number(), "hash", newChain[0].Hash())
  2266  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2267  	} else {
  2268  		// len(newChain) == 0 && len(oldChain) > 0
  2269  		// rewind the canonical chain to a lower point.
  2270  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
  2271  	}
  2272  	// Acquire the tx-lookup lock before mutation. This step is essential
  2273  	// as the txlookups should be changed atomically, and all subsequent
  2274  	// reads should be blocked until the mutation is complete.
  2275  	bc.txLookupLock.Lock()
  2276  
  2277  	// Insert the new chain segment in incremental order, from the old
  2278  	// to the new. The new chain head (newChain[0]) is not inserted here,
  2279  	// as it will be handled separately outside of this function
  2280  	for i := len(newChain) - 1; i >= 1; i-- {
  2281  		// Insert the block in the canonical way, re-writing history
  2282  		bc.writeHeadBlock(newChain[i])
  2283  
  2284  		// Collect the new added transactions.
  2285  		for _, tx := range newChain[i].Transactions() {
  2286  			addedTxs = append(addedTxs, tx.Hash())
  2287  		}
  2288  	}
  2289  
  2290  	// Delete useless indexes right now, which include the non-canonical
  2291  	// transaction indexes and the canonical chain indexes above the new head.
  2292  	var (
  2293  		indexesBatch = bc.db.NewBatch()
  2294  		diffs        = types.HashDifference(deletedTxs, addedTxs)
  2295  	)
  2296  	for _, tx := range diffs {
  2297  		rawdb.DeleteTxLookupEntry(indexesBatch, tx)
  2298  	}
  2299  	// Delete all hash markers that are not part of the new canonical chain.
  2300  	// Because the reorg function does not handle the new chain head, all hash
  2301  	// markers greater than or equal to the new chain head should be deleted.
  2302  	number := commonBlock.NumberU64()
  2303  	if len(newChain) > 1 {
  2304  		number = newChain[1].NumberU64()
  2305  	}
  2306  	for i := number + 1; ; i++ {
  2307  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  2308  		if hash == (common.Hash{}) {
  2309  			break
  2310  		}
  2311  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  2312  	}
  2313  	if err := indexesBatch.Write(); err != nil {
  2314  		log.Crit("Failed to delete useless indexes", "err", err)
  2315  	}
  2316  	// Reset the tx lookup cache to clear stale txlookup cache.
  2317  	bc.txLookupCache.Purge()
  2318  
  2319  	// Release the tx-lookup lock after mutation.
  2320  	bc.txLookupLock.Unlock()
  2321  
  2322  	// Send out events for logs from the old canon chain, and 'reborn'
  2323  	// logs from the new canon chain. The number of logs can be very
  2324  	// high, so the events are sent in batches of size around 512.
  2325  
  2326  	// Deleted logs + blocks:
  2327  	var deletedLogs []*types.Log
  2328  	for i := len(oldChain) - 1; i >= 0; i-- {
  2329  		// Also send event for blocks removed from the canon chain.
  2330  		bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  2331  
  2332  		// Collect deleted logs for notification
  2333  		if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 {
  2334  			deletedLogs = append(deletedLogs, logs...)
  2335  		}
  2336  		if len(deletedLogs) > 512 {
  2337  			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2338  			deletedLogs = nil
  2339  		}
  2340  	}
  2341  	if len(deletedLogs) > 0 {
  2342  		bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2343  	}
  2344  
  2345  	// New logs:
  2346  	var rebirthLogs []*types.Log
  2347  	for i := len(newChain) - 1; i >= 1; i-- {
  2348  		if logs := bc.collectLogs(newChain[i], false); len(logs) > 0 {
  2349  			rebirthLogs = append(rebirthLogs, logs...)
  2350  		}
  2351  		if len(rebirthLogs) > 512 {
  2352  			bc.logsFeed.Send(rebirthLogs)
  2353  			rebirthLogs = nil
  2354  		}
  2355  	}
  2356  	if len(rebirthLogs) > 0 {
  2357  		bc.logsFeed.Send(rebirthLogs)
  2358  	}
  2359  	return nil
  2360  }
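// The removed-log and reborn-log batches emitted above are consumed through the
// blockchain's subscription API (e.g. SubscribeRemovedLogsEvent). A minimal
// consumer sketch, assuming a watcher that only cares about logs removed by a
// reorg (exampleWatchReorgs is illustrative and not part of this file):
//
//	func exampleWatchReorgs(bc *BlockChain) {
//		removed := make(chan RemovedLogsEvent, 16)
//		sub := bc.SubscribeRemovedLogsEvent(removed)
//		defer sub.Unsubscribe()
//		for {
//			select {
//			case ev := <-removed:
//				log.Info("Logs removed by reorg", "count", len(ev.Logs))
//			case err := <-sub.Err():
//				log.Warn("Removed-logs subscription closed", "err", err)
//				return
//			}
//		}
//	}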
  2361  
  2362  // InsertBlockWithoutSetHead executes the block, runs the necessary verification
  2363  // upon it and then persists the block and the associated state into the database.
  2364  // The key difference from InsertChain is that it won't update the canonical
  2365  // chain. It relies on an additional SetCanonical call to finalize the entire
  2366  // procedure.
  2367  func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
  2368  	if !bc.chainmu.TryLock() {
  2369  		return errChainStopped
  2370  	}
  2371  	defer bc.chainmu.Unlock()
  2372  
  2373  	_, err := bc.insertChain(types.Blocks{block}, false)
  2374  	return err
  2375  }
  2376  
  2377  // SetCanonical rewinds the chain to set the new head block as the specified
  2378  // block. It's possible that the state of the new head is missing, and it will
  2379  // be recovered in this function as well.
  2380  func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
  2381  	if !bc.chainmu.TryLock() {
  2382  		return common.Hash{}, errChainStopped
  2383  	}
  2384  	defer bc.chainmu.Unlock()
  2385  
  2386  	// Re-execute the reorged chain in case the head state is missing.
  2387  	if !bc.HasState(head.Root()) {
  2388  		if latestValidHash, err := bc.recoverAncestors(head); err != nil {
  2389  			return latestValidHash, err
  2390  		}
  2391  		log.Info("Recovered head state", "number", head.Number(), "hash", head.Hash())
  2392  	}
  2393  	// Run the reorg if necessary and set the given block as new head.
  2394  	start := time.Now()
  2395  	if head.ParentHash() != bc.CurrentBlock().Hash() {
  2396  		if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
  2397  			return common.Hash{}, err
  2398  		}
  2399  	}
  2400  	bc.writeHeadBlock(head)
  2401  
  2402  	// Emit events
  2403  	logs := bc.collectLogs(head, false)
  2404  	bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
  2405  	if len(logs) > 0 {
  2406  		bc.logsFeed.Send(logs)
  2407  	}
  2408  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
  2409  
  2410  	context := []interface{}{
  2411  		"number", head.Number(),
  2412  		"hash", head.Hash(),
  2413  		"root", head.Root(),
  2414  		"elapsed", time.Since(start),
  2415  	}
  2416  	if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
  2417  		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
  2418  	}
  2419  	log.Info("Chain head was updated", context...)
  2420  	return head.Hash(), nil
  2421  }
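// InsertBlockWithoutSetHead and SetCanonical are intended to be used as a pair:
// the former validates and persists a block without moving the head, the latter
// promotes it (reorging if necessary). A minimal sketch of that pairing, assuming
// the caller already holds a fully assembled block (exampleImportAndPromote is
// illustrative and not part of this file):
//
//	func exampleImportAndPromote(bc *BlockChain, block *types.Block) (common.Hash, error) {
//		if err := bc.InsertBlockWithoutSetHead(block); err != nil {
//			return common.Hash{}, err
//		}
//		// Promote the freshly inserted block to the canonical head.
//		return bc.SetCanonical(block)
//	}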
  2422  
  2423  // skipBlock returns 'true' if the block being imported can be skipped over, meaning
  2424  // that the block does not need to be processed but can be considered already fully 'done'.
  2425  func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
  2426  	// We can only ever bypass processing if the only error returned by the validator
  2427  	// is ErrKnownBlock, which means all checks passed, but we already have the block
  2428  	// and state.
  2429  	if !errors.Is(err, ErrKnownBlock) {
  2430  		return false
  2431  	}
  2432  	// If we're not using snapshots, we can skip this, since we have both block
  2433  	// and (trie-) state
  2434  	if bc.snaps == nil {
  2435  		return true
  2436  	}
  2437  	var (
  2438  		header     = it.current() // header can't be nil
  2439  		parentRoot common.Hash
  2440  	)
  2441  	// If we also have the snapshot-state, we can skip the processing.
  2442  	if bc.snaps.Snapshot(header.Root) != nil {
  2443  		return true
  2444  	}
  2445  	// In this case, we have the trie-state but not snapshot-state. If the parent
  2446  	// snapshot-state exists, we need to process this in order to not get a gap
  2447  	// in the snapshot layers.
  2448  	// Resolve parent block
  2449  	if parent := it.previous(); parent != nil {
  2450  		parentRoot = parent.Root
  2451  	} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
  2452  		parentRoot = parent.Root
  2453  	}
  2454  	if parentRoot == (common.Hash{}) {
  2455  		return false // Theoretically impossible case
  2456  	}
  2457  	// Parent is also missing snapshot: we can skip this. Otherwise process.
  2458  	if bc.snaps.Snapshot(parentRoot) == nil {
  2459  		return true
  2460  	}
  2461  	return false
  2462  }
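// To recap the decision above (given err == ErrKnownBlock):
//
//	snapshots disabled                        -> skip (block and trie state exist)
//	snapshot for header.Root present          -> skip (nothing left to produce)
//	parent root unresolved                    -> process (defensive fallback)
//	parent snapshot also missing              -> skip (the gap already exists)
//	parent snapshot present, own one missing  -> process, to avoid a snapshot gap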
  2463  
  2464  // reportBlock logs a bad block error.
  2465  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2466  	rawdb.WriteBadBlock(bc.db, block)
  2467  	log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
  2468  }
  2469  
  2470  // summarizeBadBlock returns a string summarizing the bad block and other
  2471  // relevant information.
  2472  func summarizeBadBlock(block *types.Block, receipts []*types.Receipt, config *params.ChainConfig, err error) string {
  2473  	var receiptString string
  2474  	for i, receipt := range receipts {
  2475  		receiptString += fmt.Sprintf("\n  %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x",
  2476  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2477  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2478  	}
  2479  	version, vcs := version.Info()
  2480  	platform := fmt.Sprintf("%s %s %s %s", version, runtime.Version(), runtime.GOARCH, runtime.GOOS)
  2481  	if vcs != "" {
  2482  		vcs = fmt.Sprintf("\nVCS: %s", vcs)
  2483  	}
  2484  	return fmt.Sprintf(`
  2485  ########## BAD BLOCK #########
  2486  Block: %v (%#x)
  2487  Error: %v
  2488  Platform: %v%v
  2489  Chain config: %#v
  2490  Receipts: %v
  2491  ##############################
  2492  `, block.Number(), block.Hash(), err, platform, vcs, config, receiptString)
  2493  }
  2494  
  2495  // InsertHeaderChain attempts to insert the given header chain into the local
  2496  // chain, possibly creating a reorg. If an error is returned, it will return the
  2497  // index number of the failing header as well as an error describing what went wrong.
  2498  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
  2499  	if len(chain) == 0 {
  2500  		return 0, nil
  2501  	}
  2502  	start := time.Now()
  2503  	if i, err := bc.hc.ValidateHeaderChain(chain); err != nil {
  2504  		return i, err
  2505  	}
  2506  
  2507  	if !bc.chainmu.TryLock() {
  2508  		return 0, errChainStopped
  2509  	}
  2510  	defer bc.chainmu.Unlock()
  2511  	_, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
  2512  	return 0, err
  2513  }
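// A minimal usage sketch, assuming the headers form a contiguous, pre-ordered
// segment (exampleImportHeaders is illustrative and not part of this file):
//
//	func exampleImportHeaders(bc *BlockChain, headers []*types.Header) error {
//		if n, err := bc.InsertHeaderChain(headers); err != nil {
//			return fmt.Errorf("header %d rejected: %w", n, err)
//		}
//		return nil
//	}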
  2514  
  2515  // SetBlockValidatorAndProcessorForTesting sets the current validator and processor.
  2516  // This method can be used to force an invalid blockchain to be verified for tests.
  2517  // This method is unsafe and should only be used before block import starts.
  2518  func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Processor) {
  2519  	bc.validator = v
  2520  	bc.processor = p
  2521  }
  2522  
  2523  // SetTrieFlushInterval configures how often in-memory tries are persisted to disk.
  2524  // The interval is in terms of block processing time, not wall clock.
  2525  // It is thread-safe and can be called repeatedly without side effects.
  2526  func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
  2527  	bc.flushInterval.Store(int64(interval))
  2528  }
  2529  
  2530  // GetTrieFlushInterval gets the in-memory tries flush interval
  2531  func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
  2532  	return time.Duration(bc.flushInterval.Load())
  2533  }
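// A minimal usage sketch of the flush-interval knobs, assuming the operator wants
// to trade memory for fewer disk writes (the one-hour value is purely illustrative):
//
//	func exampleTuneFlushInterval(bc *BlockChain) {
//		bc.SetTrieFlushInterval(time.Hour)
//		log.Info("Trie flush interval updated", "interval", bc.GetTrieFlushInterval())
//	}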