github.com/ethereum/go-ethereum@v1.16.1/core/blockchain.go (about)

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math"
    25  	"math/big"
    26  	"runtime"
    27  	"slices"
    28  	"sort"
    29  	"strings"
    30  	"sync"
    31  	"sync/atomic"
    32  	"time"
    33  
    34  	"github.com/ethereum/go-ethereum/common"
    35  	"github.com/ethereum/go-ethereum/common/lru"
    36  	"github.com/ethereum/go-ethereum/common/mclock"
    37  	"github.com/ethereum/go-ethereum/common/prque"
    38  	"github.com/ethereum/go-ethereum/consensus"
    39  	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
    40  	"github.com/ethereum/go-ethereum/core/history"
    41  	"github.com/ethereum/go-ethereum/core/rawdb"
    42  	"github.com/ethereum/go-ethereum/core/state"
    43  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    44  	"github.com/ethereum/go-ethereum/core/stateless"
    45  	"github.com/ethereum/go-ethereum/core/tracing"
    46  	"github.com/ethereum/go-ethereum/core/types"
    47  	"github.com/ethereum/go-ethereum/core/vm"
    48  	"github.com/ethereum/go-ethereum/ethdb"
    49  	"github.com/ethereum/go-ethereum/event"
    50  	"github.com/ethereum/go-ethereum/internal/syncx"
    51  	"github.com/ethereum/go-ethereum/internal/version"
    52  	"github.com/ethereum/go-ethereum/log"
    53  	"github.com/ethereum/go-ethereum/metrics"
    54  	"github.com/ethereum/go-ethereum/params"
    55  	"github.com/ethereum/go-ethereum/rlp"
    56  	"github.com/ethereum/go-ethereum/triedb"
    57  	"github.com/ethereum/go-ethereum/triedb/hashdb"
    58  	"github.com/ethereum/go-ethereum/triedb/pathdb"
    59  )
    60  
// Metrics gathered during block import and chain mutation, plus the sentinel
// errors returned by the insertion/reorg code paths.
var (
	// Gauges tracking the various chain head markers.
	headBlockGauge          = metrics.NewRegisteredGauge("chain/head/block", nil)
	headHeaderGauge         = metrics.NewRegisteredGauge("chain/head/header", nil)
	headFastBlockGauge      = metrics.NewRegisteredGauge("chain/head/receipt", nil)
	headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil)
	headSafeBlockGauge      = metrics.NewRegisteredGauge("chain/head/safe", nil)

	chainInfoGauge   = metrics.NewRegisteredGaugeInfo("chain/info", nil)
	chainMgaspsMeter = metrics.NewRegisteredResettingTimer("chain/mgasps", nil)

	// Timers for the individual state access/mutation phases of block execution.
	accountReadTimer   = metrics.NewRegisteredResettingTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredResettingTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredResettingTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredResettingTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredResettingTimer("chain/storage/reads", nil)
	storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil)

	// Hit/miss meters for the state caches, split by whether the access
	// happened during processing or during prefetching.
	accountCacheHitMeter  = metrics.NewRegisteredMeter("chain/account/reads/cache/process/hit", nil)
	accountCacheMissMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/process/miss", nil)
	storageCacheHitMeter  = metrics.NewRegisteredMeter("chain/storage/reads/cache/process/hit", nil)
	storageCacheMissMeter = metrics.NewRegisteredMeter("chain/storage/reads/cache/process/miss", nil)

	accountCacheHitPrefetchMeter  = metrics.NewRegisteredMeter("chain/account/reads/cache/prefetch/hit", nil)
	accountCacheMissPrefetchMeter = metrics.NewRegisteredMeter("chain/account/reads/cache/prefetch/miss", nil)
	storageCacheHitPrefetchMeter  = metrics.NewRegisteredMeter("chain/storage/reads/cache/prefetch/hit", nil)
	storageCacheMissPrefetchMeter = metrics.NewRegisteredMeter("chain/storage/reads/cache/prefetch/miss", nil)

	accountReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/account/single/reads", nil)
	storageReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/storage/single/reads", nil)

	snapshotCommitTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
	triedbCommitTimer   = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)

	// Timers for the top-level phases of block insertion.
	blockInsertTimer          = metrics.NewRegisteredResettingTimer("chain/inserts", nil)
	blockValidationTimer      = metrics.NewRegisteredResettingTimer("chain/validation", nil)
	blockCrossValidationTimer = metrics.NewRegisteredResettingTimer("chain/crossvalidation", nil)
	blockExecutionTimer       = metrics.NewRegisteredResettingTimer("chain/execution", nil)
	blockWriteTimer           = metrics.NewRegisteredResettingTimer("chain/write", nil)

	blockReorgMeter     = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
	blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)

	blockPrefetchExecuteTimer    = metrics.NewRegisteredResettingTimer("chain/prefetch/executes", nil)
	blockPrefetchInterruptMeter  = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
	blockPrefetchTxsInvalidMeter = metrics.NewRegisteredMeter("chain/prefetch/txs/invalid", nil)
	blockPrefetchTxsValidMeter   = metrics.NewRegisteredMeter("chain/prefetch/txs/valid", nil)

	// Sentinel errors surfaced by chain insertion and reorg handling.
	errInsertionInterrupted = errors.New("insertion is interrupted")
	errChainStopped         = errors.New("blockchain is stopped")
	errInvalidOldChain      = errors.New("invalid old chain")
	errInvalidNewChain      = errors.New("invalid new chain")
)
   116  
var (
	// forkReadyInterval is the minimum time between fork-readiness log prints
	// (rate-limited via BlockChain.lastForkReadyAlert).
	forkReadyInterval = 3 * time.Minute
)
   120  
const (
	// Sizes of the various LRU caches maintained by the BlockChain (entry counts).
	bodyCacheLimit     = 256  // Maximum number of cached block bodies (also bounds the RLP body cache)
	blockCacheLimit    = 256  // Maximum number of cached full blocks
	receiptsCacheLimit = 32   // Maximum number of cached receipt sets
	txLookupCacheLimit = 1024 // Maximum number of cached transaction lookup entries

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//   * the `Bloom` field of receipt is deleted
	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	//
	// - Version 5
	//  The following incompatible database changes were added:
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//      receipts' corresponding block
	//
	// - Version 6
	//  The following incompatible database changes were added:
	//    * Transaction lookup information stores the corresponding block number instead of block hash
	//
	// - Version 7
	//  The following incompatible database changes were added:
	//    * Use freezer as the ancient database to maintain all ancient data
	//
	// - Version 8
	//  The following incompatible database changes were added:
	//    * New scheme for contract code in order to separate the codes and trie nodes
	//
	// - Version 9
	//  The following incompatible database changes were added:
	//  * Total difficulty has been removed from both the key-value store and the ancient store.
	//  * The metadata structure of freezer is changed by adding 'flushOffset'
	BlockChainVersion uint64 = 9
)
   161  
// BlockChainConfig contains the configuration of the BlockChain object.
// A zero value is not directly usable; obtain a baseline via DefaultConfig
// and tweak it with the With* helpers or direct field assignment.
type BlockChainConfig struct {
	// Trie database related options
	TrieCleanLimit   int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieDirtyLimit   int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieTimeLimit    time.Duration // Time limit after which to flush the current in-memory trie to disk
	TrieNoAsyncFlush bool          // Whether the asynchronous buffer flushing is disallowed

	Preimages    bool   // Whether to store preimage of trie key to the disk
	StateHistory uint64 // Number of blocks from head whose state histories are reserved.
	StateScheme  string // Scheme used to store ethereum states and merkle tree nodes on top
	ArchiveMode  bool   // Whether to enable the archive mode

	// State snapshot related options
	SnapshotLimit   int  // Memory allowance (MB) to use for caching snapshot entries in memory
	SnapshotNoBuild bool // Whether background snapshot generation is disabled
	SnapshotWait    bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it

	// This defines the cutoff block for history expiry.
	// Blocks before this number may be unavailable in the chain database.
	ChainHistoryMode history.HistoryMode

	// Misc options
	NoPrefetch bool            // Whether to disable heuristic state prefetching when processing blocks
	Overrides  *ChainOverrides // Optional chain config overrides
	VmConfig   vm.Config       // Config options for the EVM Interpreter

	// TxLookupLimit specifies the maximum number of blocks from head for which
	// transaction hashes will be indexed.
	//
	// If the value is zero, all transactions of the entire chain will be indexed.
	// If the value is -1, indexing is disabled.
	TxLookupLimit int64
}
   196  
   197  // DefaultConfig returns the default config.
   198  // Note the returned object is safe to modify!
   199  func DefaultConfig() *BlockChainConfig {
   200  	return &BlockChainConfig{
   201  		TrieCleanLimit:   256,
   202  		TrieDirtyLimit:   256,
   203  		TrieTimeLimit:    5 * time.Minute,
   204  		StateScheme:      rawdb.HashScheme,
   205  		SnapshotLimit:    256,
   206  		SnapshotWait:     true,
   207  		ChainHistoryMode: history.KeepAll,
   208  		// Transaction indexing is disabled by default.
   209  		// This is appropriate for most unit tests.
   210  		TxLookupLimit: -1,
   211  	}
   212  }
   213  
   214  // WithArchive enables/disables archive mode on the config.
   215  func (cfg BlockChainConfig) WithArchive(on bool) *BlockChainConfig {
   216  	cfg.ArchiveMode = on
   217  	return &cfg
   218  }
   219  
   220  // WithStateScheme sets the state storage scheme on the config.
   221  func (cfg BlockChainConfig) WithStateScheme(scheme string) *BlockChainConfig {
   222  	cfg.StateScheme = scheme
   223  	return &cfg
   224  }
   225  
   226  // WithNoAsyncFlush enables/disables asynchronous buffer flushing mode on the config.
   227  func (cfg BlockChainConfig) WithNoAsyncFlush(on bool) *BlockChainConfig {
   228  	cfg.TrieNoAsyncFlush = on
   229  	return &cfg
   230  }
   231  
   232  // triedbConfig derives the configures for trie database.
   233  func (cfg *BlockChainConfig) triedbConfig(isVerkle bool) *triedb.Config {
   234  	config := &triedb.Config{
   235  		Preimages: cfg.Preimages,
   236  		IsVerkle:  isVerkle,
   237  	}
   238  	if cfg.StateScheme == rawdb.HashScheme {
   239  		config.HashDB = &hashdb.Config{
   240  			CleanCacheSize: cfg.TrieCleanLimit * 1024 * 1024,
   241  		}
   242  	}
   243  	if cfg.StateScheme == rawdb.PathScheme {
   244  		config.PathDB = &pathdb.Config{
   245  			StateHistory:        cfg.StateHistory,
   246  			EnableStateIndexing: cfg.ArchiveMode,
   247  			TrieCleanSize:       cfg.TrieCleanLimit * 1024 * 1024,
   248  			StateCleanSize:      cfg.SnapshotLimit * 1024 * 1024,
   249  
   250  			// TODO(rjl493456442): The write buffer represents the memory limit used
   251  			// for flushing both trie data and state data to disk. The config name
   252  			// should be updated to eliminate the confusion.
   253  			WriteBufferSize: cfg.TrieDirtyLimit * 1024 * 1024,
   254  			NoAsyncFlush:    cfg.TrieNoAsyncFlush,
   255  		}
   256  	}
   257  	return config
   258  }
   259  
// txLookup is wrapper over transaction lookup along with the corresponding
// transaction object.
type txLookup struct {
	lookup      *rawdb.LegacyTxLookupEntry // Lookup metadata resolved from the database
	transaction *types.Transaction         // The transaction object itself
}
   266  
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks in to the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represents the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cfg         *BlockChainConfig   // Blockchain configuration

	db            ethdb.Database                   // Low level persistent database to store final content in
	snaps         *snapshot.Tree                   // Snapshot tree for fast trie leaf access
	triegc        *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
	gcproc        time.Duration                    // Accumulates canonical block processing for trie dumping
	lastWrite     uint64                           // Last block when the state was flushed
	flushInterval atomic.Int64                     // Time interval (processing time) after which to flush a state
	triedb        *triedb.Database                 // The database handler for maintaining trie nodes.
	statedb       *state.CachingDB                 // State database to reuse between imports (contains state cache)
	txIndexer     *txIndexer                       // Transaction indexer, might be nil if not enabled

	hc               *HeaderChain // Underlying header chain manager
	rmLogsFeed       event.Feed   // Event feed; presumably removed-log notifications on reorg — confirm against subscribers
	chainFeed        event.Feed   // Event feed for chain events
	chainHeadFeed    event.Feed   // Event feed for new chain head events
	logsFeed         event.Feed   // Event feed for log events
	blockProcFeed    event.Feed   // Event feed for block processing status
	blockProcCounter int32        // NOTE(review): counter paired with blockProcFeed; usage not visible in this chunk
	scope            event.SubscriptionScope      // Tracks feed subscriptions for mass-unsubscribe on shutdown
	genesisBlock     *types.Block                 // Cached block zero, rebuilt from the stored genesis header

	// This mutex synchronizes chain write operations.
	// Readers don't need to take it, they can just read the database.
	chainmu *syncx.ClosableMutex

	currentBlock      atomic.Pointer[types.Header] // Current head of the chain
	currentSnapBlock  atomic.Pointer[types.Header] // Current head of snap-sync
	currentFinalBlock atomic.Pointer[types.Header] // Latest (consensus) finalized block
	currentSafeBlock  atomic.Pointer[types.Header] // Latest (consensus) safe block
	historyPrunePoint atomic.Pointer[history.PrunePoint] // Earliest retained block info when history is pruned

	bodyCache     *lru.Cache[common.Hash, *types.Body]
	bodyRLPCache  *lru.Cache[common.Hash, rlp.RawValue]
	receiptsCache *lru.Cache[common.Hash, []*types.Receipt] // Receipts cache with all fields derived
	blockCache    *lru.Cache[common.Hash, *types.Block]

	txLookupLock  sync.RWMutex // Guards txLookupCache
	txLookupCache *lru.Cache[common.Hash, txLookup]

	stopping      atomic.Bool // false if chain is running, true when stopped
	procInterrupt atomic.Bool // interrupt signaler for block processing

	engine     consensus.Engine // Consensus engine used for header verification
	validator  Validator        // Block and state validator interface
	prefetcher Prefetcher       // State prefetcher warming caches ahead of processing
	processor  Processor        // Block transaction processor interface
	logger     *tracing.Hooks   // Optional tracing hooks (taken from cfg.VmConfig.Tracer)

	lastForkReadyAlert time.Time // Last time there was a fork readiness print out
}
   334  
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator
// and Processor.
//
// Parameters:
//   - db: backing key-value + ancient database
//   - genesis: optional genesis spec, written to db if the chain is uninitialized
//   - engine: consensus engine used for header verification
//   - cfg: chain configuration; nil selects DefaultConfig()
//
// The constructor also performs startup recovery: repairing a head with
// missing state, truncating dangling ancients after a crashed SetHead, and
// rewinding on an incompatible chain config upgrade.
func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine, cfg *BlockChainConfig) (*BlockChain, error) {
	if cfg == nil {
		cfg = DefaultConfig()
	}

	// Open trie database with provided config
	enableVerkle, err := EnableVerkleAtGenesis(db, genesis)
	if err != nil {
		return nil, err
	}
	triedb := triedb.NewDatabase(db, cfg.triedbConfig(enableVerkle))

	// Write the supplied genesis to the database if it has not been initialized
	// yet. The corresponding chain config will be returned, either from the
	// provided genesis or from the locally stored configuration if the genesis
	// has already been initialized.
	chainConfig, genesisHash, compatErr, err := SetupGenesisBlockWithOverride(db, triedb, genesis, cfg.Overrides)
	if err != nil {
		return nil, err
	}
	// Print the chain configuration banner to the logs.
	log.Info("")
	log.Info(strings.Repeat("-", 153))
	for _, line := range strings.Split(chainConfig.Description(), "\n") {
		log.Info(line)
	}
	log.Info(strings.Repeat("-", 153))
	log.Info("")

	bc := &BlockChain{
		chainConfig:   chainConfig,
		cfg:           cfg,
		db:            db,
		triedb:        triedb,
		triegc:        prque.New[int64, common.Hash](nil),
		chainmu:       syncx.NewClosableMutex(),
		bodyCache:     lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
		bodyRLPCache:  lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
		receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
		blockCache:    lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
		txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
		engine:        engine,
		logger:        cfg.VmConfig.Tracer,
	}
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
	if err != nil {
		return nil, err
	}
	bc.flushInterval.Store(int64(cfg.TrieTimeLimit))
	bc.statedb = state.NewDatabase(bc.triedb, nil)
	bc.validator = NewBlockValidator(chainConfig, bc)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc)
	bc.processor = NewStateProcessor(chainConfig, bc.hc)

	// The genesis header must exist in the database, otherwise the chain
	// has not been initialized and cannot be opened.
	genesisHeader := bc.GetHeaderByNumber(0)
	if genesisHeader == nil {
		return nil, ErrNoGenesis
	}
	bc.genesisBlock = types.NewBlockWithHeader(genesisHeader)

	// Reset the head markers; they are populated below by loadLastState.
	bc.currentBlock.Store(nil)
	bc.currentSnapBlock.Store(nil)
	bc.currentFinalBlock.Store(nil)
	bc.currentSafeBlock.Store(nil)

	// Update chain info data metrics
	chainInfoGauge.Update(metrics.GaugeInfoValue{"chain_id": bc.chainConfig.ChainID.String()})

	// If Geth is initialized with an external ancient store, re-initialize the
	// missing chain indexes and chain flags. This procedure can survive crash
	// and can be resumed in next restart since chain flags are updated in last step.
	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
	}
	// Load blockchain states from disk
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Make sure the state associated with the block is available, or log out
	// if there is no available state, waiting for state sync.
	head := bc.CurrentBlock()
	if !bc.HasState(head.Root) {
		if head.Number.Uint64() == 0 {
			// The genesis state is missing, which is only possible in the path-based
			// scheme. This situation occurs when the initial state sync is not finished
			// yet, or the chain head is rewound below the pivot point. In both scenarios,
			// there is no possible recovery approach except for rerunning a snap sync.
			// Do nothing here until the state syncer picks it up.
			log.Info("Genesis state is missing, wait state sync")
		} else {
			// Head state is missing, before the state recovery, find out the disk
			// layer point of snapshot(if it's enabled). Make sure the rewound point
			// is lower than disk layer.
			//
			// Note it's unnecessary in path mode which always keep trie data and
			// state data consistent.
			var diskRoot common.Hash
			if bc.cfg.SnapshotLimit > 0 && bc.cfg.StateScheme == rawdb.HashScheme {
				diskRoot = rawdb.ReadSnapshotRoot(bc.db)
			}
			if diskRoot != (common.Hash{}) {
				log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)

				snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
				if err != nil {
					return nil, err
				}
				// Chain rewound, persist old snapshot number to indicate recovery procedure
				if snapDisk != 0 {
					rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
				}
			} else {
				// No snapshot disk layer available, rewind until some state is found.
				log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
				if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
					return nil, err
				}
			}
		}
	}
	// Ensure that a previous crash in SetHead doesn't leave extra ancients
	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 {
			needRewind = true
			low = fullBlock.Number.Uint64()
		}
		// In snap sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated, truncate the
		// extra data here.
		snapBlock := bc.CurrentSnapBlock()
		if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 {
			needRewind = true
			if snapBlock.Number.Uint64() < low || low == 0 {
				low = snapBlock.Number.Uint64()
			}
		}
		if needRewind {
			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
			if err := bc.SetHead(low); err != nil {
				return nil, err
			}
		}
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (ethash cache or clique voting snapshot). Might as well do
	// it in advance.
	bc.engine.VerifyHeader(bc, bc.CurrentHeader())

	// Fire the optional tracing hooks for chain init and (if the head still is
	// the genesis) the genesis block with its allocation.
	if bc.logger != nil && bc.logger.OnBlockchainInit != nil {
		bc.logger.OnBlockchainInit(chainConfig)
	}
	if bc.logger != nil && bc.logger.OnGenesisBlock != nil {
		if block := bc.CurrentBlock(); block.Number.Uint64() == 0 {
			alloc, err := getGenesisState(bc.db, block.Hash())
			if err != nil {
				return nil, fmt.Errorf("failed to get genesis state: %w", err)
			}
			if alloc == nil {
				return nil, errors.New("live blockchain tracer requires genesis alloc to be set")
			}
			bc.logger.OnGenesisBlock(bc.genesisBlock, alloc)
		}
	}
	bc.setupSnapshot()

	// Rewind the chain in case of an incompatible config upgrade.
	if compatErr != nil {
		log.Warn("Rewinding chain to upgrade configuration", "err", compatErr)
		if compatErr.RewindToTime > 0 {
			bc.SetHeadWithTimestamp(compatErr.RewindToTime)
		} else {
			bc.SetHead(compatErr.RewindToBlock)
		}
		rawdb.WriteChainConfig(db, genesisHash, chainConfig)
	}

	// Start tx indexer if it's enabled.
	if bc.cfg.TxLookupLimit >= 0 {
		bc.txIndexer = newTxIndexer(uint64(bc.cfg.TxLookupLimit), bc)
	}
	return bc, nil
}
   526  
   527  func (bc *BlockChain) setupSnapshot() {
   528  	// Short circuit if the chain is established with path scheme, as the
   529  	// state snapshot has been integrated into path database natively.
   530  	if bc.cfg.StateScheme == rawdb.PathScheme {
   531  		return
   532  	}
   533  	// Load any existing snapshot, regenerating it if loading failed
   534  	if bc.cfg.SnapshotLimit > 0 {
   535  		// If the chain was rewound past the snapshot persistent layer (causing
   536  		// a recovery block number to be persisted to disk), check if we're still
   537  		// in recovery mode and in that case, don't invalidate the snapshot on a
   538  		// head mismatch.
   539  		var recover bool
   540  		head := bc.CurrentBlock()
   541  		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() {
   542  			log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer)
   543  			recover = true
   544  		}
   545  		snapconfig := snapshot.Config{
   546  			CacheSize:  bc.cfg.SnapshotLimit,
   547  			Recovery:   recover,
   548  			NoBuild:    bc.cfg.SnapshotNoBuild,
   549  			AsyncBuild: !bc.cfg.SnapshotWait,
   550  		}
   551  		bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
   552  
   553  		// Re-initialize the state database with snapshot
   554  		bc.statedb = state.NewDatabase(bc.triedb, bc.snaps)
   555  	}
   556  }
   557  
   558  // empty returns an indicator whether the blockchain is empty.
   559  // Note, it's a special case that we connect a non-empty ancient
   560  // database with an empty node, so that we can plugin the ancient
   561  // into node seamlessly.
   562  func (bc *BlockChain) empty() bool {
   563  	genesis := bc.genesisBlock.Hash()
   564  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   565  		if hash != genesis {
   566  			return false
   567  		}
   568  	}
   569  	return true
   570  }
   571  
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
//
// It restores, in order: the head block, the head header, the history pruning
// point, the head snap-sync block and the finalized/safe blocks. If any of the
// critical markers is missing or unresolvable the whole chain is reset to
// genesis via Reset.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	headHeader := bc.GetHeaderByHash(head)
	if headHeader == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head header missing, resetting chain", "hash", head)
		return bc.Reset()
	}

	var headBlock *types.Block
	// Compare the head number against zero (new(big.Int) is 0).
	if cmp := headHeader.Number.Cmp(new(big.Int)); cmp == 1 {
		// Make sure the entire head block is available.
		headBlock = bc.GetBlockByHash(head)
	} else if cmp == 0 {
		// On a pruned node the block body might not be available. But a pruned
		// block should never be the head block. The only exception is when, as
		// a last resort, chain is reset to genesis.
		headBlock = bc.genesisBlock
	}
	if headBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(headHeader)
	headBlockGauge.Update(int64(headBlock.NumberU64()))

	// Restore the last known head header; it may be ahead of the head block
	// (e.g. during sync), falling back to the head block's header otherwise.
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			headHeader = header
		}
	}
	bc.hc.SetCurrentHeader(headHeader)

	// Initialize history pruning.
	latest := max(headBlock.NumberU64(), headHeader.Number.Uint64())
	if err := bc.initializeHistoryPruning(latest); err != nil {
		return err
	}

	// Restore the last known head snap block, defaulting to the head block
	// before attempting the dedicated marker lookup below.
	bc.currentSnapBlock.Store(headBlock.Header())
	headFastBlockGauge.Update(int64(headBlock.NumberU64()))

	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentSnapBlock.Store(block.Header())
			headFastBlockGauge.Update(int64(block.NumberU64()))
		}
	}

	// Restore the last known finalized block and safe block
	// Note: the safe block is not stored on disk and it is set to the last
	// known finalized block on startup
	if head := rawdb.ReadFinalizedBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFinalBlock.Store(block.Header())
			headFinalizedBlockGauge.Update(int64(block.NumberU64()))
			bc.currentSafeBlock.Store(block.Header())
			headSafeBlockGauge.Update(int64(block.NumberU64()))
		}
	}

	// Issue a status log for the user
	var (
		currentSnapBlock  = bc.CurrentSnapBlock()
		currentFinalBlock = bc.CurrentFinalBlock()
	)
	if headHeader.Hash() != headBlock.Hash() {
		log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
	}
	log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
	if headBlock.Hash() != currentSnapBlock.Hash() {
		log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
	}
	if currentFinalBlock != nil {
		log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0)))
	}
	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
		log.Info("Loaded last snap-sync pivot marker", "number", *pivot)
	}
	if pruning := bc.historyPrunePoint.Load(); pruning != nil {
		log.Info("Chain history is pruned", "earliest", pruning.BlockNumber, "hash", pruning.BlockHash)
	}
	return nil
}
   668  
   669  // initializeHistoryPruning sets bc.historyPrunePoint.
   670  func (bc *BlockChain) initializeHistoryPruning(latest uint64) error {
   671  	freezerTail, _ := bc.db.Tail()
   672  
   673  	switch bc.cfg.ChainHistoryMode {
   674  	case history.KeepAll:
   675  		if freezerTail == 0 {
   676  			return nil
   677  		}
   678  		// The database was pruned somehow, so we need to figure out if it's a known
   679  		// configuration or an error.
   680  		predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
   681  		if predefinedPoint == nil || freezerTail != predefinedPoint.BlockNumber {
   682  			log.Error("Chain history database is pruned with unknown configuration", "tail", freezerTail)
   683  			return fmt.Errorf("unexpected database tail")
   684  		}
   685  		bc.historyPrunePoint.Store(predefinedPoint)
   686  		return nil
   687  
   688  	case history.KeepPostMerge:
   689  		if freezerTail == 0 && latest != 0 {
   690  			// This is the case where a user is trying to run with --history.chain
   691  			// postmerge directly on an existing DB. We could just trigger the pruning
   692  			// here, but it'd be a bit dangerous since they may not have intended this
   693  			// action to happen. So just tell them how to do it.
   694  			log.Error(fmt.Sprintf("Chain history mode is configured as %q, but database is not pruned.", bc.cfg.ChainHistoryMode.String()))
   695  			log.Error(fmt.Sprintf("Run 'geth prune-history' to prune pre-merge history."))
   696  			return fmt.Errorf("history pruning requested via configuration")
   697  		}
   698  		predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
   699  		if predefinedPoint == nil {
   700  			log.Error("Chain history pruning is not supported for this network", "genesis", bc.genesisBlock.Hash())
   701  			return fmt.Errorf("history pruning requested for unknown network")
   702  		} else if freezerTail > 0 && freezerTail != predefinedPoint.BlockNumber {
   703  			log.Error("Chain history database is pruned to unknown block", "tail", freezerTail)
   704  			return fmt.Errorf("unexpected database tail")
   705  		}
   706  		bc.historyPrunePoint.Store(predefinedPoint)
   707  		return nil
   708  
   709  	default:
   710  		return fmt.Errorf("invalid history mode: %d", bc.cfg.ChainHistoryMode)
   711  	}
   712  }
   713  
   714  // SetHead rewinds the local chain to a new head. Depending on whether the node
   715  // was snap synced or full synced and in which state, the method will try to
   716  // delete minimal data from disk whilst retaining chain consistency.
   717  func (bc *BlockChain) SetHead(head uint64) error {
   718  	if _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false); err != nil {
   719  		return err
   720  	}
   721  	// Send chain head event to update the transaction pool
   722  	header := bc.CurrentBlock()
   723  	if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil {
   724  		// In a pruned node the genesis block will not exist in the freezer.
   725  		// It should not happen that we set head to any other pruned block.
   726  		if header.Number.Uint64() > 0 {
   727  			// This should never happen. In practice, previously currentBlock
   728  			// contained the entire block whereas now only a "marker", so there
   729  			// is an ever so slight chance for a race we should handle.
   730  			log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   731  			return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   732  		}
   733  	}
   734  	bc.chainHeadFeed.Send(ChainHeadEvent{Header: header})
   735  	return nil
   736  }
   737  
   738  // SetHeadWithTimestamp rewinds the local chain to a new head that has at max
   739  // the given timestamp. Depending on whether the node was snap synced or full
   740  // synced and in which state, the method will try to delete minimal data from
   741  // disk whilst retaining chain consistency.
   742  func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
   743  	if _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false); err != nil {
   744  		return err
   745  	}
   746  	// Send chain head event to update the transaction pool
   747  	header := bc.CurrentBlock()
   748  	if block := bc.GetBlock(header.Hash(), header.Number.Uint64()); block == nil {
   749  		// In a pruned node the genesis block will not exist in the freezer.
   750  		// It should not happen that we set head to any other pruned block.
   751  		if header.Number.Uint64() > 0 {
   752  			// This should never happen. In practice, previously currentBlock
   753  			// contained the entire block whereas now only a "marker", so there
   754  			// is an ever so slight chance for a race we should handle.
   755  			log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   756  			return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   757  		}
   758  	}
   759  	bc.chainHeadFeed.Send(ChainHeadEvent{Header: header})
   760  	return nil
   761  }
   762  
   763  // SetFinalized sets the finalized block.
   764  func (bc *BlockChain) SetFinalized(header *types.Header) {
   765  	bc.currentFinalBlock.Store(header)
   766  	if header != nil {
   767  		rawdb.WriteFinalizedBlockHash(bc.db, header.Hash())
   768  		headFinalizedBlockGauge.Update(int64(header.Number.Uint64()))
   769  	} else {
   770  		rawdb.WriteFinalizedBlockHash(bc.db, common.Hash{})
   771  		headFinalizedBlockGauge.Update(0)
   772  	}
   773  }
   774  
   775  // SetSafe sets the safe block.
   776  func (bc *BlockChain) SetSafe(header *types.Header) {
   777  	bc.currentSafeBlock.Store(header)
   778  	if header != nil {
   779  		headSafeBlockGauge.Update(int64(header.Number.Uint64()))
   780  	} else {
   781  		headSafeBlockGauge.Update(0)
   782  	}
   783  }
   784  
// rewindHashHead implements the logic of rewindHead in the context of hash scheme.
// It walks the header chain backwards from head until a block whose state is
// present in the database is found, returning that header together with the
// block number at which the requested root (if any) was crossed. If the search
// limit is hit, or a gap is found in the chain, the genesis header is returned.
func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
	var (
		limit      uint64                             // The oldest block that will be searched for this rewinding
		beyondRoot = root == common.Hash{}            // Flag whether we're beyond the requested root (no root, always true)
		pivot      = rawdb.ReadLastPivotNumber(bc.db) // Associated block number of pivot point state
		rootNumber uint64                             // Associated block number of requested root

		start  = time.Now() // Timestamp the rewinding is restarted
		logged = time.Now() // Timestamp last progress log was printed
	)
	// The oldest block to be searched is determined by the pivot block or a constant
	// searching threshold. The rationale behind this is as follows:
	//
	// - Snap sync is selected if the pivot block is available. The earliest available
	//   state is the pivot block itself, so there is no sense in going further back.
	//
	// - Full sync is selected if the pivot block does not exist. The hash database
	//   periodically flushes the state to disk, and the used searching threshold is
	//   considered sufficient to find a persistent state, even for the testnet. It
	//   might be not enough for a chain that is nearly empty. In the worst case,
	//   the entire chain is reset to genesis, and snap sync is re-enabled on top,
	//   which is still acceptable.
	if pivot != nil {
		limit = *pivot
	} else if head.Number.Uint64() > params.FullImmutabilityThreshold {
		limit = head.Number.Uint64() - params.FullImmutabilityThreshold
	}
	for {
		// Throttle progress reporting: trace-level normally, info-level every 8s.
		logger := log.Trace
		if time.Since(logged) > time.Second*8 {
			logged = time.Now()
			logger = log.Info
		}
		logger("Block state missing, rewinding further", "number", head.Number, "hash", head.Hash(), "elapsed", common.PrettyDuration(time.Since(start)))

		// If a root threshold was requested but not yet crossed, check
		if !beyondRoot && head.Root == root {
			beyondRoot, rootNumber = true, head.Number.Uint64()
		}
		// If search limit is reached, return the genesis block as the
		// new chain head.
		if head.Number.Uint64() < limit {
			log.Info("Rewinding limit reached, resetting to genesis", "number", head.Number, "hash", head.Hash(), "limit", limit)
			return bc.genesisBlock.Header(), rootNumber
		}
		// If the associated state is not reachable, continue searching
		// backwards until an available state is found.
		if !bc.HasState(head.Root) {
			// If the chain is gapped in the middle, return the genesis
			// block as the new chain head.
			parent := bc.GetHeader(head.ParentHash, head.Number.Uint64()-1)
			if parent == nil {
				log.Error("Missing block in the middle, resetting to genesis", "number", head.Number.Uint64()-1, "hash", head.ParentHash)
				return bc.genesisBlock.Header(), rootNumber
			}
			head = parent

			// If the genesis block is reached, stop searching.
			if head.Number.Uint64() == 0 {
				log.Info("Genesis block reached", "number", head.Number, "hash", head.Hash())
				return head, rootNumber
			}
			continue // keep rewinding
		}
		// Once the available state is found, ensure that the requested root
		// has already been crossed. If not, continue rewinding.
		if beyondRoot || head.Number.Uint64() == 0 {
			log.Info("Rewound to block with state", "number", head.Number, "hash", head.Hash())
			return head, rootNumber
		}
		log.Debug("Skipping block with threshold state", "number", head.Number, "hash", head.Hash(), "root", head.Root)
		head = bc.GetHeader(head.ParentHash, head.Number.Uint64()-1) // Keep rewinding
	}
}
   860  
// rewindPathHead implements the logic of rewindHead in the context of path scheme.
// It walks the header chain backwards from head until a block whose state is
// either available or recoverable is found (after the requested root, if any,
// has been crossed), recovers the state if necessary, and returns that header
// together with the block number at which the requested root was crossed.
// If the snap-sync pivot is reached, or a gap is found in the chain, the
// genesis header is returned instead.
func (bc *BlockChain) rewindPathHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
	var (
		pivot      = rawdb.ReadLastPivotNumber(bc.db) // Associated block number of pivot block
		rootNumber uint64                             // Associated block number of requested root

		// BeyondRoot represents whether the requested root is already
		// crossed. The flag value is set to true if the root is empty.
		beyondRoot = root == common.Hash{}

		// noState represents if the target state requested for search
		// is unavailable and impossible to be recovered.
		noState = !bc.HasState(root) && !bc.stateRecoverable(root)

		start  = time.Now() // Timestamp the rewinding is restarted
		logged = time.Now() // Timestamp last progress log was printed
	)
	// Rewind the head block tag until an available state is found.
	for {
		// Throttle progress reporting: trace-level normally, info-level every 8s.
		logger := log.Trace
		if time.Since(logged) > time.Second*8 {
			logged = time.Now()
			logger = log.Info
		}
		logger("Block state missing, rewinding further", "number", head.Number, "hash", head.Hash(), "elapsed", common.PrettyDuration(time.Since(start)))

		// If a root threshold was requested but not yet crossed, check
		if !beyondRoot && head.Root == root {
			beyondRoot, rootNumber = true, head.Number.Uint64()
		}
		// If the root threshold hasn't been crossed but the available
		// state is reached, quickly determine if the target state is
		// possible to be reached or not.
		if !beyondRoot && noState && bc.HasState(head.Root) {
			beyondRoot = true
			log.Info("Disable the search for unattainable state", "root", root)
		}
		// Check if the associated state is available or recoverable if
		// the requested root has already been crossed.
		if beyondRoot && (bc.HasState(head.Root) || bc.stateRecoverable(head.Root)) {
			break
		}
		// If pivot block is reached, return the genesis block as the
		// new chain head. Theoretically there must be a persistent
		// state before or at the pivot block, prevent endless rewinding
		// towards the genesis just in case.
		if pivot != nil && *pivot >= head.Number.Uint64() {
			log.Info("Pivot block reached, resetting to genesis", "number", head.Number, "hash", head.Hash())
			return bc.genesisBlock.Header(), rootNumber
		}
		// If the chain is gapped in the middle, return the genesis
		// block as the new chain head
		parent := bc.GetHeader(head.ParentHash, head.Number.Uint64()-1) // Keep rewinding
		if parent == nil {
			log.Error("Missing block in the middle, resetting to genesis", "number", head.Number.Uint64()-1, "hash", head.ParentHash)
			return bc.genesisBlock.Header(), rootNumber
		}
		head = parent

		// If the genesis block is reached, stop searching.
		if head.Number.Uint64() == 0 {
			log.Info("Genesis block reached", "number", head.Number, "hash", head.Hash())
			return head, rootNumber
		}
	}
	// Recover the target state if it's not available yet.
	if !bc.HasState(head.Root) {
		if err := bc.triedb.Recover(head.Root); err != nil {
			log.Crit("Failed to rollback state", "err", err)
		}
	}
	log.Info("Rewound to block with state", "number", head.Number, "hash", head.Hash())
	return head, rootNumber
}
   935  
   936  // rewindHead searches the available states in the database and returns the associated
   937  // block as the new head block.
   938  //
   939  // If the given root is not empty, then the rewind should attempt to pass the specified
   940  // state root and return the associated block number as well. If the root, typically
   941  // representing the state corresponding to snapshot disk layer, is deemed impassable,
   942  // then block number zero is returned, indicating that snapshot recovery is disabled
   943  // and the whole snapshot should be auto-generated in case of head mismatch.
   944  func (bc *BlockChain) rewindHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
   945  	if bc.triedb.Scheme() == rawdb.PathScheme {
   946  		return bc.rewindPathHead(head, root)
   947  	}
   948  	return bc.rewindHashHead(head, root)
   949  }
   950  
// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
// persistent disk layer. Depending on whether the node was snap synced or full, and
// in which state, the method will try to delete minimal data from disk whilst
// retaining chain consistency.
//
// The method also works in timestamp mode if `head == 0` but `time != 0`. In that
// case blocks are rolled back until the new head becomes older or equal to the
// requested time. If both `head` and `time` is 0, the chain is rewound to genesis.
//
// If repair is true, the header chain is left untouched and only the block/state
// markers are repaired (unless the freezer underflows, which forces a wipe).
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool) (uint64, error) {
	if !bc.chainmu.TryLock() {
		return 0, errChainStopped
	}
	defer bc.chainmu.Unlock()

	var (
		// Track the block number of the requested root hash
		rootNumber uint64 // (no root == always 0)

		// Retrieve the last pivot block to short circuit rollbacks beyond it
		// and the current freezer limit to start nuking if it's underflown.
		pivot = rawdb.ReadLastPivotNumber(bc.db)
	)
	// updateFn rewinds the head block/snap markers to the given header and
	// reports (via the bool) whether the freezer underflowed and the chain
	// segment above the new head must be wiped.
	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
		// Rewind the blockchain, ensuring we don't end up with a stateless head
		// block. Note, depth equality is permitted to allow using SetHead as a
		// chain reparation mechanism without deleting any data!
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.Number.Uint64() {
			var newHeadBlock *types.Header
			newHeadBlock, rootNumber = bc.rewindHead(header, root)
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update in-memory markers directly.
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.Number.Uint64()))

			// The head state is missing, which is only possible in the path-based
			// scheme. This situation occurs when the chain head is rewound below
			// the pivot point. In this scenario, there is no possible recovery
			// approach except for rerunning a snap sync. Do nothing here until the
			// state syncer picks it up.
			if !bc.HasState(newHeadBlock.Root) {
				if newHeadBlock.Number.Uint64() != 0 {
					log.Crit("Chain is stateless at a non-genesis block")
				}
				log.Info("Chain is stateless, wait state sync", "number", newHeadBlock.Number, "hash", newHeadBlock.Hash())
			}
		}
		// Rewind the snap block in a simpleton way to the target head
		if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() {
			newHeadSnapBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If either blocks reached nil, reset to the genesis state
			if newHeadSnapBlock == nil {
				newHeadSnapBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadSnapBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update in-memory markers directly.
			bc.currentSnapBlock.Store(newHeadSnapBlock.Header())
			headFastBlockGauge.Update(int64(newHeadSnapBlock.NumberU64()))
		}
		var (
			headHeader = bc.CurrentBlock()
			headNumber = headHeader.Number.Uint64()
		)
		// If setHead underflown the freezer threshold and the block processing
		// intent afterwards is full block importing, delete the chain segment
		// between the stateful-block and the sethead target.
		var wipe bool
		frozen, _ := bc.db.Ancients()
		if headNumber+1 < frozen {
			wipe = pivot == nil || headNumber >= *pivot
		}
		return headHeader, wipe // Only force wipe if full synced
	}
	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// The chain segment, such as the block header, canonical hash,
			// body, and receipt, will be removed from the ancient store
			// in one go.
			//
			// The hash-to-number mapping in the key-value store will be
			// removed by the hc.SetHead function.
		} else {
			// Remove the associated body and receipts from the key-value store.
			// The header, hash-to-number mapping, and canonical hash will be
			// removed by the hc.SetHead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, log index, etc
	}
	// If SetHead was only called as a chain reparation method, try to skip
	// touching the header chain altogether, unless the freezer is broken
	if repair {
		if target, force := updateFn(bc.db, bc.CurrentBlock()); force {
			bc.hc.SetHead(target.Number.Uint64(), nil, delFn)
		}
	} else {
		// Rewind the chain to the requested head and keep going backwards until a
		// block with a state is found or snap sync pivot is passed
		if time > 0 {
			log.Warn("Rewinding blockchain to timestamp", "target", time)
			bc.hc.SetHeadWithTimestamp(time, updateFn, delFn)
		} else {
			log.Warn("Rewinding blockchain to block", "target", head)
			bc.hc.SetHead(head, updateFn, delFn)
		}
	}
	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()

	// Clear safe block, finalized block if needed
	if safe := bc.CurrentSafeBlock(); safe != nil && head < safe.Number.Uint64() {
		log.Warn("SetHead invalidated safe block")
		bc.SetSafe(nil)
	}
	if finalized := bc.CurrentFinalBlock(); finalized != nil && head < finalized.Number.Uint64() {
		log.Error("SetHead invalidated finalized block")
		bc.SetFinalized(nil)
	}
	return rootNumber, bc.loadLastState()
}
  1090  
  1091  // SnapSyncCommitHead sets the current head block to the one defined by the hash
  1092  // irrelevant what the chain contents were prior.
  1093  func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
  1094  	// Make sure that both the block as well at its state trie exists
  1095  	block := bc.GetBlockByHash(hash)
  1096  	if block == nil {
  1097  		return fmt.Errorf("non existent block [%x..]", hash[:4])
  1098  	}
  1099  	// Reset the trie database with the fresh snap synced state.
  1100  	root := block.Root()
  1101  	if bc.triedb.Scheme() == rawdb.PathScheme {
  1102  		if err := bc.triedb.Enable(root); err != nil {
  1103  			return err
  1104  		}
  1105  	}
  1106  	if !bc.HasState(root) {
  1107  		return fmt.Errorf("non existent state [%x..]", root[:4])
  1108  	}
  1109  	// If all checks out, manually set the head block.
  1110  	if !bc.chainmu.TryLock() {
  1111  		return errChainStopped
  1112  	}
  1113  	bc.currentBlock.Store(block.Header())
  1114  	headBlockGauge.Update(int64(block.NumberU64()))
  1115  	bc.chainmu.Unlock()
  1116  
  1117  	// Destroy any existing state snapshot and regenerate it in the background,
  1118  	// also resuming the normal maintenance of any previously paused snapshot.
  1119  	if bc.snaps != nil {
  1120  		bc.snaps.Rebuild(root)
  1121  	}
  1122  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
  1123  	return nil
  1124  }
  1125  
// Reset purges the entire blockchain, restoring it to its genesis state.
// It is a convenience wrapper around ResetWithGenesisBlock using the chain's
// own genesis block.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}
  1130  
  1131  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
  1132  // specified genesis state.
  1133  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
  1134  	// Dump the entire block chain and purge the caches
  1135  	if err := bc.SetHead(0); err != nil {
  1136  		return err
  1137  	}
  1138  	if !bc.chainmu.TryLock() {
  1139  		return errChainStopped
  1140  	}
  1141  	defer bc.chainmu.Unlock()
  1142  
  1143  	// Prepare the genesis block and reinitialise the chain
  1144  	batch := bc.db.NewBatch()
  1145  	rawdb.WriteBlock(batch, genesis)
  1146  	if err := batch.Write(); err != nil {
  1147  		log.Crit("Failed to write genesis block", "err", err)
  1148  	}
  1149  	bc.writeHeadBlock(genesis)
  1150  
  1151  	// Last update all in-memory chain markers
  1152  	bc.genesisBlock = genesis
  1153  	bc.currentBlock.Store(bc.genesisBlock.Header())
  1154  	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
  1155  	bc.hc.SetGenesis(bc.genesisBlock.Header())
  1156  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
  1157  	bc.currentSnapBlock.Store(bc.genesisBlock.Header())
  1158  	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
  1159  
  1160  	// Reset history pruning status.
  1161  	return bc.initializeHistoryPruning(0)
  1162  }
  1163  
// Export writes the active chain to the given writer.
// It is equivalent to ExportN from genesis to the current head block.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().Number.Uint64())
}
  1168  
  1169  // ExportN writes a subset of the active chain to the given writer.
  1170  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
  1171  	if first > last {
  1172  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
  1173  	}
  1174  	log.Info("Exporting batch of blocks", "count", last-first+1)
  1175  
  1176  	var (
  1177  		parentHash common.Hash
  1178  		start      = time.Now()
  1179  		reported   = time.Now()
  1180  	)
  1181  	for nr := first; nr <= last; nr++ {
  1182  		block := bc.GetBlockByNumber(nr)
  1183  		if block == nil {
  1184  			return fmt.Errorf("export failed on #%d: not found", nr)
  1185  		}
  1186  		if nr > first && block.ParentHash() != parentHash {
  1187  			return errors.New("export failed: chain reorg during export")
  1188  		}
  1189  		parentHash = block.Hash()
  1190  		if err := block.EncodeRLP(w); err != nil {
  1191  			return err
  1192  		}
  1193  		if time.Since(reported) >= statsReportLimit {
  1194  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
  1195  			reported = time.Now()
  1196  		}
  1197  	}
  1198  	return nil
  1199  }
  1200  
  1201  // writeHeadBlock injects a new head block into the current block chain. This method
  1202  // assumes that the block is indeed a true head. It will also reset the head
  1203  // header and the head snap sync block to this very same block if they are older
  1204  // or if they are on a different side chain.
  1205  //
  1206  // Note, this function assumes that the `mu` mutex is held!
  1207  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
  1208  	// Add the block to the canonical chain number scheme and mark as the head
  1209  	batch := bc.db.NewBatch()
  1210  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
  1211  	rawdb.WriteHeadFastBlockHash(batch, block.Hash())
  1212  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
  1213  	rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1214  	rawdb.WriteHeadBlockHash(batch, block.Hash())
  1215  
  1216  	// Flush the whole batch into the disk, exit the node if failed
  1217  	if err := batch.Write(); err != nil {
  1218  		log.Crit("Failed to update chain indexes and markers", "err", err)
  1219  	}
  1220  	// Update all in-memory chain markers in the last step
  1221  	bc.hc.SetCurrentHeader(block.Header())
  1222  
  1223  	bc.currentSnapBlock.Store(block.Header())
  1224  	headFastBlockGauge.Update(int64(block.NumberU64()))
  1225  
  1226  	bc.currentBlock.Store(block.Header())
  1227  	headBlockGauge.Update(int64(block.NumberU64()))
  1228  }
  1229  
  1230  // stopWithoutSaving stops the blockchain service. If any imports are currently in progress
  1231  // it will abort them using the procInterrupt. This method stops all running
  1232  // goroutines, but does not do all the post-stop work of persisting data.
  1233  // OBS! It is generally recommended to use the Stop method!
  1234  // This method has been exposed to allow tests to stop the blockchain while simulating
  1235  // a crash.
  1236  func (bc *BlockChain) stopWithoutSaving() {
  1237  	if !bc.stopping.CompareAndSwap(false, true) {
  1238  		return
  1239  	}
  1240  	// Signal shutdown tx indexer.
  1241  	if bc.txIndexer != nil {
  1242  		bc.txIndexer.close()
  1243  	}
  1244  	// Unsubscribe all subscriptions registered from blockchain.
  1245  	bc.scope.Close()
  1246  
  1247  	// Signal shutdown to all goroutines.
  1248  	bc.InterruptInsert(true)
  1249  
  1250  	// Now wait for all chain modifications to end and persistent goroutines to exit.
  1251  	//
  1252  	// Note: Close waits for the mutex to become available, i.e. any running chain
  1253  	// modification will have exited when Close returns. Since we also called StopInsert,
  1254  	// the mutex should become available quickly. It cannot be taken again after Close has
  1255  	// returned.
  1256  	bc.chainmu.Close()
  1257  }
  1258  
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
//
// After halting all goroutines, it journals the state snapshot and — depending
// on the trie database scheme — either journals the in-memory trie nodes
// (path scheme) or commits a few recent states to disk (hash scheme, non-archive),
// before finally closing the trie database.
func (bc *BlockChain) Stop() {
	bc.stopWithoutSaving()

	// Ensure that the entirety of the state snapshot is journaled to disk.
	var snapBase common.Hash
	if bc.snaps != nil {
		var err error
		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root); err != nil {
			log.Error("Failed to journal state snapshot", "err", err)
		}
		bc.snaps.Release()
	}
	if bc.triedb.Scheme() == rawdb.PathScheme {
		// Ensure that the in-memory trie nodes are journaled to disk properly.
		if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil {
			log.Info("Failed to journal in-memory trie nodes", "err", err)
		}
	} else {
		// Ensure the state of a recent block is also stored to disk before exiting.
		// We're writing three different states to catch different restart scenarios:
		//  - HEAD:     So we don't need to reprocess any blocks in the general case
		//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
		//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
		if !bc.cfg.ArchiveMode {
			triedb := bc.triedb

			for _, offset := range []uint64{0, 1, state.TriesInMemory - 1} {
				if number := bc.CurrentBlock().Number.Uint64(); number > offset {
					recent := bc.GetBlockByNumber(number - offset)

					log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
					if err := triedb.Commit(recent.Root(), true); err != nil {
						log.Error("Failed to commit recent state trie", "err", err)
					}
				}
			}
			// Also persist the snapshot's base state so the journal stays valid.
			if snapBase != (common.Hash{}) {
				log.Info("Writing snapshot state to disk", "root", snapBase)
				if err := triedb.Commit(snapBase, true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
			// Drop every remaining in-memory trie reference accumulated for GC.
			for !bc.triegc.Empty() {
				triedb.Dereference(bc.triegc.PopItem())
			}
			if _, nodes, _ := triedb.Size(); nodes != 0 { // all memory is contained within the nodes return for hashdb
				log.Error("Dangling trie nodes after full cleanup")
			}
		}
	}
	// Allow tracers to clean-up and release resources.
	if bc.logger != nil && bc.logger.OnClose != nil {
		bc.logger.OnClose()
	}
	// Close the trie database, release all the held resources as the last step.
	if err := bc.triedb.Close(); err != nil {
		log.Error("Failed to close trie database", "err", err)
	}
	log.Info("Blockchain stopped")
}
  1321  
  1322  // InterruptInsert interrupts all insertion methods, causing them to return
  1323  // errInsertionInterrupted as soon as possible, or resume the chain insertion
  1324  // if required.
  1325  func (bc *BlockChain) InterruptInsert(on bool) {
  1326  	if on {
  1327  		bc.procInterrupt.Store(true)
  1328  	} else {
  1329  		bc.procInterrupt.Store(false)
  1330  	}
  1331  }
  1332  
// insertStopped reports whether insertion has been interrupted, i.e. whether
// InterruptInsert(true) has been called without a matching resume.
func (bc *BlockChain) insertStopped() bool {
	return bc.procInterrupt.Load()
}
  1337  
// WriteStatus is the status of a block write operation: it reports whether the
// block ended up on the canonical chain, on a side chain, or was not written.
type WriteStatus byte

const (
	// NonStatTy indicates the block was not written (e.g. an error occurred).
	NonStatTy WriteStatus = iota
	// CanonStatTy indicates the block was written as part of the canonical chain.
	CanonStatTy
	// SideStatTy indicates the block was written to a side chain.
	SideStatTy
)
  1346  
// InsertReceiptChain inserts a batch of blocks along with their receipts into
// the database. Unlike InsertChain, this function does not verify the state root
// in the blocks. It is used exclusively for snap sync. All the inserted blocks
// will be regarded as canonical, chain reorg is not supported.
//
// The optional ancientLimit can also be specified and chain segment before that
// will be directly stored in the ancient, getting rid of the chain migration.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []rlp.RawValue, ancientLimit uint64) (int, error) {
	// Verify the supplied headers before insertion without lock
	var headers []*types.Header
	for _, block := range blockChain {
		headers = append(headers, block.Header())
		// Here we also validate that blob transactions in the block do not
		// contain a sidecar. While the sidecar does not affect the block hash
		// or tx hash, sending blobs within a block is not allowed.
		for txIndex, tx := range block.Transactions() {
			if tx.Type() == types.BlobTxType && tx.BlobTxSidecar() != nil {
				return 0, fmt.Errorf("block #%d contains unexpected blob sidecar in tx at index %d", block.NumberU64(), txIndex)
			}
		}
	}
	if n, err := bc.hc.ValidateHeaderChain(headers); err != nil {
		return n, err
	}
	// Hold the mutation lock
	if !bc.chainmu.TryLock() {
		return 0, errChainStopped
	}
	defer bc.chainmu.Unlock()

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		size  = int64(0) // total bytes written, for the final log line
	)
	// updateHead updates the head header and head snap block flags.
	// The on-disk markers are flushed atomically in one batch before the
	// in-memory pointers are advanced.
	updateHead := func(header *types.Header) error {
		batch := bc.db.NewBatch()
		hash := header.Hash()
		rawdb.WriteHeadHeaderHash(batch, hash)
		rawdb.WriteHeadFastBlockHash(batch, hash)
		if err := batch.Write(); err != nil {
			return err
		}
		bc.hc.currentHeader.Store(header)
		bc.currentSnapBlock.Store(header)
		headHeaderGauge.Update(header.Number.Int64())
		headFastBlockGauge.Update(header.Number.Int64())
		return nil
	}
	// writeAncient writes blockchain and corresponding receipt chain into ancient store.
	//
	// this function only accepts canonical chain data. All side chain will be reverted
	// eventually.
	writeAncient := func(blockChain types.Blocks, receiptChain []rlp.RawValue) (int, error) {
		// Ensure genesis is in the ancient store
		if blockChain[0].NumberU64() == 1 {
			if frozen, _ := bc.db.Ancients(); frozen == 0 {
				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []rlp.RawValue{rlp.EmptyList})
				if err != nil {
					log.Error("Error writing genesis to ancients", "err", err)
					return 0, err
				}
				size += writeSize
				log.Info("Wrote genesis to ancients")
			}
		}
		// Write all chain data to ancients.
		writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain)
		if err != nil {
			log.Error("Error importing chain data to ancients", "err", err)
			return 0, err
		}
		size += writeSize

		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
		if err := bc.db.SyncAncient(); err != nil {
			return 0, err
		}
		// Write hash to number mappings
		batch := bc.db.NewBatch()
		for _, block := range blockChain {
			rawdb.WriteHeaderNumber(batch, block.Hash(), block.NumberU64())
		}
		if err := batch.Write(); err != nil {
			return 0, err
		}
		// Update the current snap block because all block data is now present in DB.
		if err := updateHead(blockChain[len(blockChain)-1].Header()); err != nil {
			return 0, err
		}
		stats.processed += int32(len(blockChain))
		return 0, nil
	}

	// writeLive writes the blockchain and corresponding receipt chain to the active store.
	//
	// Notably, in different snap sync cycles, the supplied chain may partially reorganize
	// existing local chain segments (reorg around the chain tip). The reorganized part
	// will be included in the provided chain segment, and stale canonical markers will be
	// silently rewritten. Therefore, no explicit reorg logic is needed.
	writeLive := func(blockChain types.Blocks, receiptChain []rlp.RawValue) (int, error) {
		var (
			skipPresenceCheck = false
			batch             = bc.db.NewBatch()
		)
		for i, block := range blockChain {
			// Short circuit insertion if shutting down or processing failed
			if bc.insertStopped() {
				return 0, errInsertionInterrupted
			}
			if !skipPresenceCheck {
				// Ignore if the entire data is already known
				if bc.HasBlock(block.Hash(), block.NumberU64()) {
					stats.ignored++
					continue
				} else {
					// If block N is not present, neither are the later blocks.
					// This should be true, but if we are mistaken, the shortcut
					// here will only cause overwriting of some existing data
					skipPresenceCheck = true
				}
			}
			// Write all the data out into the database
			rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
			rawdb.WriteBlock(batch, block)
			rawdb.WriteRawReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])

			// Write everything belongs to the blocks into the database. So that
			// we can ensure all components of body is completed(body, receipts)
			// except transaction indexes(will be created once sync is finished).
			if batch.ValueSize() >= ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return 0, err
				}
				size += int64(batch.ValueSize())
				batch.Reset()
			}
			stats.processed++
		}
		// Write everything belongs to the blocks into the database. So that
		// we can ensure all components of body is completed(body, receipts,
		// tx indexes)
		if batch.ValueSize() > 0 {
			size += int64(batch.ValueSize())
			if err := batch.Write(); err != nil {
				return 0, err
			}
		}
		if err := updateHead(blockChain[len(blockChain)-1].Header()); err != nil {
			return 0, err
		}
		return 0, nil
	}

	// Split the supplied blocks into two groups, according to the
	// given ancient limit. Blocks below the limit go to the ancient
	// (frozen) store, the rest to the live key-value store.
	index := sort.Search(len(blockChain), func(i int) bool {
		return blockChain[i].NumberU64() >= ancientLimit
	})
	if index > 0 {
		if n, err := writeAncient(blockChain[:index], receiptChain[:index]); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}
	if index != len(blockChain) {
		if n, err := writeLive(blockChain[index:], receiptChain[index:]); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}
	var (
		head    = blockChain[len(blockChain)-1]
		context = []interface{}{
			"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
			"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
			"size", common.StorageSize(size),
		}
	)
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Debug("Imported new block receipts", context...)
	return 0, nil
}
  1537  
  1538  // writeBlockWithoutState writes only the block and its metadata to the database,
  1539  // but does not write any state. This is used to construct competing side forks
  1540  // up to the point where they exceed the canonical total difficulty.
  1541  func (bc *BlockChain) writeBlockWithoutState(block *types.Block) (err error) {
  1542  	if bc.insertStopped() {
  1543  		return errInsertionInterrupted
  1544  	}
  1545  	batch := bc.db.NewBatch()
  1546  	rawdb.WriteBlock(batch, block)
  1547  	if err := batch.Write(); err != nil {
  1548  		log.Crit("Failed to write block into disk", "err", err)
  1549  	}
  1550  	return nil
  1551  }
  1552  
  1553  // writeKnownBlock updates the head block flag with a known block
  1554  // and introduces chain reorg if necessary.
  1555  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1556  	current := bc.CurrentBlock()
  1557  	if block.ParentHash() != current.Hash() {
  1558  		if err := bc.reorg(current, block.Header()); err != nil {
  1559  			return err
  1560  		}
  1561  	}
  1562  	bc.writeHeadBlock(block)
  1563  	return nil
  1564  }
  1565  
// writeBlockWithState writes block, metadata and corresponding state data to the
// database. For hash-scheme full nodes it additionally manages the in-memory
// trie garbage collector: referencing the new state, capping dirty memory and
// periodically committing a matured trie to disk.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error {
	if !bc.HasHeader(block.ParentHash(), block.NumberU64()-1) {
		return consensus.ErrUnknownAncestor
	}
	// Irrelevant of the canonical status, write the block itself to the database.
	//
	// Note all the components of block(hash->number map, header, body, receipts)
	// should be written atomically. BlockBatch is used for containing all components.
	blockBatch := bc.db.NewBatch()
	rawdb.WriteBlock(blockBatch, block)
	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
	rawdb.WritePreimages(blockBatch, statedb.Preimages())
	if err := blockBatch.Write(); err != nil {
		log.Crit("Failed to write block into disk", "err", err)
	}
	// Commit all cached state changes into underlying memory database.
	root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
	if err != nil {
		return err
	}
	// If node is running in path mode, skip explicit gc operation
	// which is unnecessary in this mode.
	if bc.triedb.Scheme() == rawdb.PathScheme {
		return nil
	}
	// If we're running an archive node, always flush
	if bc.cfg.ArchiveMode {
		return bc.triedb.Commit(root, false)
	}
	// Full but not archive node, do proper garbage collection
	bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
	// Push with the negated block number as priority so the queue pops the
	// oldest (lowest-numbered) tries first during garbage collection below.
	bc.triegc.Push(root, -int64(block.NumberU64()))

	// Flush limits are not considered for the first TriesInMemory blocks.
	current := block.NumberU64()
	if current <= state.TriesInMemory {
		return nil
	}
	// If we exceeded our memory allowance, flush matured singleton nodes to disk
	var (
		_, nodes, imgs = bc.triedb.Size() // all memory is contained within the nodes return for hashdb
		limit          = common.StorageSize(bc.cfg.TrieDirtyLimit) * 1024 * 1024
	)
	if nodes > limit || imgs > 4*1024*1024 {
		bc.triedb.Cap(limit - ethdb.IdealBatchSize)
	}
	// Find the next state trie we need to commit
	chosen := current - state.TriesInMemory
	flushInterval := time.Duration(bc.flushInterval.Load())
	// If we exceeded time allowance, flush an entire trie to disk
	if bc.gcproc > flushInterval {
		// If the header is missing (canonical chain behind), we're reorging a low
		// diff sidechain. Suspend committing until this operation is completed.
		header := bc.GetHeaderByNumber(chosen)
		if header == nil {
			log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
		} else {
			// If we're exceeding limits but haven't reached a large enough memory gap,
			// warn the user that the system is becoming unstable.
			if chosen < bc.lastWrite+state.TriesInMemory && bc.gcproc >= 2*flushInterval {
				log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/state.TriesInMemory)
			}
			// Flush an entire trie and restart the counters
			bc.triedb.Commit(header.Root, true)
			bc.lastWrite = chosen
			bc.gcproc = 0
		}
	}
	// Garbage collect anything below our required write retention
	for !bc.triegc.Empty() {
		root, number := bc.triegc.Pop()
		// Items are stored with negated numbers; a popped item above the
		// retention cutoff is pushed back and collection stops.
		if uint64(-number) > chosen {
			bc.triegc.Push(root, number)
			break
		}
		bc.triedb.Dereference(root)
	}
	return nil
}
  1647  
  1648  // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
  1649  // This function expects the chain mutex to be held.
  1650  func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1651  	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
  1652  		return NonStatTy, err
  1653  	}
  1654  	currentBlock := bc.CurrentBlock()
  1655  
  1656  	// Reorganise the chain if the parent is not the head block
  1657  	if block.ParentHash() != currentBlock.Hash() {
  1658  		if err := bc.reorg(currentBlock, block.Header()); err != nil {
  1659  			return NonStatTy, err
  1660  		}
  1661  	}
  1662  
  1663  	// Set new head.
  1664  	bc.writeHeadBlock(block)
  1665  
  1666  	bc.chainFeed.Send(ChainEvent{Header: block.Header()})
  1667  	if len(logs) > 0 {
  1668  		bc.logsFeed.Send(logs)
  1669  	}
  1670  	// In theory, we should fire a ChainHeadEvent when we inject
  1671  	// a canonical block, but sometimes we can insert a batch of
  1672  	// canonical blocks. Avoid firing too many ChainHeadEvents,
  1673  	// we will fire an accumulated ChainHeadEvent and disable fire
  1674  	// event here.
  1675  	if emitHeadEvent {
  1676  		bc.chainHeadFeed.Send(ChainHeadEvent{Header: block.Header()})
  1677  	}
  1678  	return CanonStatTy, nil
  1679  }
  1680  
  1681  // InsertChain attempts to insert the given batch of blocks in to the canonical
  1682  // chain or, otherwise, create a fork. If an error is returned it will return
  1683  // the index number of the failing block as well an error describing what went
  1684  // wrong. After insertion is done, all accumulated events will be fired.
  1685  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1686  	// Sanity check that we have something meaningful to import
  1687  	if len(chain) == 0 {
  1688  		return 0, nil
  1689  	}
  1690  
  1691  	// Do a sanity check that the provided chain is actually ordered and linked.
  1692  	for i := 1; i < len(chain); i++ {
  1693  		block, prev := chain[i], chain[i-1]
  1694  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1695  			log.Error("Non contiguous block insert",
  1696  				"number", block.Number(),
  1697  				"hash", block.Hash(),
  1698  				"parent", block.ParentHash(),
  1699  				"prevnumber", prev.Number(),
  1700  				"prevhash", prev.Hash(),
  1701  			)
  1702  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(),
  1703  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1704  		}
  1705  	}
  1706  	// Pre-checks passed, start the full block imports
  1707  	if !bc.chainmu.TryLock() {
  1708  		return 0, errChainStopped
  1709  	}
  1710  	defer bc.chainmu.Unlock()
  1711  
  1712  	_, n, err := bc.insertChain(chain, true, false) // No witness collection for mass inserts (would get super large)
  1713  	return n, err
  1714  }
  1715  
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness bool) (*stateless.Witness, int, error) {
	// If the chain is terminating, don't even bother starting up.
	if bc.insertStopped() {
		return nil, 0, nil
	}

	// Signal block processing activity: the feed fires true on the first
	// concurrent insertion and false again when the last one finishes.
	if atomic.AddInt32(&bc.blockProcCounter, 1) == 1 {
		bc.blockProcFeed.Send(true)
	}
	defer func() {
		if atomic.AddInt32(&bc.blockProcCounter, -1) == 0 {
			bc.blockProcFeed.Send(false)
		}
	}()

	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	SenderCacher().RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)

	var (
		stats     = insertStats{startTime: mclock.Now()}
		lastCanon *types.Block
	)
	// Fire a single chain head event if we've progressed the chain
	defer func() {
		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
			bc.chainHeadFeed.Send(ChainHeadEvent{Header: lastCanon.Header()})
		}
	}()
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	for i, block := range chain {
		headers[i] = block.Header()
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers)
	defer close(abort)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, bc.validator)
	block, err := it.next()

	// Left-trim all the known blocks that don't need to build snapshot
	if bc.skipBlock(err, it) {
		// First block (and state) is known
		//   1. We did a roll-back, and should now do a re-import
		//   2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
		//      from the canonical chain, which has not been verified.
		// Skip all known blocks that are behind us.
		current := bc.CurrentBlock()
		for block != nil && bc.skipBlock(err, it) {
			if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
				break
			}
			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
			stats.ignored++

			block, err = it.next()
		}
		// The remaining blocks are still known blocks, the only scenario here is:
		// During the snap sync, the pivot point is already submitted but rollback
		// happens. Then node resets the head full block to a lower height via `rollback`
		// and leaves a few known blocks in the database.
		//
		// When node runs a snap sync again, it can re-import a batch of known blocks via
		// `insertChain` while a part of them have higher total difficulty than current
		// head full block(new pivot point).
		for block != nil && bc.skipBlock(err, it) {
			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
			if err := bc.writeKnownBlock(block); err != nil {
				return nil, it.index, err
			}
			lastCanon = block

			block, err = it.next()
		}
		// Falls through to the block import
	}
	// Dispatch on the verification outcome of the first non-known block.
	switch {
	// First block is pruned
	case errors.Is(err, consensus.ErrPrunedAncestor):
		if setHead {
			// First block is pruned, insert as sidechain and reorg only if TD grows enough
			log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
			return bc.insertSideChain(block, it, makeWitness)
		} else {
			// We're post-merge and the parent is pruned, try to recover the parent state
			log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
			_, err := bc.recoverAncestors(block, makeWitness)
			return nil, it.index, err
		}
	// Some other error(except ErrKnownBlock) occurred, abort.
	// ErrKnownBlock is allowed here since some known blocks
	// still need re-execution to generate snapshots that are missing
	case err != nil && !errors.Is(err, ErrKnownBlock):
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return nil, it.index, err
	}
	// Track the singleton witness from this chain insertion (if any)
	var witness *stateless.Witness

	// Main import loop: process each block in order until the iterator is
	// exhausted, an error occurs, or shutdown is requested.
	for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if bc.insertStopped() {
			log.Debug("Abort during block processing")
			break
		}
		// If the block is known (in the middle of the chain), it's a special case for
		// Clique blocks where they can share state among each other, so importing an
		// older block might complete the state of the subsequent one. In this case,
		// just skip the block (we already validated it once fully (and crashed), since
		// its header and body was already in the database). But if the corresponding
		// snapshot layer is missing, forcibly rerun the execution to build it.
		if bc.skipBlock(err, it) {
			logger := log.Debug
			if bc.chainConfig.Clique == nil {
				logger = log.Warn
			}
			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"root", block.Root())

			// Special case. Commit the empty receipt slice if we meet the known
			// block in the middle. It can only happen in the clique chain. Whenever
			// we insert blocks via `insertSideChain`, we only commit `td`, `header`
			// and `body` if it's non-existent. Since we don't have receipts without
			// reexecution, so nothing to commit. But if the sidechain will be adopted
			// as the canonical chain eventually, it needs to be reexecuted for missing
			// state, but if it's this special case here(skip reexecution) we will lose
			// the empty receipt entry.
			if len(block.Transactions()) == 0 {
				rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
			} else {
				log.Error("Please file an issue, skip known block execution without receipt",
					"hash", block.Hash(), "number", block.NumberU64())
			}
			if err := bc.writeKnownBlock(block); err != nil {
				return nil, it.index, err
			}
			stats.processed++
			if bc.logger != nil && bc.logger.OnSkippedBlock != nil {
				bc.logger.OnSkippedBlock(tracing.BlockEvent{
					Block:     block,
					Finalized: bc.CurrentFinalBlock(),
					Safe:      bc.CurrentSafeBlock(),
				})
			}
			// We can assume that logs are empty here, since the only way for consecutive
			// Clique blocks to have the same state is if there are no transactions.
			lastCanon = block
			continue
		}
		// Retrieve the parent block and it's state to execute on top
		parent := it.previous()
		if parent == nil {
			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
		}
		// The traced section of block import.
		start := time.Now()
		res, err := bc.processBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
		if err != nil {
			return nil, it.index, err
		}
		// Report the import stats before returning the various results
		stats.processed++
		stats.usedGas += res.usedGas
		witness = res.witness

		var snapDiffItems, snapBufItems common.StorageSize
		if bc.snaps != nil {
			snapDiffItems, snapBufItems = bc.snaps.Size()
		}
		trieDiffNodes, trieBufNodes, _ := bc.triedb.Size()
		stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, setHead)

		// Print confirmation that a future fork is scheduled, but not yet active.
		bc.logForkReadiness(block)

		if !setHead {
			// After merge we expect few side chains. Simply count
			// all blocks the CL gives us for GC processing time
			bc.gcproc += res.procTime
			return witness, it.index, nil // Direct block insertion of a single block
		}
		switch res.status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())

			lastCanon = block

			// Only count canonical blocks for GC processing time
			bc.gcproc += res.procTime

		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())

		default:
			// This in theory is impossible, but lets be nice to our future selves and leave
			// a log, instead of trying to track down blocks imports that don't emit logs.
			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}

	// Anything still left in the iterator was not processed.
	stats.ignored += it.remaining()
	return witness, it.index, err
}
  1938  
// blockProcessingResult is a summary of block processing
// used for updating the stats.
type blockProcessingResult struct {
	// usedGas is the total gas consumed by the block's transactions.
	usedGas uint64
	// procTime is the wall-clock time spent processing the block.
	procTime time.Duration
	// status records whether the block was written as canonical, side or not at all.
	status WriteStatus
	// witness is the stateless execution witness, if one was collected.
	witness *stateless.Witness
}
  1947  
// processBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
//
// The parent state, identified by parentRoot, must be resolvable through
// bc.statedb. When setHead is true the block is also promoted to the new chain
// head via writeBlockAndSetHead; otherwise it is only persisted. A stateless
// witness is collected and returned when makeWitness is set, or when stateless
// self-validation is configured. The blockEndErr result is named so the
// deferred OnBlockEnd tracing hook can observe the error this function
// ultimately returns.
func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) {
	var (
		err       error
		startTime = time.Now()
		statedb   *state.StateDB
		interrupt atomic.Bool
	)
	defer interrupt.Store(true) // terminate the prefetch at the end

	if bc.cfg.NoPrefetch {
		// Prefetching disabled: build the state directly on top of the parent root.
		statedb, err = state.New(parentRoot, bc.statedb)
		if err != nil {
			return nil, err
		}
	} else {
		// If prefetching is enabled, run that against the current state to pre-cache
		// transactions and probabilistically some of the account/storage trie nodes.
		//
		// Note: the main processor and prefetcher share the same reader with a local
		// cache for mitigating the overhead of state access.
		prefetch, process, err := bc.statedb.ReadersWithCacheStats(parentRoot)
		if err != nil {
			return nil, err
		}
		// The throwaway state is mutated only by the prefetcher and discarded afterwards.
		throwaway, err := state.NewWithReader(parentRoot, bc.statedb, prefetch)
		if err != nil {
			return nil, err
		}
		statedb, err = state.NewWithReader(parentRoot, bc.statedb, process)
		if err != nil {
			return nil, err
		}
		// Upload the statistics of reader at the end
		defer func() {
			stats := prefetch.GetStats()
			accountCacheHitPrefetchMeter.Mark(stats.AccountHit)
			accountCacheMissPrefetchMeter.Mark(stats.AccountMiss)
			storageCacheHitPrefetchMeter.Mark(stats.StorageHit)
			storageCacheMissPrefetchMeter.Mark(stats.StorageMiss)
			stats = process.GetStats()
			accountCacheHitMeter.Mark(stats.AccountHit)
			accountCacheMissMeter.Mark(stats.AccountMiss)
			storageCacheHitMeter.Mark(stats.StorageHit)
			storageCacheMissMeter.Mark(stats.StorageMiss)
		}()

		// Run the prefetcher concurrently; it observes the interrupt flag which
		// is set by the deferred Store above once processing is done.
		go func(start time.Time, throwaway *state.StateDB, block *types.Block) {
			// Disable tracing for prefetcher executions.
			vmCfg := bc.cfg.VmConfig
			vmCfg.Tracer = nil
			bc.prefetcher.Prefetch(block, throwaway, vmCfg, &interrupt)

			blockPrefetchExecuteTimer.Update(time.Since(start))
			if interrupt.Load() {
				blockPrefetchInterruptMeter.Mark(1)
			}
		}(time.Now(), throwaway, block)
	}

	// If we are past Byzantium, enable prefetching to pull in trie node paths
	// while processing transactions. Before Byzantium the prefetcher is mostly
	// useless due to the intermediate root hashing after each transaction.
	var witness *stateless.Witness
	if bc.chainConfig.IsByzantium(block.Number()) {
		// Generate witnesses either if we're self-testing, or if it's the
		// only block being inserted. A bit crude, but witnesses are huge,
		// so we refuse to make an entire chain of them.
		if bc.cfg.VmConfig.StatelessSelfValidation || makeWitness {
			witness, err = stateless.NewWitness(block.Header(), bc)
			if err != nil {
				return nil, err
			}
		}
		statedb.StartPrefetcher("chain", witness)
		defer statedb.StopPrefetcher()
	}

	// Fire the tracing hooks around the actual block execution.
	if bc.logger != nil && bc.logger.OnBlockStart != nil {
		bc.logger.OnBlockStart(tracing.BlockEvent{
			Block:     block,
			Finalized: bc.CurrentFinalBlock(),
			Safe:      bc.CurrentSafeBlock(),
		})
	}
	if bc.logger != nil && bc.logger.OnBlockEnd != nil {
		defer func() {
			bc.logger.OnBlockEnd(blockEndErr)
		}()
	}

	// Process block using the parent state as reference point
	pstart := time.Now()
	res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig)
	if err != nil {
		bc.reportBlock(block, res, err)
		return nil, err
	}
	ptime := time.Since(pstart)

	vstart := time.Now()
	if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
		bc.reportBlock(block, res, err)
		return nil, err
	}
	vtime := time.Since(vstart)

	// If witnesses was generated and stateless self-validation requested, do
	// that now. Self validation should *never* run in production, it's more of
	// a tight integration to enable running *all* consensus tests through the
	// witness builder/runner, which would otherwise be impossible due to the
	// various invalid chain states/behaviors being contained in those tests.
	xvstart := time.Now()
	if witness := statedb.Witness(); witness != nil && bc.cfg.VmConfig.StatelessSelfValidation {
		log.Warn("Running stateless self-validation", "block", block.Number(), "hash", block.Hash())

		// Remove critical computed fields from the block to force true recalculation
		context := block.Header()
		context.Root = common.Hash{}
		context.ReceiptHash = common.Hash{}

		task := types.NewBlockWithHeader(context).WithBody(*block.Body())

		// Run the stateless self-cross-validation
		crossStateRoot, crossReceiptRoot, err := ExecuteStateless(bc.chainConfig, bc.cfg.VmConfig, task, witness)
		if err != nil {
			return nil, fmt.Errorf("stateless self-validation failed: %v", err)
		}
		if crossStateRoot != block.Root() {
			return nil, fmt.Errorf("stateless self-validation root mismatch (cross: %x local: %x)", crossStateRoot, block.Root())
		}
		if crossReceiptRoot != block.ReceiptHash() {
			return nil, fmt.Errorf("stateless self-validation receipt root mismatch (cross: %x local: %x)", crossReceiptRoot, block.ReceiptHash())
		}
	}
	xvtime := time.Since(xvstart)
	proctime := time.Since(startTime) // processing + validation + cross validation

	// Update the metrics touched during block processing and validation
	accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
	storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
	if statedb.AccountLoaded != 0 {
		accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
	}
	if statedb.StorageLoaded != 0 {
		storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
	}
	accountUpdateTimer.Update(statedb.AccountUpdates)                                 // Account updates are complete(in validation)
	storageUpdateTimer.Update(statedb.StorageUpdates)                                 // Storage updates are complete(in validation)
	accountHashTimer.Update(statedb.AccountHashes)                                    // Account hashes are complete(in validation)
	triehash := statedb.AccountHashes                                                 // The time spent on tries hashing
	trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates                     // The time spent on tries update
	blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
	blockValidationTimer.Update(vtime - (triehash + trieUpdate))                      // The time spent on block validation
	blockCrossValidationTimer.Update(xvtime)                                          // The time spent on stateless cross validation

	// Write the block to the chain and get the status.
	var (
		wstart = time.Now()
		status WriteStatus
	)
	if !setHead {
		// Don't set the head, only insert the block
		err = bc.writeBlockWithState(block, res.Receipts, statedb)
	} else {
		status, err = bc.writeBlockAndSetHead(block, res.Receipts, res.Logs, statedb, false)
	}
	if err != nil {
		return nil, err
	}
	// Update the metrics touched during block commit
	accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
	storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
	snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
	triedbCommitTimer.Update(statedb.TrieDBCommits)     // Trie database commits are complete, we can mark them

	blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
	elapsed := time.Since(startTime) + 1 // prevent zero division
	blockInsertTimer.Update(elapsed)

	// TODO(rjl493456442) generalize the ResettingTimer
	mgasps := float64(res.GasUsed) * 1000 / float64(elapsed)
	chainMgaspsMeter.Update(time.Duration(mgasps))

	return &blockProcessingResult{
		usedGas:  res.GasUsed,
		procTime: proctime,
		status:   status,
		witness:  witness,
	}, nil
}
  2140  
// insertSideChain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
// insertSideChain is only used pre-merge.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, makeWitness bool) (*stateless.Witness, int, error) {
	var current = bc.CurrentBlock()

	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other errors means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.Number.Uint64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block which has it's state pruned
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exist! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return nil, it.index, errors.New("sidechain ghost-state attack")
			}
		}
		// Persist the block body without executing it; state is recreated later.
		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.writeBlockWithoutState(block); err != nil {
				return nil, it.index, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	//
	// Walk backwards from the last imported sidechain block until an ancestor
	// with available (or recoverable) state is found; those are the blocks
	// that must be re-executed to make the sidechain state available.
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		if bc.stateRecoverable(parent.Root) {
			if err := bc.triedb.Recover(parent.Root); err != nil {
				return nil, 0, err
			}
			break
		}
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return nil, it.index, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory uint64
	)
	// hashes/numbers were collected newest-first, so iterate in reverse to
	// re-execute the blocks in ascending order.
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, err := bc.insertChain(blocks, true, false); err != nil {
				return nil, 0, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if bc.insertStopped() {
				log.Debug("Abort during blocks processing")
				return nil, 0, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, true, makeWitness)
	}
	return nil, 0, nil
}
  2247  
// recoverAncestors finds the closest ancestor with available state and re-execute
// all the ancestor blocks since that.
// recoverAncestors is only used post-merge.
// We return the hash of the latest block that we could correctly validate.
func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (common.Hash, error) {
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
		parent  = block
	)
	// Walk backwards until an ancestor with present state is found, or until
	// a state that is merely un-rolled-back can be recovered in the triedb.
	for parent != nil && !bc.HasState(parent.Root()) {
		if bc.stateRecoverable(parent.Root()) {
			if err := bc.triedb.Recover(parent.Root()); err != nil {
				return common.Hash{}, err
			}
			break
		}
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.NumberU64())
		parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)

		// If the chain is terminating, stop iteration
		if bc.insertStopped() {
			log.Debug("Abort during blocks iteration")
			return common.Hash{}, errInsertionInterrupted
		}
	}
	if parent == nil {
		return common.Hash{}, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	//
	// hashes/numbers were collected newest-first; iterate in reverse so the
	// blocks are re-executed in ascending order, one at a time.
	for i := len(hashes) - 1; i >= 0; i-- {
		// If the chain is terminating, stop processing blocks
		if bc.insertStopped() {
			log.Debug("Abort during blocks processing")
			return common.Hash{}, errInsertionInterrupted
		}
		var b *types.Block
		if i == 0 {
			// The final entry is the requested block itself, already in memory.
			b = block
		} else {
			b = bc.GetBlock(hashes[i], numbers[i])
		}
		// A witness is only requested for the head block (i == 0).
		if _, _, err := bc.insertChain(types.Blocks{b}, false, makeWitness && i == 0); err != nil {
			return b.ParentHash(), err
		}
	}
	return block.Hash(), nil
}
  2298  
  2299  // collectLogs collects the logs that were generated or removed during the
  2300  // processing of a block. These logs are later announced as deleted or reborn.
  2301  func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
  2302  	var blobGasPrice *big.Int
  2303  	if b.ExcessBlobGas() != nil {
  2304  		blobGasPrice = eip4844.CalcBlobFee(bc.chainConfig, b.Header())
  2305  	}
  2306  	receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
  2307  	if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil {
  2308  		log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err)
  2309  	}
  2310  	var logs []*types.Log
  2311  	for _, receipt := range receipts {
  2312  		for _, log := range receipt.Logs {
  2313  			if removed {
  2314  				log.Removed = true
  2315  			}
  2316  			logs = append(logs, log)
  2317  		}
  2318  	}
  2319  	return logs
  2320  }
  2321  
  2322  // reorg takes two blocks, an old chain and a new chain and will reconstruct the
  2323  // blocks and inserts them to be part of the new canonical chain and accumulates
  2324  // potential missing transactions and post an event about them.
  2325  //
  2326  // Note the new head block won't be processed here, callers need to handle it
  2327  // externally.
  2328  func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error {
  2329  	var (
  2330  		newChain    []*types.Header
  2331  		oldChain    []*types.Header
  2332  		commonBlock *types.Header
  2333  	)
  2334  	// Reduce the longer chain to the same number as the shorter one
  2335  	if oldHead.Number.Uint64() > newHead.Number.Uint64() {
  2336  		// Old chain is longer, gather all transactions and logs as deleted ones
  2337  		for ; oldHead != nil && oldHead.Number.Uint64() != newHead.Number.Uint64(); oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1) {
  2338  			oldChain = append(oldChain, oldHead)
  2339  		}
  2340  	} else {
  2341  		// New chain is longer, stash all blocks away for subsequent insertion
  2342  		for ; newHead != nil && newHead.Number.Uint64() != oldHead.Number.Uint64(); newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1) {
  2343  			newChain = append(newChain, newHead)
  2344  		}
  2345  	}
  2346  	if oldHead == nil {
  2347  		return errInvalidOldChain
  2348  	}
  2349  	if newHead == nil {
  2350  		return errInvalidNewChain
  2351  	}
  2352  	// Both sides of the reorg are at the same number, reduce both until the common
  2353  	// ancestor is found
  2354  	for {
  2355  		// If the common ancestor was found, bail out
  2356  		if oldHead.Hash() == newHead.Hash() {
  2357  			commonBlock = oldHead
  2358  			break
  2359  		}
  2360  		// Remove an old block as well as stash away a new block
  2361  		oldChain = append(oldChain, oldHead)
  2362  		newChain = append(newChain, newHead)
  2363  
  2364  		// Step back with both chains
  2365  		oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1)
  2366  		if oldHead == nil {
  2367  			return errInvalidOldChain
  2368  		}
  2369  		newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1)
  2370  		if newHead == nil {
  2371  			return errInvalidNewChain
  2372  		}
  2373  	}
  2374  	// Ensure the user sees large reorgs
  2375  	if len(oldChain) > 0 && len(newChain) > 0 {
  2376  		logFn := log.Info
  2377  		msg := "Chain reorg detected"
  2378  		if len(oldChain) > 63 {
  2379  			msg = "Large chain reorg detected"
  2380  			logFn = log.Warn
  2381  		}
  2382  		logFn(msg, "number", commonBlock.Number, "hash", commonBlock.Hash(),
  2383  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  2384  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2385  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  2386  		blockReorgMeter.Mark(1)
  2387  	} else if len(newChain) > 0 {
  2388  		// Special case happens in the post merge stage that current head is
  2389  		// the ancestor of new head while these two blocks are not consecutive
  2390  		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number, "hash", newChain[0].Hash())
  2391  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2392  	} else {
  2393  		// len(newChain) == 0 && len(oldChain) > 0
  2394  		// rewind the canonical chain to a lower point.
  2395  		log.Error("Impossible reorg, please file an issue", "oldnum", oldHead.Number, "oldhash", oldHead.Hash(), "oldblocks", len(oldChain), "newnum", newHead.Number, "newhash", newHead.Hash(), "newblocks", len(newChain))
  2396  	}
  2397  	// Acquire the tx-lookup lock before mutation. This step is essential
  2398  	// as the txlookups should be changed atomically, and all subsequent
  2399  	// reads should be blocked until the mutation is complete.
  2400  	bc.txLookupLock.Lock()
  2401  
  2402  	// Reorg can be executed, start reducing the chain's old blocks and appending
  2403  	// the new blocks
  2404  	var (
  2405  		deletedTxs []common.Hash
  2406  		rebirthTxs []common.Hash
  2407  
  2408  		deletedLogs []*types.Log
  2409  		rebirthLogs []*types.Log
  2410  	)
  2411  	// Deleted log emission on the API uses forward order, which is borked, but
  2412  	// we'll leave it in for legacy reasons.
  2413  	//
  2414  	// TODO(karalabe): This should be nuked out, no idea how, deprecate some APIs?
  2415  	{
  2416  		for i := len(oldChain) - 1; i >= 0; i-- {
  2417  			block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
  2418  			if block == nil {
  2419  				return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
  2420  			}
  2421  			if logs := bc.collectLogs(block, true); len(logs) > 0 {
  2422  				deletedLogs = append(deletedLogs, logs...)
  2423  			}
  2424  			if len(deletedLogs) > 512 {
  2425  				bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2426  				deletedLogs = nil
  2427  			}
  2428  		}
  2429  		if len(deletedLogs) > 0 {
  2430  			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2431  		}
  2432  	}
  2433  	// Undo old blocks in reverse order
  2434  	for i := 0; i < len(oldChain); i++ {
  2435  		// Collect all the deleted transactions
  2436  		block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
  2437  		if block == nil {
  2438  			return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
  2439  		}
  2440  		for _, tx := range block.Transactions() {
  2441  			deletedTxs = append(deletedTxs, tx.Hash())
  2442  		}
  2443  		// Collect deleted logs and emit them for new integrations
  2444  		if logs := bc.collectLogs(block, true); len(logs) > 0 {
  2445  			// Emit revertals latest first, older then
  2446  			slices.Reverse(logs)
  2447  
  2448  			// TODO(karalabe): Hook into the reverse emission part
  2449  		}
  2450  	}
  2451  	// Apply new blocks in forward order
  2452  	for i := len(newChain) - 1; i >= 1; i-- {
  2453  		// Collect all the included transactions
  2454  		block := bc.GetBlock(newChain[i].Hash(), newChain[i].Number.Uint64())
  2455  		if block == nil {
  2456  			return errInvalidNewChain // Corrupt database, mostly here to avoid weird panics
  2457  		}
  2458  		for _, tx := range block.Transactions() {
  2459  			rebirthTxs = append(rebirthTxs, tx.Hash())
  2460  		}
  2461  		// Collect inserted logs and emit them
  2462  		if logs := bc.collectLogs(block, false); len(logs) > 0 {
  2463  			rebirthLogs = append(rebirthLogs, logs...)
  2464  		}
  2465  		if len(rebirthLogs) > 512 {
  2466  			bc.logsFeed.Send(rebirthLogs)
  2467  			rebirthLogs = nil
  2468  		}
  2469  		// Update the head block
  2470  		bc.writeHeadBlock(block)
  2471  	}
  2472  	if len(rebirthLogs) > 0 {
  2473  		bc.logsFeed.Send(rebirthLogs)
  2474  	}
  2475  	// Delete useless indexes right now which includes the non-canonical
  2476  	// transaction indexes, canonical chain indexes which above the head.
  2477  	batch := bc.db.NewBatch()
  2478  	for _, tx := range types.HashDifference(deletedTxs, rebirthTxs) {
  2479  		rawdb.DeleteTxLookupEntry(batch, tx)
  2480  	}
  2481  	// Delete all hash markers that are not part of the new canonical chain.
  2482  	// Because the reorg function does not handle new chain head, all hash
  2483  	// markers greater than or equal to new chain head should be deleted.
  2484  	number := commonBlock.Number
  2485  	if len(newChain) > 1 {
  2486  		number = newChain[1].Number
  2487  	}
  2488  	for i := number.Uint64() + 1; ; i++ {
  2489  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  2490  		if hash == (common.Hash{}) {
  2491  			break
  2492  		}
  2493  		rawdb.DeleteCanonicalHash(batch, i)
  2494  	}
  2495  	if err := batch.Write(); err != nil {
  2496  		log.Crit("Failed to delete useless indexes", "err", err)
  2497  	}
  2498  	// Reset the tx lookup cache to clear stale txlookup cache.
  2499  	bc.txLookupCache.Purge()
  2500  
  2501  	// Release the tx-lookup lock after mutation.
  2502  	bc.txLookupLock.Unlock()
  2503  
  2504  	return nil
  2505  }
  2506  
  2507  // InsertBlockWithoutSetHead executes the block, runs the necessary verification
  2508  // upon it and then persist the block and the associate state into the database.
  2509  // The key difference between the InsertChain is it won't do the canonical chain
  2510  // updating. It relies on the additional SetCanonical call to finalize the entire
  2511  // procedure.
  2512  func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block, makeWitness bool) (*stateless.Witness, error) {
  2513  	if !bc.chainmu.TryLock() {
  2514  		return nil, errChainStopped
  2515  	}
  2516  	defer bc.chainmu.Unlock()
  2517  
  2518  	witness, _, err := bc.insertChain(types.Blocks{block}, false, makeWitness)
  2519  	return witness, err
  2520  }
  2521  
// SetCanonical rewinds the chain to set the new head block as the specified
// block. It's possible that the state of the new head is missing, and it will
// be recovered in this function as well.
func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
	if !bc.chainmu.TryLock() {
		return common.Hash{}, errChainStopped
	}
	defer bc.chainmu.Unlock()

	// Re-execute the reorged chain in case the head state is missing.
	if !bc.HasState(head.Root()) {
		if latestValidHash, err := bc.recoverAncestors(head, false); err != nil {
			return latestValidHash, err
		}
		log.Info("Recovered head state", "number", head.Number(), "hash", head.Hash())
	}
	// Run the reorg if necessary and set the given block as new head.
	//
	// A reorg is only needed when the new head doesn't directly extend the
	// current head; reorg handles everything up to (but not including) head.
	start := time.Now()
	if head.ParentHash() != bc.CurrentBlock().Hash() {
		if err := bc.reorg(bc.CurrentBlock(), head.Header()); err != nil {
			return common.Hash{}, err
		}
	}
	bc.writeHeadBlock(head)

	// Emit events
	logs := bc.collectLogs(head, false)
	bc.chainFeed.Send(ChainEvent{Header: head.Header()})
	if len(logs) > 0 {
		bc.logsFeed.Send(logs)
	}
	bc.chainHeadFeed.Send(ChainHeadEvent{Header: head.Header()})

	// Assemble the log context, appending the block's age only when it is
	// older than a minute (i.e. during sync rather than live following).
	context := []interface{}{
		"number", head.Number(),
		"hash", head.Hash(),
		"root", head.Root(),
		"elapsed", time.Since(start),
	}
	if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
	}
	log.Info("Chain head was updated", context...)
	return head.Hash(), nil
}
  2567  
  2568  // skipBlock returns 'true', if the block being imported can be skipped over, meaning
  2569  // that the block does not need to be processed but can be considered already fully 'done'.
  2570  func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
  2571  	// We can only ever bypass processing if the only error returned by the validator
  2572  	// is ErrKnownBlock, which means all checks passed, but we already have the block
  2573  	// and state.
  2574  	if !errors.Is(err, ErrKnownBlock) {
  2575  		return false
  2576  	}
  2577  	// If we're not using snapshots, we can skip this, since we have both block
  2578  	// and (trie-) state
  2579  	if bc.snaps == nil {
  2580  		return true
  2581  	}
  2582  	var (
  2583  		header     = it.current() // header can't be nil
  2584  		parentRoot common.Hash
  2585  	)
  2586  	// If we also have the snapshot-state, we can skip the processing.
  2587  	if bc.snaps.Snapshot(header.Root) != nil {
  2588  		return true
  2589  	}
  2590  	// In this case, we have the trie-state but not snapshot-state. If the parent
  2591  	// snapshot-state exists, we need to process this in order to not get a gap
  2592  	// in the snapshot layers.
  2593  	// Resolve parent block
  2594  	if parent := it.previous(); parent != nil {
  2595  		parentRoot = parent.Root
  2596  	} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
  2597  		parentRoot = parent.Root
  2598  	}
  2599  	if parentRoot == (common.Hash{}) {
  2600  		return false // Theoretically impossible case
  2601  	}
  2602  	// Parent is also missing snapshot: we can skip this. Otherwise process.
  2603  	if bc.snaps.Snapshot(parentRoot) == nil {
  2604  		return true
  2605  	}
  2606  	return false
  2607  }
  2608  
  2609  // reportBlock logs a bad block error.
  2610  func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err error) {
  2611  	var receipts types.Receipts
  2612  	if res != nil {
  2613  		receipts = res.Receipts
  2614  	}
  2615  	rawdb.WriteBadBlock(bc.db, block)
  2616  	log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
  2617  }
  2618  
  2619  // logForkReadiness will write a log when a future fork is scheduled, but not
  2620  // active. This is useful so operators know their client is ready for the fork.
  2621  func (bc *BlockChain) logForkReadiness(block *types.Block) {
  2622  	config := bc.Config()
  2623  	current, last := config.LatestFork(block.Time()), config.LatestFork(math.MaxUint64)
  2624  
  2625  	// Short circuit if the timestamp of the last fork is undefined,
  2626  	// or if the network has already passed the last configured fork.
  2627  	t := config.Timestamp(last)
  2628  	if t == nil || current >= last {
  2629  		return
  2630  	}
  2631  	at := time.Unix(int64(*t), 0)
  2632  
  2633  	// Only log if:
  2634  	// - Current time is before the fork activation time
  2635  	// - Enough time has passed since last alert
  2636  	now := time.Now()
  2637  	if now.Before(at) && now.After(bc.lastForkReadyAlert.Add(forkReadyInterval)) {
  2638  		log.Info("Ready for fork activation", "fork", last, "date", at.Format(time.RFC822),
  2639  			"remaining", time.Until(at).Round(time.Second), "timestamp", at.Unix())
  2640  		bc.lastForkReadyAlert = time.Now()
  2641  	}
  2642  }
  2643  
  2644  // summarizeBadBlock returns a string summarizing the bad block and other
  2645  // relevant information.
  2646  func summarizeBadBlock(block *types.Block, receipts []*types.Receipt, config *params.ChainConfig, err error) string {
  2647  	var receiptString string
  2648  	for i, receipt := range receipts {
  2649  		receiptString += fmt.Sprintf("\n  %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x",
  2650  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2651  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2652  	}
  2653  	version, vcs := version.Info()
  2654  	platform := fmt.Sprintf("%s %s %s %s", version, runtime.Version(), runtime.GOARCH, runtime.GOOS)
  2655  	if vcs != "" {
  2656  		vcs = fmt.Sprintf("\nVCS: %s", vcs)
  2657  	}
  2658  	return fmt.Sprintf(`
  2659  ########## BAD BLOCK #########
  2660  Block: %v (%#x)
  2661  Error: %v
  2662  Platform: %v%v
  2663  Chain config: %#v
  2664  Receipts: %v
  2665  ##############################
  2666  `, block.Number(), block.Hash(), err, platform, vcs, config, receiptString)
  2667  }
  2668  
  2669  // InsertHeaderChain attempts to insert the given header chain in to the local
  2670  // chain, possibly creating a reorg. If an error is returned, it will return the
  2671  // index number of the failing header as well an error describing what went wrong.
  2672  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
  2673  	if len(chain) == 0 {
  2674  		return 0, nil
  2675  	}
  2676  	start := time.Now()
  2677  	if i, err := bc.hc.ValidateHeaderChain(chain); err != nil {
  2678  		return i, err
  2679  	}
  2680  	if !bc.chainmu.TryLock() {
  2681  		return 0, errChainStopped
  2682  	}
  2683  	defer bc.chainmu.Unlock()
  2684  
  2685  	_, err := bc.hc.InsertHeaderChain(chain, start)
  2686  	return 0, err
  2687  }
  2688  
// InsertHeadersBeforeCutoff inserts the given headers into the ancient store
// as they are claimed older than the configured chain cutoff point. All the
// inserted headers are regarded as canonical and chain reorg is not supported.
//
// The persistence steps below are deliberately ordered: ancient data is
// written and fsynced before the key-value markers, so a crash cannot leave
// head pointers referring to data that never reached disk.
func (bc *BlockChain) InsertHeadersBeforeCutoff(headers []*types.Header) (int, error) {
	if len(headers) == 0 {
		return 0, nil
	}
	// TODO(rjl493456442): Headers before the configured cutoff have already
	// been verified by the hash of cutoff header. Theoretically, header validation
	// could be skipped here.
	if n, err := bc.hc.ValidateHeaderChain(headers); err != nil {
		return n, err
	}
	// Fail fast rather than block if the chain is shutting down.
	if !bc.chainmu.TryLock() {
		return 0, errChainStopped
	}
	defer bc.chainmu.Unlock()

	// Initialize the ancient store with genesis block if it's empty.
	var (
		frozen, _ = bc.db.Ancients()
		first     = headers[0].Number.Uint64()
	)
	if first == 1 && frozen == 0 {
		// The batch starts right after genesis and the freezer is empty:
		// seed it with the genesis block (and an empty receipt list) so the
		// ancient tables stay contiguous from block zero.
		_, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []rlp.RawValue{rlp.EmptyList})
		if err != nil {
			log.Error("Error writing genesis to ancients", "err", err)
			return 0, err
		}
		log.Info("Wrote genesis to ancient store")
	} else if frozen != first {
		// The freezer must end exactly where this batch begins; anything
		// else would leave a gap or an overlap in the ancient store.
		return 0, fmt.Errorf("headers are gapped with the ancient store, first: %d, ancient: %d", first, frozen)
	}

	// Write headers to the ancient store, with block bodies and receipts set to nil
	// to ensure consistency across tables in the freezer.
	_, err := rawdb.WriteAncientHeaderChain(bc.db, headers)
	if err != nil {
		return 0, err
	}
	// Sync the ancient store explicitly to ensure all data has been flushed to disk.
	if err := bc.db.SyncAncient(); err != nil {
		return 0, err
	}
	// Write hash to number mappings
	batch := bc.db.NewBatch()
	for _, header := range headers {
		rawdb.WriteHeaderNumber(batch, header.Hash(), header.Number.Uint64())
	}
	// Write head header and head snap block flags
	last := headers[len(headers)-1]
	rawdb.WriteHeadHeaderHash(batch, last.Hash())
	rawdb.WriteHeadFastBlockHash(batch, last.Hash())
	if err := batch.Write(); err != nil {
		return 0, err
	}
	// Truncate the useless chain segment (zero bodies and receipts) in the
	// ancient store.
	if _, err := bc.db.TruncateTail(last.Number.Uint64() + 1); err != nil {
		return 0, err
	}
	// Last step update all in-memory markers
	bc.hc.currentHeader.Store(last)
	bc.currentSnapBlock.Store(last)
	headHeaderGauge.Update(last.Number.Int64())
	headFastBlockGauge.Update(last.Number.Int64())
	return 0, nil
}
  2757  
  2758  // SetBlockValidatorAndProcessorForTesting sets the current validator and processor.
  2759  // This method can be used to force an invalid blockchain to be verified for tests.
  2760  // This method is unsafe and should only be used before block import starts.
  2761  func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Processor) {
  2762  	bc.validator = v
  2763  	bc.processor = p
  2764  }
  2765  
  2766  // SetTrieFlushInterval configures how often in-memory tries are persisted to disk.
  2767  // The interval is in terms of block processing time, not wall clock.
  2768  // It is thread-safe and can be called repeatedly without side effects.
  2769  func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
  2770  	bc.flushInterval.Store(int64(interval))
  2771  }
  2772  
  2773  // GetTrieFlushInterval gets the in-memory tries flushAlloc interval
  2774  func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
  2775  	return time.Duration(bc.flushInterval.Load())
  2776  }