github.com/theQRL/go-zond@v0.1.1/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	"runtime"
    26  	"strings"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/theQRL/go-zond/common"
    32  	"github.com/theQRL/go-zond/common/lru"
    33  	"github.com/theQRL/go-zond/common/mclock"
    34  	"github.com/theQRL/go-zond/common/prque"
    35  	"github.com/theQRL/go-zond/consensus"
    36  	"github.com/theQRL/go-zond/consensus/misc/eip4844"
    37  	"github.com/theQRL/go-zond/core/rawdb"
    38  	"github.com/theQRL/go-zond/core/state"
    39  	"github.com/theQRL/go-zond/core/state/snapshot"
    40  	"github.com/theQRL/go-zond/core/types"
    41  	"github.com/theQRL/go-zond/core/vm"
    42  	"github.com/theQRL/go-zond/event"
    43  	"github.com/theQRL/go-zond/internal/syncx"
    44  	"github.com/theQRL/go-zond/internal/version"
    45  	"github.com/theQRL/go-zond/log"
    46  	"github.com/theQRL/go-zond/metrics"
    47  	"github.com/theQRL/go-zond/params"
    48  	"github.com/theQRL/go-zond/rlp"
    49  	"github.com/theQRL/go-zond/trie"
    50  	"github.com/theQRL/go-zond/trie/triedb/hashdb"
    51  	"github.com/theQRL/go-zond/trie/triedb/pathdb"
    52  	"github.com/theQRL/go-zond/zonddb"
    53  	"golang.org/x/exp/slices"
    54  )
    55  
    56  var (
    57  	headBlockGauge          = metrics.NewRegisteredGauge("chain/head/block", nil)
    58  	headHeaderGauge         = metrics.NewRegisteredGauge("chain/head/header", nil)
    59  	headFastBlockGauge      = metrics.NewRegisteredGauge("chain/head/receipt", nil)
    60  	headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil)
    61  	headSafeBlockGauge      = metrics.NewRegisteredGauge("chain/head/safe", nil)
    62  
    63  	chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil)
    64  
    65  	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
    66  	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
    67  	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
    68  	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
    69  
    70  	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
    71  	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
    72  	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
    73  	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
    74  
    75  	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
    76  	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
    77  	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
    78  
    79  	triedbCommitTimer = metrics.NewRegisteredTimer("chain/triedb/commits", nil)
    80  
    81  	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
    82  	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
    83  	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
    84  	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)
    85  
    86  	blockReorgMeter     = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
    87  	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
    88  	blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
    89  
    90  	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
    91  	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
    92  
    93  	errInsertionInterrupted = errors.New("insertion is interrupted")
    94  	errChainStopped         = errors.New("blockchain is stopped")
    95  	errInvalidOldChain      = errors.New("invalid old chain")
    96  	errInvalidNewChain      = errors.New("invalid new chain")
    97  )
    98  
    99  const (
   100  	bodyCacheLimit      = 256
   101  	blockCacheLimit     = 256
   102  	receiptsCacheLimit  = 32
   103  	txLookupCacheLimit  = 1024
   104  	maxFutureBlocks     = 256
   105  	maxTimeFutureBlocks = 30
   106  	TriesInMemory       = 128
   107  
   108  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
   109  	//
   110  	// Changelog:
   111  	//
   112  	// - Version 4
    113  //  The following incompatible database changes were added:
    114  //    * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
    115  //    * the `Bloom` field of receipt is deleted
    116  //    * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
   117  	// - Version 5
   118  	//  The following incompatible database changes were added:
   119  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
   120  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
   121  	//      receipts' corresponding block
   122  	// - Version 6
   123  	//  The following incompatible database changes were added:
   124  	//    * Transaction lookup information stores the corresponding block number instead of block hash
   125  	// - Version 7
   126  	//  The following incompatible database changes were added:
   127  	//    * Use freezer as the ancient database to maintain all ancient data
   128  	// - Version 8
   129  	//  The following incompatible database changes were added:
   130  	//    * New scheme for contract code in order to separate the codes and trie nodes
   131  	BlockChainVersion uint64 = 8
   132  )
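// At startup the stored version is compared against BlockChainVersion and a
// mismatch forces a resync. A minimal sketch of that check, assuming the fork
// retains go-ethereum's rawdb version helpers (illustrative only):
//
//	if stored := rawdb.ReadDatabaseVersion(db); stored != nil && *stored != BlockChainVersion {
//		// Incompatible database layout: refuse to start and force a resync.
//		return fmt.Errorf("database version %d incompatible with code version %d", *stored, BlockChainVersion)
//	}
//	rawdb.WriteDatabaseVersion(db, BlockChainVersion)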
   133  
   134  // CacheConfig contains the configuration values for the trie database
    135  // and state snapshot that are resident in a blockchain.
   136  type CacheConfig struct {
   137  	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
   138  	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
   139  	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
   140  	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
   141  	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
   142  	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory
   143  	Preimages           bool          // Whether to store preimage of trie key to the disk
   144  	StateHistory        uint64        // Number of blocks from head whose state histories are reserved.
   145  	StateScheme         string        // Scheme used to store ethereum states and merkle tree nodes on top
   146  
    147  	SnapshotNoBuild bool // Whether to disable background snapshot generation
   148  	SnapshotWait    bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
   149  }
   150  
    151  // triedbConfig derives the config for the trie database.
   152  func (c *CacheConfig) triedbConfig() *trie.Config {
   153  	config := &trie.Config{Preimages: c.Preimages}
   154  	if c.StateScheme == rawdb.HashScheme {
   155  		config.HashDB = &hashdb.Config{
   156  			CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
   157  		}
   158  	}
   159  	if c.StateScheme == rawdb.PathScheme {
   160  		config.PathDB = &pathdb.Config{
   161  			StateHistory:   c.StateHistory,
   162  			CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
   163  			DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
   164  		}
   165  	}
   166  	return config
   167  }
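// As a concrete illustration (a sketch, not part of the package API surface):
// with 256 MB limits, the path scheme yields a pathdb.Config with 256 MiB
// clean and dirty caches, while the hash scheme only sizes the clean cache:
//
//	cfg := &CacheConfig{TrieCleanLimit: 256, TrieDirtyLimit: 256, StateScheme: rawdb.PathScheme}
//	tdb := trie.NewDatabase(db, cfg.triedbConfig()) // db is an open zonddb.Database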
   168  
   169  // defaultCacheConfig are the default caching values if none are specified by the
   170  // user (also used during testing).
   171  var defaultCacheConfig = &CacheConfig{
   172  	TrieCleanLimit: 256,
   173  	TrieDirtyLimit: 256,
   174  	TrieTimeLimit:  5 * time.Minute,
   175  	SnapshotLimit:  256,
   176  	SnapshotWait:   true,
   177  	StateScheme:    rawdb.HashScheme,
   178  }
   179  
   180  // DefaultCacheConfigWithScheme returns a deep copied default cache config with
   181  // a provided trie node scheme.
   182  func DefaultCacheConfigWithScheme(scheme string) *CacheConfig {
   183  	config := *defaultCacheConfig
   184  	config.StateScheme = scheme
   185  	return &config
   186  }
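// Since the returned value is a copy, callers can tweak it without mutating
// the package default, e.g. (illustrative):
//
//	cfg := DefaultCacheConfigWithScheme(rawdb.PathScheme)
//	cfg.Preimages = true // affects only this copy, not defaultCacheConfig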
   187  
   188  // BlockChain represents the canonical chain given a database with a genesis
    189  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
    190  //
    191  // Importing blocks into the blockchain happens according to the set of rules
    192  // defined by the two-stage Validator. Blocks are processed using the Processor,
    193  // which executes the included transactions. The validation of the state
    194  // is done in the second part of the Validator. A failure results in aborting
    195  // the import.
    196  //
    197  // The BlockChain also helps in returning blocks from **any** chain included
    198  // in the database as well as blocks that represent the canonical chain. It is
    199  // important to note that GetBlock can return any block, which does not need to be
    200  // included in the canonical chain, whereas GetBlockByNumber always represents the
    201  // canonical chain.
   202  type BlockChain struct {
   203  	chainConfig *params.ChainConfig // Chain & network configuration
   204  	cacheConfig *CacheConfig        // Cache configuration for pruning
   205  
    206  	db            zonddb.Database                  // Low level persistent database to store final content in
   207  	snaps         *snapshot.Tree                   // Snapshot tree for fast trie leaf access
   208  	triegc        *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
   209  	gcproc        time.Duration                    // Accumulates canonical block processing for trie dumping
   210  	lastWrite     uint64                           // Last block when the state was flushed
   211  	flushInterval atomic.Int64                     // Time interval (processing time) after which to flush a state
   212  	triedb        *trie.Database                   // The database handler for maintaining trie nodes.
   213  	stateCache    state.Database                   // State database to reuse between imports (contains state cache)
   214  
   215  	// txLookupLimit is the maximum number of blocks from head whose tx indices
   216  	// are reserved:
   217  	//  * 0:   means no limit and regenerate any missing indexes
   218  	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
   219  	//  * nil: disable tx reindexer/deleter, but still index new blocks
   220  	txLookupLimit uint64
   221  
   222  	hc            *HeaderChain
   223  	rmLogsFeed    event.Feed
   224  	chainFeed     event.Feed
   225  	chainSideFeed event.Feed
   226  	chainHeadFeed event.Feed
   227  	logsFeed      event.Feed
   228  	blockProcFeed event.Feed
   229  	scope         event.SubscriptionScope
   230  	genesisBlock  *types.Block
   231  
   232  	// This mutex synchronizes chain write operations.
   233  	// Readers don't need to take it, they can just read the database.
   234  	chainmu *syncx.ClosableMutex
   235  
   236  	currentBlock      atomic.Pointer[types.Header] // Current head of the chain
   237  	currentSnapBlock  atomic.Pointer[types.Header] // Current head of snap-sync
   238  	currentFinalBlock atomic.Pointer[types.Header] // Latest (consensus) finalized block
   239  	currentSafeBlock  atomic.Pointer[types.Header] // Latest (consensus) safe block
   240  
   241  	bodyCache     *lru.Cache[common.Hash, *types.Body]
   242  	bodyRLPCache  *lru.Cache[common.Hash, rlp.RawValue]
   243  	receiptsCache *lru.Cache[common.Hash, []*types.Receipt]
   244  	blockCache    *lru.Cache[common.Hash, *types.Block]
   245  	txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry]
   246  
   247  	// future blocks are blocks added for later processing
   248  	futureBlocks *lru.Cache[common.Hash, *types.Block]
   249  
    250  	wg            sync.WaitGroup // tracks background goroutines for graceful shutdown
   251  	quit          chan struct{}  // shutdown signal, closed in Stop.
   252  	stopping      atomic.Bool    // false if chain is running, true when stopped
   253  	procInterrupt atomic.Bool    // interrupt signaler for block processing
   254  
   255  	engine     consensus.Engine
   256  	validator  Validator // Block and state validator interface
   257  	prefetcher Prefetcher
   258  	processor  Processor // Block transaction processor interface
   259  	forker     *ForkChoice
   260  	vmConfig   vm.Config
   261  }
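// The chain markers above are atomic pointers, so readers can load them without
// holding chainmu. A minimal accessor sketch (the exported accessors such as
// CurrentBlock live in the package's reader code; the helper name below is
// hypothetical):
//
//	func (bc *BlockChain) headMarker() *types.Header {
//		return bc.currentBlock.Load() // may be nil until loadLastState has run
//	}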
   262  
   263  // NewBlockChain returns a fully initialised block chain using information
   264  // available in the database. It initialises the default Ethereum Validator
   265  // and Processor.
   266  func NewBlockChain(db zonddb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) {
   267  	if cacheConfig == nil {
   268  		cacheConfig = defaultCacheConfig
   269  	}
   270  	// Open trie database with provided config
   271  	triedb := trie.NewDatabase(db, cacheConfig.triedbConfig())
   272  
   273  	// Setup the genesis block, commit the provided genesis specification
   274  	// to database if the genesis block is not present yet, or load the
   275  	// stored one from database.
   276  	chainConfig, genesisHash, genesisErr := SetupGenesisBlockWithOverride(db, triedb, genesis, overrides)
   277  	if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
   278  		return nil, genesisErr
   279  	}
   280  	log.Info("")
   281  	log.Info(strings.Repeat("-", 153))
   282  	for _, line := range strings.Split(chainConfig.Description(), "\n") {
   283  		log.Info(line)
   284  	}
   285  	log.Info(strings.Repeat("-", 153))
   286  	log.Info("")
   287  
   288  	bc := &BlockChain{
   289  		chainConfig:   chainConfig,
   290  		cacheConfig:   cacheConfig,
   291  		db:            db,
   292  		triedb:        triedb,
   293  		triegc:        prque.New[int64, common.Hash](nil),
   294  		quit:          make(chan struct{}),
   295  		chainmu:       syncx.NewClosableMutex(),
   296  		bodyCache:     lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
   297  		bodyRLPCache:  lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
   298  		receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
   299  		blockCache:    lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
   300  		txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit),
   301  		futureBlocks:  lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
   302  		engine:        engine,
   303  		vmConfig:      vmConfig,
   304  	}
   305  	bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
   306  	bc.forker = NewForkChoice(bc, shouldPreserve)
   307  	bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
   308  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   309  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   310  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   311  
   312  	var err error
   313  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
   314  	if err != nil {
   315  		return nil, err
   316  	}
   317  	bc.genesisBlock = bc.GetBlockByNumber(0)
   318  	if bc.genesisBlock == nil {
   319  		return nil, ErrNoGenesis
   320  	}
   321  
   322  	bc.currentBlock.Store(nil)
   323  	bc.currentSnapBlock.Store(nil)
   324  	bc.currentFinalBlock.Store(nil)
   325  	bc.currentSafeBlock.Store(nil)
   326  
   327  	// Update chain info data metrics
   328  	chainInfoGauge.Update(metrics.GaugeInfoValue{"chain_id": bc.chainConfig.ChainID.String()})
   329  
    330  	// If the node is initialized with an external ancient store, re-initialize the
    331  	// missing chain indexes and chain flags. This procedure can survive a crash
    332  	// and can be resumed on the next restart since chain flags are updated in the last step.
   333  	if bc.empty() {
   334  		rawdb.InitDatabaseFromFreezer(bc.db)
   335  	}
   336  	// Load blockchain states from disk
   337  	if err := bc.loadLastState(); err != nil {
   338  		return nil, err
   339  	}
   340  	// Make sure the state associated with the block is available
   341  	head := bc.CurrentBlock()
   342  	if !bc.HasState(head.Root) {
   343  		if head.Number.Uint64() == 0 {
   344  			// The genesis state is missing, which is only possible in the path-based
   345  			// scheme. This situation occurs when the state syncer overwrites it.
   346  			//
   347  			// The solution is to reset the state to the genesis state. Although it may not
   348  			// match the sync target, the state healer will later address and correct any
   349  			// inconsistencies.
   350  			bc.resetState()
   351  		} else {
    352  			// Head state is missing. Before the state recovery, find out the
    353  			// disk layer point of the snapshot (if it's enabled). Make sure the
    354  			// rewound point is lower than the disk layer.
   355  			var diskRoot common.Hash
   356  			if bc.cacheConfig.SnapshotLimit > 0 {
   357  				diskRoot = rawdb.ReadSnapshotRoot(bc.db)
   358  			}
   359  			if diskRoot != (common.Hash{}) {
   360  				log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
   361  
   362  				snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
   363  				if err != nil {
   364  					return nil, err
   365  				}
   366  				// Chain rewound, persist old snapshot number to indicate recovery procedure
   367  				if snapDisk != 0 {
   368  					rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
   369  				}
   370  			} else {
   371  				log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
   372  				if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
   373  					return nil, err
   374  				}
   375  			}
   376  		}
   377  	}
   378  	// Ensure that a previous crash in SetHead doesn't leave extra ancients
   379  	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
   380  		var (
   381  			needRewind bool
   382  			low        uint64
   383  		)
   384  		// The head full block may be rolled back to a very low height due to
   385  		// blockchain repair. If the head full block is even lower than the ancient
   386  		// chain, truncate the ancient store.
   387  		fullBlock := bc.CurrentBlock()
   388  		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 {
   389  			needRewind = true
   390  			low = fullBlock.Number.Uint64()
   391  		}
    392  		// In snap sync, it may happen that ancient data has been written to the
    393  		// ancient store, but the LastFastBlock has not been updated; truncate the
    394  		// extra data here.
   395  		snapBlock := bc.CurrentSnapBlock()
   396  		if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 {
   397  			needRewind = true
   398  			if snapBlock.Number.Uint64() < low || low == 0 {
   399  				low = snapBlock.Number.Uint64()
   400  			}
   401  		}
   402  		if needRewind {
   403  			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
   404  			if err := bc.SetHead(low); err != nil {
   405  				return nil, err
   406  			}
   407  		}
   408  	}
   409  	// The first thing the node will do is reconstruct the verification data for
   410  	// the head block (ethash cache or clique voting snapshot). Might as well do
   411  	// it in advance.
   412  	bc.engine.VerifyHeader(bc, bc.CurrentHeader())
   413  
   414  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   415  	for hash := range BadHashes {
   416  		if header := bc.GetHeaderByHash(hash); header != nil {
   417  			// get the canonical block corresponding to the offending header's number
   418  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   419  			// make sure the headerByNumber (if present) is in our current canonical chain
   420  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   421  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   422  				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
   423  					return nil, err
   424  				}
   425  				log.Error("Chain rewind was successful, resuming normal operation")
   426  			}
   427  		}
   428  	}
   429  
   430  	// Load any existing snapshot, regenerating it if loading failed
   431  	if bc.cacheConfig.SnapshotLimit > 0 {
   432  		// If the chain was rewound past the snapshot persistent layer (causing
   433  		// a recovery block number to be persisted to disk), check if we're still
   434  		// in recovery mode and in that case, don't invalidate the snapshot on a
   435  		// head mismatch.
   436  		var recover bool
   437  
   438  		head := bc.CurrentBlock()
   439  		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() {
   440  			log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer)
   441  			recover = true
   442  		}
   443  		snapconfig := snapshot.Config{
   444  			CacheSize:  bc.cacheConfig.SnapshotLimit,
   445  			Recovery:   recover,
   446  			NoBuild:    bc.cacheConfig.SnapshotNoBuild,
   447  			AsyncBuild: !bc.cacheConfig.SnapshotWait,
   448  		}
   449  		bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
   450  	}
   451  
   452  	// Start future block processor.
   453  	bc.wg.Add(1)
   454  	go bc.updateFutureBlocks()
   455  
   456  	// Rewind the chain in case of an incompatible config upgrade.
   457  	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
   458  		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
   459  		if compat.RewindToTime > 0 {
   460  			bc.SetHeadWithTimestamp(compat.RewindToTime)
   461  		} else {
   462  			bc.SetHead(compat.RewindToBlock)
   463  		}
   464  		rawdb.WriteChainConfig(db, genesisHash, chainConfig)
   465  	}
   466  	// Start tx indexer/unindexer if required.
   467  	if txLookupLimit != nil {
   468  		bc.txLookupLimit = *txLookupLimit
   469  
   470  		bc.wg.Add(1)
   471  		go bc.maintainTxIndex()
   472  	}
   473  	return bc, nil
   474  }
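// A minimal construction sketch for tests, assuming an in-memory database and
// a caller-provided genesis spec and consensus engine (illustrative only; a
// nil cacheConfig selects defaultCacheConfig):
//
//	db := rawdb.NewMemoryDatabase()
//	chain, err := NewBlockChain(db, nil, genesis, nil, engine, vm.Config{}, nil, nil)
//	if err != nil {
//		log.Crit("Failed to create blockchain", "err", err)
//	}
//	defer chain.Stop()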
   475  
   476  // empty returns an indicator whether the blockchain is empty.
    477  // Note, it's a special case that we connect a non-empty ancient
    478  // database with an empty node, so that we can plug the ancient store
    479  // into the node seamlessly.
   480  func (bc *BlockChain) empty() bool {
   481  	genesis := bc.genesisBlock.Hash()
   482  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   483  		if hash != genesis {
   484  			return false
   485  		}
   486  	}
   487  	return true
   488  }
   489  
   490  // loadLastState loads the last known chain state from the database. This method
   491  // assumes that the chain manager mutex is held.
   492  func (bc *BlockChain) loadLastState() error {
   493  	// Restore the last known head block
   494  	head := rawdb.ReadHeadBlockHash(bc.db)
   495  	if head == (common.Hash{}) {
   496  		// Corrupt or empty database, init from scratch
   497  		log.Warn("Empty database, resetting chain")
   498  		return bc.Reset()
   499  	}
   500  	// Make sure the entire head block is available
   501  	headBlock := bc.GetBlockByHash(head)
   502  	if headBlock == nil {
   503  		// Corrupt or empty database, init from scratch
   504  		log.Warn("Head block missing, resetting chain", "hash", head)
   505  		return bc.Reset()
   506  	}
   507  	// Everything seems to be fine, set as the head block
   508  	bc.currentBlock.Store(headBlock.Header())
   509  	headBlockGauge.Update(int64(headBlock.NumberU64()))
   510  
   511  	// Restore the last known head header
   512  	headHeader := headBlock.Header()
   513  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   514  		if header := bc.GetHeaderByHash(head); header != nil {
   515  			headHeader = header
   516  		}
   517  	}
   518  	bc.hc.SetCurrentHeader(headHeader)
   519  
   520  	// Restore the last known head snap block
   521  	bc.currentSnapBlock.Store(headBlock.Header())
   522  	headFastBlockGauge.Update(int64(headBlock.NumberU64()))
   523  
   524  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   525  		if block := bc.GetBlockByHash(head); block != nil {
   526  			bc.currentSnapBlock.Store(block.Header())
   527  			headFastBlockGauge.Update(int64(block.NumberU64()))
   528  		}
   529  	}
   530  
   531  	// Restore the last known finalized block and safe block
   532  	// Note: the safe block is not stored on disk and it is set to the last
   533  	// known finalized block on startup
   534  	if head := rawdb.ReadFinalizedBlockHash(bc.db); head != (common.Hash{}) {
   535  		if block := bc.GetBlockByHash(head); block != nil {
   536  			bc.currentFinalBlock.Store(block.Header())
   537  			headFinalizedBlockGauge.Update(int64(block.NumberU64()))
   538  			bc.currentSafeBlock.Store(block.Header())
   539  			headSafeBlockGauge.Update(int64(block.NumberU64()))
   540  		}
   541  	}
   542  	// Issue a status log for the user
   543  	var (
   544  		currentSnapBlock  = bc.CurrentSnapBlock()
   545  		currentFinalBlock = bc.CurrentFinalBlock()
   546  
   547  		headerTd = bc.GetTd(headHeader.Hash(), headHeader.Number.Uint64())
   548  		blockTd  = bc.GetTd(headBlock.Hash(), headBlock.NumberU64())
   549  	)
   550  	if headHeader.Hash() != headBlock.Hash() {
   551  		log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
   552  	}
   553  	log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
   554  	if headBlock.Hash() != currentSnapBlock.Hash() {
   555  		snapTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64())
   556  		log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
   557  	}
   558  	if currentFinalBlock != nil {
   559  		finalTd := bc.GetTd(currentFinalBlock.Hash(), currentFinalBlock.Number.Uint64())
   560  		log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0)))
   561  	}
   562  	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
   563  		log.Info("Loaded last snap-sync pivot marker", "number", *pivot)
   564  	}
   565  	return nil
   566  }
   567  
   568  // SetHead rewinds the local chain to a new head. Depending on whether the node
   569  // was snap synced or full synced and in which state, the method will try to
   570  // delete minimal data from disk whilst retaining chain consistency.
   571  func (bc *BlockChain) SetHead(head uint64) error {
   572  	if _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false); err != nil {
   573  		return err
   574  	}
   575  	// Send chain head event to update the transaction pool
   576  	header := bc.CurrentBlock()
   577  	block := bc.GetBlock(header.Hash(), header.Number.Uint64())
   578  	if block == nil {
    579  		// This should never happen. In practice, previously currentBlock
    580  		// contained the entire block whereas now it is only a "marker", so there
    581  		// is an ever so slight chance for a race we should handle.
   582  		log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   583  		return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   584  	}
   585  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   586  	return nil
   587  }
   588  
   589  // SetHeadWithTimestamp rewinds the local chain to a new head that has at max
   590  // the given timestamp. Depending on whether the node was snap synced or full
   591  // synced and in which state, the method will try to delete minimal data from
   592  // disk whilst retaining chain consistency.
   593  func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
   594  	if _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false); err != nil {
   595  		return err
   596  	}
   597  	// Send chain head event to update the transaction pool
   598  	header := bc.CurrentBlock()
   599  	block := bc.GetBlock(header.Hash(), header.Number.Uint64())
   600  	if block == nil {
    601  		// This should never happen. In practice, previously currentBlock
    602  		// contained the entire block whereas now it is only a "marker", so there
    603  		// is an ever so slight chance for a race we should handle.
   604  		log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
   605  		return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
   606  	}
   607  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   608  	return nil
   609  }
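// Both rewind entry points delegate to setHeadBeyondRoot, e.g. (illustrative):
//
//	_ = bc.SetHead(1_000_000)                  // rewind to block number 1,000,000
//	_ = bc.SetHeadWithTimestamp(1_700_000_000) // rewind to the last block not newer than this Unix time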
   610  
   611  // SetFinalized sets the finalized block.
   612  func (bc *BlockChain) SetFinalized(header *types.Header) {
   613  	bc.currentFinalBlock.Store(header)
   614  	if header != nil {
   615  		rawdb.WriteFinalizedBlockHash(bc.db, header.Hash())
   616  		headFinalizedBlockGauge.Update(int64(header.Number.Uint64()))
   617  	} else {
   618  		rawdb.WriteFinalizedBlockHash(bc.db, common.Hash{})
   619  		headFinalizedBlockGauge.Update(0)
   620  	}
   621  }
   622  
   623  // SetSafe sets the safe block.
   624  func (bc *BlockChain) SetSafe(header *types.Header) {
   625  	bc.currentSafeBlock.Store(header)
   626  	if header != nil {
   627  		headSafeBlockGauge.Update(int64(header.Number.Uint64()))
   628  	} else {
   629  		headSafeBlockGauge.Update(0)
   630  	}
   631  }
   632  
   633  // resetState resets the persistent state to genesis state if it's not present.
   634  func (bc *BlockChain) resetState() {
   635  	// Short circuit if the genesis state is already present.
   636  	root := bc.genesisBlock.Root()
   637  	if bc.HasState(root) {
   638  		return
   639  	}
   640  	// Reset the state database to empty for committing genesis state.
    641  	// Note, this should only happen in the path-based scheme, and the Reset
    642  	// function is also only callable in this mode.
   643  	if bc.triedb.Scheme() == rawdb.PathScheme {
   644  		if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
   645  			log.Crit("Failed to clean state", "err", err) // Shouldn't happen
   646  		}
   647  	}
   648  	// Write genesis state into database.
   649  	if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
   650  		log.Crit("Failed to commit genesis state", "err", err)
   651  	}
   652  	log.Info("Reset state to genesis", "root", root)
   653  }
   654  
   655  // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
   656  // that the rewind must pass the specified state root. This method is meant to be
   657  // used when rewinding with snapshots enabled to ensure that we go back further than
   658  // persistent disk layer. Depending on whether the node was snap synced or full, and
   659  // in which state, the method will try to delete minimal data from disk whilst
   660  // retaining chain consistency.
   661  //
   662  // The method also works in timestamp mode if `head == 0` but `time != 0`. In that
    663  // case blocks are rolled back until the new head becomes older than or equal to the
    664  // requested time. If both `head` and `time` are 0, the chain is rewound to genesis.
   665  //
   666  // The method returns the block number where the requested root cap was found.
   667  func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool) (uint64, error) {
   668  	if !bc.chainmu.TryLock() {
   669  		return 0, errChainStopped
   670  	}
   671  	defer bc.chainmu.Unlock()
   672  
   673  	// Track the block number of the requested root hash
   674  	var rootNumber uint64 // (no root == always 0)
   675  
    676  	// Retrieve the last pivot block to short circuit rollbacks beyond it, and the
    677  	// current freezer limit to start nuking if underflown
   678  	pivot := rawdb.ReadLastPivotNumber(bc.db)
   679  	frozen, _ := bc.db.Ancients()
   680  
   681  	updateFn := func(db zonddb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
   682  		// Rewind the blockchain, ensuring we don't end up with a stateless head
   683  		// block. Note, depth equality is permitted to allow using SetHead as a
   684  		// chain reparation mechanism without deleting any data!
   685  		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.Number.Uint64() {
   686  			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
   687  			if newHeadBlock == nil {
   688  				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
   689  				newHeadBlock = bc.genesisBlock
   690  				bc.resetState()
   691  			} else {
    692  				// Block exists. Keep rewinding until we find one with state,
    693  				// and keep rewinding until we exceed the optional threshold
    694  				// root hash
   695  				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
   696  
   697  				for {
   698  					// If a root threshold was requested but not yet crossed, check
   699  					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
   700  						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
   701  					}
   702  					if !bc.HasState(newHeadBlock.Root()) && !bc.stateRecoverable(newHeadBlock.Root()) {
   703  						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   704  						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
   705  							parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
   706  							if parent != nil {
   707  								newHeadBlock = parent
   708  								continue
   709  							}
   710  							log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
   711  							newHeadBlock = bc.genesisBlock
   712  						} else {
   713  							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
   714  							newHeadBlock = bc.genesisBlock
   715  						}
   716  					}
   717  					if beyondRoot || newHeadBlock.NumberU64() == 0 {
   718  						if newHeadBlock.NumberU64() == 0 {
   719  							bc.resetState()
   720  						} else if !bc.HasState(newHeadBlock.Root()) {
   721  							// Rewind to a block with recoverable state. If the state is
   722  							// missing, run the state recovery here.
   723  							if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil {
   724  								log.Crit("Failed to rollback state", "err", err) // Shouldn't happen
   725  							}
   726  						}
   727  						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   728  						break
   729  					}
   730  					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
   731  					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
   732  				}
   733  			}
   734  			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
   735  
   736  			// Degrade the chain markers if they are explicitly reverted.
   737  			// In theory we should update all in-memory markers in the
   738  			// last step, however the direction of SetHead is from high
   739  			// to low, so it's safe to update in-memory markers directly.
   740  			bc.currentBlock.Store(newHeadBlock.Header())
   741  			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
   742  		}
   743  		// Rewind the snap block in a simpleton way to the target head
   744  		if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() {
   745  			newHeadSnapBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
    746  			// If the rewind target is missing, reset to the genesis state
   747  			if newHeadSnapBlock == nil {
   748  				newHeadSnapBlock = bc.genesisBlock
   749  			}
   750  			rawdb.WriteHeadFastBlockHash(db, newHeadSnapBlock.Hash())
   751  
   752  			// Degrade the chain markers if they are explicitly reverted.
   753  			// In theory we should update all in-memory markers in the
   754  			// last step, however the direction of SetHead is from high
    755  			// to low, so it's safe to update in-memory markers directly.
   756  			bc.currentSnapBlock.Store(newHeadSnapBlock.Header())
   757  			headFastBlockGauge.Update(int64(newHeadSnapBlock.NumberU64()))
   758  		}
   759  		var (
   760  			headHeader = bc.CurrentBlock()
   761  			headNumber = headHeader.Number.Uint64()
   762  		)
    763  		// If setHead underflowed the freezer threshold and the block processing
    764  		// intent afterwards is full block importing, delete the chain segment
    765  		// between the stateful block and the sethead target.
   766  		var wipe bool
   767  		if headNumber+1 < frozen {
   768  			wipe = pivot == nil || headNumber >= *pivot
   769  		}
   770  		return headHeader, wipe // Only force wipe if full synced
   771  	}
   772  	// Rewind the header chain, deleting all block bodies until then
   773  	delFn := func(db zonddb.KeyValueWriter, hash common.Hash, num uint64) {
   774  		// Ignore the error here since light client won't hit this path
   775  		frozen, _ := bc.db.Ancients()
   776  		if num+1 <= frozen {
    777  			// Truncate all related data (header, total difficulty, body, receipt
    778  			// and canonical hash) from the ancient store.
   779  			if _, err := bc.db.TruncateHead(num); err != nil {
   780  				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
   781  			}
   782  			// Remove the hash <-> number mapping from the active store.
   783  			rawdb.DeleteHeaderNumber(db, hash)
   784  		} else {
    785  			// Remove the related body and receipts from the active store.
   786  			// The header, total difficulty and canonical hash will be
   787  			// removed in the hc.SetHead function.
   788  			rawdb.DeleteBody(db, hash, num)
   789  			rawdb.DeleteReceipts(db, hash, num)
   790  		}
   791  		// Todo(rjl493456442) txlookup, bloombits, etc
   792  	}
   793  	// If SetHead was only called as a chain reparation method, try to skip
   794  	// touching the header chain altogether, unless the freezer is broken
   795  	if repair {
   796  		if target, force := updateFn(bc.db, bc.CurrentBlock()); force {
   797  			bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
   798  		}
   799  	} else {
   800  		// Rewind the chain to the requested head and keep going backwards until a
   801  		// block with a state is found or snap sync pivot is passed
   802  		if time > 0 {
   803  			log.Warn("Rewinding blockchain to timestamp", "target", time)
   804  			bc.hc.SetHeadWithTimestamp(time, updateFn, delFn)
   805  		} else {
   806  			log.Warn("Rewinding blockchain to block", "target", head)
   807  			bc.hc.SetHead(head, updateFn, delFn)
   808  		}
   809  	}
   810  	// Clear out any stale content from the caches
   811  	bc.bodyCache.Purge()
   812  	bc.bodyRLPCache.Purge()
   813  	bc.receiptsCache.Purge()
   814  	bc.blockCache.Purge()
   815  	bc.txLookupCache.Purge()
   816  	bc.futureBlocks.Purge()
   817  
   818  	// Clear safe block, finalized block if needed
   819  	if safe := bc.CurrentSafeBlock(); safe != nil && head < safe.Number.Uint64() {
   820  		log.Warn("SetHead invalidated safe block")
   821  		bc.SetSafe(nil)
   822  	}
   823  	if finalized := bc.CurrentFinalBlock(); finalized != nil && head < finalized.Number.Uint64() {
   824  		log.Error("SetHead invalidated finalized block")
   825  		bc.SetFinalized(nil)
   826  	}
   827  	return rootNumber, bc.loadLastState()
   828  }
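// Repair mode (repair == true) is how NewBlockChain heals a missing head state
// while keeping the header chain intact, e.g. as invoked from the constructor
// above:
//
//	snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)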
   829  
    830  // SnapSyncCommitHead sets the current head block to the one defined by the hash,
    831  // irrespective of what the chain contents were prior.
   832  func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
    833  	// Make sure that both the block as well as its state trie exist
   834  	block := bc.GetBlockByHash(hash)
   835  	if block == nil {
   836  		return fmt.Errorf("non existent block [%x..]", hash[:4])
   837  	}
   838  	// Reset the trie database with the fresh snap synced state.
   839  	root := block.Root()
   840  	if bc.triedb.Scheme() == rawdb.PathScheme {
   841  		if err := bc.triedb.Reset(root); err != nil {
   842  			return err
   843  		}
   844  	}
   845  	if !bc.HasState(root) {
   846  		return fmt.Errorf("non existent state [%x..]", root[:4])
   847  	}
   848  	// If all checks out, manually set the head block.
   849  	if !bc.chainmu.TryLock() {
   850  		return errChainStopped
   851  	}
   852  	bc.currentBlock.Store(block.Header())
   853  	headBlockGauge.Update(int64(block.NumberU64()))
   854  	bc.chainmu.Unlock()
   855  
   856  	// Destroy any existing state snapshot and regenerate it in the background,
   857  	// also resuming the normal maintenance of any previously paused snapshot.
   858  	if bc.snaps != nil {
   859  		bc.snaps.Rebuild(root)
   860  	}
   861  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   862  	return nil
   863  }
   864  
   865  // Reset purges the entire blockchain, restoring it to its genesis state.
   866  func (bc *BlockChain) Reset() error {
   867  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   868  }
   869  
   870  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   871  // specified genesis state.
   872  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   873  	// Dump the entire block chain and purge the caches
   874  	if err := bc.SetHead(0); err != nil {
   875  		return err
   876  	}
   877  	if !bc.chainmu.TryLock() {
   878  		return errChainStopped
   879  	}
   880  	defer bc.chainmu.Unlock()
   881  
   882  	// Prepare the genesis block and reinitialise the chain
   883  	batch := bc.db.NewBatch()
   884  	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
   885  	rawdb.WriteBlock(batch, genesis)
   886  	if err := batch.Write(); err != nil {
   887  		log.Crit("Failed to write genesis block", "err", err)
   888  	}
   889  	bc.writeHeadBlock(genesis)
   890  
   891  	// Last update all in-memory chain markers
   892  	bc.genesisBlock = genesis
   893  	bc.currentBlock.Store(bc.genesisBlock.Header())
   894  	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
   895  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   896  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   897  	bc.currentSnapBlock.Store(bc.genesisBlock.Header())
   898  	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
   899  	return nil
   900  }
   901  
   902  // Export writes the active chain to the given writer.
   903  func (bc *BlockChain) Export(w io.Writer) error {
   904  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().Number.Uint64())
   905  }
   906  
   907  // ExportN writes a subset of the active chain to the given writer.
   908  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   909  	if first > last {
   910  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   911  	}
   912  	log.Info("Exporting batch of blocks", "count", last-first+1)
   913  
   914  	var (
   915  		parentHash common.Hash
   916  		start      = time.Now()
   917  		reported   = time.Now()
   918  	)
   919  	for nr := first; nr <= last; nr++ {
   920  		block := bc.GetBlockByNumber(nr)
   921  		if block == nil {
   922  			return fmt.Errorf("export failed on #%d: not found", nr)
   923  		}
   924  		if nr > first && block.ParentHash() != parentHash {
   925  			return errors.New("export failed: chain reorg during export")
   926  		}
   927  		parentHash = block.Hash()
   928  		if err := block.EncodeRLP(w); err != nil {
   929  			return err
   930  		}
   931  		if time.Since(reported) >= statsReportLimit {
   932  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   933  			reported = time.Now()
   934  		}
   935  	}
   936  	return nil
   937  }
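// A typical caller streams the chain into a file, e.g. (illustrative sketch;
// error handling abbreviated):
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	return bc.ExportN(f, 0, bc.CurrentBlock().Number.Uint64())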
   938  
   939  // writeHeadBlock injects a new head block into the current block chain. This method
   940  // assumes that the block is indeed a true head. It will also reset the head
   941  // header and the head snap sync block to this very same block if they are older
   942  // or if they are on a different side chain.
   943  //
   944  // Note, this function assumes that the `mu` mutex is held!
   945  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
   946  	// Add the block to the canonical chain number scheme and mark as the head
   947  	batch := bc.db.NewBatch()
   948  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
   949  	rawdb.WriteHeadFastBlockHash(batch, block.Hash())
   950  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
   951  	rawdb.WriteTxLookupEntriesByBlock(batch, block)
   952  	rawdb.WriteHeadBlockHash(batch, block.Hash())
   953  
   954  	// Flush the whole batch into the disk, exit the node if failed
   955  	if err := batch.Write(); err != nil {
   956  		log.Crit("Failed to update chain indexes and markers", "err", err)
   957  	}
   958  	// Update all in-memory chain markers in the last step
   959  	bc.hc.SetCurrentHeader(block.Header())
   960  
   961  	bc.currentSnapBlock.Store(block.Header())
   962  	headFastBlockGauge.Update(int64(block.NumberU64()))
   963  
   964  	bc.currentBlock.Store(block.Header())
   965  	headBlockGauge.Update(int64(block.NumberU64()))
   966  }
   967  
   968  // stopWithoutSaving stops the blockchain service. If any imports are currently in progress
   969  // it will abort them using the procInterrupt. This method stops all running
   970  // goroutines, but does not do all the post-stop work of persisting data.
   971  // OBS! It is generally recommended to use the Stop method!
   972  // This method has been exposed to allow tests to stop the blockchain while simulating
   973  // a crash.
   974  func (bc *BlockChain) stopWithoutSaving() {
   975  	if !bc.stopping.CompareAndSwap(false, true) {
   976  		return
   977  	}
   978  
   979  	// Unsubscribe all subscriptions registered from blockchain.
   980  	bc.scope.Close()
   981  
   982  	// Signal shutdown to all goroutines.
   983  	close(bc.quit)
   984  	bc.StopInsert()
   985  
   986  	// Now wait for all chain modifications to end and persistent goroutines to exit.
   987  	//
   988  	// Note: Close waits for the mutex to become available, i.e. any running chain
   989  	// modification will have exited when Close returns. Since we also called StopInsert,
   990  	// the mutex should become available quickly. It cannot be taken again after Close has
   991  	// returned.
   992  	bc.chainmu.Close()
   993  	bc.wg.Wait()
   994  }
   995  
   996  // Stop stops the blockchain service. If any imports are currently in progress
   997  // it will abort them using the procInterrupt.
   998  func (bc *BlockChain) Stop() {
   999  	bc.stopWithoutSaving()
  1000  
  1001  	// Ensure that the entirety of the state snapshot is journalled to disk.
  1002  	var snapBase common.Hash
  1003  	if bc.snaps != nil {
  1004  		var err error
  1005  		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root); err != nil {
  1006  			log.Error("Failed to journal state snapshot", "err", err)
  1007  		}
  1008  	}
  1009  	if bc.triedb.Scheme() == rawdb.PathScheme {
  1010  		// Ensure that the in-memory trie nodes are journaled to disk properly.
  1011  		if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil {
  1012  			log.Info("Failed to journal in-memory trie nodes", "err", err)
  1013  		}
  1014  	} else {
  1015  		// Ensure the state of a recent block is also stored to disk before exiting.
  1016  		// We're writing three different states to catch different restart scenarios:
  1017  		//  - HEAD:     So we don't need to reprocess any blocks in the general case
  1018  		//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
  1019  		//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
  1020  		if !bc.cacheConfig.TrieDirtyDisabled {
  1021  			triedb := bc.triedb
  1022  
  1023  			for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
  1024  				if number := bc.CurrentBlock().Number.Uint64(); number > offset {
  1025  					recent := bc.GetBlockByNumber(number - offset)
  1026  
  1027  					log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
  1028  					if err := triedb.Commit(recent.Root(), true); err != nil {
  1029  						log.Error("Failed to commit recent state trie", "err", err)
  1030  					}
  1031  				}
  1032  			}
  1033  			if snapBase != (common.Hash{}) {
  1034  				log.Info("Writing snapshot state to disk", "root", snapBase)
  1035  				if err := triedb.Commit(snapBase, true); err != nil {
  1036  					log.Error("Failed to commit recent state trie", "err", err)
  1037  				}
  1038  			}
  1039  			for !bc.triegc.Empty() {
  1040  				triedb.Dereference(bc.triegc.PopItem())
  1041  			}
   1042  			if _, nodes, _ := triedb.Size(); nodes != 0 { // all memory is contained within the nodes returned for hashdb
  1043  				log.Error("Dangling trie nodes after full cleanup")
  1044  			}
  1045  		}
  1046  	}
  1047  	// Close the trie database, release all the held resources as the last step.
  1048  	if err := bc.triedb.Close(); err != nil {
  1049  		log.Error("Failed to close trie database", "err", err)
  1050  	}
  1051  	log.Info("Blockchain stopped")
  1052  }
  1053  
  1054  // StopInsert interrupts all insertion methods, causing them to return
  1055  // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
  1056  // calling this method.
  1057  func (bc *BlockChain) StopInsert() {
  1058  	bc.procInterrupt.Store(true)
  1059  }
  1060  
  1061  // insertStopped returns true after StopInsert has been called.
  1062  func (bc *BlockChain) insertStopped() bool {
  1063  	return bc.procInterrupt.Load()
  1064  }
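// Long-running insertion loops are expected to poll this flag between blocks,
// e.g. (illustrative pattern):
//
//	for _, block := range blocks {
//		if bc.insertStopped() {
//			return 0, errInsertionInterrupted
//		}
//		// ... process block ...
//	}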
  1065  
  1066  func (bc *BlockChain) procFutureBlocks() {
  1067  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
  1068  	for _, hash := range bc.futureBlocks.Keys() {
  1069  		if block, exist := bc.futureBlocks.Peek(hash); exist {
  1070  			blocks = append(blocks, block)
  1071  		}
  1072  	}
  1073  	if len(blocks) > 0 {
  1074  		slices.SortFunc(blocks, func(a, b *types.Block) int {
  1075  			return a.Number().Cmp(b.Number())
  1076  		})
  1077  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
  1078  		for i := range blocks {
  1079  			bc.InsertChain(blocks[i : i+1])
  1080  		}
  1081  	}
  1082  }
  1083  
  1084  // WriteStatus status of write
  1085  type WriteStatus byte
  1086  
  1087  const (
  1088  	NonStatTy WriteStatus = iota
  1089  	CanonStatTy
  1090  	SideStatTy
  1091  )
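// Callers that write blocks typically branch on the returned status, e.g.
// (a sketch of the pattern, not the package's actual event wiring):
//
//	switch status {
//	case CanonStatTy:
//		// block extended the canonical chain: emit chain head events
//	case SideStatTy:
//		// block landed on a side chain: emit side chain events
//	case NonStatTy:
//		// block was not inserted
//	}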
  1092  
  1093  // InsertReceiptChain attempts to complete an already existing header chain with
  1094  // transaction and receipt data.
  1095  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
   1096  	// We don't require chainmu here since we want to maximize the
   1097  	// concurrency of header insertion and receipt insertion.
  1098  	bc.wg.Add(1)
  1099  	defer bc.wg.Done()
  1100  
  1101  	var (
  1102  		ancientBlocks, liveBlocks     types.Blocks
  1103  		ancientReceipts, liveReceipts []types.Receipts
  1104  	)
  1105  	// Do a sanity check that the provided chain is actually ordered and linked
  1106  	for i, block := range blockChain {
  1107  		if i != 0 {
  1108  			prev := blockChain[i-1]
  1109  			if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1110  				log.Error("Non contiguous receipt insert",
  1111  					"number", block.Number(), "hash", block.Hash(), "parent", block.ParentHash(),
  1112  					"prevnumber", prev.Number(), "prevhash", prev.Hash())
  1113  				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])",
  1114  					i-1, prev.NumberU64(), prev.Hash().Bytes()[:4],
  1115  					i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1116  			}
  1117  		}
  1118  		if block.NumberU64() <= ancientLimit {
  1119  			ancientBlocks, ancientReceipts = append(ancientBlocks, block), append(ancientReceipts, receiptChain[i])
  1120  		} else {
  1121  			liveBlocks, liveReceipts = append(liveBlocks, block), append(liveReceipts, receiptChain[i])
  1122  		}
  1123  
  1124  		// Here we also validate that blob transactions in the block do not contain a sidecar.
  1125  		// While the sidecar does not affect the block hash / tx hash, sending blobs within a block is not allowed.
  1126  		for txIndex, tx := range block.Transactions() {
  1127  			if tx.Type() == types.BlobTxType && tx.BlobTxSidecar() != nil {
  1128  				return 0, fmt.Errorf("block #%d contains unexpected blob sidecar in tx at index %d", block.NumberU64(), txIndex)
  1129  			}
  1130  		}
  1131  	}
  1132  
  1133  	var (
  1134  		stats = struct{ processed, ignored int32 }{}
  1135  		start = time.Now()
  1136  		size  = int64(0)
  1137  	)
  1138  
  1139  	// updateHead updates the head snap sync block if the inserted blocks are better
  1140  	// and returns an indicator whether the inserted blocks are canonical.
  1141  	updateHead := func(head *types.Block) bool {
  1142  		if !bc.chainmu.TryLock() {
  1143  			return false
  1144  		}
  1145  		defer bc.chainmu.Unlock()
  1146  
  1147  		// Rewind may have occurred, skip in that case.
  1148  		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
  1149  			reorg, err := bc.forker.ReorgNeeded(bc.CurrentSnapBlock(), head.Header())
  1150  			if err != nil {
  1151  				log.Warn("Reorg failed", "err", err)
  1152  				return false
  1153  			} else if !reorg {
  1154  				return false
  1155  			}
  1156  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
  1157  			bc.currentSnapBlock.Store(head.Header())
  1158  			headFastBlockGauge.Update(int64(head.NumberU64()))
  1159  			return true
  1160  		}
  1161  		return false
  1162  	}
  1163  	// writeAncient writes the block chain and corresponding receipt chain into the ancient store.
  1164  	//
  1165  	// This function only accepts canonical chain data; all side chains will be reverted
  1166  	// eventually.
  1167  	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1168  		first := blockChain[0]
  1169  		last := blockChain[len(blockChain)-1]
  1170  
  1171  		// Ensure genesis is in ancients.
  1172  		if first.NumberU64() == 1 {
  1173  			if frozen, _ := bc.db.Ancients(); frozen == 0 {
  1174  				b := bc.genesisBlock
  1175  				td := bc.genesisBlock.Difficulty()
  1176  				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td)
  1177  				size += writeSize
  1178  				if err != nil {
  1179  					log.Error("Error writing genesis to ancients", "err", err)
  1180  					return 0, err
  1181  				}
  1182  				log.Info("Wrote genesis to ancients")
  1183  			}
  1184  		}
  1185  		// Before writing the blocks to the ancients, we need to ensure that
  1186  	// they correspond to what the headerchain 'expects'.
  1187  		// We only check the last block/header, since it's a contiguous chain.
  1188  		if !bc.HasHeader(last.Hash(), last.NumberU64()) {
  1189  			return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
  1190  		}
  1191  
  1192  		// Write all chain data to ancients.
  1193  		td := bc.GetTd(first.Hash(), first.NumberU64())
  1194  		writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
  1195  		size += writeSize
  1196  		if err != nil {
  1197  			log.Error("Error importing chain data to ancients", "err", err)
  1198  			return 0, err
  1199  		}
  1200  
  1201  		// Write tx indices if any of the following conditions is satisfied:
  1202  		// * the user wants to retain all tx indices (txlookuplimit=0)
  1203  		// * all ancient tx indices are required to be retained (txlookuplimit is at least as high as ancientlimit)
  1204  		// * the block number is large enough to be regarded as a recent block
  1205  		// This means blocks below ancientLimit-txlookupLimit won't be indexed.
  1206  		//
  1207  		// But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
  1208  		// an external ancient database, during the setup, blockchain will start
  1209  		// a background routine to re-index all indices in the [ancients - txlookupLimit, ancients)
  1210  		// range. In this case, all tx indices of newly imported blocks should be
  1211  		// generated.
  1212  		var batch = bc.db.NewBatch()
  1213  		for i, block := range blockChain {
  1214  			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
  1215  				rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1216  			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
  1217  				rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1218  			}
  1219  			stats.processed++
  1220  
  1221  			if batch.ValueSize() > zonddb.IdealBatchSize || i == len(blockChain)-1 {
  1222  				size += int64(batch.ValueSize())
  1223  				if err = batch.Write(); err != nil {
  1224  					snapBlock := bc.CurrentSnapBlock().Number.Uint64()
  1225  					if _, err := bc.db.TruncateHead(snapBlock + 1); err != nil {
  1226  						log.Error("Can't truncate ancient store after failed insert", "err", err)
  1227  					}
  1228  					return 0, err
  1229  				}
  1230  				batch.Reset()
  1231  			}
  1232  		}
  1233  
  1234  		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1235  		if err := bc.db.Sync(); err != nil {
  1236  			return 0, err
  1237  		}
  1238  		// Update the current snap block because all block data is now present in DB.
  1239  		previousSnapBlock := bc.CurrentSnapBlock().Number.Uint64()
  1240  		if !updateHead(blockChain[len(blockChain)-1]) {
  1241  			// We end up here if the header chain has reorg'ed, and the blocks/receipts
  1242  			// don't match the canonical chain.
  1243  			if _, err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil {
  1244  				log.Error("Can't truncate ancient store after failed insert", "err", err)
  1245  			}
  1246  			return 0, errSideChainReceipts
  1247  		}
  1248  
  1249  		// Delete block data from the main database.
  1250  		batch.Reset()
  1251  		canonHashes := make(map[common.Hash]struct{})
  1252  		for _, block := range blockChain {
  1253  			canonHashes[block.Hash()] = struct{}{}
  1254  			if block.NumberU64() == 0 {
  1255  				continue
  1256  			}
  1257  			rawdb.DeleteCanonicalHash(batch, block.NumberU64())
  1258  			rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
  1259  		}
  1260  		// Delete side chain hash-to-number mappings.
  1261  		for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
  1262  			if _, canon := canonHashes[nh.Hash]; !canon {
  1263  				rawdb.DeleteHeader(batch, nh.Hash, nh.Number)
  1264  			}
  1265  		}
  1266  		if err := batch.Write(); err != nil {
  1267  			return 0, err
  1268  		}
  1269  		return 0, nil
  1270  	}
  1271  
  1272  	// writeLive writes the block chain and corresponding receipt chain into the active store.
  1273  	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1274  		skipPresenceCheck := false
  1275  		batch := bc.db.NewBatch()
  1276  		for i, block := range blockChain {
  1277  			// Short circuit insertion if shutting down or processing failed
  1278  			if bc.insertStopped() {
  1279  				return 0, errInsertionInterrupted
  1280  			}
  1281  			// Short circuit if the owner header is unknown
  1282  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1283  				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
  1284  			}
  1285  			if !skipPresenceCheck {
  1286  				// Ignore if the entire data is already known
  1287  				if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1288  					stats.ignored++
  1289  					continue
  1290  				} else {
  1291  					// If block N is not present, neither are the later blocks.
  1292  					// This should be true, but if we are mistaken, the shortcut
  1293  					// here will only cause overwriting of some existing data
  1294  					skipPresenceCheck = true
  1295  				}
  1296  			}
  1297  			// Write all the data out into the database
  1298  			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
  1299  			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
  1300  			rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed
  1301  
  1302  			// Write everything that belongs to the blocks into the database, so that
  1303  			// we can ensure all components of the body are complete (body, receipts,
  1304  			// tx indexes).
  1305  			if batch.ValueSize() >= zonddb.IdealBatchSize {
  1306  				if err := batch.Write(); err != nil {
  1307  					return 0, err
  1308  				}
  1309  				size += int64(batch.ValueSize())
  1310  				batch.Reset()
  1311  			}
  1312  			stats.processed++
  1313  		}
  1314  		// Write everything that belongs to the blocks into the database, so that
  1315  		// we can ensure all components of the body are complete (body, receipts,
  1316  		// tx indexes).
  1317  		if batch.ValueSize() > 0 {
  1318  			size += int64(batch.ValueSize())
  1319  			if err := batch.Write(); err != nil {
  1320  				return 0, err
  1321  			}
  1322  		}
  1323  		updateHead(blockChain[len(blockChain)-1])
  1324  		return 0, nil
  1325  	}
  1326  
  1327  	// Write downloaded chain data and corresponding receipt chain data
  1328  	if len(ancientBlocks) > 0 {
  1329  		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
  1330  			if err == errInsertionInterrupted {
  1331  				return 0, nil
  1332  			}
  1333  			return n, err
  1334  		}
  1335  	}
  1336  	// Write the tx index tail (block number from which we index) before writing any live blocks
  1337  	if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
  1338  		// The tx index tail can only be one of the following two options:
  1339  		// * 0: all ancient blocks have been indexed
  1340  		// * ancient-limit: the indices of blocks before ancient-limit are ignored
  1341  		if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
  1342  			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
  1343  				rawdb.WriteTxIndexTail(bc.db, 0)
  1344  			} else {
  1345  				rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
  1346  			}
  1347  		}
  1348  	}
  1349  	if len(liveBlocks) > 0 {
  1350  		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
  1351  			if err == errInsertionInterrupted {
  1352  				return 0, nil
  1353  			}
  1354  			return n, err
  1355  		}
  1356  	}
  1357  
  1358  	head := blockChain[len(blockChain)-1]
  1359  	context := []interface{}{
  1360  		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
  1361  		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
  1362  		"size", common.StorageSize(size),
  1363  	}
  1364  	if stats.ignored > 0 {
  1365  		context = append(context, []interface{}{"ignored", stats.ignored}...)
  1366  	}
  1367  	log.Debug("Imported new block receipts", context...)
  1368  
  1369  	return 0, nil
  1370  }
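
// importReceiptSegment is a minimal illustrative sketch (the helper name and
// wiring are assumptions, not upstream API) of how a snap-sync driver might
// feed a downloaded segment into InsertReceiptChain: bodies and receipts for
// blocks at or below ancientLimit land in the freezer, the rest in the live
// database.
func importReceiptSegment(bc *BlockChain, blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) error {
	if len(blocks) != len(receipts) {
		return fmt.Errorf("mismatched segment: %d blocks vs %d receipt sets", len(blocks), len(receipts))
	}
	_, err := bc.InsertReceiptChain(blocks, receipts, ancientLimit)
	return err
}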
  1371  
  1372  // writeBlockWithoutState writes only the block and its metadata to the database,
  1373  // but does not write any state. This is used to construct competing side forks
  1374  // up to the point where they exceed the canonical total difficulty.
  1375  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1376  	if bc.insertStopped() {
  1377  		return errInsertionInterrupted
  1378  	}
  1379  
  1380  	batch := bc.db.NewBatch()
  1381  	rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
  1382  	rawdb.WriteBlock(batch, block)
  1383  	if err := batch.Write(); err != nil {
  1384  		log.Crit("Failed to write block into disk", "err", err)
  1385  	}
  1386  	return nil
  1387  }
  1388  
  1389  // writeKnownBlock updates the head block flag with a known block
  1390  // and introduces chain reorg if necessary.
  1391  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1392  	current := bc.CurrentBlock()
  1393  	if block.ParentHash() != current.Hash() {
  1394  		if err := bc.reorg(current, block); err != nil {
  1395  			return err
  1396  		}
  1397  	}
  1398  	bc.writeHeadBlock(block)
  1399  	return nil
  1400  }
  1401  
  1402  // writeBlockWithState writes block, metadata and corresponding state data to the
  1403  // database.
  1404  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
  1405  	// Calculate the total difficulty of the block
  1406  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1407  	if ptd == nil {
  1408  		return consensus.ErrUnknownAncestor
  1409  	}
  1410  	// Make sure no inconsistent state is leaked during insertion
  1411  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
  1412  
  1413  	// Irrespective of the canonical status, write the block itself to the database.
  1414  	//
  1415  	// Note that all the components of the block (td, hash->number map, header, body,
  1416  	// receipts) should be written atomically. A block batch is used to contain all components.
  1417  	blockBatch := bc.db.NewBatch()
  1418  	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
  1419  	rawdb.WriteBlock(blockBatch, block)
  1420  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
  1421  	rawdb.WritePreimages(blockBatch, state.Preimages())
  1422  	if err := blockBatch.Write(); err != nil {
  1423  		log.Crit("Failed to write block into disk", "err", err)
  1424  	}
  1425  	// Commit all cached state changes into underlying memory database.
  1426  	root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
  1427  	if err != nil {
  1428  		return err
  1429  	}
  1430  	// If node is running in path mode, skip explicit gc operation
  1431  	// which is unnecessary in this mode.
  1432  	if bc.triedb.Scheme() == rawdb.PathScheme {
  1433  		return nil
  1434  	}
  1435  	// If we're running an archive node, always flush
  1436  	if bc.cacheConfig.TrieDirtyDisabled {
  1437  		return bc.triedb.Commit(root, false)
  1438  	}
  1439  	// Full but not archive node, do proper garbage collection
  1440  	bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
  1441  	bc.triegc.Push(root, -int64(block.NumberU64()))
  1442  
  1443  	// Flush limits are not considered for the first TriesInMemory blocks.
  1444  	current := block.NumberU64()
  1445  	if current <= TriesInMemory {
  1446  		return nil
  1447  	}
  1448  	// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1449  	var (
  1450  		_, nodes, imgs = bc.triedb.Size() // all memory is contained within the nodes returned for hashdb
  1451  		limit          = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
  1452  	)
  1453  	if nodes > limit || imgs > 4*1024*1024 {
  1454  		bc.triedb.Cap(limit - zonddb.IdealBatchSize)
  1455  	}
  1456  	// Find the next state trie we need to commit
  1457  	chosen := current - TriesInMemory
  1458  	flushInterval := time.Duration(bc.flushInterval.Load())
  1459  	// If we exceeded time allowance, flush an entire trie to disk
  1460  	if bc.gcproc > flushInterval {
  1461  		// If the header is missing (canonical chain behind), we're reorging a low
  1462  		// diff sidechain. Suspend committing until this operation is completed.
  1463  		header := bc.GetHeaderByNumber(chosen)
  1464  		if header == nil {
  1465  			log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
  1466  		} else {
  1467  			// If we're exceeding limits but haven't reached a large enough memory gap,
  1468  			// warn the user that the system is becoming unstable.
  1469  			if chosen < bc.lastWrite+TriesInMemory && bc.gcproc >= 2*flushInterval {
  1470  				log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/TriesInMemory)
  1471  			}
  1472  			// Flush an entire trie and restart the counters
  1473  			bc.triedb.Commit(header.Root, true)
  1474  			bc.lastWrite = chosen
  1475  			bc.gcproc = 0
  1476  		}
  1477  	}
  1478  	// Garbage collect anything below our required write retention
  1479  	for !bc.triegc.Empty() {
  1480  		root, number := bc.triegc.Pop()
  1481  		if uint64(-number) > chosen {
  1482  			bc.triegc.Push(root, number)
  1483  			break
  1484  		}
  1485  		bc.triedb.Dereference(root)
  1486  	}
  1487  	return nil
  1488  }
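
// dirtyTrieOverLimit is a hypothetical restatement of the memory check made in
// writeBlockWithState above: dirty trie nodes are capped once they exceed the
// configured TrieDirtyLimit (given in MiB), or once preimages grow past 4MiB.
// Illustrative sketch only.
func dirtyTrieOverLimit(nodes, preimages common.StorageSize, dirtyLimitMiB int) bool {
	limit := common.StorageSize(dirtyLimitMiB) * 1024 * 1024
	return nodes > limit || preimages > 4*1024*1024
}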
  1489  
  1490  // WriteBlockAndSetHead writes the given block and all associated state to the database,
  1491  // and applies the block as the new chain head.
  1492  func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1493  	if !bc.chainmu.TryLock() {
  1494  		return NonStatTy, errChainStopped
  1495  	}
  1496  	defer bc.chainmu.Unlock()
  1497  
  1498  	return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent)
  1499  }
  1500  
  1501  // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
  1502  // This function expects the chain mutex to be held.
  1503  func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1504  	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
  1505  		return NonStatTy, err
  1506  	}
  1507  	currentBlock := bc.CurrentBlock()
  1508  	reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
  1509  	if err != nil {
  1510  		return NonStatTy, err
  1511  	}
  1512  	if reorg {
  1513  		// Reorganise the chain if the parent is not the head block
  1514  		if block.ParentHash() != currentBlock.Hash() {
  1515  			if err := bc.reorg(currentBlock, block); err != nil {
  1516  				return NonStatTy, err
  1517  			}
  1518  		}
  1519  		status = CanonStatTy
  1520  	} else {
  1521  		status = SideStatTy
  1522  	}
  1523  	// Set new head.
  1524  	if status == CanonStatTy {
  1525  		bc.writeHeadBlock(block)
  1526  	}
  1527  	bc.futureBlocks.Remove(block.Hash())
  1528  
  1529  	if status == CanonStatTy {
  1530  		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
  1531  		if len(logs) > 0 {
  1532  			bc.logsFeed.Send(logs)
  1533  		}
  1534  		// In theory, we should fire a ChainHeadEvent when we inject
  1535  		// a canonical block, but sometimes we can insert a batch of
  1536  		// canonical blocks. To avoid firing too many ChainHeadEvents,
  1537  		// we fire a single accumulated ChainHeadEvent and disable firing
  1538  		// the event here.
  1539  		if emitHeadEvent {
  1540  			bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1541  		}
  1542  	} else {
  1543  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1544  	}
  1545  	return status, nil
  1546  }
  1547  
  1548  // addFutureBlock checks if the block is within the max allowed window to get
  1549  // accepted for future processing, and returns an error if the block is too far
  1550  // ahead and was not added.
  1551  //
  1552  // TODO: after the transition, future blocks shouldn't be kept, because
  1553  // they are no longer checked on the Geth side.
  1554  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1555  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1556  	if block.Time() > max {
  1557  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1558  	}
  1559  	if block.Difficulty().Cmp(common.Big0) == 0 {
  1560  		// Never add PoS blocks into the future queue
  1561  		return nil
  1562  	}
  1563  	bc.futureBlocks.Add(block.Hash(), block)
  1564  	return nil
  1565  }
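
// tooFarInFuture is an illustrative sketch (hypothetical helper) mirroring the
// acceptance window enforced by addFutureBlock above: a block is rejected when
// its timestamp lies more than maxTimeFutureBlocks seconds past the local clock.
func tooFarInFuture(block *types.Block) bool {
	return block.Time() > uint64(time.Now().Unix()+maxTimeFutureBlocks)
}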
  1566  
  1567  // InsertChain attempts to insert the given batch of blocks into the canonical
  1568  // chain or, otherwise, create a fork. If an error is returned, it will return
  1569  // the index number of the failing block as well as an error describing what went
  1570  // wrong. After insertion is done, all accumulated events will be fired.
  1571  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1572  	// Sanity check that we have something meaningful to import
  1573  	if len(chain) == 0 {
  1574  		return 0, nil
  1575  	}
  1576  	bc.blockProcFeed.Send(true)
  1577  	defer bc.blockProcFeed.Send(false)
  1578  
  1579  	// Do a sanity check that the provided chain is actually ordered and linked.
  1580  	for i := 1; i < len(chain); i++ {
  1581  		block, prev := chain[i], chain[i-1]
  1582  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1583  			log.Error("Non contiguous block insert",
  1584  				"number", block.Number(),
  1585  				"hash", block.Hash(),
  1586  				"parent", block.ParentHash(),
  1587  				"prevnumber", prev.Number(),
  1588  				"prevhash", prev.Hash(),
  1589  			)
  1590  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(),
  1591  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1592  		}
  1593  	}
  1594  	// Pre-checks passed, start the full block imports
  1595  	if !bc.chainmu.TryLock() {
  1596  		return 0, errChainStopped
  1597  	}
  1598  	defer bc.chainmu.Unlock()
  1599  	return bc.insertChain(chain, true)
  1600  }
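
// verifyContiguous is a hypothetical helper spelling out the ordering invariant
// that InsertChain enforces above: every block must be numbered one past its
// predecessor and reference it as parent. Sketch for illustration only.
func verifyContiguous(chain types.Blocks) error {
	for i := 1; i < len(chain); i++ {
		block, prev := chain[i], chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			return fmt.Errorf("non contiguous chain: item %d (#%d) does not extend item %d (#%d)",
				i, block.NumberU64(), i-1, prev.NumberU64())
		}
	}
	return nil
}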
  1601  
  1602  // insertChain is the internal implementation of InsertChain, which assumes that
  1603  // 1) chains are contiguous, and 2) the chain mutex is held.
  1604  //
  1605  // This method is split out so that import batches that require re-injecting
  1606  // historical blocks can do so without releasing the lock, which could lead to
  1607  // racey behaviour. If a sidechain import is in progress, and the historic state
  1608  // is imported, but then a new canon-head is added before the actual sidechain
  1609  // completes, then the historic state could be pruned again.
  1610  func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) {
  1611  	// If the chain is terminating, don't even bother starting up.
  1612  	if bc.insertStopped() {
  1613  		return 0, nil
  1614  	}
  1615  
  1616  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1617  	SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
  1618  
  1619  	var (
  1620  		stats     = insertStats{startTime: mclock.Now()}
  1621  		lastCanon *types.Block
  1622  	)
  1623  	// Fire a single chain head event if we've progressed the chain
  1624  	defer func() {
  1625  		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1626  			bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
  1627  		}
  1628  	}()
  1629  	// Start the parallel header verifier
  1630  	headers := make([]*types.Header, len(chain))
  1631  	for i, block := range chain {
  1632  		headers[i] = block.Header()
  1633  	}
  1634  	abort, results := bc.engine.VerifyHeaders(bc, headers)
  1635  	defer close(abort)
  1636  
  1637  	// Peek at the error for the first block to decide which import path to take
  1638  	it := newInsertIterator(chain, results, bc.validator)
  1639  	block, err := it.next()
  1640  
  1641  	// Left-trim all the known blocks that don't need snapshot rebuilding
  1642  	if bc.skipBlock(err, it) {
  1643  		// First block (and state) is known
  1644  		//   1. We did a roll-back, and should now do a re-import
  1645  		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
  1646  		//      from the canonical chain, which has not been verified.
  1647  		// Skip all known blocks that are behind us.
  1648  		var (
  1649  			reorg   bool
  1650  			current = bc.CurrentBlock()
  1651  		)
  1652  		for block != nil && bc.skipBlock(err, it) {
  1653  			reorg, err = bc.forker.ReorgNeeded(current, block.Header())
  1654  			if err != nil {
  1655  				return it.index, err
  1656  			}
  1657  			if reorg {
  1658  				// Switch to import mode if the forker says the reorg is necessary
  1659  				// and also the block is not on the canonical chain.
  1660  				// In eth2 the forker always returns true for reorg decision (blindly trusting
  1661  				// the external consensus engine), but in order to prevent the unnecessary
  1662  				// reorgs when importing known blocks, the special case is handled here.
  1663  				if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
  1664  					break
  1665  				}
  1666  			}
  1667  			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
  1668  			stats.ignored++
  1669  
  1670  			block, err = it.next()
  1671  		}
  1672  		// The remaining blocks are still known blocks, and the only scenario here is:
  1673  		// during snap sync, the pivot point has already been submitted but a rollback
  1674  		// happens. The node then resets the head full block to a lower height via `rollback`
  1675  		// and leaves a few known blocks in the database.
  1676  		//
  1677  		// When the node runs a snap sync again, it can re-import a batch of known blocks via
  1678  		// `insertChain` while some of them have a higher total difficulty than the current
  1679  		// head full block (the new pivot point).
  1680  		for block != nil && bc.skipBlock(err, it) {
  1681  			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
  1682  			if err := bc.writeKnownBlock(block); err != nil {
  1683  				return it.index, err
  1684  			}
  1685  			lastCanon = block
  1686  
  1687  			block, err = it.next()
  1688  		}
  1689  		// Falls through to the block import
  1690  	}
  1691  	switch {
  1692  	// First block is pruned
  1693  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1694  		if setHead {
  1695  			// First block is pruned, insert as sidechain and reorg only if TD grows enough
  1696  			log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
  1697  			return bc.insertSideChain(block, it)
  1698  		} else {
  1699  			// We're post-merge and the parent is pruned, try to recover the parent state
  1700  			log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
  1701  			_, err := bc.recoverAncestors(block)
  1702  			return it.index, err
  1703  		}
  1704  	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
  1705  	case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
  1706  		for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
  1707  			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
  1708  			if err := bc.addFutureBlock(block); err != nil {
  1709  				return it.index, err
  1710  			}
  1711  			block, err = it.next()
  1712  		}
  1713  		stats.queued += it.processed()
  1714  		stats.ignored += it.remaining()
  1715  
  1716  		// If there are any still remaining, mark as ignored
  1717  		return it.index, err
  1718  
  1719  	// Some other error(except ErrKnownBlock) occurred, abort.
  1720  	// ErrKnownBlock is allowed here since some known blocks
  1721  	// still need re-execution to generate snapshots that are missing
  1722  	case err != nil && !errors.Is(err, ErrKnownBlock):
  1723  		bc.futureBlocks.Remove(block.Hash())
  1724  		stats.ignored += len(it.chain)
  1725  		bc.reportBlock(block, nil, err)
  1726  		return it.index, err
  1727  	}
  1728  	// No validation errors for the first block (or chain prefix skipped)
  1729  	var activeState *state.StateDB
  1730  	defer func() {
  1731  		// The chain importer is starting and stopping trie prefetchers. If a bad
  1732  		// block or other error is hit however, an early return may not properly
  1733  		// terminate the background threads. This defer ensures that we clean up
  1734  		// any dangling prefetcher, without deferring each and holding on to live refs.
  1735  		if activeState != nil {
  1736  			activeState.StopPrefetcher()
  1737  		}
  1738  	}()
  1739  
  1740  	for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
  1741  		// If the chain is terminating, stop processing blocks
  1742  		if bc.insertStopped() {
  1743  			log.Debug("Abort during block processing")
  1744  			break
  1745  		}
  1746  		// If the header is a banned one, straight out abort
  1747  		if BadHashes[block.Hash()] {
  1748  			bc.reportBlock(block, nil, ErrBannedHash)
  1749  			return it.index, ErrBannedHash
  1750  		}
  1751  		// If the block is known (in the middle of the chain), it's a special case for
  1752  		// Clique blocks where they can share state among each other, so importing an
  1753  		// older block might complete the state of the subsequent one. In this case,
  1754  		// just skip the block (we already validated it once fully (and crashed), since
  1755  		// its header and body were already in the database). But if the corresponding
  1756  		// snapshot layer is missing, forcibly rerun the execution to build it.
  1757  		if bc.skipBlock(err, it) {
  1758  			logger := log.Debug
  1759  			if bc.chainConfig.Clique == nil {
  1760  				logger = log.Warn
  1761  			}
  1762  			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
  1763  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1764  				"root", block.Root())
  1765  
  1766  			// Special case. Commit the empty receipt slice if we meet the known
  1767  			// block in the middle. This can only happen in a clique chain. Whenever
  1768  			// we insert blocks via `insertSideChain`, we only commit `td`, `header`
  1769  			// and `body` if they are non-existent. Since we don't have receipts without
  1770  			// re-execution, there is nothing to commit. But if the sidechain is eventually
  1771  			// adopted as the canonical chain, it needs to be re-executed for the missing
  1772  			// state; in this special case here (skipped re-execution) we would lose
  1773  			// the empty receipt entry.
  1774  			if len(block.Transactions()) == 0 {
  1775  				rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
  1776  			} else {
  1777  				log.Error("Please file an issue, skip known block execution without receipt",
  1778  					"hash", block.Hash(), "number", block.NumberU64())
  1779  			}
  1780  			if err := bc.writeKnownBlock(block); err != nil {
  1781  				return it.index, err
  1782  			}
  1783  			stats.processed++
  1784  
  1785  			// We can assume that logs are empty here, since the only way for consecutive
  1786  			// Clique blocks to have the same state is if there are no transactions.
  1787  			lastCanon = block
  1788  			continue
  1789  		}
  1790  
  1791  		// Retrieve the parent block and its state to execute on top
  1792  		start := time.Now()
  1793  		parent := it.previous()
  1794  		if parent == nil {
  1795  			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1796  		}
  1797  		statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
  1798  		if err != nil {
  1799  			return it.index, err
  1800  		}
  1801  
  1802  		// Enable prefetching to pull in trie node paths while processing transactions
  1803  		statedb.StartPrefetcher("chain")
  1804  		activeState = statedb
  1805  
  1806  		// If we have a followup block, run that against the current state to pre-cache
  1807  		// transactions and probabilistically some of the account/storage trie nodes.
  1808  		var followupInterrupt atomic.Bool
  1809  		if !bc.cacheConfig.TrieCleanNoPrefetch {
  1810  			if followup, err := it.peek(); followup != nil && err == nil {
  1811  				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
  1812  
  1813  				go func(start time.Time, followup *types.Block, throwaway *state.StateDB) {
  1814  					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
  1815  
  1816  					blockPrefetchExecuteTimer.Update(time.Since(start))
  1817  					if followupInterrupt.Load() {
  1818  						blockPrefetchInterruptMeter.Mark(1)
  1819  					}
  1820  				}(time.Now(), followup, throwaway)
  1821  			}
  1822  		}
  1823  
  1824  		// Process block using the parent state as reference point
  1825  		pstart := time.Now()
  1826  		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
  1827  		if err != nil {
  1828  			bc.reportBlock(block, receipts, err)
  1829  			followupInterrupt.Store(true)
  1830  			return it.index, err
  1831  		}
  1832  		ptime := time.Since(pstart)
  1833  
  1834  		vstart := time.Now()
  1835  		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1836  			bc.reportBlock(block, receipts, err)
  1837  			followupInterrupt.Store(true)
  1838  			return it.index, err
  1839  		}
  1840  		vtime := time.Since(vstart)
  1841  		proctime := time.Since(start) // processing + validation
  1842  
  1843  		// Update the metrics touched during block processing and validation
  1844  		accountReadTimer.Update(statedb.AccountReads)                   // Account reads are complete(in processing)
  1845  		storageReadTimer.Update(statedb.StorageReads)                   // Storage reads are complete(in processing)
  1846  		snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads)   // Account reads are complete(in processing)
  1847  		snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads)   // Storage reads are complete(in processing)
  1848  		accountUpdateTimer.Update(statedb.AccountUpdates)               // Account updates are complete(in validation)
  1849  		storageUpdateTimer.Update(statedb.StorageUpdates)               // Storage updates are complete(in validation)
  1850  		accountHashTimer.Update(statedb.AccountHashes)                  // Account hashes are complete(in validation)
  1851  		storageHashTimer.Update(statedb.StorageHashes)                  // Storage hashes are complete(in validation)
  1852  		triehash := statedb.AccountHashes + statedb.StorageHashes       // The time spent on tries hashing
  1853  		trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates   // The time spent on tries update
  1854  		trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
  1855  		trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
  1856  		blockExecutionTimer.Update(ptime - trieRead)                    // The time spent on EVM processing
  1857  		blockValidationTimer.Update(vtime - (triehash + trieUpdate))    // The time spent on block validation
  1858  
  1859  		// Write the block to the chain and get the status.
  1860  		var (
  1861  			wstart = time.Now()
  1862  			status WriteStatus
  1863  		)
  1864  		if !setHead {
  1865  			// Don't set the head, only insert the block
  1866  			err = bc.writeBlockWithState(block, receipts, statedb)
  1867  		} else {
  1868  			status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
  1869  		}
  1870  		followupInterrupt.Store(true)
  1871  		if err != nil {
  1872  			return it.index, err
  1873  		}
  1874  		// Update the metrics touched during block commit
  1875  		accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
  1876  		storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
  1877  		snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
  1878  		triedbCommitTimer.Update(statedb.TrieDBCommits)     // Trie database commits are complete, we can mark them
  1879  
  1880  		blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
  1881  		blockInsertTimer.UpdateSince(start)
  1882  
  1883  		// Report the import stats before returning the various results
  1884  		stats.processed++
  1885  		stats.usedGas += usedGas
  1886  
  1887  		var snapDiffItems, snapBufItems common.StorageSize
  1888  		if bc.snaps != nil {
  1889  			snapDiffItems, snapBufItems = bc.snaps.Size()
  1890  		}
  1891  		trieDiffNodes, trieBufNodes, _ := bc.triedb.Size()
  1892  		stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, setHead)
  1893  
  1894  		if !setHead {
  1895  			// After merge we expect few side chains. Simply count
  1896  			// all blocks the CL gives us for GC processing time
  1897  			bc.gcproc += proctime
  1898  
  1899  			return it.index, nil // Direct block insertion of a single block
  1900  		}
  1901  		switch status {
  1902  		case CanonStatTy:
  1903  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1904  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1905  				"elapsed", common.PrettyDuration(time.Since(start)),
  1906  				"root", block.Root())
  1907  
  1908  			lastCanon = block
  1909  
  1910  			// Only count canonical blocks for GC processing time
  1911  			bc.gcproc += proctime
  1912  
  1913  		case SideStatTy:
  1914  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1915  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1916  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1917  				"root", block.Root())
  1918  
  1919  		default:
  1920  			// This in theory is impossible, but let's be nice to our future selves and leave
  1921  			// a log, instead of trying to track down block imports that don't emit logs.
  1922  			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
  1923  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1924  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1925  				"root", block.Root())
  1926  		}
  1927  	}
  1928  
  1929  	// Any blocks remaining here? The only ones we care about are the future ones
  1930  	if block != nil && errors.Is(err, consensus.ErrFutureBlock) {
  1931  		if err := bc.addFutureBlock(block); err != nil {
  1932  			return it.index, err
  1933  		}
  1934  		block, err = it.next()
  1935  
  1936  		for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() {
  1937  			if err := bc.addFutureBlock(block); err != nil {
  1938  				return it.index, err
  1939  			}
  1940  			stats.queued++
  1941  		}
  1942  	}
  1943  	stats.ignored += it.remaining()
  1944  
  1945  	return it.index, err
  1946  }
  1947  
  1948  // insertSideChain is called when an import batch hits upon a pruned ancestor
  1949  // error, which happens when a sidechain with a sufficiently old fork-block is
  1950  // found.
  1951  //
  1952  // The method writes all (header-and-body-valid) blocks to disk, then tries to
  1953  // switch over to the new chain if its TD exceeds that of the current chain.
  1954  // insertSideChain is only used pre-merge.
  1955  func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
  1956  	var (
  1957  		externTd  *big.Int
  1958  		lastBlock = block
  1959  		current   = bc.CurrentBlock()
  1960  	)
  1961  	// The first sidechain block error is already verified to be ErrPrunedAncestor.
  1962  	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
  1963  	// ones. Any other error means that the block is invalid, and should not be written
  1964  	// to disk.
  1965  	err := consensus.ErrPrunedAncestor
  1966  	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
  1967  		// Check the canonical state root for that number
  1968  		if number := block.NumberU64(); current.Number.Uint64() >= number {
  1969  			canonical := bc.GetBlockByNumber(number)
  1970  			if canonical != nil && canonical.Hash() == block.Hash() {
  1971  				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
  1972  
  1973  				// Collect the TD of the block. Since we know it's a canon one,
  1974  				// we can get it directly, and not (like further below) use
  1975  				// the parent and then add the block on top
  1976  				externTd = bc.GetTd(block.Hash(), block.NumberU64())
  1977  				continue
  1978  			}
  1979  			if canonical != nil && canonical.Root() == block.Root() {
  1980  				// This is most likely a shadow-state attack. When a fork is imported into the
  1981  				// database, and it eventually reaches a block height which is not pruned, we
  1982  				// just find that the state already exists! This means that the sidechain block
  1983  				// refers to a state which already exists in our canon chain.
  1984  				//
  1985  				// If left unchecked, we would now proceed importing the blocks, without actually
  1986  				// having verified the state of the previous blocks.
  1987  				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  1988  
  1989  				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
  1990  				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
  1991  				// mechanism.
  1992  				return it.index, errors.New("sidechain ghost-state attack")
  1993  			}
  1994  		}
  1995  		if externTd == nil {
  1996  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1997  		}
  1998  		externTd = new(big.Int).Add(externTd, block.Difficulty())
  1999  
  2000  		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  2001  			start := time.Now()
  2002  			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
  2003  				return it.index, err
  2004  			}
  2005  			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  2006  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  2007  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  2008  				"root", block.Root())
  2009  		}
  2010  		lastBlock = block
  2011  	}
  2012  	// At this point, we've written all sidechain blocks to the database. The loop
  2013  	// ended either on some other error or because all blocks were processed. If
  2014  	// there was some other error, we can ignore the rest of those blocks.
  2015  	//
  2016  	// If the externTd was larger than our local TD, we now need to reimport the previous
  2017  	// blocks to regenerate the required state
  2018  	reorg, err := bc.forker.ReorgNeeded(current, lastBlock.Header())
  2019  	if err != nil {
  2020  		return it.index, err
  2021  	}
  2022  	if !reorg {
  2023  		localTd := bc.GetTd(current.Hash(), current.Number.Uint64())
  2024  		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  2025  		return it.index, err
  2026  	}
  2027  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2028  	var (
  2029  		hashes  []common.Hash
  2030  		numbers []uint64
  2031  	)
  2032  	parent := it.previous()
  2033  	for parent != nil && !bc.HasState(parent.Root) {
  2034  		if bc.stateRecoverable(parent.Root) {
  2035  			if err := bc.triedb.Recover(parent.Root); err != nil {
  2036  				return 0, err
  2037  			}
  2038  			break
  2039  		}
  2040  		hashes = append(hashes, parent.Hash())
  2041  		numbers = append(numbers, parent.Number.Uint64())
  2042  
  2043  		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  2044  	}
  2045  	if parent == nil {
  2046  		return it.index, errors.New("missing parent")
  2047  	}
  2048  	// Import all the pruned blocks to make the state available
  2049  	var (
  2050  		blocks []*types.Block
  2051  		memory uint64
  2052  	)
  2053  	for i := len(hashes) - 1; i >= 0; i-- {
  2054  		// Append the next block to our batch
  2055  		block := bc.GetBlock(hashes[i], numbers[i])
  2056  
  2057  		blocks = append(blocks, block)
  2058  		memory += block.Size()
  2059  
  2060  		// If memory use grew too large, import and continue. Sadly we need to discard
  2061  		// all raised events and logs from notifications since we're too heavy on the
  2062  		// memory here.
  2063  		if len(blocks) >= 2048 || memory > 64*1024*1024 {
  2064  			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  2065  			if _, err := bc.insertChain(blocks, true); err != nil {
  2066  				return 0, err
  2067  			}
  2068  			blocks, memory = blocks[:0], 0
  2069  
  2070  			// If the chain is terminating, stop processing blocks
  2071  			if bc.insertStopped() {
  2072  				log.Debug("Abort during blocks processing")
  2073  				return 0, nil
  2074  			}
  2075  		}
  2076  	}
  2077  	if len(blocks) > 0 {
  2078  		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  2079  		return bc.insertChain(blocks, true)
  2080  	}
  2081  	return 0, nil
  2082  }
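
// sideChainBatchFull is a hypothetical restatement of the heuristics used by
// insertSideChain above when re-importing pruned blocks: a segment is flushed
// once it reaches 2048 blocks or roughly 64MiB of accumulated block data.
func sideChainBatchFull(blocks int, memory uint64) bool {
	return blocks >= 2048 || memory > 64*1024*1024
}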
  2083  
  2084  // recoverAncestors finds the closest ancestor with available state and re-executes
  2085  // all the ancestor blocks since that point.
  2086  // recoverAncestors is only used post-merge.
  2087  // We return the hash of the latest block that we could correctly validate.
  2088  func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) {
  2089  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2090  	var (
  2091  		hashes  []common.Hash
  2092  		numbers []uint64
  2093  		parent  = block
  2094  	)
  2095  	for parent != nil && !bc.HasState(parent.Root()) {
  2096  		if bc.stateRecoverable(parent.Root()) {
  2097  			if err := bc.triedb.Recover(parent.Root()); err != nil {
  2098  				return common.Hash{}, err
  2099  			}
  2100  			break
  2101  		}
  2102  		hashes = append(hashes, parent.Hash())
  2103  		numbers = append(numbers, parent.NumberU64())
  2104  		parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  2105  
  2106  		// If the chain is terminating, stop iteration
  2107  		if bc.insertStopped() {
  2108  			log.Debug("Abort during blocks iteration")
  2109  			return common.Hash{}, errInsertionInterrupted
  2110  		}
  2111  	}
  2112  	if parent == nil {
  2113  		return common.Hash{}, errors.New("missing parent")
  2114  	}
  2115  	// Import all the pruned blocks to make the state available
  2116  	for i := len(hashes) - 1; i >= 0; i-- {
  2117  		// If the chain is terminating, stop processing blocks
  2118  		if bc.insertStopped() {
  2119  			log.Debug("Abort during blocks processing")
  2120  			return common.Hash{}, errInsertionInterrupted
  2121  		}
  2122  		var b *types.Block
  2123  		if i == 0 {
  2124  			b = block
  2125  		} else {
  2126  			b = bc.GetBlock(hashes[i], numbers[i])
  2127  		}
  2128  		if _, err := bc.insertChain(types.Blocks{b}, false); err != nil {
  2129  			return b.ParentHash(), err
  2130  		}
  2131  	}
  2132  	return block.Hash(), nil
  2133  }
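
// hasUsableState is a hypothetical convenience predicate composing the two
// checks recoverAncestors walks back on above: a root is usable if its state is
// present, or if it is recoverable (e.g. via a path-scheme rollback).
func (bc *BlockChain) hasUsableState(root common.Hash) bool {
	return bc.HasState(root) || bc.stateRecoverable(root)
}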
  2134  
  2135  // collectLogs collects the logs that were generated or removed during
  2136  // the processing of a block. These logs are later announced as deleted or reborn.
  2137  func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
  2138  	var blobGasPrice *big.Int
  2139  	excessBlobGas := b.ExcessBlobGas()
  2140  	if excessBlobGas != nil {
  2141  		blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
  2142  	}
  2143  	receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
  2144  	if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil {
  2145  		log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err)
  2146  	}
  2147  	var logs []*types.Log
  2148  	for _, receipt := range receipts {
  2149  		for _, log := range receipt.Logs {
  2150  			if removed {
  2151  				log.Removed = true
  2152  			}
  2153  			logs = append(logs, log)
  2154  		}
  2155  	}
  2156  	return logs
  2157  }
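
// collectRemovedLogs sketches how the reorg path below gathers deleted-side
// logs: oldChain is ordered newest-first, so the reverse traversal emits logs
// starting from the oldest unwound block, each flagged as Removed via
// collectLogs. Hypothetical helper, illustrative only.
func (bc *BlockChain) collectRemovedLogs(oldChain types.Blocks) []*types.Log {
	var deleted []*types.Log
	for i := len(oldChain) - 1; i >= 0; i-- {
		deleted = append(deleted, bc.collectLogs(oldChain[i], true)...)
	}
	return deleted
}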
  2158  
  2159  // reorg takes two blocks, an old chain and a new chain, reconstructs the blocks,
  2160  // inserts them as part of the new canonical chain, accumulates potentially
  2161  // missing transactions and posts events about them.
  2162  // Note the new head block won't be processed here; callers need to handle it
  2163  // externally.
  2164  func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
  2165  	var (
  2166  		newChain    types.Blocks
  2167  		oldChain    types.Blocks
  2168  		commonBlock *types.Block
  2169  
  2170  		deletedTxs []common.Hash
  2171  		addedTxs   []common.Hash
  2172  	)
  2173  	oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  2174  	if oldBlock == nil {
  2175  		return errors.New("current head block missing")
  2176  	}
  2177  	newBlock := newHead
  2178  
  2179  	// Reduce the longer chain to the same number as the shorter one
  2180  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  2181  		// Old chain is longer, gather all transactions and logs as deleted ones
  2182  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  2183  			oldChain = append(oldChain, oldBlock)
  2184  			for _, tx := range oldBlock.Transactions() {
  2185  				deletedTxs = append(deletedTxs, tx.Hash())
  2186  			}
  2187  		}
  2188  	} else {
  2189  		// New chain is longer, stash all blocks away for subsequent insertion
  2190  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  2191  			newChain = append(newChain, newBlock)
  2192  		}
  2193  	}
  2194  	if oldBlock == nil {
  2195  		return errInvalidOldChain
  2196  	}
  2197  	if newBlock == nil {
  2198  		return errInvalidNewChain
  2199  	}
  2200  	// Both sides of the reorg are at the same number, reduce both until the common
  2201  	// ancestor is found
  2202  	for {
  2203  		// If the common ancestor was found, bail out
  2204  		if oldBlock.Hash() == newBlock.Hash() {
  2205  			commonBlock = oldBlock
  2206  			break
  2207  		}
  2208  		// Remove an old block as well as stash away a new block
  2209  		oldChain = append(oldChain, oldBlock)
  2210  		for _, tx := range oldBlock.Transactions() {
  2211  			deletedTxs = append(deletedTxs, tx.Hash())
  2212  		}
  2213  		newChain = append(newChain, newBlock)
  2214  
  2215  		// Step back with both chains
  2216  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  2217  		if oldBlock == nil {
  2218  			return errInvalidOldChain
  2219  		}
  2220  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  2221  		if newBlock == nil {
  2222  			return errInvalidNewChain
  2223  		}
  2224  	}
  2225  
  2226  	// Ensure the user sees large reorgs
  2227  	if len(oldChain) > 0 && len(newChain) > 0 {
  2228  		logFn := log.Info
  2229  		msg := "Chain reorg detected"
  2230  		if len(oldChain) > 63 {
  2231  			msg = "Large chain reorg detected"
  2232  			logFn = log.Warn
  2233  		}
  2234  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  2235  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  2236  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2237  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  2238  		blockReorgMeter.Mark(1)
  2239  	} else if len(newChain) > 0 {
  2240  		// A special case happens in the post-merge stage: the current head is
  2241  		// an ancestor of the new head, but the two blocks are not consecutive
  2242  		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number(), "hash", newChain[0].Hash())
  2243  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2244  	} else {
  2245  		// len(newChain) == 0 && len(oldChain) > 0
  2246  		// rewind the canonical chain to a lower point.
  2247  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
  2248  	}
  2249  	// Insert the new chain (except the head block) in reverse order, taking
  2250  	// care of the proper incremental ordering.
  2251  	for i := len(newChain) - 1; i >= 1; i-- {
  2252  		// Insert the block in the canonical way, re-writing history
  2253  		bc.writeHeadBlock(newChain[i])
  2254  
  2255  		// Collect the new added transactions.
  2256  		for _, tx := range newChain[i].Transactions() {
  2257  			addedTxs = append(addedTxs, tx.Hash())
  2258  		}
  2259  	}
  2260  
  2261  	// Delete useless indexes right now, which include the non-canonical
  2262  	// transaction indexes and the canonical chain indexes above the new head.
  2263  	indexesBatch := bc.db.NewBatch()
  2264  	for _, tx := range types.HashDifference(deletedTxs, addedTxs) {
  2265  		rawdb.DeleteTxLookupEntry(indexesBatch, tx)
  2266  	}
  2267  
  2268  	// Delete all hash markers that are not part of the new canonical chain.
  2269  	// Because the reorg function does not handle the new chain head, all hash
  2270  	// markers greater than or equal to the new chain head should be deleted.
  2271  	number := commonBlock.NumberU64()
  2272  	if len(newChain) > 1 {
  2273  		number = newChain[1].NumberU64()
  2274  	}
  2275  	for i := number + 1; ; i++ {
  2276  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  2277  		if hash == (common.Hash{}) {
  2278  			break
  2279  		}
  2280  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  2281  	}
  2282  	if err := indexesBatch.Write(); err != nil {
  2283  		log.Crit("Failed to delete useless indexes", "err", err)
  2284  	}
  2285  
  2286  	// Send out events for logs from the old canon chain, and 'reborn'
  2287  	// logs from the new canon chain. The number of logs can be very
  2288  	// high, so the events are sent in batches of size around 512.
  2289  
  2290  	// Deleted logs + blocks:
  2291  	var deletedLogs []*types.Log
  2292  	for i := len(oldChain) - 1; i >= 0; i-- {
  2293  		// Also send event for blocks removed from the canon chain.
  2294  		bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  2295  
  2296  		// Collect deleted logs for notification
  2297  		if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 {
  2298  			deletedLogs = append(deletedLogs, logs...)
  2299  		}
  2300  		if len(deletedLogs) > 512 {
  2301  			bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2302  			deletedLogs = nil
  2303  		}
  2304  	}
  2305  	if len(deletedLogs) > 0 {
  2306  		bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2307  	}
  2308  
  2309  	// New logs:
  2310  	var rebirthLogs []*types.Log
  2311  	for i := len(newChain) - 1; i >= 1; i-- {
  2312  		if logs := bc.collectLogs(newChain[i], false); len(logs) > 0 {
  2313  			rebirthLogs = append(rebirthLogs, logs...)
  2314  		}
  2315  		if len(rebirthLogs) > 512 {
  2316  			bc.logsFeed.Send(rebirthLogs)
  2317  			rebirthLogs = nil
  2318  		}
  2319  	}
  2320  	if len(rebirthLogs) > 0 {
  2321  		bc.logsFeed.Send(rebirthLogs)
  2322  	}
  2323  	return nil
  2324  }
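
// sendLogsInBatches is a hypothetical generalization of the batching pattern
// used in reorg above: deleted and reborn logs are flushed to their feeds in
// chunks of roughly 512 entries so that individual sends stay bounded.
func sendLogsInBatches(logs []*types.Log, send func([]*types.Log)) {
	const batchSize = 512
	for len(logs) > batchSize {
		send(logs[:batchSize])
		logs = logs[batchSize:]
	}
	if len(logs) > 0 {
		send(logs)
	}
}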
  2325  
  2326  // InsertBlockWithoutSetHead executes the block, runs the necessary verification
  2327  // upon it and then persists the block and the associated state into the database.
  2328  // The key difference from InsertChain is that it won't update the canonical
  2329  // chain. It relies on an additional SetCanonical call to finalize the entire
  2330  // procedure.
  2331  func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
  2332  	if !bc.chainmu.TryLock() {
  2333  		return errChainStopped
  2334  	}
  2335  	defer bc.chainmu.Unlock()
  2336  
  2337  	_, err := bc.insertChain(types.Blocks{block}, false)
  2338  	return err
  2339  }
  2340  
  2341  // SetCanonical rewinds the chain to set the new head block as the specified
  2342  // block. It's possible that the state of the new head is missing, and it will
  2343  // be recovered in this function as well.
  2344  func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
  2345  	if !bc.chainmu.TryLock() {
  2346  		return common.Hash{}, errChainStopped
  2347  	}
  2348  	defer bc.chainmu.Unlock()
  2349  
  2350  	// Re-execute the reorged chain in case the head state is missing.
  2351  	if !bc.HasState(head.Root()) {
  2352  		if latestValidHash, err := bc.recoverAncestors(head); err != nil {
  2353  			return latestValidHash, err
  2354  		}
  2355  		log.Info("Recovered head state", "number", head.Number(), "hash", head.Hash())
  2356  	}
  2357  	// Run the reorg if necessary and set the given block as new head.
  2358  	start := time.Now()
  2359  	if head.ParentHash() != bc.CurrentBlock().Hash() {
  2360  		if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
  2361  			return common.Hash{}, err
  2362  		}
  2363  	}
  2364  	bc.writeHeadBlock(head)
  2365  
  2366  	// Emit events
  2367  	logs := bc.collectLogs(head, false)
  2368  	bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
  2369  	if len(logs) > 0 {
  2370  		bc.logsFeed.Send(logs)
  2371  	}
  2372  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
  2373  
  2374  	context := []interface{}{
  2375  		"number", head.Number(),
  2376  		"hash", head.Hash(),
  2377  		"root", head.Root(),
  2378  		"elapsed", time.Since(start),
  2379  	}
  2380  	if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
  2381  		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
  2382  	}
  2383  	log.Info("Chain head was updated", context...)
  2384  	return head.Hash(), nil
  2385  }
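
        // Taken together, InsertBlockWithoutSetHead and SetCanonical split import
        // into a "store and validate" phase and a "make canonical" phase. A hedged
        // sketch of a caller (the driver code around it is illustrative, not the
        // actual call sites in this codebase):
        //
        //	if err := bc.InsertBlockWithoutSetHead(block); err != nil {
        //		return err // execution or validation failed
        //	}
        //	// ...later, once this block is chosen as the new head:
        //	if _, err := bc.SetCanonical(block); err != nil {
        //		return err // reorg or head update failed
        //	}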
  2386  
  2387  func (bc *BlockChain) updateFutureBlocks() {
  2388  	futureTimer := time.NewTicker(5 * time.Second)
  2389  	defer futureTimer.Stop()
  2390  	defer bc.wg.Done()
  2391  	for {
  2392  		select {
  2393  		case <-futureTimer.C:
  2394  			bc.procFutureBlocks()
  2395  		case <-bc.quit:
  2396  			return
  2397  		}
  2398  	}
  2399  }
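
        // updateFutureBlocks is presumably started as a bc.wg-tracked goroutine at
        // construction time and stopped by closing bc.quit; a sketch of that
        // lifecycle (the exact call site is an assumption):
        //
        //	bc.wg.Add(1)
        //	go bc.updateFutureBlocks() // drains future blocks every 5s
        //	// ...on shutdown:
        //	close(bc.quit) // the loop returns and the deferred wg.Done() runs
        //	bc.wg.Wait()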
  2400  
  2401  // skipBlock returns 'true' if the block being imported can be skipped over, meaning
  2402  // that the block does not need to be processed but can be considered already fully 'done'.
  2403  func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
  2404  	// We can only ever bypass processing if the only error returned by the validator
  2405  	// is ErrKnownBlock, which means all checks passed, but we already have the block
  2406  	// and state.
  2407  	if !errors.Is(err, ErrKnownBlock) {
  2408  		return false
  2409  	}
  2410  	// If we're not using snapshots, we can skip this, since we have both block
  2411  	// and (trie-) state
  2412  	if bc.snaps == nil {
  2413  		return true
  2414  	}
  2415  	var (
  2416  		header     = it.current() // header can't be nil
  2417  		parentRoot common.Hash
  2418  	)
  2419  	// If we also have the snapshot-state, we can skip the processing.
  2420  	if bc.snaps.Snapshot(header.Root) != nil {
  2421  		return true
  2422  	}
  2423  	// In this case, we have the trie-state but not snapshot-state. If the parent
  2424  	// snapshot-state exists, we need to process this in order to not get a gap
  2425  	// in the snapshot layers.
  2426  	// Resolve parent block
  2427  	if parent := it.previous(); parent != nil {
  2428  		parentRoot = parent.Root
  2429  	} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
  2430  		parentRoot = parent.Root
  2431  	}
  2432  	if parentRoot == (common.Hash{}) {
  2433  		return false // Theoretically impossible case
  2434  	}
  2435  	// Parent is also missing snapshot: we can skip this. Otherwise process.
  2436  	if bc.snaps.Snapshot(parentRoot) == nil {
  2437  		return true
  2438  	}
  2439  	return false
  2440  }
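
        // The decision above reduces to a small matrix (restating the logic, not
        // extending it):
        //
        //	error != ErrKnownBlock                      -> process (return false)
        //	ErrKnownBlock, snapshots disabled           -> skip
        //	ErrKnownBlock, snapshot exists for header   -> skip
        //	ErrKnownBlock, parent snapshot also missing -> skip
        //	ErrKnownBlock, only parent snapshot exists  -> process (avoid a layer gap)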
  2441  
  2442  // indexBlocks reindexes or unindexes transactions depending on user configuration.
  2443  func (bc *BlockChain) indexBlocks(tail *uint64, head uint64, done chan struct{}) {
  2444  	defer func() { close(done) }()
  2445  
  2446  	// If head is 0, it means the chain is just initialized and no blocks are inserted,
  2447  	// so there is no need to index anything.
  2448  	if head == 0 {
  2449  		return
  2450  	}
  2451  
  2452  	// The tail flag is non-existent, meaning the node is just initialized
  2453  	// and no blocks (possibly including ones in the ancient store) are indexed yet.
  2454  	if tail == nil {
  2455  		from := uint64(0)
  2456  		if bc.txLookupLimit != 0 && head >= bc.txLookupLimit {
  2457  			from = head - bc.txLookupLimit + 1
  2458  		}
  2459  		rawdb.IndexTransactions(bc.db, from, head+1, bc.quit)
  2460  		return
  2461  	}
  2462  	// The tail flag exists, but the whole chain is required to be indexed.
  2463  	if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
  2464  		if *tail > 0 {
  2465  			// This can happen when the chain is rewound to a historical point
  2466  			// below the index tail; cap the indexing target to the new head
  2467  			// to avoid reading non-existent block bodies.
  2468  			end := *tail
  2469  			if end > head+1 {
  2470  				end = head + 1
  2471  			}
  2472  			rawdb.IndexTransactions(bc.db, 0, end, bc.quit)
  2473  		}
  2474  		return
  2475  	}
  2476  	// Update the transaction index to the new chain state
  2477  	if head-bc.txLookupLimit+1 < *tail {
  2478  		// Reindex a part of missing indices and rewind index tail to HEAD-limit
  2479  		rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit)
  2480  	} else {
  2481  		// Unindex a part of stale indices and forward index tail to HEAD-limit
  2482  		rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit)
  2483  	}
  2484  }
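
        // A worked example of the windows above (numbers are illustrative): with
        // head = 1000 and txLookupLimit = 128, the retained range is
        // [head-txLookupLimit+1, head] = [873, 1000].
        //
        //	head, limit := uint64(1000), uint64(128)
        //	from := head - limit + 1 // 873
        //	// tail == nil:          IndexTransactions(db, 873, 1001, quit)
        //	// *tail == 900 (> 873): IndexTransactions(db, 873, 900, quit)   // backfill
        //	// *tail == 800 (< 873): UnindexTransactions(db, 800, 873, quit) // prune
        //	_ = from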
  2485  
  2486  // maintainTxIndex is responsible for the construction and deletion of the
  2487  // transaction index.
  2488  //
  2489  // The user can use the `txlookuplimit` flag to specify a "recentness" block,
  2490  // below which ancient tx indices get deleted. If `txlookuplimit` is 0, all
  2491  // tx indices will be retained.
  2492  //
  2493  // The user can adjust the txlookuplimit value for each launch after sync;
  2494  // Geth will automatically construct the missing indices or delete the extra
  2495  // indices.
  2496  func (bc *BlockChain) maintainTxIndex() {
  2497  	defer bc.wg.Done()
  2498  
  2499  	// Listen to chain events and manipulate the transaction indexes.
  2500  	var (
  2501  		done   chan struct{}                  // Non-nil if background unindexing or reindexing routine is active.
  2502  		headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
  2503  	)
  2504  	sub := bc.SubscribeChainHeadEvent(headCh)
  2505  	if sub == nil {
  2506  		return
  2507  	}
  2508  	defer sub.Unsubscribe()
  2509  	log.Info("Initialized transaction indexer", "limit", bc.TxLookupLimit())
  2510  
  2511  	// Launch the initial processing if the chain is not empty. This step is
  2512  	// useful in scenarios where the chain makes no progress and the indexer
  2513  	// is otherwise never triggered.
  2514  	if head := rawdb.ReadHeadBlock(bc.db); head != nil {
  2515  		done = make(chan struct{})
  2516  		go bc.indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.NumberU64(), done)
  2517  	}
  2518  
  2519  	for {
  2520  		select {
  2521  		case head := <-headCh:
  2522  			if done == nil {
  2523  				done = make(chan struct{})
  2524  				go bc.indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
  2525  			}
  2526  		case <-done:
  2527  			done = nil
  2528  		case <-bc.quit:
  2529  			if done != nil {
  2530  				log.Info("Waiting background transaction indexer to exit")
  2531  				<-done
  2532  			}
  2533  			return
  2534  		}
  2535  	}
  2536  }
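
        // The 'done' channel above acts as a single-flight guard: at most one
        // indexBlocks run is in flight, further head events are ignored while it
        // runs, and shutdown waits for it. The same pattern in isolation (trigger,
        // work and quit are hypothetical stand-ins):
        //
        //	var done chan struct{}
        //	for {
        //		select {
        //		case <-trigger:
        //			if done == nil { // only start when idle
        //				done = make(chan struct{})
        //				go func() { defer close(done); work() }()
        //			}
        //		case <-done: // never fires while done is nil
        //			done = nil
        //		case <-quit:
        //			if done != nil {
        //				<-done // drain the in-flight run first
        //			}
        //			return
        //		}
        //	}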
  2537  
  2538  // reportBlock logs a bad block error.
  2539  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2540  	rawdb.WriteBadBlock(bc.db, block)
  2541  	log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
  2542  }
  2543  
  2544  // summarizeBadBlock returns a string summarizing the bad block and other
  2545  // relevant information.
  2546  func summarizeBadBlock(block *types.Block, receipts []*types.Receipt, config *params.ChainConfig, err error) string {
  2547  	var receiptString string
  2548  	for i, receipt := range receipts {
  2549  		receiptString += fmt.Sprintf("\n  %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x",
  2550  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2551  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2552  	}
  2553  	version, vcs := version.Info()
  2554  	platform := fmt.Sprintf("%s %s %s %s", version, runtime.Version(), runtime.GOARCH, runtime.GOOS)
  2555  	if vcs != "" {
  2556  		vcs = fmt.Sprintf("\nVCS: %s", vcs)
  2557  	}
  2558  	return fmt.Sprintf(`
  2559  ########## BAD BLOCK #########
  2560  Block: %v (%#x)
  2561  Error: %v
  2562  Platform: %v%v
  2563  Chain config: %#v
  2564  Receipts: %v
  2565  ##############################
  2566  `, block.Number(), block.Hash(), err, platform, vcs, config, receiptString)
  2567  }
  2568  
  2569  // InsertHeaderChain attempts to insert the given header chain into the local
  2570  // chain, possibly creating a reorg. If an error is returned, it will return the
  2571  // index number of the failing header as well as an error describing what went wrong.
  2572  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
  2573  	if len(chain) == 0 {
  2574  		return 0, nil
  2575  	}
  2576  	start := time.Now()
  2577  	if i, err := bc.hc.ValidateHeaderChain(chain); err != nil {
  2578  		return i, err
  2579  	}
  2580  
  2581  	if !bc.chainmu.TryLock() {
  2582  		return 0, errChainStopped
  2583  	}
  2584  	defer bc.chainmu.Unlock()
  2585  	_, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
  2586  	return 0, err
  2587  }
  2588  
  2589  // SetBlockValidatorAndProcessorForTesting sets the current validator and processor.
  2590  // This method can be used to force an invalid blockchain to be verified for tests.
  2591  // This method is unsafe and should only be used before block import starts.
  2592  func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Processor) {
  2593  	bc.validator = v
  2594  	bc.processor = p
  2595  }
  2596  
  2597  // SetTrieFlushInterval configures how often in-memory tries are persisted to disk.
  2598  // The interval is in terms of block processing time, not wall clock.
  2599  // It is thread-safe and can be called repeatedly without side effects.
  2600  func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
  2601  	bc.flushInterval.Store(int64(interval))
  2602  }
  2603  
  2604  // GetTrieFlushInterval gets the in-memory trie flush interval.
  2605  func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
  2606  	return time.Duration(bc.flushInterval.Load())
  2607  }
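
        // Storing the interval as nanoseconds in what is presumably an atomic.Int64
        // field is what makes this getter/setter pair safe for concurrent use; the
        // same pattern in isolation:
        //
        //	var flushInterval atomic.Int64 // from sync/atomic
        //	flushInterval.Store(int64(5 * time.Minute))
        //	d := time.Duration(flushInterval.Load()) // 5m0s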