github.com/cgcardona/r-subnet-evm@v0.1.5/core/blockchain.go (about)

     1  // (c) 2019-2020, Ava Labs, Inc.
     2  //
     3  // This file is a derived work, based on the go-ethereum library whose original
     4  // notices appear below.
     5  //
     6  // It is distributed under a license compatible with the licensing terms of the
     7  // original code from which it is derived.
     8  //
     9  // Much love to the original authors for their work.
    10  // **********
    11  // Copyright 2014 The go-ethereum Authors
    12  // This file is part of the go-ethereum library.
    13  //
    14  // The go-ethereum library is free software: you can redistribute it and/or modify
    15  // it under the terms of the GNU Lesser General Public License as published by
    16  // the Free Software Foundation, either version 3 of the License, or
    17  // (at your option) any later version.
    18  //
    19  // The go-ethereum library is distributed in the hope that it will be useful,
    20  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    21  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    22  // GNU Lesser General Public License for more details.
    23  //
    24  // You should have received a copy of the GNU Lesser General Public License
    25  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    26  
    27  // Package core implements the Ethereum consensus protocol.
    28  package core
    29  
    30  import (
    31  	"context"
    32  	"errors"
    33  	"fmt"
    34  	"io"
    35  	"math/big"
    36  	"runtime"
    37  	"sync"
    38  	"sync/atomic"
    39  	"time"
    40  
    41  	"github.com/cgcardona/r-subnet-evm/commontype"
    42  	"github.com/cgcardona/r-subnet-evm/consensus"
    43  	"github.com/cgcardona/r-subnet-evm/core/rawdb"
    44  	"github.com/cgcardona/r-subnet-evm/core/state"
    45  	"github.com/cgcardona/r-subnet-evm/core/state/snapshot"
    46  	"github.com/cgcardona/r-subnet-evm/core/types"
    47  	"github.com/cgcardona/r-subnet-evm/core/vm"
    48  	"github.com/cgcardona/r-subnet-evm/ethdb"
    49  	"github.com/cgcardona/r-subnet-evm/metrics"
    50  	"github.com/cgcardona/r-subnet-evm/params"
    51  	"github.com/cgcardona/r-subnet-evm/trie"
    52  	"github.com/ethereum/go-ethereum/common"
    53  	"github.com/ethereum/go-ethereum/event"
    54  	"github.com/ethereum/go-ethereum/log"
    55  	lru "github.com/hashicorp/golang-lru"
    56  )
    57  
    58  var (
    59  	accountReadTimer         = metrics.NewRegisteredCounter("chain/account/reads", nil)
    60  	accountHashTimer         = metrics.NewRegisteredCounter("chain/account/hashes", nil)
    61  	accountUpdateTimer       = metrics.NewRegisteredCounter("chain/account/updates", nil)
    62  	accountCommitTimer       = metrics.NewRegisteredCounter("chain/account/commits", nil)
    63  	storageReadTimer         = metrics.NewRegisteredCounter("chain/storage/reads", nil)
    64  	storageHashTimer         = metrics.NewRegisteredCounter("chain/storage/hashes", nil)
    65  	storageUpdateTimer       = metrics.NewRegisteredCounter("chain/storage/updates", nil)
    66  	storageCommitTimer       = metrics.NewRegisteredCounter("chain/storage/commits", nil)
    67  	snapshotAccountReadTimer = metrics.NewRegisteredCounter("chain/snapshot/account/reads", nil)
    68  	snapshotStorageReadTimer = metrics.NewRegisteredCounter("chain/snapshot/storage/reads", nil)
    69  	snapshotCommitTimer      = metrics.NewRegisteredCounter("chain/snapshot/commits", nil)
    70  	triedbCommitTimer        = metrics.NewRegisteredCounter("chain/triedb/commits", nil)
    71  
    72  	blockInsertTimer            = metrics.NewRegisteredCounter("chain/block/inserts", nil)
    73  	blockInsertCount            = metrics.NewRegisteredCounter("chain/block/inserts/count", nil)
    74  	blockContentValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/content", nil)
    75  	blockStateInitTimer         = metrics.NewRegisteredCounter("chain/block/inits/state", nil)
    76  	blockExecutionTimer         = metrics.NewRegisteredCounter("chain/block/executions", nil)
    77  	blockTrieOpsTimer           = metrics.NewRegisteredCounter("chain/block/trie", nil)
    78  	blockStateValidationTimer   = metrics.NewRegisteredCounter("chain/block/validations/state", nil)
    79  	blockWriteTimer             = metrics.NewRegisteredCounter("chain/block/writes", nil)
    80  
    81  	acceptorQueueGauge            = metrics.NewRegisteredGauge("chain/acceptor/queue/size", nil)
    82  	acceptorWorkTimer             = metrics.NewRegisteredCounter("chain/acceptor/work", nil)
    83  	acceptorWorkCount             = metrics.NewRegisteredCounter("chain/acceptor/work/count", nil)
    84  	lastAcceptedBlockBaseFeeGauge = metrics.NewRegisteredGauge("chain/block/fee/basefee", nil)
    85  	blockTotalFeesGauge           = metrics.NewRegisteredGauge("chain/block/fee/total", nil)
    86  	processedBlockGasUsedCounter  = metrics.NewRegisteredCounter("chain/block/gas/used/processed", nil)
    87  	acceptedBlockGasUsedCounter   = metrics.NewRegisteredCounter("chain/block/gas/used/accepted", nil)
    88  	badBlockCounter               = metrics.NewRegisteredCounter("chain/block/bad/count", nil)
    89  
    90  	txUnindexTimer      = metrics.NewRegisteredCounter("chain/txs/unindex", nil)
    91  	acceptedTxsCounter  = metrics.NewRegisteredCounter("chain/txs/accepted", nil)
    92  	processedTxsCounter = metrics.NewRegisteredCounter("chain/txs/processed", nil)
    93  
    94  	acceptedLogsCounter  = metrics.NewRegisteredCounter("chain/logs/accepted", nil)
    95  	processedLogsCounter = metrics.NewRegisteredCounter("chain/logs/processed", nil)
    96  
    97  	ErrRefuseToCorruptArchiver = errors.New("node has operated with pruning disabled, shutting down to prevent missing tries")
    98  
    99  	errFutureBlockUnsupported  = errors.New("future block insertion not supported")
   100  	errCacheConfigNotSpecified = errors.New("must specify cache config")
   101  )
   102  
   103  const (
   104  	bodyCacheLimit           = 256
   105  	blockCacheLimit          = 256
   106  	receiptsCacheLimit       = 32
   107  	txLookupCacheLimit       = 1024
   108  	feeConfigCacheLimit      = 256
   109  	coinbaseConfigCacheLimit = 256
   110  	badBlockLimit            = 10
   111  
   112  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
   113  	//
   114  	// Changelog:
   115  	//
   116  	// - Version 4
   117  	//   The following incompatible database changes were added:
   118  	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
   119  	//   * the `Bloom` field of receipt is deleted
   120  	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
   121  	// - Version 5
   122  	//  The following incompatible database changes were added:
   123  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
   124  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
   125  	//      receipts' corresponding block
   126  	// - Version 6
   127  	//  The following incompatible database changes were added:
   128  	//    * Transaction lookup information stores the corresponding block number instead of block hash
   129  	// - Version 7
   130  	//  The following incompatible database changes were added:
   131  	//    * Use freezer as the ancient database to maintain all ancient data
   132  	// - Version 8
   133  	//  The following incompatible database changes were added:
   134  	//    * New scheme for contract code in order to separate the codes and trie nodes
   135  	BlockChainVersion uint64 = 8
   136  
   137  	// statsReportLimit is the time limit during import and export after which we
   138  	// always print out progress. This avoids the user wondering what's going on.
   139  	statsReportLimit = 8 * time.Second
   140  
   141  	// trieCleanCacheStatsNamespace is the namespace to surface stats from the trie
   142  	// clean cache's underlying fastcache.
   143  	trieCleanCacheStatsNamespace = "trie/memcache/clean/fastcache"
   144  )
   145  
   146  // cacheableFeeConfig encapsulates the fee configuration and the block number at which
   147  // it last changed, so the two can be cached together.
   148  type cacheableFeeConfig struct {
   149  	feeConfig     commontype.FeeConfig
   150  	lastChangedAt *big.Int
   151  }
   152  
   153  // cacheableCoinbaseConfig encapsulates the coinbase address and the allowFeeRecipients flag,
   154  // so the two can be cached together.
   155  type cacheableCoinbaseConfig struct {
   156  	coinbaseAddress    common.Address
   157  	allowFeeRecipients bool
   158  }
   159  
   160  // CacheConfig contains the configuration values for the trie caching/pruning
   161  // that's resident in a blockchain.
   162  type CacheConfig struct {
   163  	TrieCleanLimit                  int           // Memory allowance (MB) to use for caching trie nodes in memory
   164  	TrieCleanJournal                string        // Disk journal for saving clean cache entries.
   165  	TrieCleanRejournal              time.Duration // Time interval to dump clean cache to disk periodically
   166  	TrieDirtyLimit                  int           // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk
   167  	TrieDirtyCommitTarget           int           // Memory limit (MB) to target for the dirties cache before invoking commit
   168  	CommitInterval                  uint64        // Commit the trie every [CommitInterval] blocks.
   169  	Pruning                         bool          // Whether to prune historical trie nodes; false keeps all tries (archive node)
   170  	AcceptorQueueLimit              int           // Blocks to queue before blocking during acceptance
   171  	PopulateMissingTries            *uint64       // If non-nil, sets the starting height for re-generating historical tries.
   172  	PopulateMissingTriesParallelism int           // Number of concurrent readers to use when populating missing tries.
   173  	AllowMissingTries               bool          // Whether to allow an archive node to run with pruning enabled
   174  	SnapshotDelayInit               bool          // Whether to initialize snapshots on startup or wait for external call
   175  	SnapshotLimit                   int           // Memory allowance (MB) to use for caching snapshot entries in memory
   176  	SnapshotAsync                   bool          // Generate snapshot tree async
   177  	SnapshotVerify                  bool          // Verify generated snapshots
   178  	SkipSnapshotRebuild             bool          // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests)
   179  	Preimages                       bool          // Whether to store preimage of trie key to the disk
   180  	AcceptedCacheSize               int           // Depth of accepted headers cache and accepted logs cache at the accepted tip
   181  	TxLookupLimit                   uint64        // Number of recent blocks for which to maintain transaction lookup indices
   182  }
   183  
   184  var DefaultCacheConfig = &CacheConfig{
   185  	TrieCleanLimit:        256,
   186  	TrieDirtyLimit:        256,
   187  	TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB)
   188  	Pruning:               true,
   189  	CommitInterval:        4096,
   190  	AcceptorQueueLimit:    64, // Provides 2 minutes of buffer (2s block target) for a commit delay
   191  	SnapshotLimit:         256,
   192  	AcceptedCacheSize:     32,
   193  }
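
// Illustrative sketch (not part of the original source): deriving a cache
// configuration for an archive-style node from the defaults above. The field
// values here are assumptions chosen for demonstration, not recommended
// production settings.
//
//	archiveCacheConfig := &CacheConfig{
//		TrieCleanLimit:        256,
//		TrieDirtyLimit:        256,
//		TrieDirtyCommitTarget: 20,
//		Pruning:               false, // keep every historical trie (archive mode)
//		CommitInterval:        4096,
//		AcceptorQueueLimit:    64,
//		SnapshotLimit:         256,
//		AcceptedCacheSize:     32,
//	}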
   194  
   195  // BlockChain represents the canonical chain given a database with a genesis
   196  // block. The BlockChain manages chain imports, reverts, and chain
   197  // reorganisations.
   198  //
   199  // Importing blocks into the blockchain happens according to the set of rules
   200  // defined by the two-stage Validator. Blocks are processed using the
   201  // Processor, which executes the included transactions. Validation of the
   202  // resulting state is done in the second stage of the Validator. A failure in
   203  // either stage aborts the import.
   204  //
   205  // The BlockChain also returns blocks from **any** chain included in the
   206  // database as well as blocks on the canonical chain. Note that GetBlock can
   207  // return any block and that block need not be on the canonical chain, whereas
   208  // GetBlockByNumber always refers to the canonical chain.
   209  type BlockChain struct {
   210  	chainConfig *params.ChainConfig // Chain & network configuration
   211  	cacheConfig *CacheConfig        // Cache configuration for pruning
   212  
   213  	db ethdb.Database // Low level persistent database to store final content in
   214  
   215  	snaps *snapshot.Tree // Snapshot tree for fast trie leaf access
   216  
   217  	hc                *HeaderChain
   218  	rmLogsFeed        event.Feed
   219  	chainFeed         event.Feed
   220  	chainSideFeed     event.Feed
   221  	chainHeadFeed     event.Feed
   222  	chainAcceptedFeed event.Feed
   223  	logsFeed          event.Feed
   224  	logsAcceptedFeed  event.Feed
   225  	blockProcFeed     event.Feed
   226  	txAcceptedFeed    event.Feed
   227  	scope             event.SubscriptionScope
   228  	genesisBlock      *types.Block
   229  
   230  	// This mutex synchronizes chain write operations.
   231  	// Readers don't need to take it, they can just read the database.
   232  	chainmu sync.RWMutex
   233  
   234  	currentBlock atomic.Value // Current head of the block chain
   235  
   236  	stateCache          state.Database // State database to reuse between imports (contains state cache)
   237  	stateManager        TrieWriter
   238  	bodyCache           *lru.Cache // Cache for the most recent block bodies
   239  	receiptsCache       *lru.Cache // Cache for the most recent receipts per block
   240  	blockCache          *lru.Cache // Cache for the most recent entire blocks
   241  	txLookupCache       *lru.Cache // Cache for the most recent transaction lookup data.
   242  	feeConfigCache      *lru.Cache // Cache for the most recent feeConfig lookup data.
   243  	coinbaseConfigCache *lru.Cache // Cache for the most recent coinbaseConfig lookup data.
   244  
   245  	running int32 // 0 if chain is running, 1 when stopped
   246  
   247  	engine     consensus.Engine
   248  	validator  Validator  // Block and state validator interface
   249  	prefetcher Prefetcher // Block state prefetcher interface
   250  	processor  Processor  // Block transaction processor interface
   251  	vmConfig   vm.Config
   252  
   253  	badBlocks *lru.Cache // Bad block cache
   254  
   255  	lastAccepted *types.Block // Prevents reorgs past this height
   256  
   257  	senderCacher *TxSenderCacher
   258  
   259  	// [acceptorQueue] is a processing queue for the Acceptor. This is
   260  	// different from [chainAcceptedFeed], which is sent an event after an accepted
   261  	// block is processed (after each loop of the accepted worker). If there is a
   262  	// clean shutdown, all items inserted into the [acceptorQueue] will be processed.
   263  	acceptorQueue chan *types.Block
   264  
   265  	// [acceptorClosingLock] and [acceptorClosed] are used
   266  	// to synchronize the closing of the [acceptorQueue] channel.
   267  	//
   268  	// Because we can't check if a channel is closed without reading from it
   269  	// (which we don't want to do as we may remove a processing block), we need
   270  	// to use a second variable to ensure we don't close a closed channel.
   271  	acceptorClosingLock sync.RWMutex
   272  	acceptorClosed      bool
   273  
   274  	// [acceptorWg] is used to wait for the acceptorQueue to clear. This is used
   275  	// during shutdown and in tests.
   276  	acceptorWg sync.WaitGroup
   277  
   278  	// [wg] is used to wait for the async blockchain processes to finish on shutdown.
   279  	wg sync.WaitGroup
   280  
   281  	// quit channel is used to listen for when the blockchain is shut down to close
   282  	// async processes.
   283  	// WaitGroups are used to ensure that async processes have finished during shutdown.
   284  	quit chan struct{}
   285  
   286  	// [acceptorTip] is the last block processed by the acceptor. This is
   287  	// returned as the LastAcceptedBlock() to ensure clients get only fully
   288  	// processed blocks. This may be equal to [lastAccepted].
   289  	acceptorTip     *types.Block
   290  	acceptorTipLock sync.Mutex
   291  
   292  	// [flattenLock] prevents the [acceptor] from flattening snapshots while
   293  	// a block is being verified.
   294  	flattenLock sync.Mutex
   295  
   296  	// [acceptedLogsCache] stores recently accepted logs to improve the performance of eth_getLogs.
   297  	acceptedLogsCache FIFOCache[common.Hash, [][]*types.Log]
   298  }
   299  
   300  // NewBlockChain returns a fully initialised block chain using information
   301  // available in the database. It initialises the default Ethereum Validator and
   302  // Processor.
   303  func NewBlockChain(
   304  	db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine,
   305  	vmConfig vm.Config, lastAcceptedHash common.Hash,
   306  ) (*BlockChain, error) {
   307  	if cacheConfig == nil {
   308  		return nil, errCacheConfigNotSpecified
   309  	}
   310  	bodyCache, _ := lru.New(bodyCacheLimit)
   311  	receiptsCache, _ := lru.New(receiptsCacheLimit)
   312  	blockCache, _ := lru.New(blockCacheLimit)
   313  	txLookupCache, _ := lru.New(txLookupCacheLimit)
   314  	feeConfigCache, _ := lru.New(feeConfigCacheLimit)
   315  	coinbaseConfigCache, _ := lru.New(coinbaseConfigCacheLimit)
   316  	badBlocks, _ := lru.New(badBlockLimit)
   317  
   318  	bc := &BlockChain{
   319  		chainConfig: chainConfig,
   320  		cacheConfig: cacheConfig,
   321  		db:          db,
   322  		stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
   323  			Cache:       cacheConfig.TrieCleanLimit,
   324  			Journal:     cacheConfig.TrieCleanJournal,
   325  			Preimages:   cacheConfig.Preimages,
   326  			StatsPrefix: trieCleanCacheStatsNamespace,
   327  		}),
   328  		bodyCache:           bodyCache,
   329  		receiptsCache:       receiptsCache,
   330  		blockCache:          blockCache,
   331  		txLookupCache:       txLookupCache,
   332  		feeConfigCache:      feeConfigCache,
   333  		coinbaseConfigCache: coinbaseConfigCache,
   334  		engine:              engine,
   335  		vmConfig:            vmConfig,
   336  		badBlocks:           badBlocks,
   337  		senderCacher:        newTxSenderCacher(runtime.NumCPU()),
   338  		acceptorQueue:       make(chan *types.Block, cacheConfig.AcceptorQueueLimit),
   339  		quit:                make(chan struct{}),
   340  		acceptedLogsCache:   NewFIFOCache[common.Hash, [][]*types.Log](cacheConfig.AcceptedCacheSize),
   341  	}
   342  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   343  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   344  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   345  
   346  	var err error
   347  	bc.hc, err = NewHeaderChain(db, chainConfig, cacheConfig, engine)
   348  	if err != nil {
   349  		return nil, err
   350  	}
   351  	bc.genesisBlock = bc.GetBlockByNumber(0)
   352  	if bc.genesisBlock == nil {
   353  		return nil, ErrNoGenesis
   354  	}
   355  
   356  	var nilBlock *types.Block
   357  	bc.currentBlock.Store(nilBlock)
   358  
   359  	// Create the state manager
   360  	bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), cacheConfig)
   361  
   362  	// loadLastState writes indices, so we should start the tx indexer after that.
   363  	// Start tx indexer/unindexer here.
   364  	if bc.cacheConfig.TxLookupLimit != 0 {
   365  		bc.wg.Add(1)
   366  		go bc.dispatchTxUnindexer()
   367  	}
   368  
   369  	// Re-generate current block state if it is missing
   370  	if err := bc.loadLastState(lastAcceptedHash); err != nil {
   371  		return nil, err
   372  	}
   373  
   374  	// After loading the last state (and reprocessing if necessary), we are
   375  	// guaranteed that [acceptorTip] is equal to [lastAccepted].
   376  	//
   377  	// It is critical to update this value before performing any state repairs so
   378  	// that all accepted blocks can be considered.
   379  	bc.acceptorTip = bc.lastAccepted
   380  
   381  	// Make sure the state associated with the block is available
   382  	head := bc.CurrentBlock()
   383  	if !bc.HasState(head.Root()) {
   384  		return nil, fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash())
   385  	}
   386  
   387  	if err := bc.protectTrieIndex(); err != nil {
   388  		return nil, err
   389  	}
   390  
   391  	// Populate missing tries if required
   392  	if err := bc.populateMissingTries(); err != nil {
   393  		return nil, fmt.Errorf("could not populate missing tries: %v", err)
   394  	}
   395  
   396  	// If snapshot initialization is delayed for fast sync, skip initializing it here.
   397  	// This assumes that no blocks will be processed until ResetState is called to initialize
   398  	// the state of fast sync.
   399  	if !bc.cacheConfig.SnapshotDelayInit {
   400  		// Load any existing snapshot, regenerating it if loading failed (if not
   401  		// already initialized in recovery)
   402  		bc.initSnapshot(head)
   403  	}
   404  
   405  	// Warm up [hc.acceptedNumberCache] and [acceptedLogsCache]
   406  	bc.warmAcceptedCaches()
   407  
   408  	// Start processing accepted blocks effects in the background
   409  	go bc.startAcceptor()
   410  
   411  	// If periodic cache journal is required, spin it up.
   412  	if bc.cacheConfig.TrieCleanRejournal > 0 && len(bc.cacheConfig.TrieCleanJournal) > 0 {
   413  		log.Info("Starting to save trie clean cache periodically", "journalDir", bc.cacheConfig.TrieCleanJournal, "freq", bc.cacheConfig.TrieCleanRejournal)
   414  
   415  		triedb := bc.stateCache.TrieDB()
   416  		bc.wg.Add(1)
   417  		go func() {
   418  			defer bc.wg.Done()
   419  			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
   420  		}()
   421  	}
   422  
   423  	return bc, nil
   424  }
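
// Illustrative usage sketch (not part of the original source). The database,
// chain configuration, consensus engine, and last accepted hash are assumed
// to be supplied by the caller.
//
//	chain, err := NewBlockChain(
//		db,                 // ethdb.Database
//		DefaultCacheConfig, // must be non-nil (see errCacheConfigNotSpecified)
//		chainConfig,        // *params.ChainConfig
//		engine,             // consensus.Engine
//		vm.Config{},
//		lastAcceptedHash,   // common.Hash{} loads the genesis state
//	)
//	if err != nil {
//		return err
//	}
//	defer chain.Stop()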
   425  
   426  // dispatchTxUnindexer is responsible for pruning the transaction index so that
   427  // only the most recent [TxLookupLimit] blocks remain indexed.
   428  // Invariant: if TxLookupLimit is 0, all tx indices are preserved, meaning this
   429  // function should never be called.
   430  func (bc *BlockChain) dispatchTxUnindexer() {
   431  	defer bc.wg.Done()
   432  	txLookupLimit := bc.cacheConfig.TxLookupLimit
   433  
   434  	// If the user just upgraded to a new version which supports transaction
   435  	// index pruning, write the new tail and remove anything older.
   436  	if rawdb.ReadTxIndexTail(bc.db) == nil {
   437  		rawdb.WriteTxIndexTail(bc.db, 0)
   438  	}
   439  
   440  	// unindexes transactions depending on user configuration
   441  	unindexBlocks := func(tail uint64, head uint64, done chan struct{}) {
   442  		start := time.Now()
   443  		defer func() {
   444  			txUnindexTimer.Inc(time.Since(start).Milliseconds())
   445  			done <- struct{}{}
   446  		}()
   447  
   448  		// Update the transaction index to the new chain state
   449  		if head-txLookupLimit+1 >= tail {
   450  			// Unindex a part of stale indices and forward index tail to HEAD-limit
   451  			rawdb.UnindexTransactions(bc.db, tail, head-txLookupLimit+1, bc.quit)
   452  		}
   453  	}
   454  	// With any reindexing done, start listening to chain events and moving the index window
   455  	var (
   456  		done   chan struct{}              // Non-nil if background unindexing or reindexing routine is active.
   457  		headCh = make(chan ChainEvent, 1) // Buffered to avoid locking up the event feed
   458  	)
   459  	sub := bc.SubscribeChainAcceptedEvent(headCh)
   460  	if sub == nil {
   461  		log.Warn("could not create chain accepted subscription to unindex txs")
   462  		return
   463  	}
   464  	defer sub.Unsubscribe()
   465  
   466  	for {
   467  		select {
   468  		case head := <-headCh:
   469  			headNum := head.Block.NumberU64()
   470  			if headNum < txLookupLimit {
   471  				break
   472  			}
   473  
   474  			if done == nil {
   475  				done = make(chan struct{})
   476  				// Note: tail will not be nil since it is initialized in this function.
   477  				tail := rawdb.ReadTxIndexTail(bc.db)
   478  				go unindexBlocks(*tail, headNum, done)
   479  			}
   480  		case <-done:
   481  			done = nil
   482  		case <-bc.quit:
   483  			if done != nil {
   484  				log.Info("Waiting for background transaction indexer to exit")
   485  				<-done
   486  			}
   487  			return
   488  		}
   489  	}
   490  }
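
// Worked example (added for illustration, not part of the original source):
// with txLookupLimit = 1024, an accepted head at height 5000, and a current
// tail of 0, head-txLookupLimit+1 = 3977 >= 0, so unindexBlocks removes the
// lookup entries for blocks [0, 3977) and advances the tail to 3977, leaving
// exactly the most recent 1024 blocks (3977 through 5000) indexed.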
   491  
   492  // writeBlockAcceptedIndices writes any indices that must be persisted for an accepted block.
   493  // This includes the following:
   494  // - transaction lookup indices
   495  // - updating the acceptor tip index
   496  func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block) error {
   497  	batch := bc.db.NewBatch()
   498  	rawdb.WriteTxLookupEntriesByBlock(batch, b)
   499  	if err := rawdb.WriteAcceptorTip(batch, b.Hash()); err != nil {
   500  		return fmt.Errorf("%w: failed to write acceptor tip key", err)
   501  	}
   502  	if err := batch.Write(); err != nil {
   503  		return fmt.Errorf("%w: failed to write tx lookup entries batch", err)
   504  	}
   505  	return nil
   506  }
   507  
   508  // flattenSnapshot attempts to flatten the snapshot diff layer of block [hash] to disk.
   509  func (bc *BlockChain) flattenSnapshot(postAbortWork func() error, hash common.Hash) error {
   510  	// If snapshots are not initialized, perform [postAbortWork] immediately.
   511  	if bc.snaps == nil {
   512  		return postAbortWork()
   513  	}
   514  
   515  	// Abort snapshot generation before pruning anything from trie database
   516  	// (could occur in AcceptTrie)
   517  	bc.snaps.AbortGeneration()
   518  
   519  	// Perform work after snapshot generation is aborted (typically trie updates)
   520  	if err := postAbortWork(); err != nil {
   521  		return err
   522  	}
   523  
   524  	// Ensure we avoid flattening the snapshot while we are processing a block, or
   525  	// block execution will fall back to reading from the trie (which is much
   526  	// slower).
   527  	bc.flattenLock.Lock()
   528  	defer bc.flattenLock.Unlock()
   529  
   530  	// Flatten the entire snap Trie to disk
   531  	//
   532  	// Note: This resumes snapshot generation.
   533  	return bc.snaps.Flatten(hash)
   534  }
   535  
   536  // warmAcceptedCaches fetches previously accepted headers and logs from disk to
   537  // pre-populate [hc.acceptedNumberCache] and [acceptedLogsCache].
   538  func (bc *BlockChain) warmAcceptedCaches() {
   539  	var (
   540  		startTime       = time.Now()
   541  		lastAccepted    = bc.LastAcceptedBlock().NumberU64()
   542  		startIndex      = uint64(1)
   543  		targetCacheSize = uint64(bc.cacheConfig.AcceptedCacheSize)
   544  	)
   545  	if targetCacheSize == 0 {
   546  		log.Info("Not warming accepted cache because disabled")
   547  		return
   548  	}
   549  	if lastAccepted < startIndex {
   550  		// This could occur if we haven't accepted any blocks yet
   551  		log.Info("Not warming accepted cache because there are no accepted blocks")
   552  		return
   553  	}
   554  	cacheDiff := targetCacheSize - 1 // last accepted lookback is inclusive, so we reduce size by 1
   555  	if cacheDiff < lastAccepted {
   556  		startIndex = lastAccepted - cacheDiff
   557  	}
   558  	for i := startIndex; i <= lastAccepted; i++ {
   559  		header := bc.GetHeaderByNumber(i)
   560  		if header == nil {
   561  			// This could happen if a node state-synced
   562  			log.Info("Exiting accepted cache warming early because header is nil", "height", i, "t", time.Since(startTime))
   563  			break
   564  		}
   565  		bc.hc.acceptedNumberCache.Put(header.Number.Uint64(), header)
   566  		bc.acceptedLogsCache.Put(header.Hash(), rawdb.ReadLogs(bc.db, header.Hash(), header.Number.Uint64()))
   567  	}
   568  	log.Info("Warmed accepted caches", "start", startIndex, "end", lastAccepted, "t", time.Since(startTime))
   569  }
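
// Worked example (added for illustration, not part of the original source):
// with AcceptedCacheSize = 32 and lastAccepted = 100, cacheDiff = 31 and
// startIndex = 100 - 31 = 69, so the loop warms headers and logs for the 32
// blocks at heights 69 through 100 inclusive.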
   570  
   571  // startAcceptor starts processing items on the [acceptorQueue]. It exits once
   572  // the [acceptorQueue] channel has been closed (see [stopAcceptor]).
   573  func (bc *BlockChain) startAcceptor() {
   574  	log.Info("Starting Acceptor", "queue length", bc.cacheConfig.AcceptorQueueLimit)
   575  
   576  	for next := range bc.acceptorQueue {
   577  		start := time.Now()
   578  		acceptorQueueGauge.Dec(1)
   579  
   580  		if err := bc.flattenSnapshot(func() error {
   581  			return bc.stateManager.AcceptTrie(next)
   582  		}, next.Hash()); err != nil {
   583  			log.Crit("unable to flatten snapshot from acceptor", "blockHash", next.Hash(), "err", err)
   584  		}
   585  
   586  		// Update last processed and transaction lookup index
   587  		if err := bc.writeBlockAcceptedIndices(next); err != nil {
   588  			log.Crit("failed to write accepted block effects", "err", err)
   589  		}
   590  
   591  		// Ensure [hc.acceptedNumberCache] and [acceptedLogsCache] have latest content
   592  		bc.hc.acceptedNumberCache.Put(next.NumberU64(), next.Header())
   593  		logs := rawdb.ReadLogs(bc.db, next.Hash(), next.NumberU64())
   594  		bc.acceptedLogsCache.Put(next.Hash(), logs)
   595  
   596  		// Update accepted feeds
   597  		flattenedLogs := types.FlattenLogs(logs)
   598  		bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: flattenedLogs})
   599  		if len(flattenedLogs) > 0 {
   600  			bc.logsAcceptedFeed.Send(flattenedLogs)
   601  		}
   602  		if len(next.Transactions()) != 0 {
   603  			bc.txAcceptedFeed.Send(NewTxsEvent{next.Transactions()})
   604  		}
   605  
   606  		bc.acceptorTipLock.Lock()
   607  		bc.acceptorTip = next
   608  		bc.acceptorTipLock.Unlock()
   609  		bc.acceptorWg.Done()
   610  
   611  		acceptorWorkTimer.Inc(time.Since(start).Milliseconds())
   612  		acceptorWorkCount.Inc(1)
   613  		// Note: in contrast to most accepted metrics, we increment the accepted log metrics in the acceptor queue because
   614  		// the logs are already processed in the acceptor queue.
   615  		acceptedLogsCounter.Inc(int64(len(logs)))
   616  	}
   617  }
   618  
   619  // addAcceptorQueue adds a new *types.Block to the [acceptorQueue]. This will
   620  // block if there are [AcceptorQueueLimit] items in [acceptorQueue].
   621  func (bc *BlockChain) addAcceptorQueue(b *types.Block) {
   622  	// We only acquire a read lock here because it is ok to add items to the
   623  	// [acceptorQueue] concurrently.
   624  	bc.acceptorClosingLock.RLock()
   625  	defer bc.acceptorClosingLock.RUnlock()
   626  
   627  	if bc.acceptorClosed {
   628  		return
   629  	}
   630  
   631  	acceptorQueueGauge.Inc(1)
   632  	bc.acceptorWg.Add(1)
   633  	bc.acceptorQueue <- b
   634  }
   635  
   636  // DrainAcceptorQueue blocks until all items in [acceptorQueue] have been
   637  // processed.
   638  func (bc *BlockChain) DrainAcceptorQueue() {
   639  	bc.acceptorClosingLock.RLock()
   640  	defer bc.acceptorClosingLock.RUnlock()
   641  
   642  	if bc.acceptorClosed {
   643  		return
   644  	}
   645  
   646  	bc.acceptorWg.Wait()
   647  }
   648  
   649  // stopAcceptor sends a signal to the Acceptor to stop processing accepted
   650  // blocks. The Acceptor will exit once all items in [acceptorQueue] have been
   651  // processed.
   652  func (bc *BlockChain) stopAcceptor() {
   653  	bc.acceptorClosingLock.Lock()
   654  	defer bc.acceptorClosingLock.Unlock()
   655  
   656  	// If [acceptorClosed] is already true, we should just return here instead
   657  	// of attempting to close [acceptorQueue] more than once (which would cause
   658  	// a panic).
   659  	//
   660  	// This typically happens when a test calls [stopAcceptor] directly (prior to
   661  	// shutdown) and then [stopAcceptor] is called again in shutdown.
   662  	if bc.acceptorClosed {
   663  		return
   664  	}
   665  
   666  	// Although nothing should be added to [acceptorQueue] after
   667  	// [acceptorClosed] is updated, we close the channel so the Acceptor
   668  	// goroutine exits.
   669  	bc.acceptorWg.Wait()
   670  	bc.acceptorClosed = true
   671  	close(bc.acceptorQueue)
   672  }
   673  
   674  func (bc *BlockChain) InitializeSnapshots() {
   675  	bc.chainmu.Lock()
   676  	defer bc.chainmu.Unlock()
   677  
   678  	head := bc.CurrentBlock()
   679  	bc.initSnapshot(head)
   680  }
   681  
   682  // SenderCacher returns the *TxSenderCacher used within the core package.
   683  func (bc *BlockChain) SenderCacher() *TxSenderCacher {
   684  	return bc.senderCacher
   685  }
   686  
   687  // loadLastState loads the last known chain state from the database. This method
   688  // assumes that the chain manager mutex is held.
   689  func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error {
   690  	// Initialize genesis state
   691  	if lastAcceptedHash == (common.Hash{}) {
   692  		return bc.loadGenesisState()
   693  	}
   694  
   695  	// Restore the last known head block
   696  	head := rawdb.ReadHeadBlockHash(bc.db)
   697  	if head == (common.Hash{}) {
   698  		return errors.New("could not read head block hash")
   699  	}
   700  	// Make sure the entire head block is available
   701  	currentBlock := bc.GetBlockByHash(head)
   702  	if currentBlock == nil {
   703  		return fmt.Errorf("could not load head block %s", head.Hex())
   704  	}
   705  	// Everything seems to be fine, set as the head block
   706  	bc.currentBlock.Store(currentBlock)
   707  
   708  	// Restore the last known head header
   709  	currentHeader := currentBlock.Header()
   710  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   711  		if header := bc.GetHeaderByHash(head); header != nil {
   712  			currentHeader = header
   713  		}
   714  	}
   715  	bc.hc.SetCurrentHeader(currentHeader)
   716  
   717  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
   718  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
   719  
   720  	// Set the last accepted block and perform a re-org if necessary.
   721  	bc.lastAccepted = bc.GetBlockByHash(lastAcceptedHash)
   722  	if bc.lastAccepted == nil {
   723  		return fmt.Errorf("could not load last accepted block %s", lastAcceptedHash.Hex())
   724  	}
   725  
   726  	// This ensures that the head block is updated to the last accepted block on startup
   727  	if err := bc.setPreference(bc.lastAccepted); err != nil {
   728  		return fmt.Errorf("failed to set preference to last accepted block while loading last state: %w", err)
   729  	}
   730  
   731  	// reprocessState is necessary to ensure that the last accepted state is
   732  	// available. The state may not be available if it was not committed due
   733  	// to an unclean shutdown.
   734  	return bc.reprocessState(bc.lastAccepted, 2*bc.cacheConfig.CommitInterval)
   735  }
   736  
   737  func (bc *BlockChain) loadGenesisState() error {
   738  	// Prepare the genesis block and reinitialise the chain
   739  	batch := bc.db.NewBatch()
   740  	rawdb.WriteBlock(batch, bc.genesisBlock)
   741  	if err := batch.Write(); err != nil {
   742  		log.Crit("Failed to write genesis block", "err", err)
   743  	}
   744  	bc.writeHeadBlock(bc.genesisBlock)
   745  
   746  	// Last update all in-memory chain markers
   747  	bc.lastAccepted = bc.genesisBlock
   748  	bc.currentBlock.Store(bc.genesisBlock)
   749  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   750  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   751  	return nil
   752  }
   753  
   754  // Export writes the active chain to the given writer.
   755  func (bc *BlockChain) Export(w io.Writer) error {
   756  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   757  }
   758  
   759  // ExportN writes a subset of the active chain to the given writer.
   760  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   761  	return bc.ExportCallback(func(block *types.Block) error {
   762  		return block.EncodeRLP(w)
   763  	}, first, last)
   764  }
   765  
   766  // ExportCallback invokes [callback] for every block from [first] to [last] in order.
   767  func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, first uint64, last uint64) error {
   768  	if first > last {
   769  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   770  	}
   771  	log.Info("Exporting batch of blocks", "count", last-first+1)
   772  
   773  	var (
   774  		parentHash common.Hash
   775  		start      = time.Now()
   776  		reported   = time.Now()
   777  	)
   778  	for nr := first; nr <= last; nr++ {
   779  		block := bc.GetBlockByNumber(nr)
   780  		if block == nil {
   781  			return fmt.Errorf("export failed on #%d: not found", nr)
   782  		}
   783  		if nr > first && block.ParentHash() != parentHash {
   784  			return fmt.Errorf("export failed: chain reorg during export")
   785  		}
   786  		parentHash = block.Hash()
   787  		if err := callback(block); err != nil {
   788  			return err
   789  		}
   790  		if time.Since(reported) >= statsReportLimit {
   791  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   792  			reported = time.Now()
   793  		}
   794  	}
   795  	return nil
   796  }
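
// Illustrative usage sketch (not part of the original source): exporting the
// chain to an RLP file using the standard library os package. The file name
// is an assumption for the example.
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	// Export the whole chain, or a subrange with ExportN(f, first, last).
//	if err := bc.Export(f); err != nil {
//		return err
//	}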
   797  
   798  // writeHeadBlock injects a new head block into the current block chain. This method
   799  // assumes that the block is indeed a true head. It will also reset the head
   800  // header to this same block if it is older or on a different side chain.
   801  //
   802  // Note, this function assumes that the `mu` mutex is held!
   803  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
   804  	// If the block is on a side chain or an unknown one, force other heads onto it too
   805  	// Add the block to the canonical chain number scheme and mark as the head
   806  	batch := bc.db.NewBatch()
   807  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
   808  
   809  	rawdb.WriteHeadBlockHash(batch, block.Hash())
   810  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
   811  
   812  	// Flush the whole batch into the disk, exit the node if failed
   813  	if err := batch.Write(); err != nil {
   814  		log.Crit("Failed to update chain indexes and markers", "err", err)
   815  	}
   816  	// Update all in-memory chain markers in the last step
   817  	bc.hc.SetCurrentHeader(block.Header())
   818  	bc.currentBlock.Store(block)
   819  }
   820  
   821  // ValidateCanonicalChain confirms a canonical chain is well-formed.
   822  func (bc *BlockChain) ValidateCanonicalChain() error {
   823  	// Ensure all accepted blocks are fully processed
   824  	bc.DrainAcceptorQueue()
   825  
   826  	current := bc.CurrentBlock()
   827  	i := 0
   828  	log.Info("Beginning to validate canonical chain", "startBlock", current.NumberU64())
   829  
   830  	for current.Hash() != bc.genesisBlock.Hash() {
   831  		blkByHash := bc.GetBlockByHash(current.Hash())
   832  		if blkByHash == nil {
   833  			return fmt.Errorf("couldn't find block by hash %s at height %d", current.Hash().String(), current.Number())
   834  		}
   835  		if blkByHash.Hash() != current.Hash() {
   836  			return fmt.Errorf("blockByHash returned a block with an unexpected hash: %s, expected: %s", blkByHash.Hash().String(), current.Hash().String())
   837  		}
   838  		blkByNumber := bc.GetBlockByNumber(current.Number().Uint64())
   839  		if blkByNumber == nil {
   840  			return fmt.Errorf("couldn't find block by number at height %d", current.Number())
   841  		}
   842  		if blkByNumber.Hash() != current.Hash() {
   843  			return fmt.Errorf("blockByNumber returned a block with unexpected hash: %s, expected: %s", blkByNumber.Hash().String(), current.Hash().String())
   844  		}
   845  
   846  		hdrByHash := bc.GetHeaderByHash(current.Hash())
   847  		if hdrByHash == nil {
   848  			return fmt.Errorf("couldn't find block header by hash %s at height %d", current.Hash().String(), current.Number())
   849  		}
   850  		if hdrByHash.Hash() != current.Hash() {
   851  			return fmt.Errorf("hdrByHash returned a block header with an unexpected hash: %s, expected: %s", hdrByHash.Hash().String(), current.Hash().String())
   852  		}
   853  		hdrByNumber := bc.GetHeaderByNumber(current.Number().Uint64())
   854  		if hdrByNumber == nil {
   855  			return fmt.Errorf("couldn't find block header by number at height %d", current.Number())
   856  		}
   857  		if hdrByNumber.Hash() != current.Hash() {
   858  			return fmt.Errorf("hdrByNumber returned a block header with unexpected hash: %s, expected: %s", hdrByNumber.Hash().String(), current.Hash().String())
   859  		}
   860  
   861  		txs := current.Body().Transactions
   862  
   863  		// Transactions are only indexed beneath the last accepted block, so we only check
   864  		// that the transactions have been indexed if we are checking below the last accepted
   865  		// block.
   866  		shouldIndexTxs := bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.NumberU64()+bc.cacheConfig.TxLookupLimit
   867  		if current.NumberU64() <= bc.lastAccepted.NumberU64() && shouldIndexTxs {
   868  			// Ensure that all of the transactions have been stored correctly in the canonical
   869  			// chain
   870  			for txIndex, tx := range txs {
   871  				txLookup := bc.GetTransactionLookup(tx.Hash())
   872  				if txLookup == nil {
   873  					return fmt.Errorf("failed to find transaction %s", tx.Hash().String())
   874  				}
   875  				if txLookup.BlockHash != current.Hash() {
   876  					return fmt.Errorf("tx lookup returned with incorrect block hash: %s, expected: %s", txLookup.BlockHash.String(), current.Hash().String())
   877  				}
   878  				if txLookup.BlockIndex != current.Number().Uint64() {
   879  					return fmt.Errorf("tx lookup returned with incorrect block index: %d, expected: %d", txLookup.BlockIndex, current.Number().Uint64())
   880  				}
   881  				if txLookup.Index != uint64(txIndex) {
   882  					return fmt.Errorf("tx lookup returned with incorrect transaction index: %d, expected: %d", txLookup.Index, txIndex)
   883  				}
   884  			}
   885  		}
   886  
   887  		blkReceipts := bc.GetReceiptsByHash(current.Hash())
   888  		if blkReceipts.Len() != len(txs) {
   889  			return fmt.Errorf("found %d transaction receipts, expected %d", blkReceipts.Len(), len(txs))
   890  		}
   891  		for index, txReceipt := range blkReceipts {
   892  			if txReceipt.TxHash != txs[index].Hash() {
   893  				return fmt.Errorf("transaction receipt mismatch, expected %s, but found: %s", txs[index].Hash().String(), txReceipt.TxHash.String())
   894  			}
   895  			if txReceipt.BlockHash != current.Hash() {
   896  				return fmt.Errorf("transaction receipt had block hash %s, but expected %s", txReceipt.BlockHash.String(), current.Hash().String())
   897  			}
   898  			if txReceipt.BlockNumber.Uint64() != current.NumberU64() {
   899  				return fmt.Errorf("transaction receipt had block number %d, but expected %d", txReceipt.BlockNumber.Uint64(), current.NumberU64())
   900  			}
   901  		}
   902  
   903  		i++
   904  		if i%1000 == 0 {
   905  			log.Info("Validate Canonical Chain Update", "totalBlocks", i)
   906  		}
   907  
   908  		parent := bc.GetBlockByHash(current.ParentHash())
   909  		if parent.Hash() != current.ParentHash() {
   910  			return fmt.Errorf("getBlockByHash retrieved parent block with incorrect hash, found %s, expected: %s", parent.Hash().String(), current.ParentHash().String())
   911  		}
   912  		current = parent
   913  	}
   914  
   915  	return nil
   916  }
   917  
   918  // Stop stops the blockchain service. If any imports are currently in progress
   919  // it will abort them by closing the [quit] channel.
   920  func (bc *BlockChain) Stop() {
   921  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   922  		return
   923  	}
   924  
   925  	log.Info("Closing quit channel")
   926  	close(bc.quit)
   927  	// Wait for accepted feed to process all remaining items
   928  	log.Info("Stopping Acceptor")
   929  	start := time.Now()
   930  	bc.stopAcceptor()
   931  	log.Info("Acceptor queue drained", "t", time.Since(start))
   932  
   933  	log.Info("Shutting down state manager")
   934  	start = time.Now()
   935  	if err := bc.stateManager.Shutdown(); err != nil {
   936  		log.Error("Failed to Shutdown state manager", "err", err)
   937  	}
   938  	log.Info("State manager shut down", "t", time.Since(start))
   939  	// Flush the collected preimages to disk
   940  	if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil {
   941  		log.Error("Failed to commit trie preimages", "err", err)
   942  	}
   943  
   944  	// Stop senderCacher's goroutines
   945  	log.Info("Shutting down sender cacher")
   946  	bc.senderCacher.Shutdown()
   947  
   948  	// Unsubscribe all subscriptions registered from blockchain.
   949  	log.Info("Closing scope")
   950  	bc.scope.Close()
   951  
   952  	// Waiting for background processes to complete
   953  	log.Info("Waiting for background processes to complete")
   954  	bc.wg.Wait()
   955  
   956  	log.Info("Blockchain stopped")
   957  }
   958  
   959  // SetPreference attempts to update the head block to be the provided block and
   960  // emits a ChainHeadEvent if successful. This function will handle all reorg
   961  // side effects, if necessary.
   962  //
   963  // Note: This function should ONLY be called on blocks that have already been
   964  // inserted into the chain.
   965  //
   966  // Assumes [bc.chainmu] is not held by the caller.
   967  func (bc *BlockChain) SetPreference(block *types.Block) error {
   968  	bc.chainmu.Lock()
   969  	defer bc.chainmu.Unlock()
   970  
   971  	return bc.setPreference(block)
   972  }
   973  
   974  // setPreference attempts to update the head block to be the provided block and
   975  // emits a ChainHeadEvent if successful. This function will handle all reorg
   976  // side effects, if necessary.
   977  //
   978  // Assumes [bc.chainmu] is held by the caller.
   979  func (bc *BlockChain) setPreference(block *types.Block) error {
   980  	current := bc.CurrentBlock()
   981  
   982  	// Return early if the current block is already the block
   983  	// we are trying to write.
   984  	if current.Hash() == block.Hash() {
   985  		return nil
   986  	}
   987  
   988  	log.Debug("Setting preference", "number", block.Number(), "hash", block.Hash())
   989  
   990  	// writeKnownBlock updates the head block and will handle any reorg side
   991  	// effects automatically.
   992  	if err := bc.writeKnownBlock(block); err != nil {
   993  		return fmt.Errorf("unable to invoke writeKnownBlock: %w", err)
   994  	}
   995  
   996  	// Send a ChainHeadEvent if we end up altering
   997  	// the head block. Many internal async processes rely on
   998  	// receiving these events (i.e. the TxPool).
   999  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1000  	return nil
  1001  }
  1002  
  1003  // LastConsensusAcceptedBlock returns the last block to be marked as accepted.
  1004  // It may or may not yet be processed.
  1005  func (bc *BlockChain) LastConsensusAcceptedBlock() *types.Block {
  1006  	bc.chainmu.Lock()
  1007  	defer bc.chainmu.Unlock()
  1008  
  1009  	return bc.lastAccepted
  1010  }
  1011  
  1012  // LastAcceptedBlock returns the last block to be marked as accepted that has
  1013  // also been fully processed.
  1014  //
  1015  // Note: During initialization, [acceptorTip] is equal to [lastAccepted].
  1016  func (bc *BlockChain) LastAcceptedBlock() *types.Block {
  1017  	bc.acceptorTipLock.Lock()
  1018  	defer bc.acceptorTipLock.Unlock()
  1019  
  1020  	return bc.acceptorTip
  1021  }
  1022  
  1023  // Accept sets a minimum height at which no reorg can pass. Additionally,
  1024  // this function may trigger a reorg if the block being accepted is not in the
  1025  // canonical chain.
  1026  //
  1027  // Assumes [bc.chainmu] is not held by the caller.
  1028  func (bc *BlockChain) Accept(block *types.Block) error {
  1029  	bc.chainmu.Lock()
  1030  	defer bc.chainmu.Unlock()
  1031  
  1032  	// The parent of [block] must be the last accepted block.
  1033  	if bc.lastAccepted.Hash() != block.ParentHash() {
  1034  		return fmt.Errorf(
  1035  			"expected accepted block to have parent %s:%d but got %s:%d",
  1036  			bc.lastAccepted.Hash().Hex(),
  1037  			bc.lastAccepted.NumberU64(),
  1038  			block.ParentHash().Hex(),
  1039  			block.NumberU64()-1,
  1040  		)
  1041  	}
  1042  
  1043  	// If the canonical hash at the block height does not match the block we are
  1044  	// accepting, we need to trigger a reorg.
  1045  	canonical := bc.GetCanonicalHash(block.NumberU64())
  1046  	if canonical != block.Hash() {
  1047  		log.Debug("Accepting block in non-canonical chain", "number", block.Number(), "hash", block.Hash())
  1048  		if err := bc.setPreference(block); err != nil {
  1049  			return fmt.Errorf("could not set block %d:%s as preferred: %w", block.Number(), block.Hash(), err)
  1050  		}
  1051  	}
  1052  
  1053  	// Enqueue block in the acceptor
  1054  	bc.lastAccepted = block
  1055  	bc.addAcceptorQueue(block)
  1056  	acceptedBlockGasUsedCounter.Inc(int64(block.GasUsed()))
  1057  	acceptedTxsCounter.Inc(int64(len(block.Transactions())))
  1058  	if baseFee := block.BaseFee(); baseFee != nil {
  1059  		lastAcceptedBlockBaseFeeGauge.Update(baseFee.Int64())
  1060  	}
  1061  	total, err := TotalFees(block, bc.GetReceiptsByHash(block.Hash()))
  1062  	if err != nil {
  1063  		log.Error("TotalFees error", "err", err)
  1064  	} else {
  1065  		blockTotalFeesGauge.Update(total.Int64())
  1066  	}
  1067  	return nil
  1068  }
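
// Illustrative sketch (not part of the original source) of the insert/accept
// flow a consensus engine is expected to drive, under the constraint that the
// parent of an accepted block must be the last accepted block.
//
//	if err := bc.InsertBlock(block); err != nil { // verify and persist
//		return err
//	}
//	if err := bc.SetPreference(block); err != nil { // optional: mark as head
//		return err
//	}
//	if err := bc.Accept(block); err != nil { // finalize; may trigger a reorg
//		return err
//	}
//	bc.DrainAcceptorQueue() // optionally wait for async accepted-block work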
  1069  
  1070  // TotalFees computes total consumed fees in wei. Block transactions and receipts must be in the same order.
  1071  func TotalFees(block *types.Block, receipts []*types.Receipt) (*big.Int, error) {
  1072  	baseFee := block.BaseFee()
  1073  	feesWei := new(big.Int)
  1074  	if len(block.Transactions()) != len(receipts) {
  1075  		return nil, errors.New("mismatch between total number of transactions and receipts")
  1076  	}
  1077  	for i, tx := range block.Transactions() {
  1078  		var minerFee *big.Int
  1079  		if baseFee == nil {
  1080  			// legacy block, no baseFee
  1081  			minerFee = tx.GasPrice()
  1082  		} else {
  1083  			minerFee = new(big.Int).Add(baseFee, tx.EffectiveGasTipValue(baseFee))
  1084  		}
  1085  		feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee))
  1086  	}
  1087  	return feesWei, nil
  1088  }
  1089  
  1090  // TotalFeesFloat computes total consumed fees in ether. Block transactions and receipts must be in the same order.
  1091  func TotalFeesFloat(block *types.Block, receipts []*types.Receipt) (*big.Float, error) {
  1092  	total, err := TotalFees(block, receipts)
  1093  	if err != nil {
  1094  		return nil, err
  1095  	}
  1096  	return new(big.Float).Quo(new(big.Float).SetInt(total), new(big.Float).SetInt(big.NewInt(params.Ether))), nil
  1097  }
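
// Worked example (added for illustration, not part of the original source):
// for a block with baseFee = 25 gwei containing a single transaction with an
// effective gas tip of 2 gwei and a receipt reporting 21000 gas used, the
// miner fee per gas is 27 gwei, so TotalFees returns 21000 * 27 gwei =
// 5.67e14 wei, and TotalFeesFloat returns 5.67e14 / 1e18 = 0.000567 ether.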
  1098  
  1099  func (bc *BlockChain) Reject(block *types.Block) error {
  1100  	bc.chainmu.Lock()
  1101  	defer bc.chainmu.Unlock()
  1102  
  1103  	// Reject Trie
  1104  	if err := bc.stateManager.RejectTrie(block); err != nil {
  1105  		return fmt.Errorf("unable to reject trie: %w", err)
  1106  	}
  1107  
  1108  	if bc.snaps != nil {
  1109  		if err := bc.snaps.Discard(block.Hash()); err != nil {
  1110  			log.Error("unable to discard snap from rejected block", "block", block.Hash(), "number", block.NumberU64(), "root", block.Root(), "err", err)
  1111  		}
  1112  	}
  1113  
  1114  	// Remove the block since its data is no longer needed
  1115  	batch := bc.db.NewBatch()
  1116  	rawdb.DeleteBlock(batch, block.Hash(), block.NumberU64())
  1117  	if err := batch.Write(); err != nil {
  1118  		return fmt.Errorf("failed to write delete block batch: %w", err)
  1119  	}
  1120  
  1121  	return nil
  1122  }
  1123  
  1124  // writeKnownBlock updates the head block flag with a known block
  1125  // and performs a chain reorg if necessary.
  1126  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1127  	current := bc.CurrentBlock()
  1128  	if block.ParentHash() != current.Hash() {
  1129  		if err := bc.reorg(current, block); err != nil {
  1130  			return err
  1131  		}
  1132  	}
  1133  	bc.writeHeadBlock(block)
  1134  	return nil
  1135  }
  1136  
  1137  // writeCanonicalBlockWithLogs writes the new head [block] and emits events
  1138  // for the new head block.
  1139  func (bc *BlockChain) writeCanonicalBlockWithLogs(block *types.Block, logs []*types.Log) {
  1140  	bc.writeHeadBlock(block)
  1141  	bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
  1142  	if len(logs) > 0 {
  1143  		bc.logsFeed.Send(logs)
  1144  	}
  1145  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1146  }
  1147  
  1148  // newTip returns a boolean indicating if the block should be appended to
  1149  // the canonical chain.
  1150  func (bc *BlockChain) newTip(block *types.Block) bool {
  1151  	return block.ParentHash() == bc.CurrentBlock().Hash()
  1152  }
  1153  
  1154  // writeBlockAndSetHead persists the block and associated state to the database
  1155  // and optimistically updates the canonical chain if [block] extends the current
  1156  // canonical chain.
  1157  // writeBlockAndSetHead expects to be the last verification step during InsertBlock
  1158  // since it creates a reference that will only be cleaned up by Accept/Reject.
  1159  func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
  1160  	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
  1161  		return err
  1162  	}
  1163  
  1164  	// If [block] represents a new tip of the canonical chain, we optimistically add it before
  1165  	// setPreference is called. Otherwise, we consider it a side chain block.
  1166  	if bc.newTip(block) {
  1167  		bc.writeCanonicalBlockWithLogs(block, logs)
  1168  	} else {
  1169  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1170  	}
  1171  
  1172  	return nil
  1173  }
  1174  
  1175  // writeBlockWithState writes the block and all associated state to the database,
  1176  // but it expects the chain mutex to be held.
  1177  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
  1178  	// Irrespective of the canonical status, write the block itself to the database.
  1179  	//
  1180  	// Note all the components of block(hash->number map, header, body, receipts)
  1181  	// should be written atomically. BlockBatch is used for containing all components.
  1182  	blockBatch := bc.db.NewBatch()
  1183  	rawdb.WriteBlock(blockBatch, block)
  1184  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
  1185  	rawdb.WritePreimages(blockBatch, state.Preimages())
  1186  	if err := blockBatch.Write(); err != nil {
  1187  		log.Crit("Failed to write block into disk", "err", err)
  1188  	}
  1189  
  1190  	// Commit all cached state changes into underlying memory database.
  1191  	// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
  1192  	// diff layer for the block.
  1193  	var err error
  1194  	if bc.snaps == nil {
  1195  		_, err = state.Commit(bc.chainConfig.IsEIP158(block.Number()), true)
  1196  	} else {
  1197  		_, err = state.CommitWithSnap(bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
  1198  	}
  1199  	if err != nil {
  1200  		return err
  1201  	}
  1202  
  1203  	// Note: InsertTrie must be the last step in verification that can return an error.
  1204  	// This allows [stateManager] to assume that if it inserts a trie without returning an
  1205  	// error then the block has passed verification and either AcceptTrie/RejectTrie will
  1206  	// eventually be called on [root] unless a fatal error occurs. It does not assume that
  1207  	// the node will not shut down before either AcceptTrie/RejectTrie is called.
  1208  	if err := bc.stateManager.InsertTrie(block); err != nil {
  1209  		if bc.snaps != nil {
  1210  			discardErr := bc.snaps.Discard(block.Hash())
  1211  			if discardErr != nil {
  1212  				log.Debug("failed to discard snapshot after being unable to insert block trie", "block", block.Hash(), "root", block.Root(), "err", discardErr)
  1213  			}
  1214  		}
  1215  		return err
  1216  	}
  1217  	return nil
  1218  }
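
        // Minimal sketch of the atomic batch pattern used above, assuming a
        // hypothetical ethdb.Database [db] and illustrative keys; staged writes
        // only become visible once Write is called, so readers never observe a
        // partially written block:
        //
        //	batch := db.NewBatch()
        //	_ = batch.Put([]byte("key-a"), []byte("val-a")) // staged in memory
        //	_ = batch.Put([]byte("key-b"), []byte("val-b")) // staged in memory
        //	if err := batch.Write(); err != nil {
        //		log.Crit("batch write failed", "err", err) // neither key persisted
        //	}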
  1219  
  1220  // InsertChain attempts to insert the given batch of blocks into the canonical
  1221  // chain or, otherwise, create a fork. If an error is returned, it will return
  1222  // the index number of the failing block as well as an error describing what went
  1223  // wrong.
  1224  //
  1225  // After insertion is done, all accumulated events will be fired.
  1226  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1227  	// Sanity check that we have something meaningful to import
  1228  	if len(chain) == 0 {
  1229  		return 0, nil
  1230  	}
  1231  
  1232  	bc.blockProcFeed.Send(true)
  1233  	defer bc.blockProcFeed.Send(false)
  1234  
  1235  	// Do a sanity check that the provided chain is actually ordered and linked.
  1236  	for i := 1; i < len(chain); i++ {
  1237  		block, prev := chain[i], chain[i-1]
  1238  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1239  			log.Error("Non-contiguous block insert",
  1240  				"number", block.Number(),
  1241  				"hash", block.Hash(),
  1242  				"parent", block.ParentHash(),
  1243  				"prevnumber", prev.Number(),
  1244  				"prevhash", prev.Hash(),
  1245  			)
  1246  			return fmt.Errorf("non-contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
  1247  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1248  		}
  1249  	}
  1250  	// Pre-checks passed, start the full block imports
  1251  	bc.chainmu.Lock()
  1252  	defer bc.chainmu.Unlock()
  1253  	for n, block := range chain {
  1254  		if err := bc.insertBlock(block, true); err != nil {
  1255  			return n, err
  1256  		}
  1257  	}
  1258  
  1259  	return len(chain), nil
  1260  }
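
        // Illustrative caller sketch: the returned index identifies the first
        // block that failed to insert, so a caller can report or retry from that
        // point ([blocks] is a hypothetical types.Blocks slice):
        //
        //	if n, err := bc.InsertChain(blocks); err != nil {
        //		log.Error("chain insert aborted", "failedIndex", n, "err", err)
        //	}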
  1261  
  1262  func (bc *BlockChain) InsertBlock(block *types.Block) error {
  1263  	return bc.InsertBlockManual(block, true)
  1264  }
  1265  
  1266  func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error {
  1267  	bc.blockProcFeed.Send(true)
  1268  	defer bc.blockProcFeed.Send(false)
  1269  
  1270  	bc.chainmu.Lock()
  1271  	err := bc.insertBlock(block, writes)
  1272  	bc.chainmu.Unlock()
  1273  
  1274  	return err
  1275  }
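
        // Illustrative sketch: passing writes=false runs full verification and
        // state processing without persisting the block or its state, which can
        // serve as a dry run:
        //
        //	if err := bc.InsertBlockManual(block, false); err != nil {
        //		log.Error("block failed verification", "hash", block.Hash(), "err", err)
        //	}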
  1276  
  1277  // gatherBlockLogs fetches logs from a previously inserted block.
  1278  func (bc *BlockChain) gatherBlockLogs(hash common.Hash, number uint64, removed bool) []*types.Log {
  1279  	receipts := rawdb.ReadReceipts(bc.db, hash, number, bc.chainConfig)
  1280  	var logs []*types.Log
  1281  	for _, receipt := range receipts {
  1282  		for _, log := range receipt.Logs {
  1283  			l := *log
  1284  			if removed {
  1285  				l.Removed = true
  1286  			}
  1287  			logs = append(logs, &l)
  1288  		}
  1289  	}
  1290  
  1291  	return logs
  1292  }
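
        // Note on the value copy above: each *types.Log points into its decoded
        // receipt, so the Removed flag is set on a copy rather than on the
        // receipt's own log. A sketch of the pitfall this avoids:
        //
        //	for _, log := range receipt.Logs {
        //		log.Removed = true // would flag the shared log in place
        //	}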
  1293  
  1294  func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
  1295  	start := time.Now()
  1296  	bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())), block.Transactions())
  1297  
  1298  	substart := time.Now()
  1299  	err := bc.engine.VerifyHeader(bc, block.Header())
  1300  	if err == nil {
  1301  		err = bc.validator.ValidateBody(block)
  1302  	}
  1303  
  1304  	switch {
  1305  	case errors.Is(err, ErrKnownBlock):
  1306  		// Even if the block is already known, we still need to generate the
  1307  		// snapshot layer and add a reference to the triedb, so we re-execute
  1308  		// the block. Note that insertBlock should only be called once on a
  1309  		// given block if it returns nil.
  1310  		if bc.newTip(block) {
  1311  			log.Debug("Setting head to be known block", "number", block.Number(), "hash", block.Hash())
  1312  		} else {
  1313  			log.Debug("Reprocessing already known block", "number", block.Number(), "hash", block.Hash())
  1314  		}
  1315  
  1316  	// If an ancestor has been pruned, then this block cannot be acceptable.
  1317  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1318  		return errors.New("side chain insertion is not supported")
  1319  
  1320  	// Future blocks are not supported, but should not be reported, so we return an error
  1321  	// early here
  1322  	case errors.Is(err, consensus.ErrFutureBlock):
  1323  		return errFutureBlockUnsupported
  1324  
  1325  	// Some other error occurred, abort
  1326  	case err != nil:
  1327  		bc.reportBlock(block, nil, err)
  1328  		return err
  1329  	}
  1330  	blockContentValidationTimer.Inc(time.Since(substart).Milliseconds())
  1331  
  1332  	// No validation errors for the block
  1333  	var activeState *state.StateDB
  1334  	defer func() {
  1335  		// The chain importer starts and stops trie prefetchers. If a bad
  1336  		// block or other error is hit, however, an early return may not
  1337  		// properly terminate the background threads. This defer ensures that
  1338  		// any dangling prefetcher is cleaned up without holding on to live refs.
  1339  		if activeState != nil {
  1340  			activeState.StopPrefetcher()
  1341  		}
  1342  	}()
  1343  
  1344  	// Retrieve the parent block to determine which root to build state on
  1345  	substart = time.Now()
  1346  	parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1347  
  1348  	// Instantiate the statedb to use for processing transactions
  1349  	//
  1350  	// NOTE: Flattening a snapshot during block execution requires fetching state
  1351  	// entries directly from the trie (much slower).
  1352  	bc.flattenLock.Lock()
  1353  	defer bc.flattenLock.Unlock()
  1354  	statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
  1355  	if err != nil {
  1356  		return err
  1357  	}
  1358  	blockStateInitTimer.Inc(time.Since(substart).Milliseconds())
  1359  
  1360  	// Enable prefetching to pull in trie node paths while processing transactions
  1361  	statedb.StartPrefetcher("chain")
  1362  	activeState = statedb
  1363  
  1364  	// Process the block using the parent state as the reference point. The
  1365  	// prefetcher started above pulls in account/storage trie nodes
  1366  	// probabilistically while transactions execute.
  1367  	substart = time.Now()
  1368  	receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig)
  1369  	if serr := statedb.Error(); serr != nil {
  1370  		log.Error("statedb error encountered", "err", serr, "number", block.Number(), "hash", block.Hash())
  1371  	}
  1372  	if err != nil {
  1373  		bc.reportBlock(block, receipts, err)
  1374  		return err
  1375  	}
  1376  
  1377  	// Update the metrics touched during block processing
  1378  	accountReadTimer.Inc(statedb.AccountReads.Milliseconds())                 // Account reads are complete, we can mark them
  1379  	storageReadTimer.Inc(statedb.StorageReads.Milliseconds())                 // Storage reads are complete, we can mark them
  1380  	snapshotAccountReadTimer.Inc(statedb.SnapshotAccountReads.Milliseconds()) // Account reads are complete, we can mark them
  1381  	snapshotStorageReadTimer.Inc(statedb.SnapshotStorageReads.Milliseconds()) // Storage reads are complete, we can mark them
  1382  	trieproc := statedb.AccountHashes + statedb.StorageHashes                 // Saved to avoid double-counting in validation
  1383  	trieproc += statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
  1384  	trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
  1385  	blockExecutionTimer.Inc((time.Since(substart) - trieproc).Milliseconds())
  1386  
  1387  	// Validate the state using the default validator
  1388  	substart = time.Now()
  1389  	if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1390  		bc.reportBlock(block, receipts, err)
  1391  		return err
  1392  	}
  1393  
  1394  	// Update the metrics touched during block validation
  1395  	accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete, we can mark them
  1396  	storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete, we can mark them
  1397  	accountHashTimer.Inc(statedb.AccountHashes.Milliseconds())    // Account hashes are complete, we can mark them
  1398  	storageHashTimer.Inc(statedb.StorageHashes.Milliseconds())    // Storage hashes are complete, we can mark them
  1399  	validationTrieProcTime := statedb.AccountHashes + statedb.StorageHashes + statedb.AccountUpdates + statedb.StorageUpdates - trieproc
  1400  	blockStateValidationTimer.Inc((time.Since(substart) - validationTrieProcTime).Milliseconds())
  1401  	blockTrieOpsTimer.Inc((trieproc + validationTrieProcTime).Milliseconds())
  1402  
  1403  	// If [writes] are disabled, skip [writeBlockWithState] so that we do not write the block
  1404  	// or the state trie to disk.
  1405  	// Note: in pruning mode, this prevents us from generating a reference to the state root.
  1406  	if !writes {
  1407  		return nil
  1408  	}
  1409  
  1410  	// Write the block to the chain and get the status.
  1411  	// writeBlockWithState (called within writeBlockAndSetHead) creates a reference that
  1412  	// will be cleaned up in Accept/Reject, so we need to ensure an error cannot occur
  1413  	// later in verification, since that would cause the referenced root to never be dereferenced.
  1414  	substart = time.Now()
  1415  	if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil {
  1416  		return err
  1417  	}
  1418  	// Update the metrics touched during block commit
  1419  	accountCommitTimer.Inc(statedb.AccountCommits.Milliseconds())   // Account commits are complete, we can mark them
  1420  	storageCommitTimer.Inc(statedb.StorageCommits.Milliseconds())   // Storage commits are complete, we can mark them
  1421  	snapshotCommitTimer.Inc(statedb.SnapshotCommits.Milliseconds()) // Snapshot commits are complete, we can mark them
  1422  	triedbCommitTimer.Inc(statedb.TrieDBCommits.Milliseconds())     // Triedb commits are complete, we can mark them
  1423  	blockWriteTimer.Inc((time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits).Milliseconds())
  1424  	blockInsertTimer.Inc(time.Since(start).Milliseconds())
  1425  
  1426  	log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1427  		"parentHash", block.ParentHash(),
  1428  		"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1429  		"elapsed", common.PrettyDuration(time.Since(start)),
  1430  		"root", block.Root(), "baseFeePerGas", block.BaseFee(), "blockGasCost", block.BlockGasCost(),
  1431  	)
  1432  
  1433  	processedBlockGasUsedCounter.Inc(int64(block.GasUsed()))
  1434  	processedTxsCounter.Inc(int64(block.Transactions().Len()))
  1435  	processedLogsCounter.Inc(int64(len(logs)))
  1436  	blockInsertCount.Inc(1)
  1437  	return nil
  1438  }
  1439  
  1440  // collectLogs collects the logs that were generated or removed during
  1441  // the processing of the block that corresponds with the given hash.
  1442  // These logs are later announced as deleted or reborn.
  1443  func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log {
  1444  	number := bc.hc.GetBlockNumber(hash)
  1445  	if number == nil {
  1446  		return nil
  1447  	}
  1448  	return bc.gatherBlockLogs(hash, *number, removed)
  1449  }
  1450  
  1451  // mergeLogs returns a merged log slice with specified sort order.
  1452  func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log {
  1453  	var ret []*types.Log
  1454  	if reverse {
  1455  		for i := len(logs) - 1; i >= 0; i-- {
  1456  			ret = append(ret, logs[i]...)
  1457  		}
  1458  	} else {
  1459  		for i := 0; i < len(logs); i++ {
  1460  			ret = append(ret, logs[i]...)
  1461  		}
  1462  	}
  1463  	return ret
  1464  }
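
        // Illustrative sketch: reorg (below) flattens its per-block log batches
        // with mergeLogs, reversing the deleted set so both announcements come
        // out in a consistent block order:
        //
        //	removed := mergeLogs(deletedLogs, true)  // per-block batches, reversed
        //	reborn := mergeLogs(rebirthLogs, false)  // per-block batches, in order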
  1465  
  1466  // reorg takes two blocks, an old chain and a new chain, reconstructs the
  1467  // blocks, inserts them as part of the new canonical chain, accumulates
  1468  // potential missing transactions, and posts events about them.
  1469  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1470  	var (
  1471  		newHead = newBlock
  1472  		oldHead = oldBlock
  1473  
  1474  		newChain    types.Blocks
  1475  		oldChain    types.Blocks
  1476  		commonBlock *types.Block
  1477  
  1478  		deletedLogs [][]*types.Log
  1479  		rebirthLogs [][]*types.Log
  1480  	)
  1481  	// Reduce the longer chain to the same number as the shorter one
  1482  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1483  		// Old chain is longer, gather all transactions and logs as deleted ones
  1484  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1485  			oldChain = append(oldChain, oldBlock)
  1486  			// Collect deleted logs for notification
  1487  			logs := bc.collectLogs(oldBlock.Hash(), true)
  1488  			if len(logs) > 0 {
  1489  				deletedLogs = append(deletedLogs, logs)
  1490  			}
  1491  		}
  1492  	} else {
  1493  		// New chain is longer, stash all blocks away for subsequent insertion
  1494  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1495  			newChain = append(newChain, newBlock)
  1496  		}
  1497  	}
  1498  	if oldBlock == nil {
  1499  		return errors.New("invalid old chain")
  1500  	}
  1501  	if newBlock == nil {
  1502  		return errors.New("invalid new chain")
  1503  	}
  1504  	// Both sides of the reorg are at the same number, reduce both until the common
  1505  	// ancestor is found
  1506  	for {
  1507  		// If the common ancestor was found, bail out
  1508  		if oldBlock.Hash() == newBlock.Hash() {
  1509  			commonBlock = oldBlock
  1510  			break
  1511  		}
  1512  		// Remove an old block as well as stash away a new block
  1513  		oldChain = append(oldChain, oldBlock)
  1514  		// Collect deleted logs for notification
  1515  		logs := bc.collectLogs(oldBlock.Hash(), true)
  1516  		if len(logs) > 0 {
  1517  			deletedLogs = append(deletedLogs, logs)
  1518  		}
  1519  
  1520  		newChain = append(newChain, newBlock)
  1521  
  1522  		// Step back with both chains
  1523  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  1524  		if oldBlock == nil {
  1525  			return errors.New("invalid old chain")
  1526  		}
  1527  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1528  		if newBlock == nil {
  1529  			return errors.New("invalid new chain")
  1530  		}
  1531  	}
  1532  
  1533  	// If the commonBlock is less than the last accepted height, we return an error
  1534  	// because performing a reorg would mean removing an accepted block from the
  1535  	// canonical chain.
  1536  	if commonBlock.NumberU64() < bc.lastAccepted.NumberU64() {
  1537  		return fmt.Errorf("cannot orphan finalized block at height: %d to common block at height: %d", bc.lastAccepted.NumberU64(), commonBlock.NumberU64())
  1538  	}
  1539  
  1540  	// Ensure the user sees large reorgs
  1541  	if len(oldChain) > 0 && len(newChain) > 0 {
  1542  		logFn := log.Info
  1543  		msg := "Resetting chain preference"
  1544  		if len(oldChain) > 63 {
  1545  			msg = "Large chain preference change detected"
  1546  			logFn = log.Warn
  1547  		}
  1548  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1549  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1550  	} else {
  1551  		log.Debug("Preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number(), "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash())
  1552  	}
  1553  	// Insert the new chain segment (except the head block, which the caller
  1554  	// writes) in reverse order, so blocks are applied in incremental order.
  1555  	for i := len(newChain) - 1; i >= 1; i-- {
  1556  		// Insert the block in the canonical way, re-writing history
  1557  		bc.writeHeadBlock(newChain[i])
  1558  
  1559  		// Collect reborn logs due to chain reorg
  1560  		logs := bc.collectLogs(newChain[i].Hash(), false)
  1561  		if len(logs) > 0 {
  1562  			rebirthLogs = append(rebirthLogs, logs)
  1563  		}
  1564  	}
  1565  	// Delete any canonical number assignments above the new head
  1566  	indexesBatch := bc.db.NewBatch()
  1567  
  1568  	// Use the height of [newHead] to determine which canonical hashes to remove
  1569  	// in case the new chain is shorter than the old chain, in which case
  1570  	// there may be hashes set on the canonical chain that were invalidated
  1571  	// but not yet overwritten by the re-org.
  1572  	for i := newHead.NumberU64() + 1; ; i++ {
  1573  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  1574  		if hash == (common.Hash{}) {
  1575  			break
  1576  		}
  1577  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  1578  	}
  1579  	if err := indexesBatch.Write(); err != nil {
  1580  		log.Crit("Failed to delete useless indexes", "err", err)
  1581  	}
  1582  
  1583  	// If any logs need to be fired, do it now. In theory we could avoid these
  1584  	// sends entirely if there are no events to fire, but realistically that only
  1585  	// ever happens if we're reorging empty blocks, which will only happen on idle
  1586  	// networks where performance is not an issue either way.
  1587  	if len(deletedLogs) > 0 {
  1588  		bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
  1589  	}
  1590  	if len(rebirthLogs) > 0 {
  1591  		bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
  1592  	}
  1593  	if len(oldChain) > 0 {
  1594  		for i := len(oldChain) - 1; i >= 0; i-- {
  1595  			bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  1596  		}
  1597  	}
  1598  	return nil
  1599  }
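
        // Condensed sketch of the common-ancestor search performed above, with a
        // hypothetical parentOf helper standing in for the bc.GetBlock lookups:
        //
        //	for a.NumberU64() > b.NumberU64() { a = parentOf(a) } // equalize heights
        //	for b.NumberU64() > a.NumberU64() { b = parentOf(b) }
        //	for a.Hash() != b.Hash() { // walk both sides back until hashes match
        //		a, b = parentOf(a), parentOf(b)
        //	}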
  1600  
  1601  type badBlock struct {
  1602  	block  *types.Block
  1603  	reason *BadBlockReason
  1604  }
  1605  
  1606  type BadBlockReason struct {
  1607  	ChainConfig *params.ChainConfig `json:"chainConfig"`
  1608  	Receipts    types.Receipts      `json:"receipts"`
  1609  	Number      uint64              `json:"number"`
  1610  	Hash        common.Hash         `json:"hash"`
  1611  	Error       string              `json:"error"`
  1612  }
  1613  
  1614  func (b *BadBlockReason) String() string {
  1615  	var receiptString string
  1616  	for i, receipt := range b.Receipts {
  1617  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  1618  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  1619  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  1620  	}
  1621  	reason := fmt.Sprintf(`
  1622  	########## BAD BLOCK #########
  1623  	Chain config: %v
  1624  	
  1625  	Number: %v
  1626  	Hash: %#x
  1627  	%v
  1628  	
  1629  	Error: %v
  1630  	##############################
  1631  	`, b.ChainConfig, b.Number, b.Hash, receiptString, b.Error)
  1632  
  1633  	return reason
  1634  }
  1635  
  1636  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network,
  1637  // along with the BadBlockReason that caused each to be reported as a bad block.
  1638  // BadBlocks ensures that the returned block and reason slices have the same length.
  1639  func (bc *BlockChain) BadBlocks() ([]*types.Block, []*BadBlockReason) {
  1640  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1641  	reasons := make([]*BadBlockReason, 0, bc.badBlocks.Len())
  1642  	for _, hash := range bc.badBlocks.Keys() {
  1643  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1644  			badBlk := blk.(*badBlock)
  1645  			blocks = append(blocks, badBlk.block)
  1646  			reasons = append(reasons, badBlk.reason)
  1647  		}
  1648  	}
  1649  	return blocks, reasons
  1650  }
  1651  
  1652  // addBadBlock adds a bad block to the bad-block LRU cache
  1653  func (bc *BlockChain) addBadBlock(block *types.Block, reason *BadBlockReason) {
  1654  	bc.badBlocks.Add(block.Hash(), &badBlock{
  1655  		block:  block,
  1656  		reason: reason,
  1657  	})
  1658  }
  1659  
  1660  // reportBlock logs a bad block error.
  1661  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1662  	reason := &BadBlockReason{
  1663  		ChainConfig: bc.chainConfig,
  1664  		Receipts:    receipts,
  1665  		Number:      block.NumberU64(),
  1666  		Hash:        block.Hash(),
  1667  		Error:       err.Error(),
  1668  	}
  1669  
  1670  	badBlockCounter.Inc(1)
  1671  	bc.addBadBlock(block, reason)
  1672  	log.Debug(reason.String())
  1673  }
  1674  
  1675  func (bc *BlockChain) RemoveRejectedBlocks(start, end uint64) error {
  1676  	batch := bc.db.NewBatch()
  1677  
  1678  	for i := start; i < end; i++ {
  1679  		hashes := rawdb.ReadAllHashes(bc.db, i)
  1680  		canonicalBlock := bc.GetBlockByNumber(i)
  1681  		if canonicalBlock == nil {
  1682  			return fmt.Errorf("failed to retrieve block by number at height %d", i)
  1683  		}
  1684  		canonicalHash := canonicalBlock.Hash()
  1685  		for _, hash := range hashes {
  1686  			if hash == canonicalHash {
  1687  				continue
  1688  			}
  1689  			rawdb.DeleteBlock(batch, hash, i)
  1690  		}
  1691  
  1692  		if err := batch.Write(); err != nil {
  1693  			return fmt.Errorf("failed to write rejected block deletion batch at height %d: %w", i, err)
  1694  		}
  1695  		batch.Reset()
  1696  	}
  1697  
  1698  	return nil
  1699  }
  1700  
  1701  // reprocessBlock reprocesses a previously accepted block. This is often used
  1702  // to regenerate previously pruned state tries.
  1703  func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) (common.Hash, error) {
  1704  	// Retrieve the parent block and its state to execute block
  1705  	var (
  1706  		statedb    *state.StateDB
  1707  		err        error
  1708  		parentRoot = parent.Root()
  1709  	)
  1710  	// We don't simply use [NewWithSnapshot] here because it doesn't return an
  1711  	// error if [bc.snaps != nil] and [bc.snaps.Snapshot(parentRoot) == nil].
  1712  	if bc.snaps == nil {
  1713  		statedb, err = state.New(parentRoot, bc.stateCache, nil)
  1714  	} else {
  1715  		snap := bc.snaps.Snapshot(parentRoot)
  1716  		if snap == nil {
  1717  			return common.Hash{}, fmt.Errorf("failed to get snapshot for parent root: %s", parentRoot)
  1718  		}
  1719  		statedb, err = state.NewWithSnapshot(parentRoot, bc.stateCache, snap)
  1720  	}
  1721  	if err != nil {
  1722  		return common.Hash{}, fmt.Errorf("could not fetch state for (%s: %d): %v", parent.Hash().Hex(), parent.NumberU64(), err)
  1723  	}
  1724  
  1725  	// Enable prefetching to pull in trie node paths while processing transactions
  1726  	statedb.StartPrefetcher("chain")
  1727  	defer func() {
  1728  		statedb.StopPrefetcher()
  1729  	}()
  1730  
  1731  	// Process previously stored block
  1732  	receipts, _, usedGas, err := bc.processor.Process(current, parent.Header(), statedb, vm.Config{})
  1733  	if err != nil {
  1734  		return common.Hash{}, fmt.Errorf("failed to re-process block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
  1735  	}
  1736  
  1737  	// Validate the state using the default validator
  1738  	if err := bc.validator.ValidateState(current, statedb, receipts, usedGas); err != nil {
  1739  		return common.Hash{}, fmt.Errorf("failed to validate state while re-processing block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
  1740  	}
  1741  	log.Debug("Processed block", "block", current.Hash(), "number", current.NumberU64())
  1742  
  1743  	// Commit all cached state changes into underlying memory database.
  1744  	// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
  1745  	// diff layer for the block.
  1746  	if bc.snaps == nil {
  1747  		return statedb.Commit(bc.chainConfig.IsEIP158(current.Number()), false)
  1748  	}
  1749  	return statedb.CommitWithSnap(bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
  1750  }
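
        // Illustrative usage sketch: reprocessBlock returns the post-execution
        // state root and leaves persistence to the caller (compare
        // populateMissingTries below):
        //
        //	root, err := bc.reprocessBlock(parent, current)
        //	if err == nil {
        //		err = bc.stateCache.TrieDB().Commit(root, false, nil)
        //	}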
  1751  
  1752  // initSnapshot instantiates a Snapshot instance and adds it to [bc]
  1753  func (bc *BlockChain) initSnapshot(b *types.Block) {
  1754  	if bc.cacheConfig.SnapshotLimit <= 0 || bc.snaps != nil {
  1755  		return
  1756  	}
  1757  
  1758  	// If we are starting from genesis, generate the original snapshot disk layer
  1759  	// up front, so we can use it while executing blocks in bootstrapping. This
  1760  	// also avoids a costly async generation process when reaching tip.
  1761  	//
  1762  	// Additionally, we should always repair a snapshot if starting at genesis
  1763  	// if [SnapshotLimit] > 0.
  1764  	async := bc.cacheConfig.SnapshotAsync && b.NumberU64() > 0
  1765  	rebuild := !bc.cacheConfig.SkipSnapshotRebuild || b.NumberU64() == 0
  1766  	log.Info("Initializing snapshots", "async", async, "rebuild", rebuild, "headHash", b.Hash(), "headRoot", b.Root())
  1767  	var err error
  1768  	bc.snaps, err = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, b.Hash(), b.Root(), async, rebuild, bc.cacheConfig.SnapshotVerify)
  1769  	if err != nil {
  1770  		log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root(), "err", err, "async", async)
  1771  	}
  1772  }
  1773  
  1774  // reprocessState reprocesses the state up to [block], iterating through its ancestors until
  1775  // it reaches a block with a state committed to the database. reprocessState does not use
  1776  // snapshots since the disk layer for snapshots will most likely be above the last committed
  1777  // state that reprocessing will start from.
  1778  func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error {
  1779  	origin := current.NumberU64()
  1780  	acceptorTip, err := rawdb.ReadAcceptorTip(bc.db)
  1781  	if err != nil {
  1782  		return fmt.Errorf("%w: unable to get Acceptor tip", err)
  1783  	}
  1784  	log.Info("Loaded Acceptor tip", "hash", acceptorTip)
  1785  
  1786  	// The acceptor tip is up to date either if it matches the current hash, or it has not been
  1787  	// initialized (i.e., this node has not accepted any blocks asynchronously).
  1788  	acceptorTipUpToDate := acceptorTip == (common.Hash{}) || acceptorTip == current.Hash()
  1789  
  1790  	// If the state is already available and the acceptor tip is up to date, skip re-processing.
  1791  	if bc.HasState(current.Root()) && acceptorTipUpToDate {
  1792  		log.Info("Skipping state reprocessing", "root", current.Root())
  1793  		return nil
  1794  	}
  1795  
  1796  	// If the acceptorTip is a non-empty hash, jump re-processing back to the acceptor tip to ensure that
  1797  	// we re-process at a minimum from the last processed accepted block.
  1798  	// Note: we do not have a guarantee that the last trie on disk will be at a height <= acceptorTip.
  1799  	// Since we need to re-process from at least the acceptorTip to ensure indices are updated correctly,
  1800  	// we must start searching for the block from which to start re-processing at the acceptorTip.
  1801  	// This may occur if we are running in archive mode where every block's trie is committed on insertion
  1802  	// or during an unclean shutdown.
  1803  	if acceptorTip != (common.Hash{}) {
  1804  		current = bc.GetBlockByHash(acceptorTip)
  1805  		if current == nil {
  1806  			return fmt.Errorf("failed to get block for acceptor tip %s", acceptorTip)
  1807  		}
  1808  	}
  1809  
  1810  	for i := 0; i < int(reexec); i++ {
  1811  		// TODO: handle canceled context
  1812  
  1813  		if current.NumberU64() == 0 {
  1814  			return errors.New("genesis state is missing")
  1815  		}
  1816  		parent := bc.GetBlock(current.ParentHash(), current.NumberU64()-1)
  1817  		if parent == nil {
  1818  			return fmt.Errorf("missing block %s:%d", current.ParentHash().Hex(), current.NumberU64()-1)
  1819  		}
  1820  		current = parent
  1821  		_, err = bc.stateCache.OpenTrie(current.Root())
  1822  		if err == nil {
  1823  			break
  1824  		}
  1825  	}
  1826  	if err != nil {
  1827  		switch err.(type) {
  1828  		case *trie.MissingNodeError:
  1829  			return fmt.Errorf("required historical state unavailable (reexec=%d)", reexec)
  1830  		default:
  1831  			return err
  1832  		}
  1833  	}
  1834  
  1835  	// State was available at historical point, regenerate
  1836  	var (
  1837  		start        = time.Now()
  1838  		logged       time.Time
  1839  		previousRoot common.Hash
  1840  		triedb       = bc.stateCache.TrieDB()
  1841  		writeIndices bool
  1842  	)
  1843  	// Note: we add 1 since in each iteration, we attempt to re-execute the next block.
  1844  	log.Info("Re-executing blocks to generate state for last accepted block", "from", current.NumberU64()+1, "to", origin)
  1845  	for current.NumberU64() < origin {
  1846  		// TODO: handle canceled context
  1847  
  1848  		// Print progress logs if long enough time elapsed
  1849  		if time.Since(logged) > 8*time.Second {
  1850  			log.Info("Regenerating historical state", "block", current.NumberU64()+1, "target", origin, "remaining", origin-current.NumberU64(), "elapsed", time.Since(start))
  1851  			logged = time.Now()
  1852  		}
  1853  
  1854  		// Retrieve the next block to regenerate and process it
  1855  		parent := current
  1856  		next := current.NumberU64() + 1
  1857  		if current = bc.GetBlockByNumber(next); current == nil {
  1858  			return fmt.Errorf("failed to retrieve block %d while re-generating state", next)
  1859  		}
  1860  
  1861  		// Initialize snapshot if required (prevents full snapshot re-generation in
  1862  		// the case of unclean shutdown)
  1863  		if parent.Hash() == acceptorTip {
  1864  			log.Info("Recovering snapshot", "hash", parent.Hash(), "index", parent.NumberU64())
  1865  			// TODO: switch to checking the snapshot block hash markers here to ensure that when we re-process the block, we have the opportunity to apply
  1866  			// a snapshot diff layer that we may have been in the middle of committing during shutdown. This will prevent snapshot re-generation in the case
  1867  			// that the node stops mid-way through snapshot flattening (performed across multiple DB batches).
  1868  			// If snapshot initialization is delayed due to state sync, skip initializing snaps here
  1869  			if !bc.cacheConfig.SnapshotDelayInit {
  1870  				bc.initSnapshot(parent)
  1871  			}
  1872  			writeIndices = true // Set [writeIndices] to true, so that the indices will be updated from the last accepted tip onwards.
  1873  		}
  1874  
  1875  		// Reprocess next block using previously fetched data
  1876  		root, err := bc.reprocessBlock(parent, current)
  1877  		if err != nil {
  1878  			return err
  1879  		}
  1880  
  1881  		// Flatten snapshot if initialized, holding a reference to the state root until the next block
  1882  		// is processed.
  1883  		if err := bc.flattenSnapshot(func() error {
  1884  			triedb.Reference(root, common.Hash{})
  1885  			if previousRoot != (common.Hash{}) {
  1886  				triedb.Dereference(previousRoot)
  1887  			}
  1888  			previousRoot = root
  1889  			return nil
  1890  		}, current.Hash()); err != nil {
  1891  			return err
  1892  		}
  1893  
  1894  		// Write any unsaved indices to disk
  1895  		if writeIndices {
  1896  			if err := bc.writeBlockAcceptedIndices(current); err != nil {
  1897  				return fmt.Errorf("%w: failed to process accepted block indices", err)
  1898  			}
  1899  		}
  1900  	}
  1901  
  1902  	nodes, imgs := triedb.Size()
  1903  	log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
  1904  	if previousRoot != (common.Hash{}) {
  1905  		return triedb.Commit(previousRoot, true, nil)
  1906  	}
  1907  	return nil
  1908  }
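
        // Minimal sketch of the sliding reference window used above: referencing
        // the new root before dereferencing the previous one guarantees the triedb
        // always retains at least one fully regenerated state during replay:
        //
        //	triedb.Reference(root, common.Hash{}) // pin the new root
        //	if previousRoot != (common.Hash{}) {
        //		triedb.Dereference(previousRoot) // release the prior root
        //	}
        //	previousRoot = root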
  1909  
  1910  func (bc *BlockChain) protectTrieIndex() error {
  1911  	if !bc.cacheConfig.Pruning {
  1912  		return rawdb.WritePruningDisabled(bc.db)
  1913  	}
  1914  	pruningDisabled, err := rawdb.HasPruningDisabled(bc.db)
  1915  	if err != nil {
  1916  		return fmt.Errorf("failed to check if the chain has been run with pruning disabled: %w", err)
  1917  	}
  1918  	if !pruningDisabled {
  1919  		return nil
  1920  	}
  1921  	if !bc.cacheConfig.AllowMissingTries {
  1922  		return ErrRefuseToCorruptArchiver
  1923  	}
  1924  	return nil
  1925  }
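
        // Decision sketch for protectTrieIndex, summarizing the cases handled
        // above:
        //
        //	pruning disabled              -> record the pruning-disabled marker
        //	pruning on, no marker on disk -> proceed (never ran as an archive node)
        //	pruning on, marker on disk    -> proceed only with AllowMissingTries,
        //	                                 otherwise ErrRefuseToCorruptArchiver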
  1926  
  1927  // populateMissingTries iterates from [bc.cacheConfig.PopulateMissingTries] (defaults to 0)
  1928  // to [LastAcceptedBlock] and persists all tries to disk that are not already on disk. This is
  1929  // used to fill trie index gaps in an "archive" node without resyncing from scratch.
  1930  //
  1931  // NOTE: Assumes the genesis root and last accepted root are written to disk
  1932  func (bc *BlockChain) populateMissingTries() error {
  1933  	if bc.cacheConfig.PopulateMissingTries == nil {
  1934  		return nil
  1935  	}
  1936  
  1937  	var (
  1938  		lastAccepted = bc.LastAcceptedBlock().NumberU64()
  1939  		startHeight  = *bc.cacheConfig.PopulateMissingTries
  1940  		startTime    = time.Now()
  1941  		logged       time.Time
  1942  		triedb       = bc.stateCache.TrieDB()
  1943  		missing      = 0
  1944  	)
  1945  
  1946  	// Do not allow the config to specify a starting point above the last accepted block.
  1947  	if startHeight > lastAccepted {
  1948  		return fmt.Errorf("cannot populate missing tries from a starting point (%d) > last accepted block (%d)", startHeight, lastAccepted)
  1949  	}
  1950  
  1951  	// If we are starting from the genesis, increment the start height by 1 so we don't attempt to re-process
  1952  	// the genesis block.
  1953  	if startHeight == 0 {
  1954  		startHeight += 1
  1955  	}
  1956  	parent := bc.GetBlockByNumber(startHeight - 1)
  1957  	if parent == nil {
  1958  		return fmt.Errorf("failed to fetch initial parent block for re-populating missing tries at height %d", startHeight-1)
  1959  	}
  1960  
  1961  	it := newBlockChainIterator(bc, startHeight, bc.cacheConfig.PopulateMissingTriesParallelism)
  1962  	defer it.Stop()
  1963  
  1964  	for i := startHeight; i < lastAccepted; i++ {
  1965  		// Print progress logs if long enough time elapsed
  1966  		if time.Since(logged) > 8*time.Second {
  1967  			log.Info("Populating missing tries", "missing", missing, "block", i, "remaining", lastAccepted-i, "elapsed", time.Since(startTime))
  1968  			logged = time.Now()
  1969  		}
  1970  
  1971  		// TODO: handle canceled context
  1972  		current, hasState, err := it.Next(context.TODO())
  1973  		if err != nil {
  1974  			return fmt.Errorf("error while populating missing tries: %w", err)
  1975  		}
  1976  
  1977  		if hasState {
  1978  			parent = current
  1979  			continue
  1980  		}
  1981  
  1982  		root, err := bc.reprocessBlock(parent, current)
  1983  		if err != nil {
  1984  			return err
  1985  		}
  1986  
  1987  		// Commit root to disk so that it can be accessed directly
  1988  		if err := triedb.Commit(root, false, nil); err != nil {
  1989  			return err
  1990  		}
  1991  		parent = current
  1992  		log.Debug("Populated missing trie", "block", current.NumberU64(), "root", root)
  1993  		missing++
  1994  	}
  1995  
  1996  	// Write marker to DB to indicate populate missing tries finished successfully.
  1997  	// Note: writing the marker here means that we do allow consecutive runs of re-populating
  1998  	// missing tries if it does not finish during the prior run.
  1999  	if err := rawdb.WritePopulateMissingTries(bc.db); err != nil {
  2000  		return fmt.Errorf("failed to write populate missing tries marker: %w", err)
  2001  	}
  2002  
  2003  	nodes, imgs := triedb.Size()
  2004  	log.Info("All missing tries populated", "startHeight", startHeight, "lastAcceptedHeight", lastAccepted, "missing", missing, "elapsed", time.Since(startTime), "nodes", nodes, "preimages", imgs)
  2005  	return nil
  2006  }
  2007  
  2008  // CleanBlockRootsAboveLastAccepted gathers the blocks that may have previously been in processing above the
  2009  // last accepted block and wipes their block roots from disk to mark their tries as inaccessible.
  2010  // This is used prior to pruning to ensure that all of the tries that may still be in processing are marked
  2011  // as inaccessible and mirrors the handling of middle roots in the geth offline pruning implementation.
  2012  // This is not strictly necessary, but it maintains the soft assumption that tries above the last accepted block are inaccessible before pruning begins.
  2013  func (bc *BlockChain) CleanBlockRootsAboveLastAccepted() error {
  2014  	targetRoot := bc.LastAcceptedBlock().Root()
  2015  
  2016  	// Clean up any block roots above the last accepted block before we start pruning.
  2017  	// Note: this takes the place of middleRoots in the geth implementation since we do not
  2018  	// track processing block roots via snapshot journals in the same way.
  2019  	processingRoots := bc.gatherBlockRootsAboveLastAccepted()
  2020  	// If there is a block above the last accepted block with an identical state root, we
  2021  	// explicitly remove it from the set to ensure we do not corrupt the last accepted trie.
  2022  	delete(processingRoots, targetRoot)
  2023  	for processingRoot := range processingRoots {
  2024  		// Delete the processing root from disk to mark the trie as inaccessible (no need to handle this in a batch).
  2025  		if err := bc.db.Delete(processingRoot[:]); err != nil {
  2026  			return fmt.Errorf("failed to remove processing root (%s) preparing for offline pruning: %w", processingRoot, err)
  2027  		}
  2028  	}
  2029  
  2030  	return nil
  2031  }
  2032  
  2033  // gatherBlockRootsAboveLastAccepted iterates forward from the last accepted block and returns a list of all block roots
  2034  // for any blocks that were inserted above the last accepted block.
  2035  // Given that we never insert a block into the chain unless all of its ancestors have been inserted, this should gather
  2036  // all of the block roots for blocks inserted above the last accepted block that may have been in processing at some point
  2037  // in the past and are therefore potentially still acceptable.
  2038  // Note: there is an edge case where the node dies while the consensus engine is rejecting a branch of blocks since the
  2039  // consensus engine will reject the lowest ancestor first. In this case, these blocks will not be considered acceptable in
  2040  // the future.
  2041  // Ex.
  2042  //
  2043  //	   A
  2044  //	 /   \
  2045  //	B     C
  2046  //	|
  2047  //	D
  2048  //	|
  2049  //	E
  2050  //	|
  2051  //	F
  2052  //
  2053  // The consensus engine accepts block C and proceeds to reject the other branch in order (B, D, E, F).
  2054  // If the consensus engine dies after rejecting block D, block D will be deleted, such that the forward iteration
  2055  // may not find any blocks at this height and will not reach the previously processing blocks E and F.
  2056  func (bc *BlockChain) gatherBlockRootsAboveLastAccepted() map[common.Hash]struct{} {
  2057  	blockRoots := make(map[common.Hash]struct{})
  2058  	for height := bc.lastAccepted.NumberU64() + 1; ; height++ {
  2059  		blockHashes := rawdb.ReadAllHashes(bc.db, height)
  2060  		// If there are no block hashes at [height], then there should be no further acceptable blocks
  2061  		// past this point.
  2062  		if len(blockHashes) == 0 {
  2063  			break
  2064  		}
  2065  
  2066  		// Fetch the blocks and append their roots.
  2067  		for _, blockHash := range blockHashes {
  2068  			block := bc.GetBlockByHash(blockHash)
  2069  			if block == nil {
  2070  				continue
  2071  			}
  2072  
  2073  			blockRoots[block.Root()] = struct{}{}
  2074  		}
  2075  	}
  2076  
  2077  	return blockRoots
  2078  }
  2079  
  2080  // ResetToStateSyncedBlock reinitializes the state of the blockchain
  2081  // to the trie represented by [block.Root()] after updating
  2082  // in-memory and on-disk current block pointers to [block].
  2083  // It should only be called after state sync has completed.
  2084  func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error {
  2085  	bc.chainmu.Lock()
  2086  	defer bc.chainmu.Unlock()
  2087  
  2088  	// Update head block and snapshot pointers on disk
  2089  	batch := bc.db.NewBatch()
  2090  	rawdb.WriteAcceptorTip(batch, block.Hash())
  2091  	rawdb.WriteHeadBlockHash(batch, block.Hash())
  2092  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
  2093  	rawdb.WriteSnapshotBlockHash(batch, block.Hash())
  2094  	rawdb.WriteSnapshotRoot(batch, block.Root())
  2095  	if err := rawdb.WriteSyncPerformed(batch, block.NumberU64()); err != nil {
  2096  		return err
  2097  	}
  2098  
  2099  	if err := batch.Write(); err != nil {
  2100  		return err
  2101  	}
  2102  
  2103  	// Update all in-memory chain markers
  2104  	bc.lastAccepted = block
  2105  	bc.acceptorTip = block
  2106  	bc.currentBlock.Store(block)
  2107  	bc.hc.SetCurrentHeader(block.Header())
  2108  
  2109  	lastAcceptedHash := block.Hash()
  2110  	bc.stateCache = state.NewDatabaseWithConfig(bc.db, &trie.Config{
  2111  		Cache:       bc.cacheConfig.TrieCleanLimit,
  2112  		Journal:     bc.cacheConfig.TrieCleanJournal,
  2113  		Preimages:   bc.cacheConfig.Preimages,
  2114  		StatsPrefix: trieCleanCacheStatsNamespace,
  2115  	})
  2116  	if err := bc.loadLastState(lastAcceptedHash); err != nil {
  2117  		return err
  2118  	}
  2119  	// Create the state manager
  2120  	bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), bc.cacheConfig)
  2121  
  2122  	// Make sure the state associated with the block is available
  2123  	head := bc.CurrentBlock()
  2124  	if !bc.HasState(head.Root()) {
  2125  		return fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash())
  2126  	}
  2127  
  2128  	bc.initSnapshot(head)
  2129  	return nil
  2130  }