github.com/MetalBlockchain/subnet-evm@v0.4.9/core/blockchain.go

     1  // (c) 2019-2020, Ava Labs, Inc.
     2  //
     3  // This file is a derived work, based on the go-ethereum library whose original
     4  // notices appear below.
     5  //
     6  // It is distributed under a license compatible with the licensing terms of the
     7  // original code from which it is derived.
     8  //
     9  // Much love to the original authors for their work.
    10  // **********
    11  // Copyright 2014 The go-ethereum Authors
    12  // This file is part of the go-ethereum library.
    13  //
    14  // The go-ethereum library is free software: you can redistribute it and/or modify
    15  // it under the terms of the GNU Lesser General Public License as published by
    16  // the Free Software Foundation, either version 3 of the License, or
    17  // (at your option) any later version.
    18  //
    19  // The go-ethereum library is distributed in the hope that it will be useful,
    20  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    21  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    22  // GNU Lesser General Public License for more details.
    23  //
    24  // You should have received a copy of the GNU Lesser General Public License
    25  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    26  
    27  // Package core implements the Ethereum consensus protocol.
    28  package core
    29  
    30  import (
    31  	"context"
    32  	"errors"
    33  	"fmt"
    34  	"io"
    35  	"math/big"
    36  	"runtime"
    37  	"sync"
    38  	"sync/atomic"
    39  	"time"
    40  
    41  	"github.com/MetalBlockchain/subnet-evm/commontype"
    42  	"github.com/MetalBlockchain/subnet-evm/consensus"
    43  	"github.com/MetalBlockchain/subnet-evm/core/rawdb"
    44  	"github.com/MetalBlockchain/subnet-evm/core/state"
    45  	"github.com/MetalBlockchain/subnet-evm/core/state/snapshot"
    46  	"github.com/MetalBlockchain/subnet-evm/core/types"
    47  	"github.com/MetalBlockchain/subnet-evm/core/vm"
    48  	"github.com/MetalBlockchain/subnet-evm/ethdb"
    49  	"github.com/MetalBlockchain/subnet-evm/metrics"
    50  	"github.com/MetalBlockchain/subnet-evm/params"
    51  	"github.com/MetalBlockchain/subnet-evm/trie"
    52  	"github.com/ethereum/go-ethereum/common"
    53  	"github.com/ethereum/go-ethereum/event"
    54  	"github.com/ethereum/go-ethereum/log"
    55  	lru "github.com/hashicorp/golang-lru"
    56  )
    57  
    58  var (
    59  	accountReadTimer         = metrics.NewRegisteredCounter("chain/account/reads", nil)
    60  	accountHashTimer         = metrics.NewRegisteredCounter("chain/account/hashes", nil)
    61  	accountUpdateTimer       = metrics.NewRegisteredCounter("chain/account/updates", nil)
    62  	accountCommitTimer       = metrics.NewRegisteredCounter("chain/account/commits", nil)
    63  	storageReadTimer         = metrics.NewRegisteredCounter("chain/storage/reads", nil)
    64  	storageHashTimer         = metrics.NewRegisteredCounter("chain/storage/hashes", nil)
    65  	storageUpdateTimer       = metrics.NewRegisteredCounter("chain/storage/updates", nil)
    66  	storageCommitTimer       = metrics.NewRegisteredCounter("chain/storage/commits", nil)
    67  	snapshotAccountReadTimer = metrics.NewRegisteredCounter("chain/snapshot/account/reads", nil)
    68  	snapshotStorageReadTimer = metrics.NewRegisteredCounter("chain/snapshot/storage/reads", nil)
    69  	snapshotCommitTimer      = metrics.NewRegisteredCounter("chain/snapshot/commits", nil)
    70  	triedbCommitTimer        = metrics.NewRegisteredCounter("chain/triedb/commits", nil)
    71  
    72  	blockInsertTimer            = metrics.NewRegisteredCounter("chain/block/inserts", nil)
    73  	blockInsertCount            = metrics.NewRegisteredCounter("chain/block/inserts/count", nil)
    74  	blockContentValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/content", nil)
    75  	blockStateInitTimer         = metrics.NewRegisteredCounter("chain/block/inits/state", nil)
    76  	blockExecutionTimer         = metrics.NewRegisteredCounter("chain/block/executions", nil)
    77  	blockTrieOpsTimer           = metrics.NewRegisteredCounter("chain/block/trie", nil)
    78  	blockStateValidationTimer   = metrics.NewRegisteredCounter("chain/block/validations/state", nil)
    79  	blockWriteTimer             = metrics.NewRegisteredCounter("chain/block/writes", nil)
    80  
    81  	acceptorQueueGauge           = metrics.NewRegisteredGauge("chain/acceptor/queue/size", nil)
    82  	acceptorWorkTimer            = metrics.NewRegisteredCounter("chain/acceptor/work", nil)
    83  	acceptorWorkCount            = metrics.NewRegisteredCounter("chain/acceptor/work/count", nil)
    84  	processedBlockGasUsedCounter = metrics.NewRegisteredCounter("chain/block/gas/used/processed", nil)
    85  	acceptedBlockGasUsedCounter  = metrics.NewRegisteredCounter("chain/block/gas/used/accepted", nil)
    86  	badBlockCounter              = metrics.NewRegisteredCounter("chain/block/bad/count", nil)
    87  
    88  	txUnindexTimer      = metrics.NewRegisteredCounter("chain/txs/unindex", nil)
    89  	acceptedTxsCounter  = metrics.NewRegisteredCounter("chain/txs/accepted", nil)
    90  	processedTxsCounter = metrics.NewRegisteredCounter("chain/txs/processed", nil)
    91  
    92  	acceptedLogsCounter  = metrics.NewRegisteredCounter("chain/logs/accepted", nil)
    93  	processedLogsCounter = metrics.NewRegisteredCounter("chain/logs/processed", nil)
    94  
    95  	ErrRefuseToCorruptArchiver = errors.New("node has operated with pruning disabled, shutting down to prevent missing tries")
    96  
    97  	errFutureBlockUnsupported  = errors.New("future block insertion not supported")
    98  	errCacheConfigNotSpecified = errors.New("must specify cache config")
    99  )
   100  
   101  const (
   102  	bodyCacheLimit           = 256
   103  	blockCacheLimit          = 256
   104  	receiptsCacheLimit       = 32
   105  	txLookupCacheLimit       = 1024
   106  	feeConfigCacheLimit      = 256
   107  	coinbaseConfigCacheLimit = 256
   108  	badBlockLimit            = 10
   109  
   110  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
   111  	//
   112  	// Changelog:
   113  	//
   114  	// - Version 4
   115  	//   The following incompatible database changes were added:
   116  	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
   117  	//   * the `Bloom` field of receipt is deleted
   118  	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
   119  	// - Version 5
   120  	//  The following incompatible database changes were added:
   121  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
   122  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
   123  	//      receipts' corresponding block
   124  	// - Version 6
   125  	//  The following incompatible database changes were added:
   126  	//    * Transaction lookup information stores the corresponding block number instead of block hash
   127  	// - Version 7
   128  	//  The following incompatible database changes were added:
   129  	//    * Use freezer as the ancient database to maintain all ancient data
   130  	// - Version 8
   131  	//  The following incompatible database changes were added:
   132  	//    * New scheme for contract code in order to separate the codes and trie nodes
   133  	BlockChainVersion uint64 = 8
   134  
   135  	// statsReportLimit is the time limit during import and export after which we
   136  	// always print out progress. This avoids the user wondering what's going on.
   137  	statsReportLimit = 8 * time.Second
   138  
   139  	// trieCleanCacheStatsNamespace is the namespace to surface stats from the trie
   140  	// clean cache's underlying fastcache.
   141  	trieCleanCacheStatsNamespace = "trie/memcache/clean/fastcache"
   142  )
   143  
   144  // cacheableFeeConfig encapsulates the fee configuration and the block number
   145  // at which it last changed, so they can be cached together.
   146  type cacheableFeeConfig struct {
   147  	feeConfig     commontype.FeeConfig
   148  	lastChangedAt *big.Int
   149  }
   150  
   151  // cacheableCoinbaseConfig encapsulates the coinbase address and the
   152  // allowFeeRecipients flag, so they can be cached together.
   153  type cacheableCoinbaseConfig struct {
   154  	coinbaseAddress    common.Address
   155  	allowFeeRecipients bool
   156  }
   157  
   158  // CacheConfig contains the configuration values for the trie caching/pruning
   159  // that's resident in a blockchain.
   160  type CacheConfig struct {
   161  	TrieCleanLimit                  int           // Memory allowance (MB) to use for caching trie nodes in memory
   162  	TrieCleanJournal                string        // Disk journal for saving clean cache entries.
   163  	TrieCleanRejournal              time.Duration // Time interval to dump clean cache to disk periodically
   164  	TrieDirtyLimit                  int           // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk
   165  	TrieDirtyCommitTarget           int           // Memory limit (MB) to target for the dirties cache before invoking commit
   166  	CommitInterval                  uint64        // Commit the trie every [CommitInterval] blocks.
   167  	Pruning                         bool          // Whether to prune old trie state; if false, the node keeps all tries (archive node)
   168  	AcceptorQueueLimit              int           // Blocks to queue before blocking during acceptance
   169  	PopulateMissingTries            *uint64       // If non-nil, sets the starting height for re-generating historical tries.
   170  	PopulateMissingTriesParallelism int           // Number of readers to use when trying to populate missing tries.
   171  	AllowMissingTries               bool          // Whether to allow a node that previously ran with pruning disabled (archive) to run with pruning enabled
   172  	SnapshotDelayInit               bool          // Whether to initialize snapshots on startup or wait for external call
   173  	SnapshotLimit                   int           // Memory allowance (MB) to use for caching snapshot entries in memory
   174  	SnapshotAsync                   bool          // Generate snapshot tree async
   175  	SnapshotVerify                  bool          // Verify generated snapshots
   176  	SkipSnapshotRebuild             bool          // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests)
   177  	Preimages                       bool          // Whether to store preimage of trie key to the disk
   178  	AcceptedCacheSize               int           // Depth of accepted headers cache and accepted logs cache at the accepted tip
   179  	TxLookupLimit                   uint64        // Number of recent blocks for which to maintain transaction lookup indices
   180  }
   181  
   182  var DefaultCacheConfig = &CacheConfig{
   183  	TrieCleanLimit:        256,
   184  	TrieDirtyLimit:        256,
   185  	TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB)
   186  	Pruning:               true,
   187  	CommitInterval:        4096,
   188  	AcceptorQueueLimit:    64, // Provides 2 minutes of buffer (2s block target) for a commit delay
   189  	SnapshotLimit:         256,
   190  	AcceptedCacheSize:     32,
   191  }
   192  
   193  // BlockChain represents the canonical chain given a database with a genesis
   194  // block. The BlockChain manages chain imports, reverts, and chain reorganisations.
   195  //
   196  // Importing blocks into the block chain happens according to the set of rules
   197  // defined by the two-stage Validator. Processing of blocks is done using the
   198  // Processor which processes the included transactions. The validation of the
   199  // state is done in the second part of the Validator. A failure results in
   200  // aborting the import.
   201  //
   202  // The BlockChain also helps in returning blocks from **any** chain included
   203  // in the database as well as blocks that represent the canonical chain. It's
   204  // important to note that GetBlock can return any block, which need not be on
   205  // the canonical chain, whereas GetBlockByNumber always represents the
   206  // canonical chain.
   207  type BlockChain struct {
   208  	chainConfig *params.ChainConfig // Chain & network configuration
   209  	cacheConfig *CacheConfig        // Cache configuration for pruning
   210  
   211  	db ethdb.Database // Low level persistent database to store final content in
   212  
   213  	snaps *snapshot.Tree // Snapshot tree for fast trie leaf access
   214  
   215  	hc                *HeaderChain
   216  	rmLogsFeed        event.Feed
   217  	chainFeed         event.Feed
   218  	chainSideFeed     event.Feed
   219  	chainHeadFeed     event.Feed
   220  	chainAcceptedFeed event.Feed
   221  	logsFeed          event.Feed
   222  	logsAcceptedFeed  event.Feed
   223  	blockProcFeed     event.Feed
   224  	txAcceptedFeed    event.Feed
   225  	scope             event.SubscriptionScope
   226  	genesisBlock      *types.Block
   227  
   228  	// This mutex synchronizes chain write operations.
   229  	// Readers don't need to take it, they can just read the database.
   230  	chainmu sync.RWMutex
   231  
   232  	currentBlock atomic.Value // Current head of the block chain
   233  
   234  	stateCache          state.Database // State database to reuse between imports (contains state cache)
   235  	stateManager        TrieWriter
   236  	bodyCache           *lru.Cache // Cache for the most recent block bodies
   237  	receiptsCache       *lru.Cache // Cache for the most recent receipts per block
   238  	blockCache          *lru.Cache // Cache for the most recent entire blocks
   239  	txLookupCache       *lru.Cache // Cache for the most recent transaction lookup data.
   240  	feeConfigCache      *lru.Cache // Cache for the most recent feeConfig lookup data.
   241  	coinbaseConfigCache *lru.Cache // Cache for the most recent coinbaseConfig lookup data.
   242  
   243  	running int32 // 0 if chain is running, 1 when stopped
   244  
   245  	engine     consensus.Engine
   246  	validator  Validator  // Block and state validator interface
   247  	prefetcher Prefetcher // Block state prefetcher interface
   248  	processor  Processor  // Block transaction processor interface
   249  	vmConfig   vm.Config
   250  
   251  	badBlocks *lru.Cache // Bad block cache
   252  
   253  	lastAccepted *types.Block // Prevents reorgs past this height
   254  
   255  	senderCacher *TxSenderCacher
   256  
   257  	// [acceptorQueue] is a processing queue for the Acceptor. This is
   258  	// different from [chainAcceptedFeed], which is sent an event after an accepted
   259  	// block is processed (after each loop of the accepted worker). If there is a
   260  	// clean shutdown, all items inserted into the [acceptorQueue] will be processed.
   261  	acceptorQueue chan *types.Block
   262  
   263  	// [acceptorClosingLock], and [acceptorClosed] are used
   264  	// to synchronize the closing of the [acceptorQueue] channel.
   265  	//
   266  	// Because we can't check if a channel is closed without reading from it
   267  	// (which we don't want to do as we may remove a processing block), we need
   268  	// to use a second variable to ensure we don't close a closed channel.
   269  	acceptorClosingLock sync.RWMutex
   270  	acceptorClosed      bool
   271  
   272  	// [acceptorWg] is used to wait for the acceptorQueue to clear. This is used
   273  	// during shutdown and in tests.
   274  	acceptorWg sync.WaitGroup
   275  
   276  	// [wg] is used to wait for the async blockchain processes to finish on shutdown.
   277  	wg sync.WaitGroup
   278  
   279  	// quit channel is used to listen for when the blockchain is shut down to close
   280  	// async processes.
   281  	// WaitGroups are used to ensure that async processes have finished during shutdown.
   282  	quit chan struct{}
   283  
   284  	// [acceptorTip] is the last block processed by the acceptor. This is
   285  	// returned as the LastAcceptedBlock() to ensure clients get only fully
   286  	// processed blocks. This may be equal to [lastAccepted].
   287  	acceptorTip     *types.Block
   288  	acceptorTipLock sync.Mutex
   289  
   290  	// [flattenLock] prevents the [acceptor] from flattening snapshots while
   291  	// a block is being verified.
   292  	flattenLock sync.Mutex
   293  
   294  	// [acceptedLogsCache] stores recently accepted logs to improve the performance of eth_getLogs.
   295  	acceptedLogsCache FIFOCache[common.Hash, [][]*types.Log]
   296  }
   297  
   298  // NewBlockChain returns a fully initialised block chain using information
   299  // available in the database. It initialises the default Ethereum Validator and
   300  // Processor.
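        //
        // A minimal usage sketch (illustrative only: it assumes an in-memory database
        // from rawdb and a consensus engine constructed elsewhere; helpers such as
        // rawdb.NewMemoryDatabase and params.TestChainConfig may differ in this fork):
        //
        //	db := rawdb.NewMemoryDatabase()
        //	bc, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig,
        //		engine, vm.Config{}, common.Hash{})
        //	if err != nil {
        //		// handle initialization error
        //	}
        //	defer bc.Stop()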
   301  func NewBlockChain(
   302  	db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine,
   303  	vmConfig vm.Config, lastAcceptedHash common.Hash,
   304  ) (*BlockChain, error) {
   305  	if cacheConfig == nil {
   306  		return nil, errCacheConfigNotSpecified
   307  	}
   308  	bodyCache, _ := lru.New(bodyCacheLimit)
   309  	receiptsCache, _ := lru.New(receiptsCacheLimit)
   310  	blockCache, _ := lru.New(blockCacheLimit)
   311  	txLookupCache, _ := lru.New(txLookupCacheLimit)
   312  	feeConfigCache, _ := lru.New(feeConfigCacheLimit)
   313  	coinbaseConfigCache, _ := lru.New(coinbaseConfigCacheLimit)
   314  	badBlocks, _ := lru.New(badBlockLimit)
   315  
   316  	bc := &BlockChain{
   317  		chainConfig: chainConfig,
   318  		cacheConfig: cacheConfig,
   319  		db:          db,
   320  		stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
   321  			Cache:       cacheConfig.TrieCleanLimit,
   322  			Journal:     cacheConfig.TrieCleanJournal,
   323  			Preimages:   cacheConfig.Preimages,
   324  			StatsPrefix: trieCleanCacheStatsNamespace,
   325  		}),
   326  		bodyCache:           bodyCache,
   327  		receiptsCache:       receiptsCache,
   328  		blockCache:          blockCache,
   329  		txLookupCache:       txLookupCache,
   330  		feeConfigCache:      feeConfigCache,
   331  		coinbaseConfigCache: coinbaseConfigCache,
   332  		engine:              engine,
   333  		vmConfig:            vmConfig,
   334  		badBlocks:           badBlocks,
   335  		senderCacher:        newTxSenderCacher(runtime.NumCPU()),
   336  		acceptorQueue:       make(chan *types.Block, cacheConfig.AcceptorQueueLimit),
   337  		quit:                make(chan struct{}),
   338  		acceptedLogsCache:   NewFIFOCache[common.Hash, [][]*types.Log](cacheConfig.AcceptedCacheSize),
   339  	}
   340  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   341  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   342  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   343  
   344  	var err error
   345  	bc.hc, err = NewHeaderChain(db, chainConfig, cacheConfig, engine)
   346  	if err != nil {
   347  		return nil, err
   348  	}
   349  	bc.genesisBlock = bc.GetBlockByNumber(0)
   350  	if bc.genesisBlock == nil {
   351  		return nil, ErrNoGenesis
   352  	}
   353  
   354  	var nilBlock *types.Block
   355  	bc.currentBlock.Store(nilBlock)
   356  
   357  	// Create the state manager
   358  	bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), cacheConfig)
   359  
   360  	// loadLastState writes indices, so we should start the tx indexer after that.
   361  	// Start tx indexer/unindexer here.
   362  	if bc.cacheConfig.TxLookupLimit != 0 {
   363  		bc.wg.Add(1)
   364  		go bc.dispatchTxUnindexer()
   365  	}
   366  
   367  	// Re-generate current block state if it is missing
   368  	if err := bc.loadLastState(lastAcceptedHash); err != nil {
   369  		return nil, err
   370  	}
   371  
   372  	// After loading the last state (and reprocessing if necessary), we are
   373  	// guaranteed that [acceptorTip] is equal to [lastAccepted].
   374  	//
   375  	// It is critical to update this value before performing any state repairs so
   376  	// that all accepted blocks can be considered.
   377  	bc.acceptorTip = bc.lastAccepted
   378  
   379  	// Make sure the state associated with the block is available
   380  	head := bc.CurrentBlock()
   381  	if !bc.HasState(head.Root()) {
   382  		return nil, fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash())
   383  	}
   384  
   385  	if err := bc.protectTrieIndex(); err != nil {
   386  		return nil, err
   387  	}
   388  
   389  	// Populate missing tries if required
   390  	if err := bc.populateMissingTries(); err != nil {
   391  		return nil, fmt.Errorf("could not populate missing tries: %v", err)
   392  	}
   393  
   394  	// If snapshot initialization is delayed for fast sync, skip initializing it here.
   395  	// This assumes that no blocks will be processed until ResetState is called to
   396  	// initialize the fast-synced state.
   397  	if !bc.cacheConfig.SnapshotDelayInit {
   398  		// Load any existing snapshot, regenerating it if loading failed (if not
   399  		// already initialized in recovery)
   400  		bc.initSnapshot(head)
   401  	}
   402  
   403  	// Warm up [hc.acceptedNumberCache] and [acceptedLogsCache]
   404  	bc.warmAcceptedCaches()
   405  
   406  	// Start processing accepted blocks effects in the background
   407  	go bc.startAcceptor()
   408  
   409  	// If periodic cache journal is required, spin it up.
   410  	if bc.cacheConfig.TrieCleanRejournal > 0 && len(bc.cacheConfig.TrieCleanJournal) > 0 {
   411  		log.Info("Starting to save trie clean cache periodically", "journalDir", bc.cacheConfig.TrieCleanJournal, "freq", bc.cacheConfig.TrieCleanRejournal)
   412  
   413  		triedb := bc.stateCache.TrieDB()
   414  		bc.wg.Add(1)
   415  		go func() {
   416  			defer bc.wg.Done()
   417  			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
   418  		}()
   419  	}
   420  
   421  	return bc, nil
   422  }
   423  
   424  // dispatchTxUnindexer is responsible for the deletion of the
   425  // transaction index.
   426  // Invariant: If TxLookupLimit is 0, all tx indices will be preserved, and this
   427  // function should never be called.
   428  func (bc *BlockChain) dispatchTxUnindexer() {
   429  	defer bc.wg.Done()
   430  	txLookupLimit := bc.cacheConfig.TxLookupLimit
   431  
   432  	// If the user just upgraded to a new version which supports transaction
   433  	// index pruning, write the new tail and remove anything older.
   434  	if rawdb.ReadTxIndexTail(bc.db) == nil {
   435  		rawdb.WriteTxIndexTail(bc.db, 0)
   436  	}
   437  
   438  	// unindexBlocks unindexes transactions that fall outside the configured lookup window
   439  	unindexBlocks := func(tail uint64, head uint64, done chan struct{}) {
   440  		start := time.Now()
   441  		defer func() {
   442  			txUnindexTimer.Inc(time.Since(start).Milliseconds())
   443  			done <- struct{}{}
   444  		}()
   445  
   446  		// Update the transaction index to the new chain state
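        		// For example, with txLookupLimit = 1024, head = 5000, and tail = 0,
        		// head-txLookupLimit+1 = 3977, so the stale indices for blocks
        		// [0, 3977) are removed and the index tail advances to 3977.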
   447  		if head-txLookupLimit+1 >= tail {
   448  			// Unindex a part of stale indices and forward index tail to HEAD-limit
   449  			rawdb.UnindexTransactions(bc.db, tail, head-txLookupLimit+1, bc.quit)
   450  		}
   451  	}
   452  	// Any reindexing done, start listening to chain events and moving the index window
   453  	var (
   454  		done   chan struct{}              // Non-nil if background unindexing or reindexing routine is active.
   455  		headCh = make(chan ChainEvent, 1) // Buffered to avoid locking up the event feed
   456  	)
   457  	sub := bc.SubscribeChainAcceptedEvent(headCh)
   458  	if sub == nil {
   459  		log.Warn("could not create chain accepted subscription to unindex txs")
   460  		return
   461  	}
   462  	defer sub.Unsubscribe()
   463  
   464  	for {
   465  		select {
   466  		case head := <-headCh:
   467  			headNum := head.Block.NumberU64()
   468  			if headNum < txLookupLimit {
   469  				break
   470  			}
   471  
   472  			if done == nil {
   473  				done = make(chan struct{})
   474  				// Note: tail will not be nil since it is initialized in this function.
   475  				tail := rawdb.ReadTxIndexTail(bc.db)
   476  				go unindexBlocks(*tail, headNum, done)
   477  			}
   478  		case <-done:
   479  			done = nil
   480  		case <-bc.quit:
   481  			if done != nil {
   482  				log.Info("Waiting for background transaction indexer to exit")
   483  				<-done
   484  			}
   485  			return
   486  		}
   487  	}
   488  }
   489  
   490  // writeBlockAcceptedIndices writes any indices that must be persisted for an accepted block.
   491  // This includes the following:
   492  // - transaction lookup indices
   493  // - updating the acceptor tip index
   494  func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block) error {
   495  	batch := bc.db.NewBatch()
   496  	rawdb.WriteTxLookupEntriesByBlock(batch, b)
   497  	if err := rawdb.WriteAcceptorTip(batch, b.Hash()); err != nil {
   498  		return fmt.Errorf("%w: failed to write acceptor tip key", err)
   499  	}
   500  	if err := batch.Write(); err != nil {
   501  		return fmt.Errorf("%w: failed to write tx lookup entries batch", err)
   502  	}
   503  	return nil
   504  }
   505  
   506  // flattenSnapshot attempts to flatten the snapshot diff layer of block [hash] to disk.
   507  func (bc *BlockChain) flattenSnapshot(postAbortWork func() error, hash common.Hash) error {
   508  	// If snapshots are not initialized, perform [postAbortWork] immediately.
   509  	if bc.snaps == nil {
   510  		return postAbortWork()
   511  	}
   512  
   513  	// Abort snapshot generation before pruning anything from trie database
   514  	// (could occur in AcceptTrie)
   515  	bc.snaps.AbortGeneration()
   516  
   517  	// Perform work after snapshot generation is aborted (typically trie updates)
   518  	if err := postAbortWork(); err != nil {
   519  		return err
   520  	}
   521  
   522  	// Ensure we avoid flattening the snapshot while we are processing a block, or
   523  	// block execution will fall back to reading from the trie (which is much
   524  	// slower).
   525  	bc.flattenLock.Lock()
   526  	defer bc.flattenLock.Unlock()
   527  
   528  	// Flatten the entire snap Trie to disk
   529  	//
   530  	// Note: This resumes snapshot generation.
   531  	return bc.snaps.Flatten(hash)
   532  }
   533  
   534  // warmAcceptedCaches fetches previously accepted headers and logs from disk to
   535  // pre-populate [hc.acceptedNumberCache] and [acceptedLogsCache].
   536  func (bc *BlockChain) warmAcceptedCaches() {
   537  	var (
   538  		startTime       = time.Now()
   539  		lastAccepted    = bc.LastAcceptedBlock().NumberU64()
   540  		startIndex      = uint64(1)
   541  		targetCacheSize = uint64(bc.cacheConfig.AcceptedCacheSize)
   542  	)
   543  	if targetCacheSize == 0 {
   544  		log.Info("Not warming accepted cache because disabled")
   545  		return
   546  	}
   547  	if lastAccepted < startIndex {
   548  		// This could occur if we haven't accepted any blocks yet
   549  		log.Info("Not warming accepted cache because there are no accepted blocks")
   550  		return
   551  	}
   552  	cacheDiff := targetCacheSize - 1 // last accepted lookback is inclusive, so we reduce size by 1
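        	// For example, with the default AcceptedCacheSize of 32 and lastAccepted at
        	// height 100, cacheDiff is 31 and startIndex becomes 69, so the 32 most
        	// recently accepted headers and logs are warmed below.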
   553  	if cacheDiff < lastAccepted {
   554  		startIndex = lastAccepted - cacheDiff
   555  	}
   556  	for i := startIndex; i <= lastAccepted; i++ {
   557  		header := bc.GetHeaderByNumber(i)
   558  		if header == nil {
   559  			// This could happen if a node state-synced
   560  			log.Info("Exiting accepted cache warming early because header is nil", "height", i, "t", time.Since(startTime))
   561  			break
   562  		}
   563  		bc.hc.acceptedNumberCache.Put(header.Number.Uint64(), header)
   564  		bc.acceptedLogsCache.Put(header.Hash(), rawdb.ReadLogs(bc.db, header.Hash(), header.Number.Uint64()))
   565  	}
   566  	log.Info("Warmed accepted caches", "start", startIndex, "end", lastAccepted, "t", time.Since(startTime))
   567  }
   568  
   569  // startAcceptor starts processing items on the [acceptorQueue]. It exits once
   570  // the [acceptorQueue] channel is closed (see [stopAcceptor]).
   571  func (bc *BlockChain) startAcceptor() {
   572  	log.Info("Starting Acceptor", "queue length", bc.cacheConfig.AcceptorQueueLimit)
   573  
   574  	for next := range bc.acceptorQueue {
   575  		start := time.Now()
   576  		acceptorQueueGauge.Dec(1)
   577  
   578  		if err := bc.flattenSnapshot(func() error {
   579  			return bc.stateManager.AcceptTrie(next)
   580  		}, next.Hash()); err != nil {
   581  			log.Crit("unable to flatten snapshot from acceptor", "blockHash", next.Hash(), "err", err)
   582  		}
   583  
   584  		// Update last processed and transaction lookup index
   585  		if err := bc.writeBlockAcceptedIndices(next); err != nil {
   586  			log.Crit("failed to write accepted block effects", "err", err)
   587  		}
   588  
   589  		// Ensure [hc.acceptedNumberCache] and [acceptedLogsCache] have latest content
   590  		bc.hc.acceptedNumberCache.Put(next.NumberU64(), next.Header())
   591  		logs := rawdb.ReadLogs(bc.db, next.Hash(), next.NumberU64())
   592  		bc.acceptedLogsCache.Put(next.Hash(), logs)
   593  
   594  		// Update accepted feeds
   595  		flattenedLogs := types.FlattenLogs(logs)
   596  		bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: flattenedLogs})
   597  		if len(flattenedLogs) > 0 {
   598  			bc.logsAcceptedFeed.Send(flattenedLogs)
   599  		}
   600  		if len(next.Transactions()) != 0 {
   601  			bc.txAcceptedFeed.Send(NewTxsEvent{next.Transactions()})
   602  		}
   603  
   604  		bc.acceptorTipLock.Lock()
   605  		bc.acceptorTip = next
   606  		bc.acceptorTipLock.Unlock()
   607  		bc.acceptorWg.Done()
   608  
   609  		acceptorWorkTimer.Inc(time.Since(start).Milliseconds())
   610  		acceptorWorkCount.Inc(1)
   611  		// Note: in contrast to most accepted metrics, the accepted log metrics are
   612  		// incremented in the acceptor because the logs are read and processed here.
   613  		acceptedLogsCounter.Inc(int64(len(logs)))
   614  	}
   615  }
   616  
   617  // addAcceptorQueue adds a new *types.Block to the [acceptorQueue]. This will
   618  // block if there are [AcceptorQueueLimit] items in [acceptorQueue].
   619  func (bc *BlockChain) addAcceptorQueue(b *types.Block) {
   620  	// We only acquire a read lock here because it is ok to add items to the
   621  	// [acceptorQueue] concurrently.
   622  	bc.acceptorClosingLock.RLock()
   623  	defer bc.acceptorClosingLock.RUnlock()
   624  
   625  	if bc.acceptorClosed {
   626  		return
   627  	}
   628  
   629  	acceptorQueueGauge.Inc(1)
   630  	bc.acceptorWg.Add(1)
   631  	bc.acceptorQueue <- b
   632  }
   633  
   634  // DrainAcceptorQueue blocks until all items in [acceptorQueue] have been
   635  // processed.
   636  func (bc *BlockChain) DrainAcceptorQueue() {
   637  	bc.acceptorClosingLock.RLock()
   638  	defer bc.acceptorClosingLock.RUnlock()
   639  
   640  	if bc.acceptorClosed {
   641  		return
   642  	}
   643  
   644  	bc.acceptorWg.Wait()
   645  }
   646  
   647  // stopAcceptor sends a signal to the Acceptor to stop processing accepted
   648  // blocks. The Acceptor will exit once all items in [acceptorQueue] have been
   649  // processed.
   650  func (bc *BlockChain) stopAcceptor() {
   651  	bc.acceptorClosingLock.Lock()
   652  	defer bc.acceptorClosingLock.Unlock()
   653  
   654  	// If [acceptorClosed] is already true, we should just return here instead
   655  	// of attempting to close [acceptorQueue] more than once (which would cause
   656  	// a panic).
   657  	//
   658  	// This typically happens when a test calls [stopAcceptor] directly (prior to
   659  	// shutdown) and then [stopAcceptor] is called again in shutdown.
   660  	if bc.acceptorClosed {
   661  		return
   662  	}
   663  
   664  	// Although nothing should be added to [acceptorQueue] after
   665  	// [acceptorClosed] is updated, we close the channel so the Acceptor
   666  	// goroutine exits.
   667  	bc.acceptorWg.Wait()
   668  	bc.acceptorClosed = true
   669  	close(bc.acceptorQueue)
   670  }
   671  
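        // InitializeSnapshots initializes the snapshot tree at the current head block.
        // Assumes [bc.chainmu] is not held by the caller.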
   672  func (bc *BlockChain) InitializeSnapshots() {
   673  	bc.chainmu.Lock()
   674  	defer bc.chainmu.Unlock()
   675  
   676  	head := bc.CurrentBlock()
   677  	bc.initSnapshot(head)
   678  }
   679  
   680  // SenderCacher returns the *TxSenderCacher used within the core package.
   681  func (bc *BlockChain) SenderCacher() *TxSenderCacher {
   682  	return bc.senderCacher
   683  }
   684  
   685  // loadLastState loads the last known chain state from the database. This method
   686  // assumes that the chain manager mutex is held.
   687  func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error {
   688  	// Initialize genesis state
   689  	if lastAcceptedHash == (common.Hash{}) {
   690  		return bc.loadGenesisState()
   691  	}
   692  
   693  	// Restore the last known head block
   694  	head := rawdb.ReadHeadBlockHash(bc.db)
   695  	if head == (common.Hash{}) {
   696  		return errors.New("could not read head block hash")
   697  	}
   698  	// Make sure the entire head block is available
   699  	currentBlock := bc.GetBlockByHash(head)
   700  	if currentBlock == nil {
   701  		return fmt.Errorf("could not load head block %s", head.Hex())
   702  	}
   703  	// Everything seems to be fine, set as the head block
   704  	bc.currentBlock.Store(currentBlock)
   705  
   706  	// Restore the last known head header
   707  	currentHeader := currentBlock.Header()
   708  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   709  		if header := bc.GetHeaderByHash(head); header != nil {
   710  			currentHeader = header
   711  		}
   712  	}
   713  	bc.hc.SetCurrentHeader(currentHeader)
   714  
   715  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
   716  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
   717  
   718  	// Next, set the last accepted block and perform a re-org if necessary.
   719  	bc.lastAccepted = bc.GetBlockByHash(lastAcceptedHash)
   720  	if bc.lastAccepted == nil {
   721  		return errors.New("could not load last accepted block")
   722  	}
   723  
   724  	// This ensures that the head block is updated to the last accepted block on startup
   725  	if err := bc.setPreference(bc.lastAccepted); err != nil {
   726  		return fmt.Errorf("failed to set preference to last accepted block while loading last state: %w", err)
   727  	}
   728  
   729  	// reprocessState is necessary to ensure that the last accepted state is
   730  	// available. The state may not be available if it was not committed due
   731  	// to an unclean shutdown.
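        	// With the default CommitInterval of 4096, this reprocesses up to
        	// 2*4096 = 8192 blocks to recover the last accepted state.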
   732  	return bc.reprocessState(bc.lastAccepted, 2*bc.cacheConfig.CommitInterval)
   733  }
   734  
   735  func (bc *BlockChain) loadGenesisState() error {
   736  	// Prepare the genesis block and reinitialise the chain
   737  	batch := bc.db.NewBatch()
   738  	rawdb.WriteBlock(batch, bc.genesisBlock)
   739  	if err := batch.Write(); err != nil {
   740  		log.Crit("Failed to write genesis block", "err", err)
   741  	}
   742  	bc.writeHeadBlock(bc.genesisBlock)
   743  
   744  	// Last update all in-memory chain markers
   745  	bc.lastAccepted = bc.genesisBlock
   746  	bc.currentBlock.Store(bc.genesisBlock)
   747  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   748  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   749  	return nil
   750  }
   751  
   752  // Export writes the active chain to the given writer.
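        //
        // For example (a sketch; any io.Writer works, such as an *os.File):
        //
        //	f, err := os.Create("chain.rlp")
        //	if err != nil {
        //		return err
        //	}
        //	defer f.Close()
        //	return bc.Export(f)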
   753  func (bc *BlockChain) Export(w io.Writer) error {
   754  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   755  }
   756  
   757  // ExportN writes a subset of the active chain to the given writer.
   758  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   759  	return bc.ExportCallback(func(block *types.Block) error {
   760  		return block.EncodeRLP(w)
   761  	}, first, last)
   762  }
   763  
   764  // ExportCallback invokes [callback] for every block from [first] to [last] in order.
   765  func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, first uint64, last uint64) error {
   766  	if first > last {
   767  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   768  	}
   769  	log.Info("Exporting batch of blocks", "count", last-first+1)
   770  
   771  	var (
   772  		parentHash common.Hash
   773  		start      = time.Now()
   774  		reported   = time.Now()
   775  	)
   776  	for nr := first; nr <= last; nr++ {
   777  		block := bc.GetBlockByNumber(nr)
   778  		if block == nil {
   779  			return fmt.Errorf("export failed on #%d: not found", nr)
   780  		}
   781  		if nr > first && block.ParentHash() != parentHash {
   782  			return fmt.Errorf("export failed: chain reorg during export")
   783  		}
   784  		parentHash = block.Hash()
   785  		if err := callback(block); err != nil {
   786  			return err
   787  		}
   788  		if time.Since(reported) >= statsReportLimit {
   789  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   790  			reported = time.Now()
   791  		}
   792  	}
   793  	return nil
   794  }
   795  
   796  // writeHeadBlock injects a new head block into the current block chain. This method
   797  // assumes that the block is indeed a true head. It will also reset the head
   798  // header to this very same block if it is older or on a different side chain.
   799  //
   800  // Note: this function assumes that the `chainmu` mutex is held!
   801  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
   802  	// If the block is on a side chain or an unknown one, force other heads onto it too
   803  	// Add the block to the canonical chain number scheme and mark as the head
   804  	batch := bc.db.NewBatch()
   805  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
   806  
   807  	rawdb.WriteHeadBlockHash(batch, block.Hash())
   808  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
   809  
   810  	// Flush the whole batch into the disk, exit the node if failed
   811  	if err := batch.Write(); err != nil {
   812  		log.Crit("Failed to update chain indexes and markers", "err", err)
   813  	}
   814  	// Update all in-memory chain markers in the last step
   815  	bc.hc.SetCurrentHeader(block.Header())
   816  	bc.currentBlock.Store(block)
   817  }
   818  
   819  // ValidateCanonicalChain confirms a canonical chain is well-formed.
   820  func (bc *BlockChain) ValidateCanonicalChain() error {
   821  	// Ensure all accepted blocks are fully processed
   822  	bc.DrainAcceptorQueue()
   823  
   824  	current := bc.CurrentBlock()
   825  	i := 0
   826  	log.Info("Beginning to validate canonical chain", "startBlock", current.NumberU64())
   827  
   828  	for current.Hash() != bc.genesisBlock.Hash() {
   829  		blkByHash := bc.GetBlockByHash(current.Hash())
   830  		if blkByHash == nil {
   831  			return fmt.Errorf("couldn't find block by hash %s at height %d", current.Hash().String(), current.Number())
   832  		}
   833  		if blkByHash.Hash() != current.Hash() {
   834  			return fmt.Errorf("blockByHash returned a block with an unexpected hash: %s, expected: %s", blkByHash.Hash().String(), current.Hash().String())
   835  		}
   836  		blkByNumber := bc.GetBlockByNumber(current.Number().Uint64())
   837  		if blkByNumber == nil {
   838  			return fmt.Errorf("couldn't find block by number at height %d", current.Number())
   839  		}
   840  		if blkByNumber.Hash() != current.Hash() {
   841  			return fmt.Errorf("blockByNumber returned a block with unexpected hash: %s, expected: %s", blkByNumber.Hash().String(), current.Hash().String())
   842  		}
   843  
   844  		hdrByHash := bc.GetHeaderByHash(current.Hash())
   845  		if hdrByHash == nil {
   846  			return fmt.Errorf("couldn't find block header by hash %s at height %d", current.Hash().String(), current.Number())
   847  		}
   848  		if hdrByHash.Hash() != current.Hash() {
   849  			return fmt.Errorf("hdrByHash returned a block header with an unexpected hash: %s, expected: %s", hdrByHash.Hash().String(), current.Hash().String())
   850  		}
   851  		hdrByNumber := bc.GetHeaderByNumber(current.Number().Uint64())
   852  		if hdrByNumber == nil {
   853  			return fmt.Errorf("couldn't find block header by number at height %d", current.Number())
   854  		}
   855  		if hdrByNumber.Hash() != current.Hash() {
   856  			return fmt.Errorf("hdrByNumber returned a block header with unexpected hash: %s, expected: %s", hdrByNumber.Hash().String(), current.Hash().String())
   857  		}
   858  
   859  		txs := current.Body().Transactions
   860  
   861  		// Transactions are only indexed beneath the last accepted block, so we only
   862  		// check that the transactions have been indexed if the block is below the
   863  		// last accepted block and within the lookup limit.
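        		// For example, with TxLookupLimit = 1024 and lastAccepted at height 5000,
        		// transaction indices are only expected for blocks above height 3976.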
   864  		shouldIndexTxs := bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.NumberU64()+bc.cacheConfig.TxLookupLimit
   865  		if current.NumberU64() <= bc.lastAccepted.NumberU64() && shouldIndexTxs {
   866  			// Ensure that all of the transactions have been stored correctly in the canonical
   867  			// chain
   868  			for txIndex, tx := range txs {
   869  				txLookup := bc.GetTransactionLookup(tx.Hash())
   870  				if txLookup == nil {
   871  					return fmt.Errorf("failed to find transaction %s", tx.Hash().String())
   872  				}
   873  				if txLookup.BlockHash != current.Hash() {
   874  					return fmt.Errorf("tx lookup returned with incorrect block hash: %s, expected: %s", txLookup.BlockHash.String(), current.Hash().String())
   875  				}
   876  				if txLookup.BlockIndex != current.Number().Uint64() {
   877  					return fmt.Errorf("tx lookup returned with incorrect block index: %d, expected: %d", txLookup.BlockIndex, current.Number().Uint64())
   878  				}
   879  				if txLookup.Index != uint64(txIndex) {
   880  					return fmt.Errorf("tx lookup returned with incorrect transaction index: %d, expected: %d", txLookup.Index, txIndex)
   881  				}
   882  			}
   883  		}
   884  
   885  		blkReceipts := bc.GetReceiptsByHash(current.Hash())
   886  		if blkReceipts.Len() != len(txs) {
   887  			return fmt.Errorf("found %d transaction receipts, expected %d", blkReceipts.Len(), len(txs))
   888  		}
   889  		for index, txReceipt := range blkReceipts {
   890  			if txReceipt.TxHash != txs[index].Hash() {
   891  				return fmt.Errorf("transaction receipt mismatch, expected %s, but found: %s", txs[index].Hash().String(), txReceipt.TxHash.String())
   892  			}
   893  			if txReceipt.BlockHash != current.Hash() {
   894  				return fmt.Errorf("transaction receipt had block hash %s, but expected %s", txReceipt.BlockHash.String(), current.Hash().String())
   895  			}
   896  			if txReceipt.BlockNumber.Uint64() != current.NumberU64() {
   897  				return fmt.Errorf("transaction receipt had block number %d, but expected %d", txReceipt.BlockNumber.Uint64(), current.NumberU64())
   898  			}
   899  		}
   900  
   901  		i += 1
   902  		if i%1000 == 0 {
   903  			log.Info("Validate Canonical Chain Update", "totalBlocks", i)
   904  		}
   905  
   906  		parent := bc.GetBlockByHash(current.ParentHash())
   907  		if parent.Hash() != current.ParentHash() {
   908  			return fmt.Errorf("getBlockByHash retrieved parent block with incorrect hash, found %s, expected: %s", parent.Hash().String(), current.ParentHash().String())
   909  		}
   910  		current = parent
   911  	}
   912  
   913  	return nil
   914  }
   915  
   916  // Stop stops the blockchain service. If any imports are currently in progress
   917  // it will abort them using the procInterrupt.
   918  func (bc *BlockChain) Stop() {
   919  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   920  		return
   921  	}
   922  
   923  	log.Info("Closing quit channel")
   924  	close(bc.quit)
   925  	// Wait for accepted feed to process all remaining items
   926  	log.Info("Stopping Acceptor")
   927  	start := time.Now()
   928  	bc.stopAcceptor()
   929  	log.Info("Acceptor queue drained", "t", time.Since(start))
   930  
   931  	log.Info("Shutting down state manager")
   932  	start = time.Now()
   933  	if err := bc.stateManager.Shutdown(); err != nil {
   934  		log.Error("Failed to Shutdown state manager", "err", err)
   935  	}
   936  	log.Info("State manager shut down", "t", time.Since(start))
   937  	// Flush the collected preimages to disk
   938  	if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil {
   939  		log.Error("Failed to commit trie preimages", "err", err)
   940  	}
   941  
   942  	// Stop senderCacher's goroutines
   943  	log.Info("Shutting down sender cacher")
   944  	bc.senderCacher.Shutdown()
   945  
   946  	// Unsubscribe all subscriptions registered from blockchain.
   947  	log.Info("Closing scope")
   948  	bc.scope.Close()
   949  
   950  	// Waiting for background processes to complete
   951  	log.Info("Waiting for background processes to complete")
   952  	bc.wg.Wait()
   953  
   954  	log.Info("Blockchain stopped")
   955  }
   956  
   957  // SetPreference attempts to update the head block to be the provided block and
   958  // emits a ChainHeadEvent if successful. This function will handle all reorg
   959  // side effects, if necessary.
   960  //
   961  // Note: This function should ONLY be called on blocks that have already been
   962  // inserted into the chain.
   963  //
   964  // Assumes [bc.chainmu] is not held by the caller.
   965  func (bc *BlockChain) SetPreference(block *types.Block) error {
   966  	bc.chainmu.Lock()
   967  	defer bc.chainmu.Unlock()
   968  
   969  	return bc.setPreference(block)
   970  }
   971  
   972  // setPreference attempts to update the head block to be the provided block and
   973  // emits a ChainHeadEvent if successful. This function will handle all reorg
   974  // side effects, if necessary.
   975  //
   976  // Assumes [bc.chainmu] is held by the caller.
   977  func (bc *BlockChain) setPreference(block *types.Block) error {
   978  	current := bc.CurrentBlock()
   979  
   980  	// Return early if the current block is already the block
   981  	// we are trying to write.
   982  	if current.Hash() == block.Hash() {
   983  		return nil
   984  	}
   985  
   986  	log.Debug("Setting preference", "number", block.Number(), "hash", block.Hash())
   987  
   988  	// writeKnownBlock updates the head block and will handle any reorg side
   989  	// effects automatically.
   990  	if err := bc.writeKnownBlock(block); err != nil {
   991  		return fmt.Errorf("unable to invoke writeKnownBlock: %w", err)
   992  	}
   993  
   994  	// Send a ChainHeadEvent if we end up altering
   995  	// the head block. Many internal async processes rely on
   996  	// receiving these events (e.g. the TxPool).
   997  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   998  	return nil
   999  }
  1000  
  1001  // LastConsensusAcceptedBlock returns the last block to be marked as accepted.
  1002  // It may or may not yet be processed.
  1003  func (bc *BlockChain) LastConsensusAcceptedBlock() *types.Block {
  1004  	bc.chainmu.Lock()
  1005  	defer bc.chainmu.Unlock()
  1006  
  1007  	return bc.lastAccepted
  1008  }
  1009  
  1010  // LastAcceptedBlock returns the last block to be marked as accepted that has
  1011  // also been fully processed.
  1012  //
  1013  // Note: During initialization, [acceptorTip] is equal to [lastAccepted].
  1014  func (bc *BlockChain) LastAcceptedBlock() *types.Block {
  1015  	bc.acceptorTipLock.Lock()
  1016  	defer bc.acceptorTipLock.Unlock()
  1017  
  1018  	return bc.acceptorTip
  1019  }
  1020  
  1021  // Accept sets a minimum height at which no reorg can pass. Additionally,
  1022  // this function may trigger a reorg if the block being accepted is not in the
  1023  // canonical chain.
  1024  //
  1025  // Assumes [bc.chainmu] is not held by the caller.
  1026  func (bc *BlockChain) Accept(block *types.Block) error {
  1027  	bc.chainmu.Lock()
  1028  	defer bc.chainmu.Unlock()
  1029  
  1030  	// The parent of [block] must be the last accepted block.
  1031  	if bc.lastAccepted.Hash() != block.ParentHash() {
  1032  		return fmt.Errorf(
  1033  			"expected accepted block to have parent %s:%d but got %s:%d",
  1034  			bc.lastAccepted.Hash().Hex(),
  1035  			bc.lastAccepted.NumberU64(),
  1036  			block.ParentHash().Hex(),
  1037  			block.NumberU64()-1,
  1038  		)
  1039  	}
  1040  
  1041  	// If the canonical hash at the block height does not match the block we are
  1042  	// accepting, we need to trigger a reorg.
  1043  	canonical := bc.GetCanonicalHash(block.NumberU64())
  1044  	if canonical != block.Hash() {
  1045  		log.Debug("Accepting block in non-canonical chain", "number", block.Number(), "hash", block.Hash())
  1046  		if err := bc.setPreference(block); err != nil {
  1047  			return fmt.Errorf("could not set block %d:%s as preferred: %w", block.Number(), block.Hash(), err)
  1048  		}
  1049  	}
  1050  
  1051  	// Enqueue block in the acceptor
  1052  	bc.lastAccepted = block
  1053  	bc.addAcceptorQueue(block)
  1054  	acceptedBlockGasUsedCounter.Inc(int64(block.GasUsed()))
  1055  	acceptedTxsCounter.Inc(int64(len(block.Transactions())))
  1056  	return nil
  1057  }
  1058  
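        // Reject discards the block's trie and snapshot diff layer and removes the
        // block from disk, as its data is no longer needed.
        //
        // Assumes [bc.chainmu] is not held by the caller.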
  1059  func (bc *BlockChain) Reject(block *types.Block) error {
  1060  	bc.chainmu.Lock()
  1061  	defer bc.chainmu.Unlock()
  1062  
  1063  	// Reject Trie
  1064  	if err := bc.stateManager.RejectTrie(block); err != nil {
  1065  		return fmt.Errorf("unable to reject trie: %w", err)
  1066  	}
  1067  
  1068  	if bc.snaps != nil {
  1069  		if err := bc.snaps.Discard(block.Hash()); err != nil {
  1070  			log.Error("unable to discard snap from rejected block", "block", block.Hash(), "number", block.NumberU64(), "root", block.Root())
  1071  		}
  1072  	}
  1073  
  1074  	// Remove the block since its data is no longer needed
  1075  	batch := bc.db.NewBatch()
  1076  	rawdb.DeleteBlock(batch, block.Hash(), block.NumberU64())
  1077  	if err := batch.Write(); err != nil {
  1078  		return fmt.Errorf("failed to write delete block batch: %w", err)
  1079  	}
  1080  
  1081  	return nil
  1082  }
  1083  
  1084  // writeKnownBlock updates the chain head to the given known block
  1085  // and performs a chain reorg if necessary.
  1086  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1087  	current := bc.CurrentBlock()
  1088  	if block.ParentHash() != current.Hash() {
  1089  		if err := bc.reorg(current, block); err != nil {
  1090  			return err
  1091  		}
  1092  	}
  1093  	bc.writeHeadBlock(block)
  1094  	return nil
  1095  }
  1096  
  1097  // writeCanonicalBlockWithLogs writes the new head [block] and emits events
  1098  // for the new head block.
  1099  func (bc *BlockChain) writeCanonicalBlockWithLogs(block *types.Block, logs []*types.Log) {
  1100  	bc.writeHeadBlock(block)
  1101  	bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
  1102  	if len(logs) > 0 {
  1103  		bc.logsFeed.Send(logs)
  1104  	}
  1105  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1106  }
  1107  
  1108  // newTip returns a boolean indicating if the block should be appended to
  1109  // the canonical chain.
  1110  func (bc *BlockChain) newTip(block *types.Block) bool {
  1111  	return block.ParentHash() == bc.CurrentBlock().Hash()
  1112  }
  1113  
  1114  // writeBlockAndSetHead persists the block and associated state to the database
  1115  // and optimistically updates the canonical chain if [block] extends the current
  1116  // canonical chain.
  1117  // writeBlockAndSetHead expects to be the last verification step during InsertBlock
  1118  // since it creates a reference that will only be cleaned up by Accept/Reject.
  1119  func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
  1120  	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
  1121  		return err
  1122  	}
  1123  
  1124  	// If [block] represents a new tip of the canonical chain, we optimistically add it before
  1125  	// setPreference is called. Otherwise, we consider it a side chain block.
  1126  	if bc.newTip(block) {
  1127  		bc.writeCanonicalBlockWithLogs(block, logs)
  1128  	} else {
  1129  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1130  	}
  1131  
  1132  	return nil
  1133  }
  1134  
  1135  // writeBlockWithState writes the block and all associated state to the database,
  1136  // but it expects the chain mutex to be held.
  1137  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
  1138  	// Regardless of the canonical status, write the block itself to the database.
  1139  	//
  1140  	// Note: all the components of the block (hash->number map, header, body, receipts)
  1141  	// should be written atomically. A block batch is used to contain all components.
  1142  	blockBatch := bc.db.NewBatch()
  1143  	rawdb.WriteBlock(blockBatch, block)
  1144  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
  1145  	rawdb.WritePreimages(blockBatch, state.Preimages())
  1146  	if err := blockBatch.Write(); err != nil {
  1147  		log.Crit("Failed to write block into disk", "err", err)
  1148  	}
  1149  
  1150  	// Commit all cached state changes into underlying memory database.
  1151  	// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
  1152  	// diff layer for the block.
  1153  	var err error
  1154  	if bc.snaps == nil {
  1155  		_, err = state.Commit(bc.chainConfig.IsEIP158(block.Number()), true)
  1156  	} else {
  1157  		_, err = state.CommitWithSnap(bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
  1158  	}
  1159  	if err != nil {
  1160  		return err
  1161  	}
  1162  
  1163  	// Note: InsertTrie must be the last step in verification that can return an error.
  1164  	// This allows [stateManager] to assume that if it inserts a trie without returning an
  1165  	// error then the block has passed verification and either AcceptTrie/RejectTrie will
  1166  	// eventually be called on [root] unless a fatal error occurs. It does not assume that
  1167  	// the node will not shutdown before either AcceptTrie/RejectTrie is called.
  1168  	if err := bc.stateManager.InsertTrie(block); err != nil {
  1169  		if bc.snaps != nil {
  1170  			discardErr := bc.snaps.Discard(block.Hash())
  1171  			if discardErr != nil {
  1172  				log.Debug("failed to discard snapshot after being unable to insert block trie", "block", block.Hash(), "root", block.Root())
  1173  			}
  1174  		}
  1175  		return err
  1176  	}
  1177  	return nil
  1178  }
  1179  
  1180  // InsertChain attempts to insert the given batch of blocks into the canonical
  1181  // chain or, otherwise, create a fork. If an error is returned, it will return
  1182  // the index number of the failing block as well as an error describing what
  1183  // went wrong.
  1184  //
  1185  // After insertion is done, all accumulated events will be fired.
  1186  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1187  	// Sanity check that we have something meaningful to import
  1188  	if len(chain) == 0 {
  1189  		return 0, nil
  1190  	}
  1191  
  1192  	bc.blockProcFeed.Send(true)
  1193  	defer bc.blockProcFeed.Send(false)
  1194  
  1195  	// Do a sanity check that the provided chain is actually ordered and linked.
  1196  	for i := 1; i < len(chain); i++ {
  1197  		block, prev := chain[i], chain[i-1]
  1198  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1199  			log.Error("Non contiguous block insert",
  1200  				"number", block.Number(),
  1201  				"hash", block.Hash(),
  1202  				"parent", block.ParentHash(),
  1203  				"prevnumber", prev.Number(),
  1204  				"prevhash", prev.Hash(),
  1205  			)
  1206  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
  1207  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1208  		}
  1209  	}
  1210  	// Pre-checks passed, start the full block imports
  1211  	bc.chainmu.Lock()
  1212  	defer bc.chainmu.Unlock()
  1213  	for n, block := range chain {
  1214  		if err := bc.insertBlock(block, true); err != nil {
  1215  			return n, err
  1216  		}
  1217  	}
  1218  
  1219  	return len(chain), nil
  1220  }
  1221  
  1222  func (bc *BlockChain) InsertBlock(block *types.Block) error {
  1223  	return bc.InsertBlockManual(block, true)
  1224  }
  1225  
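        // InsertBlockManual verifies [block] while holding the chain mutex. When [writes]
        // is false, insertBlock skips writeBlockAndSetHead (see insertBlock below), so the
        // block is verified but neither the block nor its state trie is persisted.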
  1226  func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error {
  1227  	bc.blockProcFeed.Send(true)
  1228  	defer bc.blockProcFeed.Send(false)
  1229  
  1230  	bc.chainmu.Lock()
  1231  	err := bc.insertBlock(block, writes)
  1232  	bc.chainmu.Unlock()
  1233  
  1234  	return err
  1235  }
  1236  
  1237  // gatherBlockLogs fetches logs from a previously inserted block.
  1238  func (bc *BlockChain) gatherBlockLogs(hash common.Hash, number uint64, removed bool) []*types.Log {
  1239  	receipts := rawdb.ReadReceipts(bc.db, hash, number, bc.chainConfig)
  1240  	var logs []*types.Log
  1241  	for _, receipt := range receipts {
  1242  		for _, log := range receipt.Logs {
  1243  			l := *log
  1244  			if removed {
  1245  				l.Removed = true
  1246  			}
  1247  			logs = append(logs, &l)
  1248  		}
  1249  	}
  1250  
  1251  	return logs
  1252  }
  1253  
  1254  func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
  1255  	start := time.Now()
  1256  	bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())), block.Transactions())
  1257  
  1258  	substart := time.Now()
  1259  	err := bc.engine.VerifyHeader(bc, block.Header())
  1260  	if err == nil {
  1261  		err = bc.validator.ValidateBody(block)
  1262  	}
  1263  
  1264  	switch {
  1265  	case errors.Is(err, ErrKnownBlock):
  1266  		// Even if the block is already known, we still need to generate the
  1267  		// snapshot layer and add a reference to the triedb, so we re-execute
  1268  		// the block. Note that insertBlock should only be called once per
  1269  		// block if it returns nil.
  1270  		if bc.newTip(block) {
  1271  			log.Debug("Setting head to be known block", "number", block.Number(), "hash", block.Hash())
  1272  		} else {
  1273  			log.Debug("Reprocessing already known block", "number", block.Number(), "hash", block.Hash())
  1274  		}
  1275  
  1276  	// If an ancestor has been pruned, then this block cannot be acceptable.
  1277  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1278  		return errors.New("side chain insertion is not supported")
  1279  
  1280  	// Future blocks are not supported, but should not be reported as bad blocks,
  1281  	// so we return an error early here.
  1282  	case errors.Is(err, consensus.ErrFutureBlock):
  1283  		return errFutureBlockUnsupported
  1284  
  1285  	// Some other error occurred, abort
  1286  	case err != nil:
  1287  		bc.reportBlock(block, nil, err)
  1288  		return err
  1289  	}
  1290  	blockContentValidationTimer.Inc(time.Since(substart).Milliseconds())
  1291  
  1292  	// No validation errors for the block
  1293  	var activeState *state.StateDB
  1294  	defer func() {
  1295  		// The chain importer is starting and stopping trie prefetchers. If a bad
  1296  		// block or other error is hit however, an early return may not properly
  1297  		// terminate the background threads. This defer ensures that we clean up
  1298  		// any dangling prefetcher, without deferring each and holding on to live refs.
  1299  		if activeState != nil {
  1300  			activeState.StopPrefetcher()
  1301  		}
  1302  	}()
  1303  
  1304  	// Retrieve the parent block to determine which root to build state on
  1305  	substart = time.Now()
  1306  	parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1307  
  1308  	// Instantiate the statedb to use for processing transactions
  1309  	//
  1310  	// NOTE: Flattening a snapshot during block execution requires fetching state
  1311  	// entries directly from the trie (much slower).
  1312  	bc.flattenLock.Lock()
  1313  	defer bc.flattenLock.Unlock()
  1314  	statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
  1315  	if err != nil {
  1316  		return err
  1317  	}
  1318  	blockStateInitTimer.Inc(time.Since(substart).Milliseconds())
  1319  
  1320  	// Enable prefetching to pull in trie node paths while processing transactions
  1321  	statedb.StartPrefetcher("chain")
  1322  	activeState = statedb
  1323  
  1324  	// Process the block using the parent state as the reference point.
  1327  	substart = time.Now()
  1328  	receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig)
  1329  	if serr := statedb.Error(); serr != nil {
  1330  		log.Error("statedb error encountered", "err", serr, "number", block.Number(), "hash", block.Hash())
  1331  	}
  1332  	if err != nil {
  1333  		bc.reportBlock(block, receipts, err)
  1334  		return err
  1335  	}
  1336  
  1337  	// Update the metrics touched during block processing
  1338  	accountReadTimer.Inc(statedb.AccountReads.Milliseconds())                 // Account reads are complete, we can mark them
  1339  	storageReadTimer.Inc(statedb.StorageReads.Milliseconds())                 // Storage reads are complete, we can mark them
  1340  	snapshotAccountReadTimer.Inc(statedb.SnapshotAccountReads.Milliseconds()) // Account reads are complete, we can mark them
  1341  	snapshotStorageReadTimer.Inc(statedb.SnapshotStorageReads.Milliseconds()) // Storage reads are complete, we can mark them
  1342  	trieproc := statedb.AccountHashes + statedb.StorageHashes                 // Saved to avoid double-counting in validation
  1343  	trieproc += statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
  1344  	trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
  1345  	blockExecutionTimer.Inc((time.Since(substart) - trieproc).Milliseconds())
  1346  
  1347  	// Validate the state using the default validator
  1348  	substart = time.Now()
  1349  	if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1350  		bc.reportBlock(block, receipts, err)
  1351  		return err
  1352  	}
  1353  
  1354  	// Update the metrics touched during block validation
  1355  	accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete, we can mark them
  1356  	storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete, we can mark them
  1357  	accountHashTimer.Inc(statedb.AccountHashes.Milliseconds())    // Account hashes are complete, we can mark them
  1358  	storageHashTimer.Inc(statedb.StorageHashes.Milliseconds())    // Storage hashes are complete, we can mark them
  1359  	additionalTrieProc := statedb.AccountHashes + statedb.StorageHashes + statedb.AccountUpdates + statedb.StorageUpdates - trieproc
  1360  	blockStateValidationTimer.Inc((time.Since(substart) - additionalTrieProc).Milliseconds())
  1361  	blockTrieOpsTimer.Inc((trieproc + additionalTrieProc).Milliseconds())
  1362  
  1363  	// If [writes] are disabled, skip [writeBlockWithState] so that we do not write the block
  1364  	// or the state trie to disk.
  1365  	// Note: in pruning mode, this prevents us from generating a reference to the state root.
  1366  	if !writes {
  1367  		return nil
  1368  	}
  1369  
  1370  	// Write the block to the chain and get the status.
  1371  	// writeBlockWithState (called within writeBlockAndSetHead) creates a reference that
  1372  	// will be cleaned up in Accept/Reject, so we need to ensure an error cannot occur
  1373  	// later in verification, since that would cause the referenced root to never be dereferenced.
  1374  	substart = time.Now()
  1375  	if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil {
  1376  		return err
  1377  	}
  1378  	// Update the metrics touched during block commit
  1379  	accountCommitTimer.Inc(statedb.AccountCommits.Milliseconds())   // Account commits are complete, we can mark them
  1380  	storageCommitTimer.Inc(statedb.StorageCommits.Milliseconds())   // Storage commits are complete, we can mark them
  1381  	snapshotCommitTimer.Inc(statedb.SnapshotCommits.Milliseconds()) // Snapshot commits are complete, we can mark them
  1382  	triedbCommitTimer.Inc(statedb.TrieDBCommits.Milliseconds())     // Triedb commits are complete, we can mark them
  1383  	blockWriteTimer.Inc((time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits).Milliseconds())
  1384  	blockInsertTimer.Inc(time.Since(start).Milliseconds())
  1385  
  1386  	log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1387  		"parentHash", block.ParentHash(),
  1388  		"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1389  		"elapsed", common.PrettyDuration(time.Since(start)),
  1390  		"root", block.Root(), "baseFeePerGas", block.BaseFee(), "blockGasCost", block.BlockGasCost(),
  1391  	)
  1392  
  1393  	processedBlockGasUsedCounter.Inc(int64(block.GasUsed()))
  1394  	processedTxsCounter.Inc(int64(block.Transactions().Len()))
  1395  	processedLogsCounter.Inc(int64(len(logs)))
  1396  	blockInsertCount.Inc(1)
  1397  	return nil
  1398  }
  1399  
  1400  // collectLogs collects the logs that were generated or removed during
  1401  // the processing of the block that corresponds with the given hash.
  1402  // These logs are later announced as deleted or reborn.
  1403  func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log {
  1404  	number := bc.hc.GetBlockNumber(hash)
  1405  	if number == nil {
  1406  		return nil
  1407  	}
  1408  	return bc.gatherBlockLogs(hash, *number, removed)
  1409  }
  1410  
  1411  // mergeLogs returns a merged log slice with specified sort order.
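        // For example, mergeLogs([][]*types.Log{{a}, {b, c}}, false) yields [a, b, c],
        // while reverse=true yields [b, c, a].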
  1412  func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log {
  1413  	var ret []*types.Log
  1414  	if reverse {
  1415  		for i := len(logs) - 1; i >= 0; i-- {
  1416  			ret = append(ret, logs[i]...)
  1417  		}
  1418  	} else {
  1419  		for i := 0; i < len(logs); i++ {
  1420  			ret = append(ret, logs[i]...)
  1421  		}
  1422  	}
  1423  	return ret
  1424  }
  1425  
  1426  // reorg takes two blocks, an old chain and a new chain, reconstructs the blocks,
  1427  // inserts them as part of the new canonical chain, accumulates potential missing
  1428  // transactions, and posts events about them.
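        //
        // At a high level, the procedure is:
        //  1. reduce the longer side to the height of the shorter side,
        //  2. walk both sides back in lockstep until a common ancestor is found,
        //  3. refuse the reorg if the ancestor is below the last accepted block,
        //  4. rewrite the canonical chain markers for the new side, and
        //  5. delete stale canonical number mappings and fire removed/reborn log events.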
  1429  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1430  	var (
  1431  		newHead = newBlock
  1432  		oldHead = oldBlock
  1433  
  1434  		newChain    types.Blocks
  1435  		oldChain    types.Blocks
  1436  		commonBlock *types.Block
  1437  
  1438  		deletedLogs [][]*types.Log
  1439  		rebirthLogs [][]*types.Log
  1440  	)
  1441  	// Reduce the longer chain to the same number as the shorter one
  1442  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1443  		// Old chain is longer, gather all transactions and logs as deleted ones
  1444  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1445  			oldChain = append(oldChain, oldBlock)
  1446  			// Collect deleted logs for notification
  1447  			logs := bc.collectLogs(oldBlock.Hash(), true)
  1448  			if len(logs) > 0 {
  1449  				deletedLogs = append(deletedLogs, logs)
  1450  			}
  1451  		}
  1452  	} else {
  1453  		// New chain is longer, stash all blocks away for subsequent insertion
  1454  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1455  			newChain = append(newChain, newBlock)
  1456  		}
  1457  	}
  1458  	if oldBlock == nil {
  1459  		return fmt.Errorf("invalid old chain")
  1460  	}
  1461  	if newBlock == nil {
  1462  		return fmt.Errorf("invalid new chain")
  1463  	}
  1464  	// Both sides of the reorg are at the same number, reduce both until the common
  1465  	// ancestor is found
  1466  	for {
  1467  		// If the common ancestor was found, bail out
  1468  		if oldBlock.Hash() == newBlock.Hash() {
  1469  			commonBlock = oldBlock
  1470  			break
  1471  		}
  1472  		// Remove an old block as well as stash away a new block
  1473  		oldChain = append(oldChain, oldBlock)
  1474  		// Collect deleted logs for notification
  1475  		logs := bc.collectLogs(oldBlock.Hash(), true)
  1476  		if len(logs) > 0 {
  1477  			deletedLogs = append(deletedLogs, logs)
  1478  		}
  1479  
  1480  		newChain = append(newChain, newBlock)
  1481  
  1482  		// Step back with both chains
  1483  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  1484  		if oldBlock == nil {
  1485  			return fmt.Errorf("invalid old chain")
  1486  		}
  1487  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1488  		if newBlock == nil {
  1489  			return fmt.Errorf("invalid new chain")
  1490  		}
  1491  	}
  1492  
  1493  	// If the commonBlock is below the last accepted height, we return an error
  1494  	// because performing a reorg would mean removing an accepted block from the
  1495  	// canonical chain.
  1496  	if commonBlock.NumberU64() < bc.lastAccepted.NumberU64() {
  1497  		return fmt.Errorf("cannot orphan finalized block at height: %d to common block at height: %d", bc.lastAccepted.NumberU64(), commonBlock.NumberU64())
  1498  	}
  1499  
  1500  	// Ensure the user sees large reorgs
  1501  	if len(oldChain) > 0 && len(newChain) > 0 {
  1502  		logFn := log.Info
  1503  		msg := "Resetting chain preference"
  1504  		if len(oldChain) > 63 {
  1505  			msg = "Large chain preference change detected"
  1506  			logFn = log.Warn
  1507  		}
  1508  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1509  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1510  	} else {
  1511  		log.Warn("Unlikely preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number(), "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash())
  1512  	}
  1513  	// Insert the new chain (except the head block), in reverse so the blocks are
  1514  	// written in the proper incremental order.
  1515  	for i := len(newChain) - 1; i >= 1; i-- {
  1516  		// Insert the block in the canonical way, re-writing history
  1517  		bc.writeHeadBlock(newChain[i])
  1518  
  1519  		// Collect reborn logs due to chain reorg
  1520  		logs := bc.collectLogs(newChain[i].Hash(), false)
  1521  		if len(logs) > 0 {
  1522  			rebirthLogs = append(rebirthLogs, logs)
  1523  		}
  1524  	}
  1525  	// Delete any canonical number assignments above the new head
  1526  	indexesBatch := bc.db.NewBatch()
  1527  
  1528  	// Use the height of [newHead] to determine which canonical hashes to remove
  1529  	// in case the new chain is shorter than the old chain, in which case
  1530  	// there may be hashes set on the canonical chain that were invalidated
  1531  	// but not yet overwritten by the re-org.
  1532  	for i := newHead.NumberU64() + 1; ; i++ {
  1533  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  1534  		if hash == (common.Hash{}) {
  1535  			break
  1536  		}
  1537  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  1538  	}
  1539  	if err := indexesBatch.Write(); err != nil {
  1540  		log.Crit("Failed to delete useless indexes", "err", err)
  1541  	}
  1542  
  1543  	// If any logs need to be fired, do it now. In theory we could avoid these sends
  1544  	// if there are no events to fire, but realistically that only ever happens if
  1545  	// we're reorging empty blocks, which will only happen on idle networks where
  1546  	// performance is not an issue either way.
  1547  	if len(deletedLogs) > 0 {
  1548  		bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
  1549  	}
  1550  	if len(rebirthLogs) > 0 {
  1551  		bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
  1552  	}
  1553  	if len(oldChain) > 0 {
  1554  		for i := len(oldChain) - 1; i >= 0; i-- {
  1555  			bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  1556  		}
  1557  	}
  1558  	return nil
  1559  }
  1560  
  1561  type badBlock struct {
  1562  	block  *types.Block
  1563  	reason *BadBlockReason
  1564  }
  1565  
  1566  type BadBlockReason struct {
  1567  	ChainConfig *params.ChainConfig `json:"chainConfig"`
  1568  	Receipts    types.Receipts      `json:"receipts"`
  1569  	Number      uint64              `json:"number"`
  1570  	Hash        common.Hash         `json:"hash"`
  1571  	Error       error               `json:"error"`
  1572  }
  1573  
  1574  func (b *BadBlockReason) String() string {
  1575  	var receiptString string
  1576  	for i, receipt := range b.Receipts {
  1577  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  1578  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  1579  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  1580  	}
  1581  	reason := fmt.Sprintf(`
  1582  	########## BAD BLOCK #########
  1583  	Chain config: %v
  1584  	
  1585  	Number: %v
  1586  	Hash: %#x
  1587  	%v
  1588  	
  1589  	Error: %v
  1590  	##############################
  1591  	`, b.ChainConfig, b.Number, b.Hash, receiptString, b.Error)
  1592  
  1593  	return reason
  1594  }
  1595  
  1596  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the
  1597  // network, along with the BadBlockReason that caused each to be reported as a bad block.
  1598  // BadBlocks ensures that the returned block and BadBlockReason slices have the same length.
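        //
        // Illustrative usage:
        //
        //	blocks, reasons := bc.BadBlocks()
        //	for i, block := range blocks {
        //		log.Info("bad block seen", "hash", block.Hash(), "reason", reasons[i])
        //	}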
  1599  func (bc *BlockChain) BadBlocks() ([]*types.Block, []*BadBlockReason) {
  1600  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1601  	reasons := make([]*BadBlockReason, 0, bc.badBlocks.Len())
  1602  	for _, hash := range bc.badBlocks.Keys() {
  1603  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1604  			badBlk := blk.(*badBlock)
  1605  			blocks = append(blocks, badBlk.block)
  1606  			reasons = append(reasons, badBlk.reason)
  1607  		}
  1608  	}
  1609  	return blocks, reasons
  1610  }
  1611  
  1612  // addBadBlock adds a bad block to the bad-block LRU cache
  1613  func (bc *BlockChain) addBadBlock(block *types.Block, reason *BadBlockReason) {
  1614  	bc.badBlocks.Add(block.Hash(), &badBlock{
  1615  		block:  block,
  1616  		reason: reason,
  1617  	})
  1618  }
  1619  
  1620  // reportBlock logs a bad block error.
  1621  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1622  	reason := &BadBlockReason{
  1623  		ChainConfig: bc.chainConfig,
  1624  		Receipts:    receipts,
  1625  		Number:      block.NumberU64(),
  1626  		Hash:        block.Hash(),
  1627  		Error:       err,
  1628  	}
  1629  
  1630  	badBlockCounter.Inc(1)
  1631  	bc.addBadBlock(block, reason)
  1632  	log.Debug(reason.String())
  1633  }
  1634  
  1635  func (bc *BlockChain) RemoveRejectedBlocks(start, end uint64) error {
  1636  	batch := bc.db.NewBatch()
  1637  
  1638  	for i := start; i < end; i++ {
  1639  		hashes := rawdb.ReadAllHashes(bc.db, i)
  1640  		canonicalBlock := bc.GetBlockByNumber(i)
  1641  		if canonicalBlock == nil {
  1642  			return fmt.Errorf("failed to retrieve block by number at height %d", i)
  1643  		}
  1644  		canonicalHash := canonicalBlock.Hash()
  1645  		for _, hash := range hashes {
  1646  			if hash == canonicalHash {
  1647  				continue
  1648  			}
  1649  			rawdb.DeleteBlock(batch, hash, i)
  1650  		}
  1651  
  1652  		if err := batch.Write(); err != nil {
  1653  			return fmt.Errorf("failed to write batch deleting rejected blocks at height %d: %w", i, err)
  1654  		}
  1655  		batch.Reset()
  1656  	}
  1657  
  1658  	return nil
  1659  }
  1660  
  1661  // reprocessBlock reprocesses a previously accepted block. This is often used
  1662  // to regenerate previously pruned state tries.
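        // Note: unlike writeBlockWithState above, the commit here passes false for the
        // trailing flag, and callers such as reprocessState explicitly reference and
        // dereference the returned root via the triedb.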
  1663  func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) (common.Hash, error) {
  1664  	// Retrieve the parent block and its state to execute block
  1665  	var (
  1666  		statedb    *state.StateDB
  1667  		err        error
  1668  		parentRoot = parent.Root()
  1669  	)
  1670  	// We don't simply use [NewWithSnapshot] here because it doesn't return an
  1671  	// error if [bc.snaps != nil] and [bc.snaps.Snapshot(parentRoot) == nil].
  1672  	if bc.snaps == nil {
  1673  		statedb, err = state.New(parentRoot, bc.stateCache, nil)
  1674  	} else {
  1675  		snap := bc.snaps.Snapshot(parentRoot)
  1676  		if snap == nil {
  1677  			return common.Hash{}, fmt.Errorf("failed to get snapshot for parent root: %s", parentRoot)
  1678  		}
  1679  		statedb, err = state.NewWithSnapshot(parentRoot, bc.stateCache, snap)
  1680  	}
  1681  	if err != nil {
  1682  		return common.Hash{}, fmt.Errorf("could not fetch state for (%s: %d): %v", parent.Hash().Hex(), parent.NumberU64(), err)
  1683  	}
  1684  
  1685  	// Enable prefetching to pull in trie node paths while processing transactions
  1686  	statedb.StartPrefetcher("chain")
  1687  	defer func() {
  1688  		statedb.StopPrefetcher()
  1689  	}()
  1690  
  1691  	// Process previously stored block
  1692  	receipts, _, usedGas, err := bc.processor.Process(current, parent.Header(), statedb, vm.Config{})
  1693  	if err != nil {
  1694  		return common.Hash{}, fmt.Errorf("failed to re-process block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
  1695  	}
  1696  
  1697  	// Validate the state using the default validator
  1698  	if err := bc.validator.ValidateState(current, statedb, receipts, usedGas); err != nil {
  1699  		return common.Hash{}, fmt.Errorf("failed to validate state while re-processing block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
  1700  	}
  1701  	log.Debug("Processed block", "block", current.Hash(), "number", current.NumberU64())
  1702  
  1703  	// Commit all cached state changes into underlying memory database.
  1704  	// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
  1705  	// diff layer for the block.
  1706  	if bc.snaps == nil {
  1707  		return statedb.Commit(bc.chainConfig.IsEIP158(current.Number()), false)
  1708  	}
  1709  	return statedb.CommitWithSnap(bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
  1710  }
  1711  
  1712  // initSnapshot instantiates a Snapshot instance and adds it to [bc]
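        // A snapshot is only created when [SnapshotLimit] > 0 and no snapshot exists yet;
        // generation is async only if configured and [b] is a non-genesis block, and a
        // missing snapshot is rebuilt unless [SkipSnapshotRebuild] is set (rebuild is
        // always performed at genesis).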
  1713  func (bc *BlockChain) initSnapshot(b *types.Block) {
  1714  	if bc.cacheConfig.SnapshotLimit <= 0 || bc.snaps != nil {
  1715  		return
  1716  	}
  1717  
  1718  	// If we are starting from genesis, generate the original snapshot disk layer
  1719  	// up front, so we can use it while executing blocks in bootstrapping. This
  1720  	// also avoids a costly async generation process when reaching tip.
  1721  	//
  1722  	// Additionally, we should always repair a snapshot if starting at genesis
  1723  	// if [SnapshotLimit] > 0.
  1724  	async := bc.cacheConfig.SnapshotAsync && b.NumberU64() > 0
  1725  	rebuild := !bc.cacheConfig.SkipSnapshotRebuild || b.NumberU64() == 0
  1726  	log.Info("Initializing snapshots", "async", async, "rebuild", rebuild, "headHash", b.Hash(), "headRoot", b.Root())
  1727  	var err error
  1728  	bc.snaps, err = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, b.Hash(), b.Root(), async, rebuild, bc.cacheConfig.SnapshotVerify)
  1729  	if err != nil {
  1730  		log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root(), "err", err, "async", async)
  1731  	}
  1732  }
  1733  
  1734  // reprocessState reprocesses the state up to [block], iterating through its ancestors until
  1735  // it reaches a block with a state committed to the database. reprocessState does not use
  1736  // snapshots since the disk layer for snapshots will most likely be above the last committed
  1737  // state that reprocessing will start from.
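        //
        // [reexec] bounds how many ancestors of [current] are searched for a state root
        // that is still on disk, e.g. (illustrative):
        //
        //	err := bc.reprocessState(bc.LastAcceptedBlock(), 128)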
  1738  func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error {
  1739  	origin := current.NumberU64()
  1740  	acceptorTip, err := rawdb.ReadAcceptorTip(bc.db)
  1741  	if err != nil {
  1742  		return fmt.Errorf("%w: unable to get Acceptor tip", err)
  1743  	}
  1744  	log.Info("Loaded Acceptor tip", "hash", acceptorTip)
  1745  
  1746  	// The acceptor tip is up to date either if it matches the current hash, or it has not been
  1747  	// initialized (i.e., this node has not accepted any blocks asynchronously).
  1748  	acceptorTipUpToDate := acceptorTip == (common.Hash{}) || acceptorTip == current.Hash()
  1749  
  1750  	// If the state is already available and the acceptor tip is up to date, skip re-processing.
  1751  	if bc.HasState(current.Root()) && acceptorTipUpToDate {
  1752  		log.Info("Skipping state reprocessing", "root", current.Root())
  1753  		return nil
  1754  	}
  1755  
  1756  	// If the acceptorTip is a non-empty hash, jump re-processing back to the acceptor tip to ensure that
  1757  	// we re-process at a minimum from the last processed accepted block.
  1758  	// Note: we do not have a guarantee that the last trie on disk will be at a height <= acceptorTip;
  1759  	// this may occur if we are running in archive mode, where every block's trie is committed on
  1760  	// insertion, or after an unclean shutdown.
  1761  	// Since we need to re-process from at least the acceptorTip to ensure indices are updated correctly,
  1762  	// we must start searching for the block to begin re-processing at the acceptorTip.
  1763  	if acceptorTip != (common.Hash{}) {
  1764  		current = bc.GetBlockByHash(acceptorTip)
  1765  		if current == nil {
  1766  			return fmt.Errorf("failed to get block for acceptor tip %s", acceptorTip)
  1767  		}
  1768  	}
  1769  
  1770  	for i := 0; i < int(reexec); i++ {
  1771  		// TODO: handle canceled context
  1772  
  1773  		if current.NumberU64() == 0 {
  1774  			return errors.New("genesis state is missing")
  1775  		}
  1776  		parent := bc.GetBlock(current.ParentHash(), current.NumberU64()-1)
  1777  		if parent == nil {
  1778  			return fmt.Errorf("missing block %s:%d", current.ParentHash().Hex(), current.NumberU64()-1)
  1779  		}
  1780  		current = parent
  1781  		_, err = bc.stateCache.OpenTrie(current.Root())
  1782  		if err == nil {
  1783  			break
  1784  		}
  1785  	}
  1786  	if err != nil {
  1787  		switch err.(type) {
  1788  		case *trie.MissingNodeError:
  1789  			return fmt.Errorf("required historical state unavailable (reexec=%d)", reexec)
  1790  		default:
  1791  			return err
  1792  		}
  1793  	}
  1794  
  1795  	// State was available at historical point, regenerate
  1796  	var (
  1797  		start        = time.Now()
  1798  		logged       time.Time
  1799  		previousRoot common.Hash
  1800  		triedb       = bc.stateCache.TrieDB()
  1801  		writeIndices bool
  1802  	)
  1803  	// Note: we add 1 since in each iteration, we attempt to re-execute the next block.
  1804  	log.Info("Re-executing blocks to generate state for last accepted block", "from", current.NumberU64()+1, "to", origin)
  1805  	for current.NumberU64() < origin {
  1806  		// TODO: handle canceled context
  1807  
  1808  		// Print progress logs if long enough time elapsed
  1809  		if time.Since(logged) > 8*time.Second {
  1810  			log.Info("Regenerating historical state", "block", current.NumberU64()+1, "target", origin, "remaining", origin-current.NumberU64(), "elapsed", time.Since(start))
  1811  			logged = time.Now()
  1812  		}
  1813  
  1814  		// Retrieve the next block to regenerate and process it
  1815  		parent := current
  1816  		next := current.NumberU64() + 1
  1817  		if current = bc.GetBlockByNumber(next); current == nil {
  1818  			return fmt.Errorf("failed to retrieve block %d while re-generating state", next)
  1819  		}
  1820  
  1821  		// Initialize snapshot if required (prevents full snapshot re-generation in
  1822  		// the case of unclean shutdown)
  1823  		if parent.Hash() == acceptorTip {
  1824  			log.Info("Recovering snapshot", "hash", parent.Hash(), "index", parent.NumberU64())
  1825  			// TODO: switch to checking the snapshot block hash markers here to ensure that when we re-process the block, we have the opportunity to apply
  1826  			// a snapshot diff layer that we may have been in the middle of committing during shutdown. This will prevent snapshot re-generation in the case
  1827  			// that the node stops mid-way through snapshot flattening (performed across multiple DB batches).
  1828  			// If snapshot initialization is delayed due to state sync, skip initializing snaps here
  1829  			if !bc.cacheConfig.SnapshotDelayInit {
  1830  				bc.initSnapshot(parent)
  1831  			}
  1832  			writeIndices = true // Set [writeIndices] to true, so that the indices will be updated from the last accepted tip onwards.
  1833  		}
  1834  
  1835  		// Reprocess next block using previously fetched data
  1836  		root, err := bc.reprocessBlock(parent, current)
  1837  		if err != nil {
  1838  			return err
  1839  		}
  1840  
  1841  		// Flatten snapshot if initialized, holding a reference to the state root until the next block
  1842  		// is processed.
  1843  		if err := bc.flattenSnapshot(func() error {
  1844  			triedb.Reference(root, common.Hash{})
  1845  			if previousRoot != (common.Hash{}) {
  1846  				triedb.Dereference(previousRoot)
  1847  			}
  1848  			previousRoot = root
  1849  			return nil
  1850  		}, current.Hash()); err != nil {
  1851  			return err
  1852  		}
  1853  
  1854  		// Write any unsaved indices to disk
  1855  		if writeIndices {
  1856  			if err := bc.writeBlockAcceptedIndices(current); err != nil {
  1857  				return fmt.Errorf("%w: failed to process accepted block indices", err)
  1858  			}
  1859  		}
  1860  	}
  1861  
  1862  	nodes, imgs := triedb.Size()
  1863  	log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
  1864  	if previousRoot != (common.Hash{}) {
  1865  		return triedb.Commit(previousRoot, true, nil)
  1866  	}
  1867  	return nil
  1868  }
  1869  
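        // protectTrieIndex guards archive databases: a node that runs with pruning
        // disabled writes a marker, and a pruned node later started against such a
        // database refuses to run (ErrRefuseToCorruptArchiver) unless
        // [AllowMissingTries] is set.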
  1870  func (bc *BlockChain) protectTrieIndex() error {
  1871  	if !bc.cacheConfig.Pruning {
  1872  		return rawdb.WritePruningDisabled(bc.db)
  1873  	}
  1874  	pruningDisabled, err := rawdb.HasPruningDisabled(bc.db)
  1875  	if err != nil {
  1876  		return fmt.Errorf("failed to check if the chain has been run with pruning disabled: %w", err)
  1877  	}
  1878  	if !pruningDisabled {
  1879  		return nil
  1880  	}
  1881  	if !bc.cacheConfig.AllowMissingTries {
  1882  		return ErrRefuseToCorruptArchiver
  1883  	}
  1884  	return nil
  1885  }
  1886  
  1887  // populateMissingTries iterates from [bc.cacheConfig.PopulateMissingTries] (defaults to 0)
  1888  // to [LastAcceptedBlock] and persists all tries to disk that are not already on disk. This is
  1889  // used to fill trie index gaps in an "archive" node without resyncing from scratch.
  1890  //
  1891  // NOTE: Assumes the genesis root and last accepted root are written to disk
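        //
        // Illustrative configuration (fields as referenced below):
        //
        //	start := uint64(0) // re-verify from genesis
        //	cacheConfig.PopulateMissingTries = &start
        //	cacheConfig.PopulateMissingTriesParallelism = 4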
  1892  func (bc *BlockChain) populateMissingTries() error {
  1893  	if bc.cacheConfig.PopulateMissingTries == nil {
  1894  		return nil
  1895  	}
  1896  
  1897  	var (
  1898  		lastAccepted = bc.LastAcceptedBlock().NumberU64()
  1899  		startHeight  = *bc.cacheConfig.PopulateMissingTries
  1900  		startTime    = time.Now()
  1901  		logged       time.Time
  1902  		triedb       = bc.stateCache.TrieDB()
  1903  		missing      = 0
  1904  	)
  1905  
  1906  	// Do not allow the config to specify a starting point above the last accepted block.
  1907  	if startHeight > lastAccepted {
  1908  		return fmt.Errorf("cannot populate missing tries from a starting point (%d) > last accepted block (%d)", startHeight, lastAccepted)
  1909  	}
  1910  
  1911  	// If we are starting from the genesis, increment the start height by 1 so we don't attempt to re-process
  1912  	// the genesis block.
  1913  	if startHeight == 0 {
  1914  		startHeight += 1
  1915  	}
  1916  	parent := bc.GetBlockByNumber(startHeight - 1)
  1917  	if parent == nil {
  1918  		return fmt.Errorf("failed to fetch initial parent block for re-populate missing tries at height %d", startHeight-1)
  1919  	}
  1920  
  1921  	it := newBlockChainIterator(bc, startHeight, bc.cacheConfig.PopulateMissingTriesParallelism)
  1922  	defer it.Stop()
  1923  
  1924  	for i := startHeight; i < lastAccepted; i++ {
  1925  		// Print progress logs if long enough time elapsed
  1926  		if time.Since(logged) > 8*time.Second {
  1927  			log.Info("Populating missing tries", "missing", missing, "block", i, "remaining", lastAccepted-i, "elapsed", time.Since(startTime))
  1928  			logged = time.Now()
  1929  		}
  1930  
  1931  		// TODO: handle canceled context
  1932  		current, hasState, err := it.Next(context.TODO())
  1933  		if err != nil {
  1934  			return fmt.Errorf("error while populating missing tries: %w", err)
  1935  		}
  1936  
  1937  		if hasState {
  1938  			parent = current
  1939  			continue
  1940  		}
  1941  
  1942  		root, err := bc.reprocessBlock(parent, current)
  1943  		if err != nil {
  1944  			return err
  1945  		}
  1946  
  1947  		// Commit root to disk so that it can be accessed directly
  1948  		if err := triedb.Commit(root, false, nil); err != nil {
  1949  			return err
  1950  		}
  1951  		parent = current
  1952  		log.Debug("Populated missing trie", "block", current.NumberU64(), "root", root)
  1953  		missing++
  1954  	}
  1955  
  1956  	// Write marker to DB to indicate populate missing tries finished successfully.
  1957  	// Note: writing the marker here means that we allow subsequent runs of re-populating
  1958  	// missing tries if a prior run did not finish.
  1959  	if err := rawdb.WritePopulateMissingTries(bc.db); err != nil {
  1960  		return fmt.Errorf("failed to write populate missing tries success marker: %w", err)
  1961  	}
  1962  
  1963  	nodes, imgs := triedb.Size()
  1964  	log.Info("All missing tries populated", "startHeight", startHeight, "lastAcceptedHeight", lastAccepted, "missing", missing, "elapsed", time.Since(startTime), "nodes", nodes, "preimages", imgs)
  1965  	return nil
  1966  }
  1967  
  1968  // CleanBlockRootsAboveLastAccepted gathers the blocks that may have previously been in processing above the
  1969  // last accepted block and wipes their block roots from disk to mark their tries as inaccessible.
  1970  // This is used prior to pruning to ensure that all of the tries that may still be in processing are marked
  1971  // as inaccessible and mirrors the handling of middle roots in the geth offline pruning implementation.
  1972  // This is not strictly necessary, but maintains a soft assumption.
  1973  func (bc *BlockChain) CleanBlockRootsAboveLastAccepted() error {
  1974  	targetRoot := bc.LastAcceptedBlock().Root()
  1975  
  1976  	// Clean up any block roots above the last accepted block before we start pruning.
  1977  	// Note: this takes the place of middleRoots in the geth implementation since we do not
  1978  	// track processing block roots via snapshot journals in the same way.
  1979  	processingRoots := bc.gatherBlockRootsAboveLastAccepted()
  1980  	// If there is a block above the last accepted block with an identical state root, we
  1981  	// explicitly remove it from the set to ensure we do not corrupt the last accepted trie.
  1982  	delete(processingRoots, targetRoot)
  1983  	for processingRoot := range processingRoots {
  1984  		// Delete the processing root from disk to mark the trie as inaccessible (no need to handle this in a batch).
  1985  		if err := bc.db.Delete(processingRoot[:]); err != nil {
  1986  			return fmt.Errorf("failed to remove processing root (%s) preparing for offline pruning: %w", processingRoot, err)
  1987  		}
  1988  	}
  1989  
  1990  	return nil
  1991  }
  1992  
  1993  // gatherBlockRootsAboveLastAccepted iterates forward from the last accepted block and returns a list of all block roots
  1994  // for any blocks that were inserted above the last accepted block.
  1995  // Given that we never insert a block into the chain unless all of its ancestors have been inserted, this should gather
  1996  // all of the block roots for blocks inserted above the last accepted block that may have been in processing at some point
  1997  // in the past and are therefore potentially still acceptable.
  1998  // Note: there is an edge case where the node dies while the consensus engine is rejecting a branch of blocks since the
  1999  // consensus engine will reject the lowest ancestor first. In this case, these blocks will not be considered acceptable in
  2000  // the future.
  2001  // Ex.
  2002  //
  2003  //	   A
  2004  //	 /   \
  2005  //	B     C
  2006  //	|
  2007  //	D
  2008  //	|
  2009  //	E
  2010  //	|
  2011  //	F
  2012  //
  2013  // The consensus engine accepts block C and proceeds to reject the other branch in order (B, D, E, F).
  2014  // If the consensus engine dies after rejecting block D, block D will be deleted, such that the forward iteration
  2015  // may not find any blocks at this height and will not reach the previously processing blocks E and F.
  2016  func (bc *BlockChain) gatherBlockRootsAboveLastAccepted() map[common.Hash]struct{} {
  2017  	blockRoots := make(map[common.Hash]struct{})
  2018  	for height := bc.lastAccepted.NumberU64() + 1; ; height++ {
  2019  		blockHashes := rawdb.ReadAllHashes(bc.db, height)
  2020  		// If there are no block hashes at [height], then there should be no further acceptable blocks
  2021  		// past this point.
  2022  		if len(blockHashes) == 0 {
  2023  			break
  2024  		}
  2025  
  2026  		// Fetch the blocks and append their roots.
  2027  		for _, blockHash := range blockHashes {
  2028  			block := bc.GetBlockByHash(blockHash)
  2029  			if block == nil {
  2030  				continue
  2031  			}
  2032  
  2033  			blockRoots[block.Root()] = struct{}{}
  2034  		}
  2035  	}
  2036  
  2037  	return blockRoots
  2038  }
  2039  
  2040  // ResetToStateSyncedBlock reinitializes the state of the blockchain
  2041  // to the trie represented by [block.Root()] after updating
  2042  // the in-memory and on-disk current block pointers to [block].
  2043  // Should only be called after state sync has completed.
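        //
        // All on-disk pointers (acceptor tip, head block/header hashes, snapshot markers)
        // are written in a single batch before the in-memory chain markers are updated.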
  2044  func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error {
  2045  	bc.chainmu.Lock()
  2046  	defer bc.chainmu.Unlock()
  2047  
  2048  	// Update head block and snapshot pointers on disk
  2049  	batch := bc.db.NewBatch()
  2050  	rawdb.WriteAcceptorTip(batch, block.Hash())
  2051  	rawdb.WriteHeadBlockHash(batch, block.Hash())
  2052  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
  2053  	rawdb.WriteSnapshotBlockHash(batch, block.Hash())
  2054  	rawdb.WriteSnapshotRoot(batch, block.Root())
  2055  	if err := rawdb.WriteSyncPerformed(batch, block.NumberU64()); err != nil {
  2056  		return err
  2057  	}
  2058  
  2059  	if err := batch.Write(); err != nil {
  2060  		return err
  2061  	}
  2062  
  2063  	// Update all in-memory chain markers
  2064  	bc.lastAccepted = block
  2065  	bc.acceptorTip = block
  2066  	bc.currentBlock.Store(block)
  2067  	bc.hc.SetCurrentHeader(block.Header())
  2068  
  2069  	lastAcceptedHash := block.Hash()
  2070  	bc.stateCache = state.NewDatabaseWithConfig(bc.db, &trie.Config{
  2071  		Cache:       bc.cacheConfig.TrieCleanLimit,
  2072  		Journal:     bc.cacheConfig.TrieCleanJournal,
  2073  		Preimages:   bc.cacheConfig.Preimages,
  2074  		StatsPrefix: trieCleanCacheStatsNamespace,
  2075  	})
  2076  	if err := bc.loadLastState(lastAcceptedHash); err != nil {
  2077  		return err
  2078  	}
  2079  	// Create the state manager
  2080  	bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), bc.cacheConfig)
  2081  
  2082  	// Make sure the state associated with the block is available
  2083  	head := bc.CurrentBlock()
  2084  	if !bc.HasState(head.Root()) {
  2085  		return fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash())
  2086  	}
  2087  
  2088  	bc.initSnapshot(head)
  2089  	return nil
  2090  }