github.com/dim4egster/coreth@v0.10.2/core/blockchain.go

     1  // (c) 2019-2020, Ava Labs, Inc.
     2  //
     3  // This file is a derived work, based on the go-ethereum library whose original
     4  // notices appear below.
     5  //
     6  // It is distributed under a license compatible with the licensing terms of the
     7  // original code from which it is derived.
     8  //
     9  // Much love to the original authors for their work.
    10  // **********
    11  // Copyright 2014 The go-ethereum Authors
    12  // This file is part of the go-ethereum library.
    13  //
    14  // The go-ethereum library is free software: you can redistribute it and/or modify
    15  // it under the terms of the GNU Lesser General Public License as published by
    16  // the Free Software Foundation, either version 3 of the License, or
    17  // (at your option) any later version.
    18  //
    19  // The go-ethereum library is distributed in the hope that it will be useful,
    20  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    21  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    22  // GNU Lesser General Public License for more details.
    23  //
    24  // You should have received a copy of the GNU Lesser General Public License
    25  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    26  
    27  // Package core implements the Ethereum consensus protocol.
    28  package core
    29  
    30  import (
    31  	"context"
    32  	"errors"
    33  	"fmt"
    34  	"io"
    35  	"math/big"
    36  	"runtime"
    37  	"sync"
    38  	"sync/atomic"
    39  	"time"
    40  
    41  	"github.com/dim4egster/coreth/consensus"
    42  	"github.com/dim4egster/coreth/core/rawdb"
    43  	"github.com/dim4egster/coreth/core/state"
    44  	"github.com/dim4egster/coreth/core/state/snapshot"
    45  	"github.com/dim4egster/coreth/core/types"
    46  	"github.com/dim4egster/coreth/core/vm"
    47  	"github.com/dim4egster/coreth/ethdb"
    48  	"github.com/dim4egster/coreth/metrics"
    49  	"github.com/dim4egster/coreth/params"
    50  	"github.com/dim4egster/coreth/trie"
    51  	"github.com/ethereum/go-ethereum/common"
    52  	"github.com/ethereum/go-ethereum/event"
    53  	"github.com/ethereum/go-ethereum/log"
    54  	lru "github.com/hashicorp/golang-lru"
    55  )
    56  
    57  var (
    58  	acceptorQueueGauge           = metrics.NewRegisteredGauge("blockchain/acceptor/queue/size", nil)
    59  	processedBlockGasUsedCounter = metrics.NewRegisteredCounter("blockchain/blocks/gas/used/processed", nil)
    60  	acceptedBlockGasUsedCounter  = metrics.NewRegisteredCounter("blockchain/blocks/gas/used/accepted", nil)
    61  	badBlockCounter              = metrics.NewRegisteredCounter("blockchain/blocks/bad/count", nil)
    62  
    63  	ErrRefuseToCorruptArchiver = errors.New("node has operated with pruning disabled, shutting down to prevent missing tries")
    64  
    65  	errFutureBlockUnsupported  = errors.New("future block insertion not supported")
    66  	errCacheConfigNotSpecified = errors.New("must specify cache config")
    67  )
    68  
    69  const (
    70  	bodyCacheLimit     = 256
    71  	blockCacheLimit    = 256
    72  	receiptsCacheLimit = 32
    73  	txLookupCacheLimit = 1024
    74  	badBlockLimit      = 10
    75  	TriesInMemory      = 128
    76  
    77  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    78  	//
    79  	// Changelog:
    80  	//
    81  	// - Version 4
    82  	//   The following incompatible database changes were added:
    83  	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
    84  	//   * the `Bloom` field of receipt is deleted
    85  	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
    86  	// - Version 5
    87  	//   The following incompatible database changes were added:
    88  	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
    89  	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
    90  	//     receipts' corresponding block
    91  	// - Version 6
    92  	//   The following incompatible database changes were added:
    93  	//   * Transaction lookup information stores the corresponding block number instead of block hash
    94  	// - Version 7
    95  	//   The following incompatible database changes were added:
    96  	//   * Use freezer as the ancient database to maintain all ancient data
    97  	// - Version 8
    98  	//   The following incompatible database changes were added:
    99  	//   * New scheme for contract code in order to separate the codes and trie nodes
   100  	BlockChainVersion uint64 = 8
   101  
   102  	// statsReportLimit is the time limit during import and export after which we
   103  	// always print out progress. This avoids the user wondering what's going on.
   104  	statsReportLimit = 8 * time.Second
   105  )
   106  
   107  // CacheConfig contains the configuration values for the trie caching/pruning
   108  // that's resident in a blockchain.
   109  type CacheConfig struct {
   110  	TrieCleanLimit                  int     // Memory allowance (MB) to use for caching trie nodes in memory
   111  	TrieDirtyLimit                  int     // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk
   112  	TrieDirtyCommitTarget           int     // Memory limit (MB) to target for the dirties cache before invoking commit
   113  	CommitInterval                  uint64  // Commit the trie every [CommitInterval] blocks.
   114  	Pruning                         bool    // Whether to enable trie pruning (if false, the node retains all tries as an archive node)
   115  	AcceptorQueueLimit              int     // Blocks to queue before blocking during acceptance
   116  	PopulateMissingTries            *uint64 // If non-nil, sets the starting height for re-generating historical tries.
   117  	PopulateMissingTriesParallelism int     // Number of concurrent readers to use when re-generating missing tries
   118  	AllowMissingTries               bool    // Whether to allow an archive node to run with pruning enabled
   119  	SnapshotDelayInit               bool    // Whether to initialize snapshots on startup or wait for external call
   120  	SnapshotLimit                   int     // Memory allowance (MB) to use for caching snapshot entries in memory
   121  	SnapshotAsync                   bool    // Generate snapshot tree async
   122  	SnapshotVerify                  bool    // Verify generated snapshots
   123  	SkipSnapshotRebuild             bool    // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests)
   124  	Preimages                       bool    // Whether to store preimage of trie key to the disk
   125  }
   126  
   127  var DefaultCacheConfig = &CacheConfig{
   128  	TrieCleanLimit:        256,
   129  	TrieDirtyLimit:        256,
   130  	TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB)
   131  	Pruning:               true,
   132  	CommitInterval:        4096,
   133  	AcceptorQueueLimit:    64, // Provides 2 minutes of buffer (2s block target) for a commit delay
   134  	SnapshotLimit:         256,
   135  }
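
// Example (sketch): deriving an archive-style configuration from the defaults
// above. Illustrative only; treating SnapshotLimit=0 as "snapshots disabled"
// is an assumption carried over from upstream go-ethereum semantics.
//
//	archiveConfig := *DefaultCacheConfig
//	archiveConfig.Pruning = false   // retain every trie (archive behaviour)
//	archiveConfig.SnapshotLimit = 0 // assumed to disable the snapshot tree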
   136  
   137  // BlockChain represents the canonical chain given a database with a genesis
   138  // block. The BlockChain manages chain imports, reverts and chain
   139  // reorganisations.
   140  //
   141  // Importing blocks into the blockchain happens according to the set of rules
   142  // defined by the two-stage Validator. Processing of blocks is done using the
   143  // Processor, which processes the included transactions. The validation of the
   144  // state is done in the second part of the Validator. Failing either stage
   145  // aborts the import.
   146  //
   147  // The BlockChain also helps in returning blocks from **any** chain included
   148  // in the database as well as blocks that represent the canonical chain. It is
   149  // important to note that GetBlock can return any block, which need not be in
   150  // the canonical chain, whereas GetBlockByNumber always refers to the canonical chain.
   151  type BlockChain struct {
   152  	chainConfig *params.ChainConfig // Chain & network configuration
   153  	cacheConfig *CacheConfig        // Cache configuration for pruning
   154  
   155  	db ethdb.Database // Low level persistent database to store final content in
   156  
   157  	snaps *snapshot.Tree // Snapshot tree for fast trie leaf access
   158  
   159  	hc                *HeaderChain
   160  	rmLogsFeed        event.Feed
   161  	chainFeed         event.Feed
   162  	chainSideFeed     event.Feed
   163  	chainHeadFeed     event.Feed
   164  	chainAcceptedFeed event.Feed
   165  	logsFeed          event.Feed
   166  	logsAcceptedFeed  event.Feed
   167  	blockProcFeed     event.Feed
   168  	txAcceptedFeed    event.Feed
   169  	scope             event.SubscriptionScope
   170  	genesisBlock      *types.Block
   171  
   172  	// This mutex synchronizes chain write operations.
   173  	// Readers don't need to take it, they can just read the database.
   174  	chainmu sync.RWMutex
   175  
   176  	currentBlock atomic.Value // Current head of the block chain
   177  
   178  	stateCache    state.Database // State database to reuse between imports (contains state cache)
   179  	stateManager  TrieWriter
   180  	bodyCache     *lru.Cache // Cache for the most recent block bodies
   181  	receiptsCache *lru.Cache // Cache for the most recent receipts per block
   182  	blockCache    *lru.Cache // Cache for the most recent entire blocks
   183  	txLookupCache *lru.Cache // Cache for the most recent transaction lookup data.
   184  
   185  	running int32 // 0 if chain is running, 1 when stopped
   186  
   187  	engine     consensus.Engine
   188  	validator  Validator  // Block and state validator interface
   189  	prefetcher Prefetcher // Block state prefetcher interface
   190  	processor  Processor  // Block transaction processor interface
   191  	vmConfig   vm.Config
   192  
   193  	badBlocks *lru.Cache // Bad block cache
   194  
   195  	lastAccepted *types.Block // Prevents reorgs past this height
   196  
   197  	senderCacher *TxSenderCacher
   198  
   199  	// [acceptorQueue] is a processing queue for the Acceptor. This is
   200  	// different than [chainAcceptedFeed], which is sent an event after an accepted
   201  	// block is processed (after each loop of the accepted worker). If there is a
   202  	// clean shutdown, all items inserted into the [acceptorQueue] will be processed.
   203  	acceptorQueue chan *types.Block
   204  
   205  	// [acceptorClosingLock], and [acceptorClosed] are used
   206  	// to synchronize the closing of the [acceptorQueue] channel.
   207  	//
   208  	// Because we can't check if a channel is closed without reading from it
   209  	// (which we don't want to do as we may remove a processing block), we need
   210  	// to use a second variable to ensure we don't close a closed channel.
   211  	acceptorClosingLock sync.RWMutex
   212  	acceptorClosed      bool
   213  
   214  	// [acceptorWg] is used to wait for the acceptorQueue to clear. This is used
   215  	// during shutdown and in tests.
   216  	acceptorWg sync.WaitGroup
   217  
   218  	// [acceptorTip] is the last block processed by the acceptor. This is
   219  	// returned as the LastAcceptedBlock() to ensure clients get only fully
   220  	// processed blocks. This may be equal to [lastAccepted].
   221  	acceptorTip     *types.Block
   222  	acceptorTipLock sync.Mutex
   223  }
   224  
   225  // NewBlockChain returns a fully initialised block chain using information
   226  // available in the database. It initialises the default Ethereum Validator and
   227  // Processor.
   228  func NewBlockChain(
   229  	db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine,
   230  	vmConfig vm.Config, lastAcceptedHash common.Hash,
   231  ) (*BlockChain, error) {
   232  	if cacheConfig == nil {
   233  		return nil, errCacheConfigNotSpecified
   234  	}
   235  	bodyCache, _ := lru.New(bodyCacheLimit)
   236  	receiptsCache, _ := lru.New(receiptsCacheLimit)
   237  	blockCache, _ := lru.New(blockCacheLimit)
   238  	txLookupCache, _ := lru.New(txLookupCacheLimit)
   239  	badBlocks, _ := lru.New(badBlockLimit)
   240  
   241  	bc := &BlockChain{
   242  		chainConfig: chainConfig,
   243  		cacheConfig: cacheConfig,
   244  		db:          db,
   245  		stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
   246  			Cache:     cacheConfig.TrieCleanLimit,
   247  			Preimages: cacheConfig.Preimages,
   248  		}),
   249  		bodyCache:     bodyCache,
   250  		receiptsCache: receiptsCache,
   251  		blockCache:    blockCache,
   252  		txLookupCache: txLookupCache,
   253  		engine:        engine,
   254  		vmConfig:      vmConfig,
   255  		badBlocks:     badBlocks,
   256  		senderCacher:  newTxSenderCacher(runtime.NumCPU()),
   257  		acceptorQueue: make(chan *types.Block, cacheConfig.AcceptorQueueLimit),
   258  	}
   259  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   260  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   261  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   262  
   263  	var err error
   264  	bc.hc, err = NewHeaderChain(db, chainConfig, engine)
   265  	if err != nil {
   266  		return nil, err
   267  	}
   268  	bc.genesisBlock = bc.GetBlockByNumber(0)
   269  	if bc.genesisBlock == nil {
   270  		return nil, ErrNoGenesis
   271  	}
   272  
   273  	var nilBlock *types.Block
   274  	bc.currentBlock.Store(nilBlock)
   275  
   276  	// Create the state manager
   277  	bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), cacheConfig)
   278  
   279  	// Re-generate current block state if it is missing
   280  	if err := bc.loadLastState(lastAcceptedHash); err != nil {
   281  		return nil, err
   282  	}
   283  
   284  	// After loading the last state (and reprocessing if necessary), we are
   285  	// guaranteed that [acceptorTip] is equal to [lastAccepted].
   286  	//
   287  	// It is critical to update this value before performing any state repairs so
   288  	// that all accepted blocks can be considered.
   289  	bc.acceptorTip = bc.lastAccepted
   290  
   291  	// Make sure the state associated with the block is available
   292  	head := bc.CurrentBlock()
   293  	if !bc.HasState(head.Root()) {
   294  		return nil, fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash())
   295  	}
   296  
   297  	if err := bc.protectTrieIndex(); err != nil {
   298  		return nil, err
   299  	}
   300  
   301  	// Populate missing tries if required
   302  	if err := bc.populateMissingTries(); err != nil {
   303  		return nil, fmt.Errorf("could not populate missing tries: %v", err)
   304  	}
   305  
   306  	// If snapshot initialization is delayed for fast sync, skip initializing it here.
   307  	// This assumes that no blocks will be processed until ResetState is called
   308  	// to initialize the chain state after fast sync completes.
   309  	if !bc.cacheConfig.SnapshotDelayInit {
   310  		// Load any existing snapshot, regenerating it if loading failed (if not
   311  		// already initialized in recovery)
   312  		bc.initSnapshot(head)
   313  	}
   314  
   315  	// Start processing accepted blocks effects in the background
   316  	go bc.startAcceptor()
   317  
   318  	return bc, nil
   319  }
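
// Example (sketch): constructing a chain over an in-memory database. This
// assumes the genesis block has already been committed to [db] (e.g. via
// SetupGenesisBlock) and that the dummy test engine from coreth's
// consensus/dummy package is available; both live outside this file.
//
//	db := rawdb.NewMemoryDatabase()
//	// ... commit a genesis block to db ...
//	bc, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig,
//		dummy.NewFaker(), vm.Config{}, common.Hash{}) // zero hash loads genesis state
//	if err != nil {
//		log.Crit("failed to create blockchain", "err", err)
//	}
//	defer bc.Stop()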
   320  
   321  // writeBlockAcceptedIndices writes any indices that must be persisted for an accepted block.
   322  // This includes the following:
   323  // - transaction lookup indices
   324  // - updating the acceptor tip index
   325  func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block) error {
   326  	batch := bc.db.NewBatch()
   327  	rawdb.WriteTxLookupEntriesByBlock(batch, b)
   328  	if err := rawdb.WriteAcceptorTip(batch, b.Hash()); err != nil {
   329  		return fmt.Errorf("%w: failed to write acceptor tip key", err)
   330  	}
   331  	if err := batch.Write(); err != nil {
   332  		return fmt.Errorf("%w: failed to write tx lookup entries batch", err)
   333  	}
   334  	return nil
   335  }
   336  
   337  // flattenSnapshot attempts to flatten the snapshot diff layer of block [hash] onto disk.
   338  func (bc *BlockChain) flattenSnapshot(postAbortWork func() error, hash common.Hash) error {
   339  	// If snapshots are not initialized, perform [postAbortWork] immediately.
   340  	if bc.snaps == nil {
   341  		return postAbortWork()
   342  	}
   343  
   344  	// Abort snapshot generation before pruning anything from trie database
   345  	// (could occur in AcceptTrie)
   346  	bc.snaps.AbortGeneration()
   347  
   348  	// Perform work after snapshot generation is aborted (typically trie updates)
   349  	if err := postAbortWork(); err != nil {
   350  		return err
   351  	}
   352  
   353  	// Flatten the entire snap Trie to disk
   354  	//
   355  	// Note: This resumes snapshot generation.
   356  	return bc.snaps.Flatten(hash)
   357  }
   358  
   359  // startAcceptor processes items from the [acceptorQueue]. The loop (and hence
   360  // the goroutine) exits once the [acceptorQueue] channel is closed by [stopAcceptor].
   361  func (bc *BlockChain) startAcceptor() {
   362  	log.Info("Starting Acceptor", "queue length", bc.cacheConfig.AcceptorQueueLimit)
   363  
   364  	for next := range bc.acceptorQueue {
   365  		acceptorQueueGauge.Dec(1)
   366  
   367  		if err := bc.flattenSnapshot(func() error {
   368  			return bc.stateManager.AcceptTrie(next)
   369  		}, next.Hash()); err != nil {
   370  			log.Crit("unable to flatten snapshot from acceptor", "blockHash", next.Hash(), "err", err)
   371  		}
   372  
   373  		// Update last processed and transaction lookup index
   374  		if err := bc.writeBlockAcceptedIndices(next); err != nil {
   375  			log.Crit("failed to write accepted block effects", "err", err)
   376  		}
   377  
   378  		// Fetch block logs
   379  		logs := bc.gatherBlockLogs(next.Hash(), next.NumberU64(), false)
   380  
   381  		// Update accepted feeds
   382  		bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: logs})
   383  		if len(logs) > 0 {
   384  			bc.logsAcceptedFeed.Send(logs)
   385  		}
   386  		if len(next.Transactions()) != 0 {
   387  			bc.txAcceptedFeed.Send(NewTxsEvent{next.Transactions()})
   388  		}
   389  
   390  		bc.acceptorTipLock.Lock()
   391  		bc.acceptorTip = next
   392  		bc.acceptorTipLock.Unlock()
   393  		bc.acceptorWg.Done()
   394  	}
   395  }
   396  
   397  // addAcceptorQueue adds a new *types.Block to the [acceptorQueue]. This will
   398  // block if there are [AcceptorQueueLimit] items in [acceptorQueue].
   399  func (bc *BlockChain) addAcceptorQueue(b *types.Block) {
   400  	// We only acquire a read lock here because it is ok to add items to the
   401  	// [acceptorQueue] concurrently.
   402  	bc.acceptorClosingLock.RLock()
   403  	defer bc.acceptorClosingLock.RUnlock()
   404  
   405  	if bc.acceptorClosed {
   406  		return
   407  	}
   408  
   409  	acceptorQueueGauge.Inc(1)
   410  	bc.acceptorWg.Add(1)
   411  	bc.acceptorQueue <- b
   412  }
   413  
   414  // DrainAcceptorQueue blocks until all items in [acceptorQueue] have been
   415  // processed.
   416  func (bc *BlockChain) DrainAcceptorQueue() {
   417  	bc.acceptorClosingLock.Lock()
   418  	defer bc.acceptorClosingLock.Unlock()
   419  
   420  	if bc.acceptorClosed {
   421  		return
   422  	}
   423  
   424  	bc.acceptorWg.Wait()
   425  }
   426  
   427  // stopAcceptor sends a signal to the Acceptor to stop processing accepted
   428  // blocks. The Acceptor will exit once all items in [acceptorQueue] have been
   429  // processed.
   430  func (bc *BlockChain) stopAcceptor() {
   431  	bc.acceptorClosingLock.Lock()
   432  	defer bc.acceptorClosingLock.Unlock()
   433  
   434  	// If [acceptorClosed] is already true, we should just return here instead
   435  	// of attempting to close [acceptorQueue] more than once (closing a closed
   436  	// channel panics).
   437  	//
   438  	// This typically happens when a test calls [stopAcceptor] directly (prior to
   439  	// shutdown) and then [stopAcceptor] is called again in shutdown.
   440  	if bc.acceptorClosed {
   441  		return
   442  	}
   443  
   444  	// Although nothing should be added to [acceptorQueue] after
   445  	// [acceptorClosed] is updated, we close the channel so the Acceptor
   446  	// goroutine exits.
   447  	bc.acceptorWg.Wait()
   448  	bc.acceptorClosed = true
   449  	close(bc.acceptorQueue)
   450  }
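
// The flag-plus-lock shape used above is the standard Go idiom for closing a
// channel at most once; a self-contained sketch of the same pattern:
//
//	type onceCloser struct {
//		mu     sync.Mutex
//		closed bool
//		ch     chan struct{}
//	}
//
//	func (c *onceCloser) close() {
//		c.mu.Lock()
//		defer c.mu.Unlock()
//		if c.closed {
//			return // closing an already-closed channel would panic
//		}
//		c.closed = true
//		close(c.ch)
//	}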
   451  
   452  func (bc *BlockChain) InitializeSnapshots() {
   453  	bc.chainmu.Lock()
   454  	defer bc.chainmu.Unlock()
   455  
   456  	head := bc.CurrentBlock()
   457  	bc.initSnapshot(head)
   458  }
   459  
   460  // SenderCacher returns the *TxSenderCacher used within the core package.
   461  func (bc *BlockChain) SenderCacher() *TxSenderCacher {
   462  	return bc.senderCacher
   463  }
   464  
   465  // loadLastState loads the last known chain state from the database. This method
   466  // assumes that the chain manager mutex is held.
   467  func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error {
   468  	// Initialize genesis state
   469  	if lastAcceptedHash == (common.Hash{}) {
   470  		return bc.loadGenesisState()
   471  	}
   472  
   473  	// Restore the last known head block
   474  	head := rawdb.ReadHeadBlockHash(bc.db)
   475  	if head == (common.Hash{}) {
   476  		return errors.New("could not read head block hash")
   477  	}
   478  	// Make sure the entire head block is available
   479  	currentBlock := bc.GetBlockByHash(head)
   480  	if currentBlock == nil {
   481  		return fmt.Errorf("could not load head block %s", head.Hex())
   482  	}
   483  	// Everything seems to be fine, set as the head block
   484  	bc.currentBlock.Store(currentBlock)
   485  
   486  	// Restore the last known head header
   487  	currentHeader := currentBlock.Header()
   488  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   489  		if header := bc.GetHeaderByHash(head); header != nil {
   490  			currentHeader = header
   491  		}
   492  	}
   493  	bc.hc.SetCurrentHeader(currentHeader)
   494  
   495  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
   496  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
   497  
   498  	// Next, set the last accepted block and re-org to it if necessary.
   499  	bc.lastAccepted = bc.GetBlockByHash(lastAcceptedHash)
   500  	if bc.lastAccepted == nil {
   501  		return fmt.Errorf("could not load last accepted block")
   502  	}
   503  
   504  	// This ensures that the head block is updated to the last accepted block on startup
   505  	if err := bc.setPreference(bc.lastAccepted); err != nil {
   506  		return fmt.Errorf("failed to set preference to last accepted block while loading last state: %w", err)
   507  	}
   508  
   509  	// reprocessState is necessary to ensure that the last accepted state is
   510  	// available. The state may not be available if it was not committed due
   511  	// to an unclean shutdown.
   512  	return bc.reprocessState(bc.lastAccepted, 2*bc.cacheConfig.CommitInterval)
   513  }
   514  
   515  func (bc *BlockChain) loadGenesisState() error {
   516  	// Prepare the genesis block and reinitialise the chain
   517  	batch := bc.db.NewBatch()
   518  	rawdb.WriteBlock(batch, bc.genesisBlock)
   519  	if err := batch.Write(); err != nil {
   520  		log.Crit("Failed to write genesis block", "err", err)
   521  	}
   522  	bc.writeHeadBlock(bc.genesisBlock)
   523  
   524  	// Last update all in-memory chain markers
   525  	bc.lastAccepted = bc.genesisBlock
   526  	bc.currentBlock.Store(bc.genesisBlock)
   527  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   528  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   529  	return nil
   530  }
   531  
   532  // Export writes the active chain to the given writer.
   533  func (bc *BlockChain) Export(w io.Writer) error {
   534  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   535  }
   536  
   537  // ExportN writes a subset of the active chain to the given writer.
   538  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   539  	return bc.ExportCallback(func(block *types.Block) error {
   540  		return block.EncodeRLP(w)
   541  	}, first, last)
   542  }
   543  
   544  // ExportCallback invokes [callback] for every block from [first] to [last] in order.
   545  func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, first uint64, last uint64) error {
   546  	if first > last {
   547  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   548  	}
   549  	log.Info("Exporting batch of blocks", "count", last-first+1)
   550  
   551  	var (
   552  		parentHash common.Hash
   553  		start      = time.Now()
   554  		reported   = time.Now()
   555  	)
   556  	for nr := first; nr <= last; nr++ {
   557  		block := bc.GetBlockByNumber(nr)
   558  		if block == nil {
   559  			return fmt.Errorf("export failed on #%d: not found", nr)
   560  		}
   561  		if nr > first && block.ParentHash() != parentHash {
   562  			return fmt.Errorf("export failed: chain reorg during export")
   563  		}
   564  		parentHash = block.Hash()
   565  		if err := callback(block); err != nil {
   566  			return err
   567  		}
   568  		if time.Since(reported) >= statsReportLimit {
   569  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   570  			reported = time.Now()
   571  		}
   572  	}
   573  	return nil
   574  }
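
// Example (sketch): writing the RLP-encoded chain to a file; the output path
// is illustrative.
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := bc.Export(f); err != nil { // or bc.ExportN(f, 0, 1024) for a range
//		return err
//	}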
   575  
   576  // writeHeadBlock injects a new head block into the current block chain. This method
   577  // assumes that the block is indeed a true head. It will also reset the head
   578  // header to this same block if the current header is older or on a different side chain.
   579  //
   580  // Note, this function assumes that the `mu` mutex is held!
   581  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
   582  	// If the block is on a side chain or an unknown one, force other heads onto it too
   583  	// Add the block to the canonical chain number scheme and mark as the head
   584  	batch := bc.db.NewBatch()
   585  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
   586  
   587  	rawdb.WriteHeadBlockHash(batch, block.Hash())
   588  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
   589  
   590  	// Flush the whole batch into the disk, exit the node if failed
   591  	if err := batch.Write(); err != nil {
   592  		log.Crit("Failed to update chain indexes and markers", "err", err)
   593  	}
   594  	// Update all in-memory chain markers in the last step
   595  	bc.hc.SetCurrentHeader(block.Header())
   596  	bc.currentBlock.Store(block)
   597  }
   598  
   599  // ValidateCanonicalChain confirms a canonical chain is well-formed.
   600  func (bc *BlockChain) ValidateCanonicalChain() error {
   601  	// Ensure all accepted blocks are fully processed
   602  	bc.DrainAcceptorQueue()
   603  
   604  	current := bc.CurrentBlock()
   605  	i := 0
   606  	log.Info("Beginning to validate canonical chain", "startBlock", current.NumberU64())
   607  
   608  	for current.Hash() != bc.genesisBlock.Hash() {
   609  		blkByHash := bc.GetBlockByHash(current.Hash())
   610  		if blkByHash == nil {
   611  			return fmt.Errorf("couldn't find block by hash %s at height %d", current.Hash().String(), current.Number())
   612  		}
   613  		if blkByHash.Hash() != current.Hash() {
   614  			return fmt.Errorf("blockByHash returned a block with an unexpected hash: %s, expected: %s", blkByHash.Hash().String(), current.Hash().String())
   615  		}
   616  		blkByNumber := bc.GetBlockByNumber(current.Number().Uint64())
   617  		if blkByNumber == nil {
   618  			return fmt.Errorf("couldn't find block by number at height %d", current.Number())
   619  		}
   620  		if blkByNumber.Hash() != current.Hash() {
   621  			return fmt.Errorf("blockByNumber returned a block with unexpected hash: %s, expected: %s", blkByNumber.Hash().String(), current.Hash().String())
   622  		}
   623  
   624  		hdrByHash := bc.GetHeaderByHash(current.Hash())
   625  		if hdrByHash == nil {
   626  			return fmt.Errorf("couldn't find block header by hash %s at height %d", current.Hash().String(), current.Number())
   627  		}
   628  		if hdrByHash.Hash() != current.Hash() {
   629  			return fmt.Errorf("hdrByHash returned a block header with an unexpected hash: %s, expected: %s", hdrByHash.Hash().String(), current.Hash().String())
   630  		}
   631  		hdrByNumber := bc.GetHeaderByNumber(current.Number().Uint64())
   632  		if hdrByNumber == nil {
   633  			return fmt.Errorf("couldn't find block header by number at height %d", current.Number())
   634  		}
   635  		if hdrByNumber.Hash() != current.Hash() {
   636  			return fmt.Errorf("hdrByNumber returned a block header with unexpected hash: %s, expected: %s", hdrByNumber.Hash().String(), current.Hash().String())
   637  		}
   638  
   639  		txs := current.Body().Transactions
   640  
   641  		// Transactions are only indexed beneath the last accepted block, so we only check
   642  		// that the transactions have been indexed, if we are checking below the last accepted
   643  		// block.
   644  		if current.NumberU64() <= bc.lastAccepted.NumberU64() {
   645  			// Ensure that all of the transactions have been stored correctly in the canonical
   646  			// chain
   647  			for txIndex, tx := range txs {
   648  				txLookup := bc.GetTransactionLookup(tx.Hash())
   649  				if txLookup == nil {
   650  					return fmt.Errorf("failed to find transaction %s", tx.Hash().String())
   651  				}
   652  				if txLookup.BlockHash != current.Hash() {
   653  					return fmt.Errorf("tx lookup returned with incorrect block hash: %s, expected: %s", txLookup.BlockHash.String(), current.Hash().String())
   654  				}
   655  				if txLookup.BlockIndex != current.Number().Uint64() {
   656  					return fmt.Errorf("tx lookup returned with incorrect block index: %d, expected: %d", txLookup.BlockIndex, current.Number().Uint64())
   657  				}
   658  				if txLookup.Index != uint64(txIndex) {
   659  					return fmt.Errorf("tx lookup returned with incorrect transaction index: %d, expected: %d", txLookup.Index, txIndex)
   660  				}
   661  			}
   662  		}
   663  
   664  		blkReceipts := bc.GetReceiptsByHash(current.Hash())
   665  		if blkReceipts.Len() != len(txs) {
   666  			return fmt.Errorf("found %d transaction receipts, expected %d", blkReceipts.Len(), len(txs))
   667  		}
   668  		for index, txReceipt := range blkReceipts {
   669  			if txReceipt.TxHash != txs[index].Hash() {
   670  				return fmt.Errorf("transaction receipt mismatch, expected %s, but found: %s", txs[index].Hash().String(), txReceipt.TxHash.String())
   671  			}
   672  			if txReceipt.BlockHash != current.Hash() {
   673  				return fmt.Errorf("transaction receipt had block hash %s, but expected %s", txReceipt.BlockHash.String(), current.Hash().String())
   674  			}
   675  			if txReceipt.BlockNumber.Uint64() != current.NumberU64() {
   676  				return fmt.Errorf("transaction receipt had block number %d, but expected %d", txReceipt.BlockNumber.Uint64(), current.NumberU64())
   677  			}
   678  		}
   679  
   680  		i++
   681  		if i%1000 == 0 {
   682  			log.Info("Validate Canonical Chain Update", "totalBlocks", i)
   683  		}
   684  
   685  		parent := bc.GetBlockByHash(current.ParentHash())
   686  		if parent == nil {
   687  			return fmt.Errorf("couldn't find parent block by hash %s at height %d", current.ParentHash().String(), current.NumberU64()-1)
   688  		}
   689  		current = parent
   690  	}
   691  
   692  	return nil
   693  }
   694  
   695  // Stop stops the blockchain service. It stops the Acceptor (draining any
   696  // queued blocks), shuts down the state manager and releases held resources.
   697  func (bc *BlockChain) Stop() {
   698  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   699  		return
   700  	}
   701  
   702  	// Wait for accepted feed to process all remaining items
   703  	log.Info("Stopping Acceptor")
   704  	start := time.Now()
   705  	bc.stopAcceptor()
   706  	log.Info("Acceptor queue drained", "t", time.Since(start))
   707  
   708  	log.Info("Shutting down state manager")
   709  	start = time.Now()
   710  	if err := bc.stateManager.Shutdown(); err != nil {
   711  		log.Error("Failed to Shutdown state manager", "err", err)
   712  	}
   713  	log.Info("State manager shut down", "t", time.Since(start))
   714  	// Flush the collected preimages to disk
   715  	if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil {
   716  		log.Error("Failed to commit trie preimages", "err", err)
   717  	}
   718  
   719  	// Stop senderCacher's goroutines
   720  	log.Info("Shutting down sender cacher")
   721  	bc.senderCacher.Shutdown()
   722  
   723  	// Unsubscribe all subscriptions registered from blockchain.
   724  	log.Info("Closing scope")
   725  	bc.scope.Close()
   726  
   727  	log.Info("Blockchain stopped")
   728  }
   729  
   730  // SetPreference attempts to update the head block to be the provided block and
   731  // emits a ChainHeadEvent if successful. This function will handle all reorg
   732  // side effects, if necessary.
   733  //
   734  // Note: This function should ONLY be called on blocks that have already been
   735  // inserted into the chain.
   736  //
   737  // Assumes [bc.chainmu] is not held by the caller.
   738  func (bc *BlockChain) SetPreference(block *types.Block) error {
   739  	bc.chainmu.Lock()
   740  	defer bc.chainmu.Unlock()
   741  
   742  	return bc.setPreference(block)
   743  }
   744  
   745  // setPreference attempts to update the head block to be the provided block and
   746  // emits a ChainHeadEvent if successful. This function will handle all reorg
   747  // side effects, if necessary.
   748  //
   749  // Assumes [bc.chainmu] is held by the caller.
   750  func (bc *BlockChain) setPreference(block *types.Block) error {
   751  	current := bc.CurrentBlock()
   752  
   753  	// Return early if the current block is already the block
   754  	// we are trying to write.
   755  	if current.Hash() == block.Hash() {
   756  		return nil
   757  	}
   758  
   759  	log.Debug("Setting preference", "number", block.Number(), "hash", block.Hash())
   760  
   761  	// writeKnownBlock updates the head block and will handle any reorg side
   762  	// effects automatically.
   763  	if err := bc.writeKnownBlock(block); err != nil {
   764  		return fmt.Errorf("unable to invoke writeKnownBlock: %w", err)
   765  	}
   766  
   767  	// Send a ChainHeadEvent if we end up altering
   768  	// the head block. Many internal async processes rely on
   769  	// receiving these events (e.g. the TxPool).
   770  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   771  	return nil
   772  }
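
// Example (sketch): consuming head events. SubscribeChainHeadEvent is assumed
// to be the feed accessor exposed elsewhere in this package (as in upstream
// go-ethereum); it is not defined in this file.
//
//	ch := make(chan ChainHeadEvent, 16)
//	sub := bc.SubscribeChainHeadEvent(ch)
//	defer sub.Unsubscribe()
//	for ev := range ch {
//		log.Info("new preferred head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
//	}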
   773  
   774  // LastConsensusAcceptedBlock returns the last block to be marked as accepted.
   775  // Its side effects may or may not yet be fully processed.
   776  func (bc *BlockChain) LastConsensusAcceptedBlock() *types.Block {
   777  	bc.chainmu.Lock()
   778  	defer bc.chainmu.Unlock()
   779  
   780  	return bc.lastAccepted
   781  }
   782  
   783  // LastAcceptedBlock returns the last block to be marked as accepted whose
   784  // side effects have been fully processed.
   785  //
   786  // Note: During initialization, [acceptorTip] is equal to [lastAccepted].
   787  func (bc *BlockChain) LastAcceptedBlock() *types.Block {
   788  	bc.acceptorTipLock.Lock()
   789  	defer bc.acceptorTipLock.Unlock()
   790  
   791  	return bc.acceptorTip
   792  }
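
// Example (sketch): the distinction between the two accessors around Accept
// (defined below).
//
//	_ = bc.Accept(block)                // marks the block accepted and queues its effects
//	_ = bc.LastConsensusAcceptedBlock() // == block immediately after Accept returns
//	bc.DrainAcceptorQueue()             // wait for the Acceptor to catch up
//	_ = bc.LastAcceptedBlock()          // == block once its effects are processed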
   793  
   794  // Accept sets a minimum height at which no reorg can pass. Additionally,
   795  // this function may trigger a reorg if the block being accepted is not in the
   796  // canonical chain.
   797  //
   798  // Assumes [bc.chainmu] is not held by the caller.
   799  func (bc *BlockChain) Accept(block *types.Block) error {
   800  	bc.chainmu.Lock()
   801  	defer bc.chainmu.Unlock()
   802  
   803  	// The parent of [block] must be the last accepted block.
   804  	if bc.lastAccepted.Hash() != block.ParentHash() {
   805  		return fmt.Errorf(
   806  			"expected accepted block to have parent %s:%d but got %s:%d",
   807  			bc.lastAccepted.Hash().Hex(),
   808  			bc.lastAccepted.NumberU64(),
   809  			block.ParentHash().Hex(),
   810  			block.NumberU64()-1,
   811  		)
   812  	}
   813  
   814  	// If the canonical hash at the block height does not match the block we are
   815  	// accepting, we need to trigger a reorg.
   816  	canonical := bc.GetCanonicalHash(block.NumberU64())
   817  	if canonical != block.Hash() {
   818  		log.Debug("Accepting block in non-canonical chain", "number", block.Number(), "hash", block.Hash())
   819  		if err := bc.setPreference(block); err != nil {
   820  			return fmt.Errorf("could not set block %d:%s as preferred: %w", block.Number(), block.Hash(), err)
   821  		}
   822  	}
   823  
   824  	bc.lastAccepted = block
   825  	bc.addAcceptorQueue(block)
   826  	acceptedBlockGasUsedCounter.Inc(int64(block.GasUsed()))
   827  
   828  	return nil
   829  }
   830  
   831  func (bc *BlockChain) Reject(block *types.Block) error {
   832  	bc.chainmu.Lock()
   833  	defer bc.chainmu.Unlock()
   834  
   835  	// Reject Trie
   836  	if err := bc.stateManager.RejectTrie(block); err != nil {
   837  		return fmt.Errorf("unable to reject trie: %w", err)
   838  	}
   839  
   840  	if bc.snaps != nil {
   841  		if err := bc.snaps.Discard(block.Hash()); err != nil {
   842  			log.Error("unable to discard snap from rejected block", "block", block.Hash(), "number", block.NumberU64(), "root", block.Root())
   843  		}
   844  	}
   845  
   846  	// Remove the block since its data is no longer needed
   847  	batch := bc.db.NewBatch()
   848  	rawdb.DeleteBlock(batch, block.Hash(), block.NumberU64())
   849  	if err := batch.Write(); err != nil {
   850  		return fmt.Errorf("failed to write delete block batch: %w", err)
   851  	}
   852  
   853  	return nil
   854  }
   855  
   856  // writeKnownBlock updates the head block flag with a known block
   857  // and introduces chain reorg if necessary.
   858  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
   859  	current := bc.CurrentBlock()
   860  	if block.ParentHash() != current.Hash() {
   861  		if err := bc.reorg(current, block); err != nil {
   862  			return err
   863  		}
   864  	}
   865  	bc.writeHeadBlock(block)
   866  	return nil
   867  }
   868  
   869  // writeCanonicalBlockWithLogs writes the new head [block] and emits events
   870  // for the new head block.
   871  func (bc *BlockChain) writeCanonicalBlockWithLogs(block *types.Block, logs []*types.Log) {
   872  	bc.writeHeadBlock(block)
   873  	bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
   874  	if len(logs) > 0 {
   875  		bc.logsFeed.Send(logs)
   876  	}
   877  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   878  }
   879  
   880  // newTip returns a boolean indicating if the block should be appended to
   881  // the canonical chain.
   882  func (bc *BlockChain) newTip(block *types.Block) bool {
   883  	return block.ParentHash() == bc.CurrentBlock().Hash()
   884  }
   885  
   886  // writeBlockAndSetHead persists the block and associated state to the database
   887  // and optimistically updates the canonical chain if [block] extends the current
   888  // canonical chain.
   889  // writeBlockAndSetHead expects to be the last verification step during InsertBlock
   890  // since it creates a reference that will only be cleaned up by Accept/Reject.
   891  func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
   892  	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
   893  		return err
   894  	}
   895  
   896  	// If [block] represents a new tip of the canonical chain, we optimistically add it before
   897  	// setPreference is called. Otherwise, we consider it a side chain block.
   898  	if bc.newTip(block) {
   899  		bc.writeCanonicalBlockWithLogs(block, logs)
   900  	} else {
   901  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
   902  	}
   903  
   904  	return nil
   905  }
   906  
   907  // writeBlockWithState writes the block and all associated state to the database,
   908  // but it expects the chain mutex to be held.
   909  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
   910  	// Regardless of the canonical status, write the block itself to the database.
   911  	//
   912  	// Note all the components of block(hash->number map, header, body, receipts)
   913  	// should be written atomically. BlockBatch is used for containing all components.
   914  	blockBatch := bc.db.NewBatch()
   915  	rawdb.WriteBlock(blockBatch, block)
   916  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
   917  	rawdb.WritePreimages(blockBatch, state.Preimages())
   918  	if err := blockBatch.Write(); err != nil {
   919  		log.Crit("Failed to write block into disk", "err", err)
   920  	}
   921  
   922  	// Commit all cached state changes into underlying memory database.
   923  	// If snapshots are enabled, call CommitWithSnap to explicitly create a snapshot
   924  	// diff layer for the block.
   925  	var err error
   926  	if bc.snaps == nil {
   927  		_, err = state.Commit(bc.chainConfig.IsEIP158(block.Number()), true)
   928  	} else {
   929  		_, err = state.CommitWithSnap(bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
   930  	}
   931  	if err != nil {
   932  		return err
   933  	}
   934  
   935  	// Note: InsertTrie must be the last step in verification that can return an error.
   936  	// This allows [stateManager] to assume that if it inserts a trie without returning an
   937  	// error then the block has passed verification and either AcceptTrie/RejectTrie will
   938  	// eventually be called on [root] unless a fatal error occurs. It does not assume that
   939  	// the node will not shutdown before either AcceptTrie/RejectTrie is called.
   940  	if err := bc.stateManager.InsertTrie(block); err != nil {
   941  		if bc.snaps != nil {
   942  			discardErr := bc.snaps.Discard(block.Hash())
   943  			if discardErr != nil {
   944  				log.Debug("failed to discard snapshot after being unable to insert block trie", "block", block.Hash(), "root", block.Root())
   945  			}
   946  		}
   947  		return err
   948  	}
   949  	return nil
   950  }
   951  
   952  // InsertChain attempts to insert the given batch of blocks into the canonical
   953  // chain or, otherwise, create a fork. If an error is returned, it will return
   954  // the index number of the failing block as well as an error describing what went
   955  // wrong.
   956  //
   957  // After insertion is done, all accumulated events will be fired.
   958  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
   959  	// Sanity check that we have something meaningful to import
   960  	if len(chain) == 0 {
   961  		return 0, nil
   962  	}
   963  
   964  	bc.blockProcFeed.Send(true)
   965  	defer bc.blockProcFeed.Send(false)
   966  
   967  	// Do a sanity check that the provided chain is actually ordered and linked.
   968  	for i := 1; i < len(chain); i++ {
   969  		block, prev := chain[i], chain[i-1]
   970  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
   971  			log.Error("Non contiguous block insert",
   972  				"number", block.Number(),
   973  				"hash", block.Hash(),
   974  				"parent", block.ParentHash(),
   975  				"prevnumber", prev.Number(),
   976  				"prevhash", prev.Hash(),
   977  			)
   978  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
   979  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
   980  		}
   981  	}
   982  	// Pre-checks passed, start the full block imports
   983  	bc.chainmu.Lock()
   984  	defer bc.chainmu.Unlock()
   985  	for n, block := range chain {
   986  		if err := bc.insertBlock(block, true); err != nil {
   987  			return n, err
   988  		}
   989  	}
   990  
   991  	return len(chain), nil
   992  }
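
// Example (sketch): the typical insert-then-accept lifecycle. [blocks] is
// assumed to be a contiguous batch produced elsewhere (e.g. by a test chain
// generator).
//
//	if n, err := bc.InsertChain(blocks); err != nil {
//		return fmt.Errorf("insert failed at index %d: %w", n, err)
//	}
//	for _, b := range blocks {
//		if err := bc.Accept(b); err != nil { // blocks must be accepted in order
//			return err
//		}
//	}
//	bc.DrainAcceptorQueue() // flush the accepted side effects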
   993  
   994  func (bc *BlockChain) InsertBlock(block *types.Block) error {
   995  	return bc.InsertBlockManual(block, true)
   996  }
   997  
   998  func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error {
   999  	bc.blockProcFeed.Send(true)
  1000  	defer bc.blockProcFeed.Send(false)
  1001  
  1002  	bc.chainmu.Lock()
  1003  	err := bc.insertBlock(block, writes)
  1004  	bc.chainmu.Unlock()
  1005  
  1006  	return err
  1007  }
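
// Example (sketch): the [writes] flag allows verifying a block without
// persisting it, in which case neither Accept nor Reject needs to follow.
//
//	if err := bc.InsertBlockManual(block, false); err != nil {
//		return err // block failed verification; nothing was written
//	}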
  1008  
  1009  // gatherBlockLogs fetches logs from a previously inserted block.
  1010  func (bc *BlockChain) gatherBlockLogs(hash common.Hash, number uint64, removed bool) []*types.Log {
  1011  	receipts := rawdb.ReadReceipts(bc.db, hash, number, bc.chainConfig)
  1012  	var logs []*types.Log
  1013  	for _, receipt := range receipts {
  1014  		for _, log := range receipt.Logs {
  1015  			l := *log
  1016  			if removed {
  1017  				l.Removed = true
  1018  			}
  1019  			logs = append(logs, &l)
  1020  		}
  1021  	}
  1022  
  1023  	return logs
  1024  }
  1025  
  1026  func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
  1027  	bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())), block.Transactions())
  1028  
  1029  	err := bc.engine.VerifyHeader(bc, block.Header())
  1030  	if err == nil {
  1031  		err = bc.validator.ValidateBody(block)
  1032  	}
  1033  
  1034  	switch {
  1035  	case errors.Is(err, ErrKnownBlock):
  1036  		// even if the block is already known, we still need to generate the
  1037  		// snapshot layer and add a reference to the triedb, so we re-execute
  1038  		// the block. Note that insertBlock should not be called again on a
  1039  		// block for which it has already returned nil.
  1040  		if bc.newTip(block) {
  1041  			log.Debug("Setting head to be known block", "number", block.Number(), "hash", block.Hash())
  1042  		} else {
  1043  			log.Debug("Reprocessing already known block", "number", block.Number(), "hash", block.Hash())
  1044  		}
  1045  
  1046  	// If an ancestor has been pruned, then this block cannot be accepted.
  1047  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1048  		return errors.New("side chain insertion is not supported")
  1049  
  1050  	// Future blocks are not supported, but should not be reported, so we return an error
  1051  	// early here
  1052  	case errors.Is(err, consensus.ErrFutureBlock):
  1053  		return errFutureBlockUnsupported
  1054  
  1055  	// Some other error occurred, abort
  1056  	case err != nil:
  1057  		bc.reportBlock(block, nil, err)
  1058  		return err
  1059  	}
  1060  	// No validation errors for the block
  1061  	var activeState *state.StateDB
  1062  	defer func() {
  1063  		// The chain importer is starting and stopping trie prefetchers. If a bad
  1064  		// block or other error is hit, however, an early return may not properly
  1065  		// terminate the background threads. This defer ensures that any dangling
  1066  		// prefetcher is cleaned up, without deferring each and holding on to live refs.
  1067  		if activeState != nil {
  1068  			activeState.StopPrefetcher()
  1069  		}
  1070  	}()
  1071  
  1072  	// Record the start time so the insertion log below can report elapsed time
  1073  	start := time.Now()
  1074  
  1075  	// Retrieve the parent block and its state to execute the block on top of
  1076  	parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1077  	statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
  1078  	if err != nil {
  1079  		return err
  1080  	}
  1081  
  1082  	// Enable prefetching to pull in trie node paths while processing transactions
  1083  	statedb.StartPrefetcher("chain")
  1084  	activeState = statedb
  1085  
  1086  	// Process the block using the parent state as the reference point. The
  1087  	// prefetcher started above concurrently pulls in account/storage trie
  1088  	// nodes touched by the transactions.
  1089  	receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig)
  1090  	if serr := statedb.Error(); serr != nil {
  1091  		log.Error("statedb error encountered", "err", serr, "number", block.Number(), "hash", block.Hash())
  1092  	}
  1093  	if err != nil {
  1094  		bc.reportBlock(block, receipts, err)
  1095  		return err
  1096  	}
  1097  
  1098  	// Validate the state using the default validator
  1099  	if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1100  		bc.reportBlock(block, receipts, err)
  1101  		return err
  1102  	}
  1103  
  1104  	// If [writes] is disabled, skip [writeBlockWithState] so that we do not write the block
  1105  	// or the state trie to disk.
  1106  	// Note: in pruning mode, this prevents us from generating a reference to the state root.
  1107  	if !writes {
  1108  		return nil
  1109  	}
  1110  
  1111  	// Write the block to the chain and get the status.
  1112  	// writeBlockWithState (called within writeBlockAndSetHead) creates a reference that
  1113  	// will be cleaned up in Accept/Reject so we need to ensure an error cannot occur
  1114  	// later in verification, since that would cause the referenced root to never be dereferenced.
  1115  	if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil {
  1116  		return err
  1117  	}
  1118  	log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1119  		"parentHash", block.ParentHash(),
  1120  		"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1121  		"elapsed", common.PrettyDuration(time.Since(start)),
  1122  		"root", block.Root(), "baseFeePerGas", block.BaseFee(), "blockGasCost", block.BlockGasCost(),
  1123  	)
  1124  
  1125  	processedBlockGasUsedCounter.Inc(int64(block.GasUsed()))
  1126  	return nil
  1127  }
  1128  
  1129  // collectLogs collects the logs that were generated or removed during
  1130  // the processing of the block that corresponds with the given hash.
  1131  // These logs are later announced as deleted or reborn.
  1132  func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log {
  1133  	number := bc.hc.GetBlockNumber(hash)
  1134  	if number == nil {
  1135  		return nil
  1136  	}
  1137  	return bc.gatherBlockLogs(hash, *number, removed)
  1138  }
  1139  
  1140  // mergeLogs returns a merged log slice with specified sort order.
  1141  func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log {
  1142  	var ret []*types.Log
  1143  	if reverse {
  1144  		for i := len(logs) - 1; i >= 0; i-- {
  1145  			ret = append(ret, logs[i]...)
  1146  		}
  1147  	} else {
  1148  		for i := 0; i < len(logs); i++ {
  1149  			ret = append(ret, logs[i]...)
  1150  		}
  1151  	}
  1152  	return ret
  1153  }
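
// Example (sketch): the effect of the [reverse] flag; the log values are
// fabricated for illustration.
//
//	a := []*types.Log{{Index: 0}, {Index: 1}}
//	b := []*types.Log{{Index: 2}}
//	_ = mergeLogs([][]*types.Log{a, b}, false) // Index order: 0, 1, 2
//	_ = mergeLogs([][]*types.Log{a, b}, true)  // Index order: 2, 0, 1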
  1154  
  1155  // reorg takes the head of an old chain and the head of a new chain, rewrites
  1156  // the canonical chain to follow the new head, and accumulates the logs removed
  1157  // or reborn along the way, posting events about them.
  1158  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1159  	var (
  1160  		newHead = newBlock
  1161  		oldHead = oldBlock
  1162  
  1163  		newChain    types.Blocks
  1164  		oldChain    types.Blocks
  1165  		commonBlock *types.Block
  1166  
  1167  		deletedLogs [][]*types.Log
  1168  		rebirthLogs [][]*types.Log
  1169  	)
  1170  	// Reduce the longer chain to the same number as the shorter one
  1171  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1172  		// Old chain is longer, gather all transactions and logs as deleted ones
  1173  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1174  			oldChain = append(oldChain, oldBlock)
  1175  			// Collect deleted logs for notification
  1176  			logs := bc.collectLogs(oldBlock.Hash(), true)
  1177  			if len(logs) > 0 {
  1178  				deletedLogs = append(deletedLogs, logs)
  1179  			}
  1180  		}
  1181  	} else {
  1182  		// New chain is longer, stash all blocks away for subsequent insertion
  1183  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1184  			newChain = append(newChain, newBlock)
  1185  		}
  1186  	}
  1187  	if oldBlock == nil {
  1188  		return fmt.Errorf("invalid old chain")
  1189  	}
  1190  	if newBlock == nil {
  1191  		return fmt.Errorf("invalid new chain")
  1192  	}
  1193  	// Both sides of the reorg are at the same number, reduce both until the common
  1194  	// ancestor is found
  1195  	for {
  1196  		// If the common ancestor was found, bail out
  1197  		if oldBlock.Hash() == newBlock.Hash() {
  1198  			commonBlock = oldBlock
  1199  			break
  1200  		}
  1201  		// Remove an old block as well as stash away a new block
  1202  		oldChain = append(oldChain, oldBlock)
  1203  		// Collect deleted logs for notification
  1204  		logs := bc.collectLogs(oldBlock.Hash(), true)
  1205  		if len(logs) > 0 {
  1206  			deletedLogs = append(deletedLogs, logs)
  1207  		}
  1208  
  1209  		newChain = append(newChain, newBlock)
  1210  
  1211  		// Step back with both chains
  1212  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  1213  		if oldBlock == nil {
  1214  			return fmt.Errorf("invalid old chain")
  1215  		}
  1216  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1217  		if newBlock == nil {
  1218  			return fmt.Errorf("invalid new chain")
  1219  		}
  1220  	}
  1221  
  1222  	// If the commonBlock is less than the last accepted height, we return an error
  1223  	// because performing a reorg would mean removing an accepted block from the
  1224  	// canonical chain.
  1225  	if commonBlock.NumberU64() < bc.lastAccepted.NumberU64() {
  1226  		return fmt.Errorf("cannot orphan finalized block at height: %d to common block at height: %d", bc.lastAccepted.NumberU64(), commonBlock.NumberU64())
  1227  	}
  1228  
  1229  	// Ensure the user sees large reorgs
  1230  	if len(oldChain) > 0 && len(newChain) > 0 {
  1231  		logFn := log.Info
  1232  		msg := "Resetting chain preference"
  1233  		if len(oldChain) > 63 {
  1234  			msg = "Large chain preference change detected"
  1235  			logFn = log.Warn
  1236  		}
  1237  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1238  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1239  	} else {
  1240  		log.Warn("Unlikely preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number(), "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash())
  1241  	}
  1242  	// Insert the new chain segment, except the new head block, walking it in
  1243  	// reverse so blocks are written in increasing height order.
  1244  	for i := len(newChain) - 1; i >= 1; i-- {
  1245  		// Insert the block in the canonical way, re-writing history
  1246  		bc.writeHeadBlock(newChain[i])
  1247  
  1248  		// Collect reborn logs due to chain reorg
  1249  		logs := bc.collectLogs(newChain[i].Hash(), false)
  1250  		if len(logs) > 0 {
  1251  			rebirthLogs = append(rebirthLogs, logs)
  1252  		}
  1253  	}
  1254  	// Delete any canonical number assignments above the new head
  1255  	indexesBatch := bc.db.NewBatch()
  1256  
  1257  	// Use the height of [newHead] to determine which canonical hashes to remove
  1258  	// in case the new chain is shorter than the old chain, in which case
  1259  	// there may be hashes set on the canonical chain that were invalidated
  1260  	// but not yet overwritten by the re-org.
  1261  	for i := newHead.NumberU64() + 1; ; i++ {
  1262  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  1263  		if hash == (common.Hash{}) {
  1264  			break
  1265  		}
  1266  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  1267  	}
  1268  	if err := indexesBatch.Write(); err != nil {
  1269  		log.Crit("Failed to delete useless indexes", "err", err)
  1270  	}
  1271  
  1272  	// If any logs need to be fired, do it now. In theory we could avoid these
  1273  	// sends when there are no events to fire, but realistically that only
  1274  	// ever happens if we're reorging empty blocks, which will only happen on idle
  1275  	// networks where performance is not an issue either way.
  1276  	if len(deletedLogs) > 0 {
  1277  		bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
  1278  	}
  1279  	if len(rebirthLogs) > 0 {
  1280  		bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
  1281  	}
  1282  	if len(oldChain) > 0 {
  1283  		for i := len(oldChain) - 1; i >= 0; i-- {
  1284  			bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  1285  		}
  1286  	}
  1287  	return nil
  1288  }
  1289  
  1290  type badBlock struct {
  1291  	block  *types.Block
  1292  	reason *BadBlockReason
  1293  }
  1294  
  1295  type BadBlockReason struct {
  1296  	ChainConfig *params.ChainConfig `json:"chainConfig"`
  1297  	Receipts    types.Receipts      `json:"receipts"`
  1298  	Number      uint64              `json:"number"`
  1299  	Hash        common.Hash         `json:"hash"`
  1300  	Error       error               `json:"error"`
  1301  }
  1302  
  1303  func (b *BadBlockReason) String() string {
  1304  	var receiptString string
  1305  	for i, receipt := range b.Receipts {
  1306  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  1307  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  1308  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  1309  	}
  1310  	reason := fmt.Sprintf(`
  1311  	########## BAD BLOCK #########
  1312  	Chain config: %v
  1313  	
  1314  	Number: %v
  1315  	Hash: %#x
  1316  	%v
  1317  	
  1318  	Error: %v
  1319  	##############################
  1320  	`, b.ChainConfig, b.Number, b.Hash, receiptString, b.Error)
  1321  
  1322  	return reason
  1323  }
  1324  
  1325  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network, along with the
  1326  // BadBlockReason that caused each to be reported as a bad block.
  1327  // BadBlocks guarantees that the returned blocks and reasons slices have the same length.
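// A minimal usage sketch (hypothetical caller); the equal lengths make it safe
// to iterate both slices in lockstep:
//
//	blocks, reasons := bc.BadBlocks()
//	for i, block := range blocks {
//		log.Warn("Bad block seen", "hash", block.Hash(), "reason", reasons[i])
//	}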
  1328  func (bc *BlockChain) BadBlocks() ([]*types.Block, []*BadBlockReason) {
  1329  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1330  	reasons := make([]*BadBlockReason, 0, bc.badBlocks.Len())
  1331  	for _, hash := range bc.badBlocks.Keys() {
  1332  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1333  			badBlk := blk.(*badBlock)
  1334  			blocks = append(blocks, badBlk.block)
  1335  			reasons = append(reasons, badBlk.reason)
  1336  		}
  1337  	}
  1338  	return blocks, reasons
  1339  }
  1340  
  1341  // addBadBlock adds a bad block to the bad-block LRU cache
  1342  func (bc *BlockChain) addBadBlock(block *types.Block, reason *BadBlockReason) {
  1343  	bc.badBlocks.Add(block.Hash(), &badBlock{
  1344  		block:  block,
  1345  		reason: reason,
  1346  	})
  1347  }
  1348  
  1349  // reportBlock logs a bad block error.
  1350  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1351  	reason := &BadBlockReason{
  1352  		ChainConfig: bc.chainConfig,
  1353  		Receipts:    receipts,
  1354  		Number:      block.NumberU64(),
  1355  		Hash:        block.Hash(),
  1356  		Error:       err,
  1357  	}
  1358  
  1359  	badBlockCounter.Inc(1)
  1360  	bc.addBadBlock(block, reason)
  1361  	log.Debug(reason.String())
  1362  }
  1363  
  1364  func (bc *BlockChain) RemoveRejectedBlocks(start, end uint64) error {
  1365  	batch := bc.db.NewBatch()
  1366  
  1367  	for i := start; i < end; i++ {
  1368  		hashes := rawdb.ReadAllHashes(bc.db, i)
  1369  		canonicalBlock := bc.GetBlockByNumber(i)
  1370  		if canonicalBlock == nil {
  1371  			return fmt.Errorf("failed to retrieve block by number at height %d", i)
  1372  		}
  1373  		canonicalHash := canonicalBlock.Hash()
  1374  		for _, hash := range hashes {
  1375  			if hash == canonicalHash {
  1376  				continue
  1377  			}
  1378  			rawdb.DeleteBlock(batch, hash, i)
  1379  		}
  1380  
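		// Write and reset the batch once per height to keep its memory
		// footprint bounded while deleting a potentially large range.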
  1381  		if err := batch.Write(); err != nil {
  1382  			return fmt.Errorf("failed to write batch deleting rejected blocks at height %d: %w", i, err)
  1383  		}
  1384  		batch.Reset()
  1385  	}
  1386  
  1387  	return nil
  1388  }
  1389  
  1390  // reprocessBlock reprocesses a previously accepted block. This is often used
  1391  // to regenerate previously pruned state tries.
  1392  func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) (common.Hash, error) {
  1393  	// Retrieve the parent block and its state to execute block
  1394  	var (
  1395  		statedb    *state.StateDB
  1396  		err        error
  1397  		parentRoot = parent.Root()
  1398  	)
  1399  	// We don't simply use [NewWithSnapshot] here because it doesn't return an
  1400  	// error if [bc.snaps != nil] and [bc.snaps.Snapshot(parentRoot) == nil].
  1401  	if bc.snaps == nil {
  1402  		statedb, err = state.New(parentRoot, bc.stateCache, nil)
  1403  	} else {
  1404  		snap := bc.snaps.Snapshot(parentRoot)
  1405  		if snap == nil {
  1406  			return common.Hash{}, fmt.Errorf("failed to get snapshot for parent root: %s", parentRoot)
  1407  		}
  1408  		statedb, err = state.NewWithSnapshot(parentRoot, bc.stateCache, snap)
  1409  	}
  1410  	if err != nil {
  1411  		return common.Hash{}, fmt.Errorf("could not fetch state for (%s: %d): %v", parent.Hash().Hex(), parent.NumberU64(), err)
  1412  	}
  1413  
  1414  	// Enable prefetching to pull in trie node paths while processing transactions
  1415  	statedb.StartPrefetcher("chain")
  1416  	defer func() {
  1417  		statedb.StopPrefetcher()
  1418  	}()
  1419  
  1420  	// Process previously stored block
  1421  	receipts, _, usedGas, err := bc.processor.Process(current, parent.Header(), statedb, vm.Config{})
  1422  	if err != nil {
  1423  		return common.Hash{}, fmt.Errorf("failed to re-process block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
  1424  	}
  1425  
  1426  	// Validate the state using the default validator
  1427  	if err := bc.validator.ValidateState(current, statedb, receipts, usedGas); err != nil {
  1428  		return common.Hash{}, fmt.Errorf("failed to validate state while re-processing block (%s: %d): %v", current.Hash().Hex(), current.NumberU64(), err)
  1429  	}
  1430  	log.Debug("Processed block", "block", current.Hash(), "number", current.NumberU64())
  1431  
  1432  	// Commit all cached state changes into underlying memory database.
  1433  	// If snapshots are enabled, call CommitWithSnap to explicitly create a snapshot
  1434  	// diff layer for the block.
  1435  	if bc.snaps == nil {
  1436  		return statedb.Commit(bc.chainConfig.IsEIP158(current.Number()), false)
  1437  	}
  1438  	return statedb.CommitWithSnap(bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
  1439  }
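// A minimal usage sketch, mirroring how populateMissingTries uses this method:
// re-execute [current] on top of [parent]'s state, then persist the resulting
// trie so it can be accessed directly:
//
//	root, err := bc.reprocessBlock(parent, current)
//	if err != nil {
//		return err
//	}
//	return bc.stateCache.TrieDB().Commit(root, false, nil)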
  1440  
  1441  // initSnapshot instantiates a Snapshot instance and adds it to [bc]
  1442  func (bc *BlockChain) initSnapshot(b *types.Block) {
  1443  	if bc.cacheConfig.SnapshotLimit <= 0 || bc.snaps != nil {
  1444  		return
  1445  	}
  1446  
  1447  	// If we are starting from genesis, generate the original snapshot disk layer
  1448  	// up front, so we can use it while executing blocks in bootstrapping. This
  1449  	// also avoids a costly async generation process when reaching tip.
  1450  	//
  1451  	// Additionally, we should always repair a snapshot if starting at genesis
  1452  	// if [SnapshotLimit] > 0.
  1453  	async := bc.cacheConfig.SnapshotAsync && b.NumberU64() > 0
  1454  	rebuild := !bc.cacheConfig.SkipSnapshotRebuild || b.NumberU64() == 0
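	// For example: when starting at genesis (height 0), the snapshot is always
	// generated synchronously and rebuilt, regardless of the config flags;
	// past genesis, [SnapshotAsync] and [SkipSnapshotRebuild] take effect.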
  1455  	log.Info("Initializing snapshots", "async", async, "rebuild", rebuild, "headHash", b.Hash(), "headRoot", b.Root())
  1456  	var err error
  1457  	bc.snaps, err = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, b.Hash(), b.Root(), async, rebuild, bc.cacheConfig.SnapshotVerify)
  1458  	if err != nil {
  1459  		log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root(), "err", err, "async", async)
  1460  	}
  1461  }
  1462  
  1463  // reprocessState reprocesses the state up to [block], iterating through its ancestors until
  1464  // it reaches a block with a state committed to the database. reprocessState does not use
  1465  // snapshots since the disk layer for snapshots will most likely be above the last committed
  1466  // state that reprocessing will start from.
  1467  func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error {
  1468  	origin := current.NumberU64()
  1469  	acceptorTip, err := rawdb.ReadAcceptorTip(bc.db)
  1470  	if err != nil {
  1471  		return fmt.Errorf("%w: unable to get Acceptor tip", err)
  1472  	}
  1473  	log.Info("Loaded Acceptor tip", "hash", acceptorTip)
  1474  
  1475  	// The acceptor tip is up to date either if it matches the current hash, or it has not been
  1476  	// initialized (i.e., this node has not accepted any blocks asynchronously).
  1477  	acceptorTipUpToDate := acceptorTip == (common.Hash{}) || acceptorTip == current.Hash()
  1478  
  1479  	// If the state is already available and the acceptor tip is up to date, skip re-processing.
  1480  	if bc.HasState(current.Root()) && acceptorTipUpToDate {
  1481  		log.Info("Skipping state reprocessing", "root", current.Root())
  1482  		return nil
  1483  	}
  1484  
  1485  	// If the acceptorTip is a non-empty hash, jump re-processing back to the acceptor tip to ensure that
  1486  	// we re-process at a minimum from the last processed accepted block.
  1487  	// Note: we do not have a guarantee that the last trie on disk will be at a height <= acceptorTip.
  1488  	// Since we need to re-process from at least the acceptorTip to ensure indices are updated correctly
  1489  	// we must start searching for the block to start re-processing at the acceptorTip.
  1490  	// This may occur if we are running in archive mode where every block's trie is committed on insertion
  1491  	// or during an unclean shutdown.
  1492  	if acceptorTip != (common.Hash{}) {
  1493  		current = bc.GetBlockByHash(acceptorTip)
  1494  		if current == nil {
  1495  			return fmt.Errorf("failed to get block for acceptor tip %s", acceptorTip)
  1496  		}
  1497  	}
  1498  
  1499  	for i := 0; i < int(reexec); i++ {
  1500  		// TODO: handle canceled context
  1501  
  1502  		if current.NumberU64() == 0 {
  1503  			return errors.New("genesis state is missing")
  1504  		}
  1505  		parent := bc.GetBlock(current.ParentHash(), current.NumberU64()-1)
  1506  		if parent == nil {
  1507  			return fmt.Errorf("missing block %s:%d", current.ParentHash().Hex(), current.NumberU64()-1)
  1508  		}
  1509  		current = parent
  1510  		_, err = bc.stateCache.OpenTrie(current.Root())
  1511  		if err == nil {
  1512  			break
  1513  		}
  1514  	}
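	// On loop exit, a nil [err] means OpenTrie succeeded on an ancestor's root
	// within [reexec] steps; otherwise the failure is classified below, with
	// missing trie nodes reported as an insufficient [reexec] window.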
  1515  	if err != nil {
  1516  		switch err.(type) {
  1517  		case *trie.MissingNodeError:
  1518  			return fmt.Errorf("required historical state unavailable (reexec=%d)", reexec)
  1519  		default:
  1520  			return err
  1521  		}
  1522  	}
  1523  
  1524  	// State was available at historical point, regenerate
  1525  	var (
  1526  		start        = time.Now()
  1527  		logged       time.Time
  1528  		previousRoot common.Hash
  1529  		triedb       = bc.stateCache.TrieDB()
  1530  		writeIndices bool
  1531  	)
  1532  	// Note: we add 1 since in each iteration, we attempt to re-execute the next block.
  1533  	log.Info("Re-executing blocks to generate state for last accepted block", "from", current.NumberU64()+1, "to", origin)
  1534  	for current.NumberU64() < origin {
  1535  		// TODO: handle canceled context
  1536  
  1537  		// Print progress logs if long enough time elapsed
  1538  		if time.Since(logged) > 8*time.Second {
  1539  			log.Info("Regenerating historical state", "block", current.NumberU64()+1, "target", origin, "remaining", origin-current.NumberU64(), "elapsed", time.Since(start))
  1540  			logged = time.Now()
  1541  		}
  1542  
  1543  		// Retrieve the next block to regenerate and process it
  1544  		parent := current
  1545  		next := current.NumberU64() + 1
  1546  		if current = bc.GetBlockByNumber(next); current == nil {
  1547  			return fmt.Errorf("failed to retrieve block %d while re-generating state", next)
  1548  		}
  1549  
  1550  		// Initialize snapshot if required (prevents full snapshot re-generation in
  1551  		// the case of unclean shutdown)
  1552  		if parent.Hash() == acceptorTip {
  1553  			log.Info("Recovering snapshot", "hash", parent.Hash(), "index", parent.NumberU64())
  1554  			// TODO: switch to checking the snapshot block hash markers here to ensure that when we re-process the block, we have the opportunity to apply
  1555  			// a snapshot diff layer that we may have been in the middle of committing during shutdown. This will prevent snapshot re-generation in the case
  1556  			// that the node stops mid-way through snapshot flattening (performed across multiple DB batches).
  1557  			// If snapshot initialization is delayed due to state sync, skip initializing snaps here
  1558  			if !bc.cacheConfig.SnapshotDelayInit {
  1559  				bc.initSnapshot(parent)
  1560  			}
  1561  			writeIndices = true // Set [writeIndices] to true, so that the indices will be updated from the last accepted tip onwards.
  1562  		}
  1563  
  1564  		// Reprocess next block using previously fetched data
  1565  		root, err := bc.reprocessBlock(parent, current)
  1566  		if err != nil {
  1567  			return err
  1568  		}
  1569  
  1570  		// Flatten snapshot if initialized, holding a reference to the state root until the next block
  1571  		// is processed.
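		// Referencing [root] and dereferencing the previous root keeps only the
		// most recently generated trie pinned in memory while re-executing.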
  1572  		if err := bc.flattenSnapshot(func() error {
  1573  			triedb.Reference(root, common.Hash{})
  1574  			if previousRoot != (common.Hash{}) {
  1575  				triedb.Dereference(previousRoot)
  1576  			}
  1577  			previousRoot = root
  1578  			return nil
  1579  		}, current.Hash()); err != nil {
  1580  			return err
  1581  		}
  1582  
  1583  		// Write any unsaved indices to disk
  1584  		if writeIndices {
  1585  			if err := bc.writeBlockAcceptedIndices(current); err != nil {
  1586  				return fmt.Errorf("%w: failed to write accepted block indices", err)
  1587  			}
  1588  		}
  1589  	}
  1590  
  1591  	nodes, imgs := triedb.Size()
  1592  	log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
  1593  	if previousRoot != (common.Hash{}) {
  1594  		return triedb.Commit(previousRoot, true, nil)
  1595  	}
  1596  	return nil
  1597  }
  1598  
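// protectTrieIndex persists a marker whenever the node runs with pruning
// disabled and, conversely, refuses to start a pruning node on a database that
// previously ran in archive mode (unless [AllowMissingTries] is set), since
// tries referenced by the archive index could otherwise go missing.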
  1599  func (bc *BlockChain) protectTrieIndex() error {
  1600  	if !bc.cacheConfig.Pruning {
  1601  		return rawdb.WritePruningDisabled(bc.db)
  1602  	}
  1603  	pruningDisabled, err := rawdb.HasPruningDisabled(bc.db)
  1604  	if err != nil {
  1605  		return fmt.Errorf("failed to check if the chain has been run with pruning disabled: %w", err)
  1606  	}
  1607  	if !pruningDisabled {
  1608  		return nil
  1609  	}
  1610  	if !bc.cacheConfig.AllowMissingTries {
  1611  		return ErrRefuseToCorruptArchiver
  1612  	}
  1613  	return nil
  1614  }
  1615  
  1616  // populateMissingTries iterates from [bc.cacheConfig.PopulateMissingTries] (defaults to 0)
  1617  // to [LastAcceptedBlock] and persists all tries to disk that are not already on disk. This is
  1618  // used to fill trie index gaps in an "archive" node without resyncing from scratch.
  1619  //
  1620  // NOTE: Assumes the genesis root and last accepted root are written to disk
  1621  func (bc *BlockChain) populateMissingTries() error {
  1622  	if bc.cacheConfig.PopulateMissingTries == nil {
  1623  		return nil
  1624  	}
  1625  
  1626  	var (
  1627  		lastAccepted = bc.LastAcceptedBlock().NumberU64()
  1628  		startHeight  = *bc.cacheConfig.PopulateMissingTries
  1629  		startTime    = time.Now()
  1630  		logged       time.Time
  1631  		triedb       = bc.stateCache.TrieDB()
  1632  		missing      = 0
  1633  	)
  1634  
  1635  	// Do not allow the config to specify a starting point above the last accepted block.
  1636  	if startHeight > lastAccepted {
  1637  		return fmt.Errorf("cannot populate missing tries from a starting point (%d) > last accepted block (%d)", startHeight, lastAccepted)
  1638  	}
  1639  
  1640  	// If we are starting from the genesis, increment the start height by 1 so we don't attempt to re-process
  1641  	// the genesis block.
  1642  	if startHeight == 0 {
  1643  		startHeight += 1
  1644  	}
  1645  	parent := bc.GetBlockByNumber(startHeight - 1)
  1646  	if parent == nil {
  1647  		return fmt.Errorf("failed to fetch initial parent block for re-populating missing tries at height %d", startHeight-1)
  1648  	}
  1649  
  1650  	it := newBlockChainIterator(bc, startHeight, bc.cacheConfig.PopulateMissingTriesParallelism)
  1651  	defer it.Stop()
  1652  
  1653  	for i := startHeight; i < lastAccepted; i++ {
  1654  		// Print progress logs if long enough time elapsed
  1655  		if time.Since(logged) > 8*time.Second {
  1656  			log.Info("Populating missing tries", "missing", missing, "block", i, "remaining", lastAccepted-i, "elapsed", time.Since(startTime))
  1657  			logged = time.Now()
  1658  		}
  1659  
  1660  		// TODO: handle canceled context
  1661  		current, hasState, err := it.Next(context.TODO())
  1662  		if err != nil {
  1663  			return fmt.Errorf("error while populating missing tries: %w", err)
  1664  		}
  1665  
  1666  		if hasState {
  1667  			parent = current
  1668  			continue
  1669  		}
  1670  
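		// State is missing for [current]: re-execute it on top of [parent]'s
		// state and commit the resulting trie directly to disk below.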
  1671  		root, err := bc.reprocessBlock(parent, current)
  1672  		if err != nil {
  1673  			return err
  1674  		}
  1675  
  1676  		// Commit root to disk so that it can be accessed directly
  1677  		if err := triedb.Commit(root, false, nil); err != nil {
  1678  			return err
  1679  		}
  1680  		parent = current
  1681  		log.Debug("Populated missing trie", "block", current.NumberU64(), "root", root)
  1682  		missing++
  1683  	}
  1684  
  1685  	// Write marker to DB to indicate populate missing tries finished successfully.
  1686  	// Note: writing the marker here means that we do allow consecutive runs of re-populating
  1687  	// missing tries if it does not finish during the prior run.
  1688  	if err := rawdb.WritePopulateMissingTries(bc.db); err != nil {
  1689  		return fmt.Errorf("failed to write populate missing tries marker: %w", err)
  1690  	}
  1691  
  1692  	nodes, imgs := triedb.Size()
  1693  	log.Info("All missing tries populated", "startHeight", startHeight, "lastAcceptedHeight", lastAccepted, "missing", missing, "elapsed", time.Since(startTime), "nodes", nodes, "preimages", imgs)
  1694  	return nil
  1695  }
  1696  
  1697  // CleanBlockRootsAboveLastAccepted gathers the blocks that may have previously been in processing above the
  1698  // last accepted block and wipes their block roots from disk to mark their tries as inaccessible.
  1699  // This is used prior to pruning to ensure that all of the tries that may still be in processing are marked
  1700  // as inaccessible and mirrors the handling of middle roots in the geth offline pruning implementation.
  1701  // This is not strictly necessary, but maintains a soft assumption.
  1702  func (bc *BlockChain) CleanBlockRootsAboveLastAccepted() error {
  1703  	targetRoot := bc.LastAcceptedBlock().Root()
  1704  
  1705  	// Clean up any block roots above the last accepted block before we start pruning.
  1706  	// Note: this takes the place of middleRoots in the geth implementation since we do not
  1707  	// track processing block roots via snapshot journals in the same way.
  1708  	processingRoots := bc.gatherBlockRootsAboveLastAccepted()
  1709  	// If there is a block above the last accepted block with an identical state root, we
  1710  	// explicitly remove it from the set to ensure we do not corrupt the last accepted trie.
  1711  	delete(processingRoots, targetRoot)
  1712  	for processingRoot := range processingRoots {
  1713  		// Delete the processing root from disk to mark the trie as inaccessible (no need to handle this in a batch).
  1714  		if err := bc.db.Delete(processingRoot[:]); err != nil {
  1715  			return fmt.Errorf("failed to remove processing root (%s) preparing for offline pruning: %w", processingRoot, err)
  1716  		}
  1717  	}
  1718  
  1719  	return nil
  1720  }
  1721  
  1722  // gatherBlockRootsAboveLastAccepted iterates forward from the last accepted block and returns a list of all block roots
  1723  // for any blocks that were inserted above the last accepted block.
  1724  // Given that we never insert a block into the chain unless all of its ancestors have been inserted, this should gather
  1725  // all of the block roots for blocks inserted above the last accepted block that may have been in processing at some point
  1726  // in the past and are therefore potentially still acceptable.
  1727  // Note: there is an edge case where the node dies while the consensus engine is rejecting a branch of blocks since the
  1728  // consensus engine will reject the lowest ancestor first. In this case, these blocks will not be considered acceptable in
  1729  // the future.
  1730  // Ex.
  1731  //
  1732  //	   A
  1733  //	 /   \
  1734  //	B     C
  1735  //	|
  1736  //	D
  1737  //	|
  1738  //	E
  1739  //	|
  1740  //	F
  1741  //
  1742  // The consensus engine accepts block C and proceeds to reject the other branch in order (B, D, E, F).
  1743  // If the consensus engine dies after rejecting block D, block D will be deleted, such that the forward iteration
  1744  // may not find any blocks at this height and will not reach the previously processing blocks E and F.
  1745  func (bc *BlockChain) gatherBlockRootsAboveLastAccepted() map[common.Hash]struct{} {
  1746  	blockRoots := make(map[common.Hash]struct{})
  1747  	for height := bc.lastAccepted.NumberU64() + 1; ; height++ {
  1748  		blockHashes := rawdb.ReadAllHashes(bc.db, height)
  1749  		// If there are no block hashes at [height], then there should be no further acceptable blocks
  1750  		// past this point.
  1751  		if len(blockHashes) == 0 {
  1752  			break
  1753  		}
  1754  
  1755  		// Fetch the blocks and append their roots.
  1756  		for _, blockHash := range blockHashes {
  1757  			block := bc.GetBlockByHash(blockHash)
  1758  			if block == nil {
  1759  				continue
  1760  			}
  1761  
  1762  			blockRoots[block.Root()] = struct{}{}
  1763  		}
  1764  	}
  1765  
  1766  	return blockRoots
  1767  }
  1768  
  1769  // ResetState reinitializes the state of the blockchain
  1770  // to the trie represented by [block.Root()] after updating
  1771  // in-memory current block pointers to [block].
  1772  // Only used in state sync.
  1773  func (bc *BlockChain) ResetState(block *types.Block) error {
  1774  	bc.chainmu.Lock()
  1775  	defer bc.chainmu.Unlock()
  1776  
  1777  	// Update head block and snapshot pointers on disk
  1778  	batch := bc.db.NewBatch()
  1779  	rawdb.WriteAcceptorTip(batch, block.Hash())
  1780  	rawdb.WriteHeadBlockHash(batch, block.Hash())
  1781  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
  1782  	rawdb.WriteSnapshotBlockHash(batch, block.Hash())
  1783  	rawdb.WriteSnapshotRoot(batch, block.Root())
  1784  	if err := batch.Write(); err != nil {
  1785  		return err
  1786  	}
  1787  
  1788  	// Update all in-memory chain markers
  1789  	bc.lastAccepted = block
  1790  	bc.acceptorTip = block
  1791  	bc.currentBlock.Store(block)
  1792  	bc.hc.SetCurrentHeader(block.Header())
  1793  
  1794  	lastAcceptedHash := block.Hash()
  1795  	bc.stateCache = state.NewDatabaseWithConfig(bc.db, &trie.Config{
  1796  		Cache:     bc.cacheConfig.TrieCleanLimit,
  1797  		Preimages: bc.cacheConfig.Preimages,
  1798  	})
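	// Recreating the state database ensures tries cached before the reset are
	// not reused against the newly synced state.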
  1799  	if err := bc.loadLastState(lastAcceptedHash); err != nil {
  1800  		return err
  1801  	}
  1802  	// Create the state manager
  1803  	bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), bc.cacheConfig)
  1804  
  1805  	// Make sure the state associated with the block is available
  1806  	head := bc.CurrentBlock()
  1807  	if !bc.HasState(head.Root()) {
  1808  		return fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash())
  1809  	}
  1810  
  1811  	bc.initSnapshot(head)
  1812  	return nil
  1813  }