github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"compress/gzip"
    22  	"context"
    23  	"errors"
    24  	"fmt"
    25  	"io"
    26  	"math/big"
    27  	"os"
    28  	"path/filepath"
    29  	"sort"
    30  	"strings"
    31  	"sync"
    32  	"sync/atomic"
    33  	"time"
    34  
    35  	lru "github.com/hashicorp/golang-lru"
    36  	"go.opentelemetry.io/otel/attribute"
    37  	"go.opentelemetry.io/otel/trace"
    38  
    39  	"github.com/ethereum/go-ethereum"
    40  	"github.com/ethereum/go-ethereum/common"
    41  	"github.com/ethereum/go-ethereum/common/mclock"
    42  	"github.com/ethereum/go-ethereum/common/prque"
    43  	"github.com/ethereum/go-ethereum/common/tracing"
    44  	"github.com/ethereum/go-ethereum/consensus"
    45  	"github.com/ethereum/go-ethereum/core/blockstm"
    46  	"github.com/ethereum/go-ethereum/core/rawdb"
    47  	"github.com/ethereum/go-ethereum/core/state"
    48  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    49  	"github.com/ethereum/go-ethereum/core/types"
    50  	"github.com/ethereum/go-ethereum/core/vm"
    51  	"github.com/ethereum/go-ethereum/eth/downloader/whitelist"
    52  	"github.com/ethereum/go-ethereum/ethdb"
    53  	"github.com/ethereum/go-ethereum/event"
    54  	"github.com/ethereum/go-ethereum/internal/syncx"
    55  	"github.com/ethereum/go-ethereum/log"
    56  	"github.com/ethereum/go-ethereum/metrics"
    57  	"github.com/ethereum/go-ethereum/params"
    58  	"github.com/ethereum/go-ethereum/trie"
    59  )
    60  
    61  var (
    62  	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
    63  	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
    64  	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
    65  
    66  	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
    67  	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
    68  	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
    69  	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
    70  
    71  	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
    72  	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
    73  	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
    74  	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
    75  
    76  	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
    77  	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
    78  	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
    79  
    80  	blockImportTimer              = metrics.NewRegisteredMeter("chain/imports", nil)
    81  	blockInsertTimer              = metrics.NewRegisteredTimer("chain/inserts", nil)
    82  	blockValidationTimer          = metrics.NewRegisteredTimer("chain/validation", nil)
    83  	blockExecutionTimer           = metrics.NewRegisteredTimer("chain/execution", nil)
    84  	blockWriteTimer               = metrics.NewRegisteredTimer("chain/write", nil)
    85  	blockExecutionParallelCounter = metrics.NewRegisteredCounter("chain/execution/parallel", nil)
    86  	blockExecutionSerialCounter   = metrics.NewRegisteredCounter("chain/execution/serial", nil)
    87  
    88  	blockReorgMeter         = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
    89  	blockReorgAddMeter      = metrics.NewRegisteredMeter("chain/reorg/add", nil)
    90  	blockReorgDropMeter     = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
    91  	blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)
    92  
    93  	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
    94  	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
    95  
    96  	errInsertionInterrupted = errors.New("insertion is interrupted")
    97  	errChainStopped         = errors.New("blockchain is stopped")
    98  )
    99  
   100  const (
   101  	bodyCacheLimit      = 256
   102  	blockCacheLimit     = 256
   103  	receiptsCacheLimit  = 1024
   104  	txLookupCacheLimit  = 1024
   105  	maxFutureBlocks     = 256
   106  	maxTimeFutureBlocks = 30
   107  
   108  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
   109  	//
   110  	// Changelog:
   111  	//
   112  	// - Version 4
   113  	//   The following incompatible database changes were added:
   114  	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
   115  	//   * the `Bloom` field of receipt is deleted
   116  	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
   117  	// - Version 5
   118  	//  The following incompatible database changes were added:
   119  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
   120  	//    * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
   121  	//      receipts' corresponding block
   122  	// - Version 6
   123  	//  The following incompatible database changes were added:
   124  	//    * Transaction lookup information stores the corresponding block number instead of block hash
   125  	// - Version 7
   126  	//  The following incompatible database changes were added:
   127  	//    * Use freezer as the ancient database to maintain all ancient data
   128  	// - Version 8
   129  	//  The following incompatible database changes were added:
   130  	//    * New scheme for contract code in order to separate the codes and trie nodes
   131  	BlockChainVersion uint64 = 8
   132  )
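// exampleVersionCheck is a hedged, illustrative sketch (not part of the
// original file) of how the BlockChainVersion constant above is typically
// consumed: compare the version stored in the database against the compiled-in
// one and force a resync when they diverge. The real check lives outside this
// file.
func exampleVersionCheck(db ethdb.Database) bool {
	stored := rawdb.ReadDatabaseVersion(db) // nil if the database is fresh
	return stored != nil && *stored == BlockChainVersion
}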
   133  
   134  // CacheConfig contains the configuration values for the trie caching/pruning
   135  // that's resident in a blockchain.
   136  type CacheConfig struct {
   137  	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
   138  	TrieCleanJournal    string        // Disk journal for saving clean cache entries.
   139  	TrieCleanRejournal  time.Duration // Time interval to dump clean cache to disk periodically
   140  	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
   141  	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
   142  	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
   143  	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
   144  	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory
   145  	Preimages           bool          // Whether to store preimage of trie key to the disk
   146  	TriesInMemory       uint64        // Number of recent tries to keep in memory
   147  
   148  	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
   149  }
   150  
   151  // DefaultCacheConfig are the default caching values if none are specified by the
   152  // user (also used during testing).
   153  var DefaultCacheConfig = &CacheConfig{
   154  	TrieCleanLimit: 256,
   155  	TrieDirtyLimit: 256,
   156  	TrieTimeLimit:  5 * time.Minute,
   157  	SnapshotLimit:  256,
   158  	SnapshotWait:   true,
   159  	TriesInMemory:  128,
   160  }
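// exampleArchiveCacheConfig is a hedged sketch (hypothetical values, not part
// of the original file) showing how a caller might derive an archive-style
// configuration from the defaults above: trie write caching and GC are
// disabled so every block's state is persisted.
func exampleArchiveCacheConfig() *CacheConfig {
	cfg := *DefaultCacheConfig   // copy the defaults declared above
	cfg.TrieDirtyDisabled = true // archive mode: never garbage-collect tries
	cfg.Preimages = true         // also keep trie key preimages on disk
	return &cfg
}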
   161  
   162  // BlockChain represents the canonical chain given a database with a genesis
    163  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
    164  //
    165  // Importing blocks into the block chain happens according to the set of rules
    166  // defined by the two stage Validator. Processing of blocks is done using the
    167  // Processor which processes the included transactions. The validation of the state
    168  // is done in the second part of the Validator. A failure results in aborting
    169  // the import.
    170  //
    171  // The BlockChain also helps in returning blocks from **any** chain included
    172  // in the database as well as blocks that represent the canonical chain. It's
    173  // important to note that GetBlock can return any block and does not need to be
    174  // included in the canonical one, whereas GetBlockByNumber always represents the
    175  // canonical chain.
   176  type BlockChain struct {
   177  	chainConfig *params.ChainConfig // Chain & network configuration
   178  	cacheConfig *CacheConfig        // Cache configuration for pruning
   179  
   180  	db     ethdb.Database // Low level persistent database to store final content in
   181  	snaps  *snapshot.Tree // Snapshot tree for fast trie leaf access
   182  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
   183  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
   184  
   185  	// txLookupLimit is the maximum number of blocks from head whose tx indices
   186  	// are reserved:
   187  	//  * 0:   means no limit and regenerate any missing indexes
   188  	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
   189  	//  * nil: disable tx reindexer/deleter, but still index new blocks
   190  	txLookupLimit uint64
   191  
   192  	hc            *HeaderChain
   193  	rmLogsFeed    event.Feed
   194  	chainFeed     event.Feed
   195  	chainSideFeed event.Feed
   196  	chainHeadFeed event.Feed
   197  	logsFeed      event.Feed
   198  	blockProcFeed event.Feed
   199  	scope         event.SubscriptionScope
   200  	genesisBlock  *types.Block
   201  
   202  	// This mutex synchronizes chain write operations.
   203  	// Readers don't need to take it, they can just read the database.
   204  	chainmu *syncx.ClosableMutex
   205  
   206  	currentBlock     atomic.Value // Current head of the block chain
   207  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   208  
   209  	stateCache    state.Database // State database to reuse between imports (contains state cache)
   210  	bodyCache     *lru.Cache     // Cache for the most recent block bodies
   211  	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   212  	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
   213  	blockCache    *lru.Cache     // Cache for the most recent entire blocks
   214  	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
   215  	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing
   216  
    217  	wg            sync.WaitGroup // tracks background goroutines for graceful shutdown
   218  	quit          chan struct{}  // shutdown signal, closed in Stop.
   219  	running       int32          // 0 if chain is running, 1 when stopped
   220  	procInterrupt int32          // interrupt signaler for block processing
   221  
   222  	engine            consensus.Engine
   223  	validator         Validator // Block and state validator interface
   224  	prefetcher        Prefetcher
   225  	processor         Processor // Block transaction processor interface
   226  	parallelProcessor Processor // Parallel block transaction processor interface
   227  	forker            *ForkChoice
   228  	vmConfig          vm.Config
   229  
   230  	// Bor related changes
    231  	borReceiptsCache *lru.Cache             // Cache for the most recent bor receipts per block
   232  	stateSyncData    []*types.StateSyncData // State sync data
   233  	stateSyncFeed    event.Feed             // State sync feed
   234  	chain2HeadFeed   event.Feed             // Reorg/NewHead/Fork data feed
   235  }
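// exampleChainLookups is a hedged sketch contrasting the two lookup styles
// described in the BlockChain doc comment above: GetBlock may return a block
// from any chain stored in the database, whereas GetBlockByNumber always
// resolves through the canonical chain. Both accessors are defined elsewhere
// in this package.
func exampleChainLookups(bc *BlockChain, hash common.Hash, number uint64) (fromAnyChain, canonical *types.Block) {
	fromAnyChain = bc.GetBlock(hash, number) // any stored block with this hash/number
	canonical = bc.GetBlockByNumber(number)  // canonical block at this height
	return fromAnyChain, canonical
}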
   236  
   237  // NewBlockChain returns a fully initialised block chain using information
   238  // available in the database. It initialises the default Ethereum Validator
   239  // and Processor.
   240  //
   241  //nolint:gocognit
   242  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64, checker ethereum.ChainValidator) (*BlockChain, error) {
   243  	if cacheConfig == nil {
   244  		cacheConfig = DefaultCacheConfig
   245  	}
   246  
    247  	if cacheConfig.TriesInMemory == 0 {
   248  		cacheConfig.TriesInMemory = DefaultCacheConfig.TriesInMemory
   249  	}
   250  	bodyCache, _ := lru.New(bodyCacheLimit)
   251  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   252  	receiptsCache, _ := lru.New(receiptsCacheLimit)
   253  	blockCache, _ := lru.New(blockCacheLimit)
   254  	txLookupCache, _ := lru.New(txLookupCacheLimit)
   255  	futureBlocks, _ := lru.New(maxFutureBlocks)
   256  
   257  	borReceiptsCache, _ := lru.New(receiptsCacheLimit)
   258  
   259  	bc := &BlockChain{
   260  		chainConfig: chainConfig,
   261  		cacheConfig: cacheConfig,
   262  		db:          db,
   263  		triegc:      prque.New(nil),
   264  		stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
   265  			Cache:     cacheConfig.TrieCleanLimit,
   266  			Journal:   cacheConfig.TrieCleanJournal,
   267  			Preimages: cacheConfig.Preimages,
   268  		}),
   269  		quit:          make(chan struct{}),
   270  		chainmu:       syncx.NewClosableMutex(),
   271  		bodyCache:     bodyCache,
   272  		bodyRLPCache:  bodyRLPCache,
   273  		receiptsCache: receiptsCache,
   274  		blockCache:    blockCache,
   275  		txLookupCache: txLookupCache,
   276  		futureBlocks:  futureBlocks,
   277  		engine:        engine,
   278  		vmConfig:      vmConfig,
   279  
   280  		borReceiptsCache: borReceiptsCache,
   281  	}
   282  	bc.forker = NewForkChoice(bc, shouldPreserve, checker)
   283  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   284  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   285  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   286  
   287  	var err error
   288  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
   289  	if err != nil {
   290  		return nil, err
   291  	}
   292  	bc.genesisBlock = bc.GetBlockByNumber(0)
   293  	if bc.genesisBlock == nil {
   294  		return nil, ErrNoGenesis
   295  	}
   296  
   297  	var nilBlock *types.Block
   298  	bc.currentBlock.Store(nilBlock)
   299  	bc.currentFastBlock.Store(nilBlock)
   300  
   301  	// Initialize the chain with ancient data if it isn't empty.
   302  	var txIndexBlock uint64
   303  
   304  	if bc.empty() {
   305  		rawdb.InitDatabaseFromFreezer(bc.db)
   306  		// If ancient database is not empty, reconstruct all missing
   307  		// indices in the background.
   308  		frozen, _ := bc.db.Ancients()
   309  		if frozen > 0 {
   310  			txIndexBlock = frozen
   311  		}
   312  	}
   313  	if err := bc.loadLastState(); err != nil {
   314  		return nil, err
   315  	}
   316  
   317  	// Make sure the state associated with the block is available
   318  	head := bc.CurrentBlock()
   319  	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
   320  		// Head state is missing, before the state recovery, find out the
   321  		// disk layer point of snapshot(if it's enabled). Make sure the
   322  		// rewound point is lower than disk layer.
   323  		var diskRoot common.Hash
   324  		if bc.cacheConfig.SnapshotLimit > 0 {
   325  			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
   326  		}
   327  		if diskRoot != (common.Hash{}) {
   328  			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
   329  
   330  			snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
   331  			if err != nil {
   332  				return nil, err
   333  			}
   334  			// Chain rewound, persist old snapshot number to indicate recovery procedure
   335  			if snapDisk != 0 {
   336  				rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
   337  			}
   338  		} else {
   339  			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
   340  			if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
   341  				return nil, err
   342  			}
   343  		}
   344  	}
   345  
   346  	// Ensure that a previous crash in SetHead doesn't leave extra ancients
   347  	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
   348  		var (
   349  			needRewind bool
   350  			low        uint64
   351  		)
   352  		// The head full block may be rolled back to a very low height due to
   353  		// blockchain repair. If the head full block is even lower than the ancient
   354  		// chain, truncate the ancient store.
   355  		fullBlock := bc.CurrentBlock()
   356  		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
   357  			needRewind = true
   358  			low = fullBlock.NumberU64()
   359  		}
   360  		// In fast sync, it may happen that ancient data has been written to the
   361  		// ancient store, but the LastFastBlock has not been updated, truncate the
   362  		// extra data here.
   363  		fastBlock := bc.CurrentFastBlock()
   364  		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
   365  			needRewind = true
   366  			if fastBlock.NumberU64() < low || low == 0 {
   367  				low = fastBlock.NumberU64()
   368  			}
   369  		}
   370  		if needRewind {
   371  			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
   372  			if err := bc.SetHead(low); err != nil {
   373  				return nil, err
   374  			}
   375  		}
   376  	}
   377  	// The first thing the node will do is reconstruct the verification data for
   378  	// the head block (ethash cache or clique voting snapshot). Might as well do
   379  	// it in advance.
   380  	// bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
   381  
   382  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   383  	for hash := range BadHashes {
   384  		if header := bc.GetHeaderByHash(hash); header != nil {
   385  			// get the canonical block corresponding to the offending header's number
   386  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   387  			// make sure the headerByNumber (if present) is in our current canonical chain
   388  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   389  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   390  				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
   391  					return nil, err
   392  				}
   393  				log.Error("Chain rewind was successful, resuming normal operation")
   394  			}
   395  		}
   396  	}
   397  
   398  	// Load any existing snapshot, regenerating it if loading failed
   399  	if bc.cacheConfig.SnapshotLimit > 0 {
   400  		// If the chain was rewound past the snapshot persistent layer (causing
   401  		// a recovery block number to be persisted to disk), check if we're still
   402  		// in recovery mode and in that case, don't invalidate the snapshot on a
   403  		// head mismatch.
   404  		var recover bool
   405  
   406  		head := bc.CurrentBlock()
   407  		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
   408  			log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
   409  			recover = true
   410  		}
   411  		bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
   412  	}
   413  
   414  	// Start future block processor.
   415  	bc.wg.Add(1)
   416  	go bc.updateFutureBlocks()
   417  
   418  	// Start tx indexer/unindexer.
   419  	if txLookupLimit != nil {
   420  		bc.txLookupLimit = *txLookupLimit
   421  
   422  		bc.wg.Add(1)
   423  		go bc.maintainTxIndex(txIndexBlock)
   424  	}
   425  
   426  	// If periodic cache journal is required, spin it up.
   427  	if bc.cacheConfig.TrieCleanRejournal > 0 {
   428  		if bc.cacheConfig.TrieCleanRejournal < time.Minute {
   429  			log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
   430  			bc.cacheConfig.TrieCleanRejournal = time.Minute
   431  		}
   432  		triedb := bc.stateCache.TrieDB()
   433  		bc.wg.Add(1)
   434  		go func() {
   435  			defer bc.wg.Done()
   436  			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
   437  		}()
   438  	}
   439  	return bc, nil
   440  }
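// exampleNewChain is a hedged construction sketch (not part of the original
// file): a nil cacheConfig falls back to DefaultCacheConfig, a nil
// txLookupLimit disables the tx index maintainer, and the nil checker and
// shouldPreserve callbacks are assumed to be tolerated by the fork choice
// rules in this package.
func exampleNewChain(db ethdb.Database, engine consensus.Engine) (*BlockChain, error) {
	return NewBlockChain(db, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil, nil)
}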
   441  
    442  // Similar to NewBlockChain, this function creates a new blockchain object, but with a parallel state processor.
   443  func NewParallelBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64, checker ethereum.ChainValidator) (*BlockChain, error) {
   444  	bc, err := NewBlockChain(db, cacheConfig, chainConfig, engine, vmConfig, shouldPreserve, txLookupLimit, checker)
   445  
   446  	if err != nil {
   447  		return nil, err
   448  	}
   449  
   450  	bc.parallelProcessor = NewParallelStateProcessor(chainConfig, bc, engine)
   451  
   452  	return bc, nil
   453  }
   454  
   455  func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header) (types.Receipts, []*types.Log, uint64, *state.StateDB, error) {
   456  	// Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result
   457  	ctx, cancel := context.WithCancel(context.Background())
   458  	defer cancel()
   459  
   460  	type Result struct {
   461  		receipts types.Receipts
   462  		logs     []*types.Log
   463  		usedGas  uint64
   464  		err      error
   465  		statedb  *state.StateDB
   466  		counter  metrics.Counter
   467  	}
   468  
   469  	resultChan := make(chan Result, 2)
   470  
   471  	processorCount := 0
   472  
   473  	if bc.parallelProcessor != nil {
   474  		parallelStatedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
   475  		if err != nil {
   476  			return nil, nil, 0, nil, err
   477  		}
   478  
   479  		processorCount++
   480  
   481  		go func() {
   482  			parallelStatedb.StartPrefetcher("chain")
   483  			receipts, logs, usedGas, err := bc.parallelProcessor.Process(block, parallelStatedb, bc.vmConfig, ctx)
   484  			resultChan <- Result{receipts, logs, usedGas, err, parallelStatedb, blockExecutionParallelCounter}
   485  		}()
   486  	}
   487  
   488  	if bc.processor != nil {
   489  		statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
   490  		if err != nil {
   491  			return nil, nil, 0, nil, err
   492  		}
   493  
   494  		processorCount++
   495  
   496  		go func() {
   497  			statedb.StartPrefetcher("chain")
   498  			receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig, ctx)
   499  			resultChan <- Result{receipts, logs, usedGas, err, statedb, blockExecutionSerialCounter}
   500  		}()
   501  	}
   502  
   503  	result := <-resultChan
   504  
   505  	if _, ok := result.err.(blockstm.ParallelExecFailedError); ok {
   506  		log.Warn("Parallel state processor failed", "err", result.err)
   507  
    508  		// If the parallel processor failed, we will fall back to the serial processor if enabled
   509  		if processorCount == 2 {
   510  			result.statedb.StopPrefetcher()
   511  			result = <-resultChan
   512  			processorCount--
   513  		}
   514  	}
   515  
   516  	result.counter.Inc(1)
   517  
   518  	// Make sure we are not leaking any prefetchers
   519  	if processorCount == 2 {
   520  		go func() {
    521  			secondResult := <-resultChan
    522  			secondResult.statedb.StopPrefetcher()
   523  		}()
   524  	}
   525  
   526  	return result.receipts, result.logs, result.usedGas, result.statedb, result.err
   527  }
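// raceProcessors is a hedged, self-contained sketch (not part of the original
// file) of the pattern ProcessBlock uses above: run two workers concurrently,
// take whichever result arrives first, and cancel the loser via the shared
// context.
func raceProcessors(ctx context.Context, serial, parallel func(context.Context) error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // cancelling tells the losing processor to stop early

	results := make(chan error, 2) // buffered: the loser's send never blocks
	go func() { results <- parallel(ctx) }()
	go func() { results <- serial(ctx) }()

	return <-results // first finisher wins
}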
   528  
   529  // empty returns an indicator whether the blockchain is empty.
   530  // Note, it's a special case that we connect a non-empty ancient
    531  // database with an empty node, so that we can plug the ancient
    532  // database into the node seamlessly.
   533  func (bc *BlockChain) empty() bool {
   534  	genesis := bc.genesisBlock.Hash()
   535  	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
   536  		if hash != genesis {
   537  			return false
   538  		}
   539  	}
   540  	return true
   541  }
   542  
   543  // loadLastState loads the last known chain state from the database. This method
   544  // assumes that the chain manager mutex is held.
   545  func (bc *BlockChain) loadLastState() error {
   546  	// Restore the last known head block
   547  	head := rawdb.ReadHeadBlockHash(bc.db)
   548  	if head == (common.Hash{}) {
   549  		// Corrupt or empty database, init from scratch
   550  		log.Warn("Empty database, resetting chain")
   551  		return bc.Reset()
   552  	}
   553  	// Make sure the entire head block is available
   554  	currentBlock := bc.GetBlockByHash(head)
   555  	if currentBlock == nil {
   556  		// Corrupt or empty database, init from scratch
   557  		log.Warn("Head block missing, resetting chain", "hash", head)
   558  		return bc.Reset()
   559  	}
   560  	// Everything seems to be fine, set as the head block
   561  	bc.currentBlock.Store(currentBlock)
   562  	headBlockGauge.Update(int64(currentBlock.NumberU64()))
   563  
   564  	// Restore the last known head header
   565  	currentHeader := currentBlock.Header()
   566  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   567  		if header := bc.GetHeaderByHash(head); header != nil {
   568  			currentHeader = header
   569  		}
   570  	}
   571  	bc.hc.SetCurrentHeader(currentHeader)
   572  
   573  	// Restore the last known head fast block
   574  	bc.currentFastBlock.Store(currentBlock)
   575  	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))
   576  
   577  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   578  		if block := bc.GetBlockByHash(head); block != nil {
   579  			bc.currentFastBlock.Store(block)
   580  			headFastBlockGauge.Update(int64(block.NumberU64()))
   581  		}
   582  	}
   583  	// Issue a status log for the user
   584  	currentFastBlock := bc.CurrentFastBlock()
   585  
   586  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   587  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   588  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   589  
   590  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
   591  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
   592  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
   593  	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
   594  		log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
   595  	}
   596  	return nil
   597  }
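// exampleHeadMarkers is a hedged sketch (not part of the original file) of the
// three persistent head markers loadLastState consults above; each may be the
// zero hash on a fresh or corrupt database, triggering a chain reset.
func exampleHeadMarkers(db ethdb.Database) (headBlock, headHeader, headFast common.Hash) {
	return rawdb.ReadHeadBlockHash(db), rawdb.ReadHeadHeaderHash(db), rawdb.ReadHeadFastBlockHash(db)
}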
   598  
   599  // SetHead rewinds the local chain to a new head. Depending on whether the node
   600  // was fast synced or full synced and in which state, the method will try to
   601  // delete minimal data from disk whilst retaining chain consistency.
   602  func (bc *BlockChain) SetHead(head uint64) error {
   603  	_, err := bc.setHeadBeyondRoot(head, common.Hash{}, false)
   604  	return err
   605  }
   606  
   607  // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
   608  // that the rewind must pass the specified state root. This method is meant to be
   609  // used when rewinding with snapshots enabled to ensure that we go back further than
    610  // the persistent disk layer. Depending on whether the node was fast synced or full, and
   611  // in which state, the method will try to delete minimal data from disk whilst
   612  // retaining chain consistency.
   613  //
   614  // The method returns the block number where the requested root cap was found.
   615  func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
   616  	if !bc.chainmu.TryLock() {
   617  		return 0, errChainStopped
   618  	}
   619  	defer bc.chainmu.Unlock()
   620  
   621  	// Track the block number of the requested root hash
   622  	var rootNumber uint64 // (no root == always 0)
   623  
   624  	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
    625  	// current freezer limit to start nuking if underflown
   626  	pivot := rawdb.ReadLastPivotNumber(bc.db)
   627  	frozen, _ := bc.db.Ancients()
   628  
   629  	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
   630  		// Rewind the blockchain, ensuring we don't end up with a stateless head
   631  		// block. Note, depth equality is permitted to allow using SetHead as a
   632  		// chain reparation mechanism without deleting any data!
   633  		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
   634  			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
   635  			if newHeadBlock == nil {
   636  				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
   637  				newHeadBlock = bc.genesisBlock
   638  			} else {
    639  				// Block exists, keep rewinding until we find one with
    640  				// state, continuing until we also exceed the optional
    641  				// threshold root hash
   642  				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
   643  
   644  				for {
   645  					// If a root threshold was requested but not yet crossed, check
   646  					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
   647  						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
   648  					}
   649  					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
   650  						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   651  						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
   652  							parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
   653  							if parent != nil {
   654  								newHeadBlock = parent
   655  								continue
   656  							}
   657  							log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
   658  							newHeadBlock = bc.genesisBlock
   659  						} else {
   660  							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
   661  							newHeadBlock = bc.genesisBlock
   662  						}
   663  					}
   664  					if beyondRoot || newHeadBlock.NumberU64() == 0 {
   665  						if newHeadBlock.NumberU64() == 0 {
   666  							// Recommit the genesis state into disk in case the rewinding destination
   667  							// is genesis block and the relevant state is gone. In the future this
   668  							// rewinding destination can be the earliest block stored in the chain
   669  							// if the historical chain pruning is enabled. In that case the logic
   670  							// needs to be improved here.
   671  							if !bc.HasState(bc.genesisBlock.Root()) {
   672  								if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil {
   673  									log.Crit("Failed to commit genesis state", "err", err)
   674  								}
   675  								log.Debug("Recommitted genesis state to disk")
   676  							}
   677  						}
   678  						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   679  						break
   680  					}
   681  					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
   682  					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
   683  				}
   684  			}
   685  			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
   686  
   687  			// Degrade the chain markers if they are explicitly reverted.
   688  			// In theory we should update all in-memory markers in the
   689  			// last step, however the direction of SetHead is from high
   690  			// to low, so it's safe to update in-memory markers directly.
   691  			bc.currentBlock.Store(newHeadBlock)
   692  			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
   693  		}
   694  		// Rewind the fast block in a simpleton way to the target head
   695  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
   696  			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
    697  			// If the rewound block is nil, reset to the genesis state
   698  			if newHeadFastBlock == nil {
   699  				newHeadFastBlock = bc.genesisBlock
   700  			}
   701  			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())
   702  
   703  			// Degrade the chain markers if they are explicitly reverted.
   704  			// In theory we should update all in-memory markers in the
   705  			// last step, however the direction of SetHead is from high
    706  			// to low, so it's safe to update in-memory markers directly.
   707  			bc.currentFastBlock.Store(newHeadFastBlock)
   708  			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
   709  		}
   710  		head := bc.CurrentBlock().NumberU64()
   711  
    712  		// If SetHead has underflown the freezer threshold and the block processing
   713  		// intent afterwards is full block importing, delete the chain segment
   714  		// between the stateful-block and the sethead target.
   715  		var wipe bool
   716  		if head+1 < frozen {
   717  			wipe = pivot == nil || head >= *pivot
   718  		}
   719  		return head, wipe // Only force wipe if full synced
   720  	}
   721  	// Rewind the header chain, deleting all block bodies until then
   722  	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
   723  		// Ignore the error here since light client won't hit this path
   724  		frozen, _ := bc.db.Ancients()
   725  		if num+1 <= frozen {
    726  			// Truncate all related data (header, total difficulty, body, receipt
    727  			// and canonical hash) from the ancient store.
   728  			if err := bc.db.TruncateHead(num); err != nil {
   729  				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
   730  			}
   731  			// Remove the hash <-> number mapping from the active store.
   732  			rawdb.DeleteHeaderNumber(db, hash)
   733  		} else {
    734  			// Remove related body and receipts from the active store.
   735  			// The header, total difficulty and canonical hash will be
   736  			// removed in the hc.SetHead function.
   737  			rawdb.DeleteBody(db, hash, num)
   738  			rawdb.DeleteReceipts(db, hash, num)
   739  			rawdb.DeleteBorReceipt(db, hash, num)
   740  			rawdb.DeleteBorTxLookupEntry(db, hash, num)
   741  		}
   742  		// Todo(rjl493456442) txlookup, bloombits, etc
   743  	}
   744  	// If SetHead was only called as a chain reparation method, try to skip
   745  	// touching the header chain altogether, unless the freezer is broken
   746  	if repair {
   747  		if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
   748  			bc.hc.SetHead(target, updateFn, delFn)
   749  		}
   750  	} else {
   751  		// Rewind the chain to the requested head and keep going backwards until a
   752  		// block with a state is found or fast sync pivot is passed
   753  		log.Warn("Rewinding blockchain", "target", head)
   754  		bc.hc.SetHead(head, updateFn, delFn)
   755  	}
   756  	// Clear out any stale content from the caches
   757  	bc.bodyCache.Purge()
   758  	bc.bodyRLPCache.Purge()
   759  	bc.receiptsCache.Purge()
   760  	bc.blockCache.Purge()
   761  	bc.txLookupCache.Purge()
   762  	bc.futureBlocks.Purge()
   763  	bc.borReceiptsCache.Purge()
   764  
   765  	return rootNumber, bc.loadLastState()
   766  }
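// rewindToStatefulAncestor is a hedged, simplified sketch (not part of the
// original file) of the core loop inside setHeadBeyondRoot above: walk the
// parent chain until a block whose state is still present is found, falling
// back to genesis on a gap. The real loop additionally honours the fast-sync
// pivot and the optional root threshold.
func (bc *BlockChain) rewindToStatefulAncestor(start *types.Block) *types.Block {
	for block := start; ; {
		if bc.HasState(block.Root()) {
			return block // found a viable head with available state
		}
		parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
		if parent == nil {
			return bc.genesisBlock // gap in the chain: aim for genesis
		}
		block = parent
	}
}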
   767  
   768  // SnapSyncCommitHead sets the current head block to the one defined by the hash
    769  // irrespective of what the chain contents were prior.
   770  func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
    771  	// Make sure that both the block and its state trie exist
   772  	block := bc.GetBlockByHash(hash)
   773  	if block == nil {
   774  		return fmt.Errorf("non existent block [%x..]", hash[:4])
   775  	}
   776  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
   777  		return err
   778  	}
   779  
   780  	// If all checks out, manually set the head block.
   781  	if !bc.chainmu.TryLock() {
   782  		return errChainStopped
   783  	}
   784  	bc.currentBlock.Store(block)
   785  	headBlockGauge.Update(int64(block.NumberU64()))
   786  	bc.chainmu.Unlock()
   787  
   788  	// Destroy any existing state snapshot and regenerate it in the background,
   789  	// also resuming the normal maintenance of any previously paused snapshot.
   790  	if bc.snaps != nil {
   791  		bc.snaps.Rebuild(block.Root())
   792  	}
   793  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   794  	return nil
   795  }
   796  
   797  // Reset purges the entire blockchain, restoring it to its genesis state.
   798  func (bc *BlockChain) Reset() error {
   799  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   800  }
   801  
   802  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   803  // specified genesis state.
   804  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   805  	// Dump the entire block chain and purge the caches
   806  	if err := bc.SetHead(0); err != nil {
   807  		return err
   808  	}
   809  	if !bc.chainmu.TryLock() {
   810  		return errChainStopped
   811  	}
   812  	defer bc.chainmu.Unlock()
   813  
   814  	// Prepare the genesis block and reinitialise the chain
   815  	batch := bc.db.NewBatch()
   816  	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
   817  	rawdb.WriteBlock(batch, genesis)
   818  	if err := batch.Write(); err != nil {
   819  		log.Crit("Failed to write genesis block", "err", err)
   820  	}
   821  	bc.writeHeadBlock(genesis)
   822  
   823  	// Last update all in-memory chain markers
   824  	bc.genesisBlock = genesis
   825  	bc.currentBlock.Store(bc.genesisBlock)
   826  	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
   827  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   828  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   829  	bc.currentFastBlock.Store(bc.genesisBlock)
   830  	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
   831  	return nil
   832  }
   833  
   834  // Export writes the active chain to the given writer.
   835  func (bc *BlockChain) Export(w io.Writer) error {
   836  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   837  }
   838  
   839  // ExportN writes a subset of the active chain to the given writer.
   840  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   841  	if !bc.chainmu.TryLock() {
   842  		return errChainStopped
   843  	}
   844  	defer bc.chainmu.Unlock()
   845  
   846  	if first > last {
   847  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   848  	}
   849  	log.Info("Exporting batch of blocks", "count", last-first+1)
   850  
   851  	start, reported := time.Now(), time.Now()
   852  	for nr := first; nr <= last; nr++ {
   853  		block := bc.GetBlockByNumber(nr)
   854  		if block == nil {
   855  			return fmt.Errorf("export failed on #%d: not found", nr)
   856  		}
   857  		if err := block.EncodeRLP(w); err != nil {
   858  			return err
   859  		}
   860  		if time.Since(reported) >= statsReportLimit {
   861  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   862  			reported = time.Now()
   863  		}
   864  	}
   865  	return nil
   866  }
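// exampleExportGzip is a hedged sketch (hypothetical helper, not part of the
// original file) showing how the Export API above might be driven through the
// gzip writer imported at the top of this file.
func exampleExportGzip(bc *BlockChain, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zw := gzip.NewWriter(f)
	defer zw.Close() // flush the gzip stream before the file closes

	return bc.Export(zw) // RLP-encodes every canonical block into the stream
}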
   867  
   868  // writeHeadBlock injects a new head block into the current block chain. This method
   869  // assumes that the block is indeed a true head. It will also reset the head
   870  // header and the head fast sync block to this very same block if they are older
   871  // or if they are on a different side chain.
   872  //
    873  // Note, this function assumes that the `chainmu` mutex is held!
   874  func (bc *BlockChain) writeHeadBlock(block *types.Block) {
   875  	// Add the block to the canonical chain number scheme and mark as the head
   876  	batch := bc.db.NewBatch()
   877  	rawdb.WriteHeadHeaderHash(batch, block.Hash())
   878  	rawdb.WriteHeadFastBlockHash(batch, block.Hash())
   879  	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
   880  	rawdb.WriteTxLookupEntriesByBlock(batch, block)
   881  	rawdb.WriteHeadBlockHash(batch, block.Hash())
   882  
   883  	// Flush the whole batch into the disk, exit the node if failed
   884  	if err := batch.Write(); err != nil {
   885  		log.Crit("Failed to update chain indexes and markers", "err", err)
   886  	}
   887  	// Update all in-memory chain markers in the last step
   888  	bc.hc.SetCurrentHeader(block.Header())
   889  
   890  	bc.currentFastBlock.Store(block)
   891  	headFastBlockGauge.Update(int64(block.NumberU64()))
   892  
   893  	bc.currentBlock.Store(block)
   894  	headBlockGauge.Update(int64(block.NumberU64()))
   895  }
   896  
   897  // Stop stops the blockchain service. If any imports are currently in progress
   898  // it will abort them using the procInterrupt.
   899  func (bc *BlockChain) Stop() {
   900  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   901  		return
   902  	}
   903  
   904  	// Unsubscribe all subscriptions registered from blockchain.
   905  	bc.scope.Close()
   906  
   907  	// Signal shutdown to all goroutines.
   908  	close(bc.quit)
   909  	bc.StopInsert()
   910  
   911  	// Now wait for all chain modifications to end and persistent goroutines to exit.
   912  	//
   913  	// Note: Close waits for the mutex to become available, i.e. any running chain
   914  	// modification will have exited when Close returns. Since we also called StopInsert,
   915  	// the mutex should become available quickly. It cannot be taken again after Close has
   916  	// returned.
   917  	bc.chainmu.Close()
   918  	bc.wg.Wait()
   919  
   920  	// Ensure that the entirety of the state snapshot is journalled to disk.
   921  	var snapBase common.Hash
   922  	if bc.snaps != nil {
   923  		var err error
   924  		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
   925  			log.Error("Failed to journal state snapshot", "err", err)
   926  		}
   927  	}
   928  
   929  	// Ensure the state of a recent block is also stored to disk before exiting.
   930  	// We're writing three different states to catch different restart scenarios:
   931  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   932  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   933  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   934  	if !bc.cacheConfig.TrieDirtyDisabled {
   935  		triedb := bc.stateCache.TrieDB()
   936  
   937  		for _, offset := range []uint64{0, 1, bc.cacheConfig.TriesInMemory - 1} {
   938  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   939  				recent := bc.GetBlockByNumber(number - offset)
   940  
   941  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   942  				if err := triedb.Commit(recent.Root(), true, nil); err != nil {
   943  					log.Error("Failed to commit recent state trie", "err", err)
   944  				}
   945  			}
   946  		}
   947  		if snapBase != (common.Hash{}) {
   948  			log.Info("Writing snapshot state to disk", "root", snapBase)
   949  			if err := triedb.Commit(snapBase, true, nil); err != nil {
   950  				log.Error("Failed to commit recent state trie", "err", err)
   951  			}
   952  		}
   953  		for !bc.triegc.Empty() {
   954  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
   955  		}
   956  		if size, _ := triedb.Size(); size != 0 {
   957  			log.Error("Dangling trie nodes after full cleanup")
   958  		}
   959  	}
    960  	// Ensure all live cached entries are saved to disk, so that we can skip
    961  	// cache warmup when the node restarts.
   962  	if bc.cacheConfig.TrieCleanJournal != "" {
   963  		triedb := bc.stateCache.TrieDB()
   964  		triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
   965  	}
   966  	log.Info("Blockchain stopped")
   967  }
   968  
   969  // StopInsert interrupts all insertion methods, causing them to return
   970  // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
   971  // calling this method.
   972  func (bc *BlockChain) StopInsert() {
   973  	atomic.StoreInt32(&bc.procInterrupt, 1)
   974  }
   975  
   976  // insertStopped returns true after StopInsert has been called.
   977  func (bc *BlockChain) insertStopped() bool {
   978  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   979  }
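// exampleInterruptibleLoop is a hedged sketch (not part of the original file)
// of how insertion code consults the flag above: long-running loops poll
// insertStopped and bail out with errInsertionInterrupted.
func (bc *BlockChain) exampleInterruptibleLoop(blocks types.Blocks) error {
	for range blocks {
		if bc.insertStopped() {
			return errInsertionInterrupted
		}
		// per-block work elided in this sketch
	}
	return nil
}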
   980  
   981  func (bc *BlockChain) procFutureBlocks() {
   982  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   983  	for _, hash := range bc.futureBlocks.Keys() {
   984  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   985  			blocks = append(blocks, block.(*types.Block))
   986  		}
   987  	}
   988  	if len(blocks) > 0 {
   989  		sort.Slice(blocks, func(i, j int) bool {
   990  			return blocks[i].NumberU64() < blocks[j].NumberU64()
   991  		})
   992  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   993  		for i := range blocks {
   994  			bc.InsertChain(blocks[i : i+1])
   995  		}
   996  	}
   997  }
   998  
   999  // WriteStatus status of write
  1000  type WriteStatus byte
  1001  
  1002  const (
  1003  	NonStatTy WriteStatus = iota
  1004  	CanonStatTy
  1005  	SideStatTy
  1006  )
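// exampleWriteStatus is a hedged sketch (not part of the original file) of how
// callers typically interpret the WriteStatus values above after a block write.
func exampleWriteStatus(status WriteStatus) string {
	switch status {
	case CanonStatTy:
		return "block joined the canonical chain"
	case SideStatTy:
		return "block was written to a side chain"
	default:
		return "block was not written"
	}
}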
  1007  
  1008  // InsertReceiptChain attempts to complete an already existing header chain with
  1009  // transaction and receipt data.
  1010  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
  1011  	// We don't require the chainMu here since we want to maximize the
  1012  	// concurrency of header insertion and receipt insertion.
  1013  	bc.wg.Add(1)
  1014  	defer bc.wg.Done()
  1015  
  1016  	var (
  1017  		ancientBlocks, liveBlocks     types.Blocks
  1018  		ancientReceipts, liveReceipts []types.Receipts
  1019  	)
  1020  
  1021  	// Do a sanity check that the provided chain is actually ordered and linked
  1022  	for i := 0; i < len(blockChain); i++ {
  1023  		if i != 0 {
  1024  			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
  1025  				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
  1026  					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
  1027  				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(),
  1028  					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
  1029  			}
  1030  		}
  1031  		if blockChain[i].NumberU64() <= ancientLimit {
  1032  			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
  1033  		} else {
  1034  			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
  1035  		}
  1036  	}
  1037  
  1038  	var (
  1039  		stats = struct{ processed, ignored int32 }{}
  1040  		start = time.Now()
  1041  		size  = int64(0)
  1042  	)
  1043  
  1044  	// updateHead updates the head fast sync block if the inserted blocks are better
  1045  	// and returns an indicator whether the inserted blocks are canonical.
  1046  	updateHead := func(head *types.Block, headers []*types.Header) bool {
  1047  		if !bc.chainmu.TryLock() {
  1048  			return false
  1049  		}
  1050  		defer bc.chainmu.Unlock()
  1051  
  1052  		// Rewind may have occurred, skip in that case.
  1053  		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
  1054  			reorg, err := bc.forker.ReorgNeeded(bc.CurrentFastBlock().Header(), head.Header())
  1055  			if err != nil {
  1056  				log.Warn("Reorg failed", "err", err)
  1057  				return false
  1058  			} else if !reorg {
  1059  				return false
  1060  			}
  1061  
  1062  			isValid, err := bc.forker.ValidateReorg(bc.CurrentFastBlock().Header(), headers)
  1063  			if err != nil {
  1064  				log.Warn("Reorg failed", "err", err)
  1065  				return false
  1066  			} else if !isValid {
  1067  				return false
  1068  			}
  1069  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
  1070  			bc.currentFastBlock.Store(head)
  1071  			headFastBlockGauge.Update(int64(head.NumberU64()))
  1072  			return true
  1073  		}
  1074  		return false
  1075  	}
  1076  
  1077  	// writeAncient writes blockchain and corresponding receipt chain into ancient store.
  1078  	//
   1079  	// This function only accepts canonical chain data. All side chains will be reverted
  1080  	// eventually.
  1081  	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1082  		first := blockChain[0]
  1083  		last := blockChain[len(blockChain)-1]
  1084  
  1085  		// Ensure genesis is in ancients.
  1086  		if first.NumberU64() == 1 {
  1087  			if frozen, _ := bc.db.Ancients(); frozen == 0 {
  1088  				b := bc.genesisBlock
  1089  				td := bc.genesisBlock.Difficulty()
  1090  				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, []types.Receipts{nil}, td)
  1091  				size += writeSize
  1092  				if err != nil {
  1093  					log.Error("Error writing genesis to ancients", "err", err)
  1094  					return 0, err
  1095  				}
  1096  				log.Info("Wrote genesis to ancients")
  1097  			}
  1098  		}
  1099  		// Before writing the blocks to the ancients, we need to ensure that
   1100  		// they correspond to what the headerchain 'expects'.
  1101  		// We only check the last block/header, since it's a contiguous chain.
  1102  		if !bc.HasHeader(last.Hash(), last.NumberU64()) {
  1103  			return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
  1104  		}
  1105  
  1106  		// BOR: Retrieve all the bor receipts and also maintain the array of headers
  1107  		// for bor specific reorg check.
  1108  		borReceipts := []types.Receipts{}
  1109  
  1110  		var headers []*types.Header
  1111  		for _, block := range blockChain {
  1112  			borReceipts = append(borReceipts, []*types.Receipt{bc.GetBorReceiptByHash(block.Hash())})
  1113  			headers = append(headers, block.Header())
  1114  		}
  1115  
  1116  		// Write all chain data to ancients.
  1117  		td := bc.GetTd(first.Hash(), first.NumberU64())
  1118  		writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, borReceipts, td)
  1119  		size += writeSize
  1120  		if err != nil {
  1121  			log.Error("Error importing chain data to ancients", "err", err)
  1122  			return 0, err
  1123  		}
  1124  
  1125  		// Write tx indices if any condition is satisfied:
   1126  		// * If the user requires all tx indices to be reserved (txlookuplimit=0)
   1127  		// * If all ancient tx indices are required to be reserved (txlookuplimit is even higher than ancientlimit)
   1128  		// * If the block number is large enough to be regarded as a recent block
   1129  		// This means blocks below ancientLimit-txlookupLimit won't be indexed.
  1130  		//
   1131  		// But if the `TxIndexTail` is not nil, e.g. when Geth is initialized with
   1132  		// an external ancient database, the blockchain will start a background
   1133  		// routine during setup to re-index all indices in the [ancients - txlookupLimit, ancients)
   1134  		// range. In this case, all tx indices of newly imported blocks should be
  1135  		// generated.
  1136  		var batch = bc.db.NewBatch()
  1137  		for i, block := range blockChain {
  1138  			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
  1139  				rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1140  			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
  1141  				rawdb.WriteTxLookupEntriesByBlock(batch, block)
  1142  			}
  1143  			stats.processed++
  1144  
  1145  			if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
  1146  				size += int64(batch.ValueSize())
  1147  				if err = batch.Write(); err != nil {
  1148  					fastBlock := bc.CurrentFastBlock().NumberU64()
  1149  					if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
  1150  						log.Error("Can't truncate ancient store after failed insert", "err", err)
  1151  					}
  1152  					return 0, err
  1153  				}
  1154  				batch.Reset()
  1155  			}
  1156  		}
  1157  
  1158  		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
  1159  		if err := bc.db.Sync(); err != nil {
  1160  			return 0, err
  1161  		}
  1162  		// Update the current fast block because all block data is now present in DB.
  1163  		previousFastBlock := bc.CurrentFastBlock().NumberU64()
  1164  		if !updateHead(blockChain[len(blockChain)-1], headers) {
  1165  			// We end up here if the header chain has reorg'ed, and the blocks/receipts
  1166  			// don't match the canonical chain.
  1167  			if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
  1168  				log.Error("Can't truncate ancient store after failed insert", "err", err)
  1169  			}
  1170  			return 0, errSideChainReceipts
  1171  		}
  1172  
  1173  		// Delete block data from the main database.
  1174  		batch.Reset()
  1175  		canonHashes := make(map[common.Hash]struct{})
  1176  		for _, block := range blockChain {
  1177  			canonHashes[block.Hash()] = struct{}{}
  1178  			if block.NumberU64() == 0 {
  1179  				continue
  1180  			}
  1181  			rawdb.DeleteCanonicalHash(batch, block.NumberU64())
  1182  			rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
  1183  		}
  1184  		// Delete side chain hash-to-number mappings.
  1185  		for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
  1186  			if _, canon := canonHashes[nh.Hash]; !canon {
  1187  				rawdb.DeleteHeader(batch, nh.Hash, nh.Number)
  1188  			}
  1189  		}
  1190  		if err := batch.Write(); err != nil {
  1191  			return 0, err
  1192  		}
  1193  		return 0, nil
  1194  	}
  1195  
  1196  	// writeLive writes the blockchain and the corresponding receipt chain into the active store.
  1197  	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1198  		skipPresenceCheck := false
  1199  		batch := bc.db.NewBatch()
  1200  		headers := make([]*types.Header, 0, len(blockChain))
  1201  		for i, block := range blockChain {
  1202  			// Update the headers for the bor-specific reorg check
  1203  			headers = append(headers, block.Header())
  1204  
  1205  			// Short circuit insertion if shutting down or processing failed
  1206  			if bc.insertStopped() {
  1207  				return 0, errInsertionInterrupted
  1208  			}
  1209  			// Short circuit if the owner header is unknown
  1210  			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1211  				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
  1212  			}
  1213  			if !skipPresenceCheck {
  1214  				// Ignore if the entire data is already known
  1215  				if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1216  					stats.ignored++
  1217  					continue
  1218  				} else {
  1219  					// If block N is not present, neither are the later blocks.
  1220  					// This should be true, but if we are mistaken, the shortcut
  1221  					// here will only cause overwriting of some existing data
  1222  					skipPresenceCheck = true
  1223  				}
  1224  			}
  1225  			// Write all the data out into the database
  1226  			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
  1227  			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
  1228  			rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed
  1229  
  1230  			// Write everything belonging to the blocks into the database, so that
  1231  			// we can ensure all components of the body (body, receipts,
  1232  			// tx indexes) are complete.
  1233  			if batch.ValueSize() >= ethdb.IdealBatchSize {
  1234  				if err := batch.Write(); err != nil {
  1235  					return 0, err
  1236  				}
  1237  				size += int64(batch.ValueSize())
  1238  				batch.Reset()
  1239  			}
  1240  			stats.processed++
  1241  		}
  1242  		// Write everything belonging to the blocks into the database, so that
  1243  		// we can ensure all components of the body (body, receipts,
  1244  		// tx indexes) are complete.
  1245  		if batch.ValueSize() > 0 {
  1246  			size += int64(batch.ValueSize())
  1247  			if err := batch.Write(); err != nil {
  1248  				return 0, err
  1249  			}
  1250  		}
  1251  
  1252  		updateHead(blockChain[len(blockChain)-1], headers)
  1253  		return 0, nil
  1254  	}
  1255  
  1256  	// Write downloaded chain data and corresponding receipt chain data
  1257  	if len(ancientBlocks) > 0 {
  1258  		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
  1259  			if err == errInsertionInterrupted {
  1260  				return 0, nil
  1261  			}
  1262  			return n, err
  1263  		}
  1264  	}
  1265  	// Write the tx index tail (block number from which we index) before writing any live blocks
  1266  	if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
  1267  		// The tx index tail can only take one of the following two values:
  1268  		// * 0: all ancient blocks have been indexed
  1269  		// * ancient-limit: the indices of blocks before ancient-limit are ignored
  1270  		if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
  1271  			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
  1272  				rawdb.WriteTxIndexTail(bc.db, 0)
  1273  			} else {
  1274  				rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
  1275  			}
  1276  		}
  1277  	}
  1278  	if len(liveBlocks) > 0 {
  1279  		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
  1280  			if err == errInsertionInterrupted {
  1281  				return 0, nil
  1282  			}
  1283  			return n, err
  1284  		}
  1285  	}
  1286  
  1287  	head := blockChain[len(blockChain)-1]
  1288  	context := []interface{}{
  1289  		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
  1290  		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
  1291  		"size", common.StorageSize(size),
  1292  	}
  1293  	if stats.ignored > 0 {
  1294  		context = append(context, []interface{}{"ignored", stats.ignored}...)
  1295  	}
  1296  	log.Info("Imported new block receipts", context...)
  1297  
  1298  	return 0, nil
  1299  }
  1300  
  1301  var lastWrite uint64
  1302  
  1303  // writeBlockWithoutState writes only the block and its metadata to the database,
  1304  // but does not write any state. This is used to construct competing side forks
  1305  // up to the point where they exceed the canonical total difficulty.
  1306  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1307  	if bc.insertStopped() {
  1308  		return errInsertionInterrupted
  1309  	}
  1310  
  1311  	batch := bc.db.NewBatch()
  1312  	rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
  1313  	rawdb.WriteBlock(batch, block)
  1314  	if err := batch.Write(); err != nil {
  1315  		log.Crit("Failed to write block into disk", "err", err)
  1316  	}
  1317  	return nil
  1318  }
  1319  
  1320  // writeKnownBlock updates the head block flag with a known block
  1321  // and introduces chain reorg if necessary.
  1322  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1323  	current := bc.CurrentBlock()
  1324  	if block.ParentHash() != current.Hash() {
  1325  		if err := bc.reorg(current, block); err != nil {
  1326  			return err
  1327  		}
  1328  	}
  1329  	bc.writeHeadBlock(block)
  1330  	return nil
  1331  }
  1332  
  1333  // writeBlockWithState writes block, metadata and corresponding state data to the
  1334  // database.
  1335  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) ([]*types.Log, error) {
  1336  	// Calculate the total difficulty of the block
  1337  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1338  	if ptd == nil {
  1339  		return []*types.Log{}, consensus.ErrUnknownAncestor
  1340  	}
  1341  	// Make sure no inconsistent state is leaked during insertion
  1342  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
  1343  
  1344  	// Regardless of the canonical status, write the block itself to the database.
  1345  	//
  1346  	// Note that all the components of the block (td, hash->number map, header, body,
  1347  	// receipts) should be written atomically. blockBatch is used to contain all components.
  1348  	blockBatch := bc.db.NewBatch()
  1349  	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
  1350  	rawdb.WriteBlock(blockBatch, block)
  1351  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
  1352  
  1353  	// System call appends state-sync logs into state. So, `state.Logs()` contains
  1354  	// all logs including system-call logs (state sync logs) while `logs` contains
  1355  	// only logs generated by transactions (receipts).
  1356  	//
  1357  	// That means that state.Logs() can have more logs than the receipt logs.
  1358  	// In that case, we can safely assume the extra logs are state sync logs.
  1359  	//
  1360  	// block logs = receipt logs + state sync logs = `state.Logs()`
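        	//
        	// For example (illustrative): if the receipts carry 3 logs but
        	// state.Logs() returns 5, the trailing two entries (blockLogs[3:])
        	// are treated as state-sync logs below.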
  1361  	blockLogs := state.Logs()
  1362  	var stateSyncLogs []*types.Log
  1363  	if len(blockLogs) > 0 {
  1364  		sort.SliceStable(blockLogs, func(i, j int) bool {
  1365  			return blockLogs[i].Index < blockLogs[j].Index
  1366  		})
  1367  
  1368  		if len(blockLogs) > len(logs) {
  1369  			stateSyncLogs = blockLogs[len(logs):] // get state-sync logs from `state.Logs()`
  1370  
  1371  			// State sync logs don't have the tx index, tx hash and other necessary fields;
  1372  			// DeriveFieldsForBorLogs will fill those fields in for websocket subscriptions.
  1373  			types.DeriveFieldsForBorLogs(stateSyncLogs, block.Hash(), block.NumberU64(), uint(len(receipts)), uint(len(logs)))
  1374  
  1375  			// Write bor receipt
  1376  			rawdb.WriteBorReceipt(blockBatch, block.Hash(), block.NumberU64(), &types.ReceiptForStorage{
  1377  				Status: types.ReceiptStatusSuccessful, // make receipt status successful
  1378  				Logs:   stateSyncLogs,
  1379  			})
  1380  
  1381  			// Write bor tx reverse lookup
  1382  			rawdb.WriteBorTxLookupEntry(blockBatch, block.Hash(), block.NumberU64())
  1383  		}
  1384  	}
  1385  
  1386  	rawdb.WritePreimages(blockBatch, state.Preimages())
  1387  	if err := blockBatch.Write(); err != nil {
  1388  		log.Crit("Failed to write block into disk", "err", err)
  1389  	}
  1390  	// Commit all cached state changes into underlying memory database.
  1391  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
  1392  	if err != nil {
  1393  		return []*types.Log{}, err
  1394  	}
  1395  	triedb := bc.stateCache.TrieDB()
  1396  
  1397  	// If we're running an archive node, always flush
  1398  	if bc.cacheConfig.TrieDirtyDisabled {
  1399  		return []*types.Log{}, triedb.Commit(root, false, nil)
  1400  	} else {
  1401  		// Full but not archive node, do proper garbage collection
  1402  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
  1403  		bc.triegc.Push(root, -int64(block.NumberU64()))
  1404  
  1405  		if current := block.NumberU64(); current > bc.cacheConfig.TriesInMemory {
  1406  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1407  			var (
  1408  				nodes, imgs = triedb.Size()
  1409  				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
  1410  			)
  1411  			if nodes > limit || imgs > 4*1024*1024 {
  1412  				triedb.Cap(limit - ethdb.IdealBatchSize)
  1413  			}
  1414  			// Find the next state trie we need to commit
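        			//
        			// For example (assuming the default of 128 tries kept in memory):
        			// at block 1128 the candidate trie to commit is the one for block 1000.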
  1415  			chosen := current - bc.cacheConfig.TriesInMemory
  1416  
  1417  			// If we exceeded our time allowance, flush an entire trie to disk
  1418  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
  1419  				// If the header is missing (canonical chain behind), we're reorging a low
  1420  				// diff sidechain. Suspend committing until this operation is completed.
  1421  				header := bc.GetHeaderByNumber(chosen)
  1422  				if header == nil {
  1423  					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
  1424  				} else {
  1425  					// If we're exceeding limits but haven't reached a large enough memory gap,
  1426  					// warn the user that the system is becoming unstable.
  1427  					if chosen < lastWrite+bc.cacheConfig.TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
  1428  						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TriesInMemory, "optimum", float64(chosen-lastWrite)/float64((bc.cacheConfig.TriesInMemory)))
  1429  					}
  1430  					// Flush an entire trie and restart the counters
  1431  					triedb.Commit(header.Root, true, nil)
  1432  					lastWrite = chosen
  1433  					bc.gcproc = 0
  1434  				}
  1435  			}
  1436  			// Garbage collect anything below our required write retention
  1437  			for !bc.triegc.Empty() {
  1438  				root, number := bc.triegc.Pop()
  1439  				if uint64(-number) > chosen {
  1440  					bc.triegc.Push(root, number)
  1441  					break
  1442  				}
  1443  				triedb.Dereference(root.(common.Hash))
  1444  			}
  1445  		}
  1446  	}
  1447  	return stateSyncLogs, nil
  1448  }
  1449  
  1450  // WriteBlockAndSetHead writes the block and all associated state to the database, and applies the block as the new chain head.
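        //
        // Example (hypothetical caller; ctx is any context.Context, and the other
        // inputs are assumed to come from a successful block processing run):
        //
        //	status, err := bc.WriteBlockAndSetHead(ctx, block, receipts, logs, statedb, true)
        //	if err == nil && status == CanonStatTy {
        //		// the block is now the canonical chain head
        //	}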
  1451  func (bc *BlockChain) WriteBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1452  	if !bc.chainmu.TryLock() {
  1453  		return NonStatTy, errChainStopped
  1454  	}
  1455  	defer bc.chainmu.Unlock()
  1456  
  1457  	return bc.writeBlockAndSetHead(ctx, block, receipts, logs, state, emitHeadEvent)
  1458  }
  1459  
  1460  // writeBlockAndSetHead writes the block and all associated state to the database,
  1461  // and also it applies the given block as the new chain head. This function expects
  1462  // the chain mutex to be held.
  1463  func (bc *BlockChain) writeBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1464  	writeBlockAndSetHeadCtx, span := tracing.StartSpan(ctx, "blockchain.writeBlockAndSetHead")
  1465  	defer tracing.EndSpan(span)
  1466  
  1467  	var stateSyncLogs []*types.Log
  1468  
  1469  	tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeBlockWithState", func(_ context.Context, span trace.Span) {
  1470  		stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state)
  1471  		tracing.SetAttributes(
  1472  			span,
  1473  			attribute.Int("number", int(block.Number().Uint64())),
  1474  			attribute.Bool("error", err != nil),
  1475  		)
  1476  	})
  1477  
  1478  	if err != nil {
  1479  		return NonStatTy, err
  1480  	}
  1481  
  1482  	currentBlock := bc.CurrentBlock()
  1483  
  1484  	var reorg bool
  1485  
  1486  	tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.ReorgNeeded", func(_ context.Context, span trace.Span) {
  1487  		reorg, err = bc.forker.ReorgNeeded(currentBlock.Header(), block.Header())
  1488  		tracing.SetAttributes(
  1489  			span,
  1490  			attribute.Int("number", int(block.Number().Uint64())),
  1491  			attribute.Int("current block", int(currentBlock.Number().Uint64())),
  1492  			attribute.Bool("reorg needed", reorg),
  1493  			attribute.Bool("error", err != nil),
  1494  		)
  1495  	})
  1496  	if err != nil {
  1497  		return NonStatTy, err
  1498  	}
  1499  
  1500  	tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.reorg", func(_ context.Context, span trace.Span) {
  1501  		if reorg {
  1502  			// Reorganise the chain if the parent is not the head block
  1503  			if block.ParentHash() != currentBlock.Hash() {
  1504  				if err = bc.reorg(currentBlock, block); err != nil {
  1505  					status = NonStatTy
  1506  				}
  1507  			}
  1508  			// Only mark the block canonical if the reorg (if any) succeeded,
        			// so that a failed reorg is not overwritten and silently dropped.
        			if err == nil {
        				status = CanonStatTy
        			}
  1509  		} else {
  1510  			status = SideStatTy
  1511  		}
  1512  
  1513  		tracing.SetAttributes(
  1514  			span,
  1515  			attribute.Int("number", int(block.Number().Uint64())),
  1516  			attribute.Int("current block", int(currentBlock.Number().Uint64())),
  1517  			attribute.Bool("reorg needed", reorg),
  1518  			attribute.Bool("error", err != nil),
  1519  			attribute.String("status", string(status)),
  1520  		)
  1521  	})
  1522  
  1523  	if status == NonStatTy {
  1524  		return
  1525  	}
  1526  
  1527  	// Set new head.
  1528  	if status == CanonStatTy {
  1529  		tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeHeadBlock", func(_ context.Context, _ trace.Span) {
  1530  			bc.writeHeadBlock(block)
  1531  		})
  1532  	}
  1533  
  1534  	bc.futureBlocks.Remove(block.Hash())
  1535  
  1536  	if status == CanonStatTy {
  1537  		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
  1538  		if len(logs) > 0 {
  1539  			bc.logsFeed.Send(logs)
  1540  		}
  1541  
  1542  		// Send state sync logs into the logs feed
  1543  		if len(stateSyncLogs) > 0 {
  1544  			bc.logsFeed.Send(stateSyncLogs)
  1545  		}
  1546  
  1547  		// In theory we should fire a ChainHeadEvent when we inject
  1548  		// a canonical block, but sometimes we can insert a batch of
  1549  		// canonical blocks. To avoid firing too many ChainHeadEvents,
  1550  		// we fire a single accumulated ChainHeadEvent instead and
  1551  		// disable firing the event here.
  1552  		if emitHeadEvent {
  1553  			bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1554  			// BOR state sync feed related changes
  1555  			for _, data := range bc.stateSyncData {
  1556  				bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
  1557  			}
  1558  			// BOR
  1559  		}
  1560  	} else {
  1561  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1562  
  1563  		bc.chain2HeadFeed.Send(Chain2HeadEvent{
  1564  			Type:     Chain2HeadForkEvent,
  1565  			NewChain: []*types.Block{block},
  1566  		})
  1567  	}
  1568  	return status, nil
  1569  }
  1570  
  1571  // addFutureBlock checks if the block is within the max allowed window to get
  1572  // accepted for future processing, and returns an error if the block is too far
  1573  // ahead and was not added.
  1574  //
  1575  // TODO: after the merge transition, future blocks shouldn't be kept, since
  1576  // they are no longer checked on the Geth side.
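        //
        // For example: with maxTimeFutureBlocks at its upstream default of 30
        // (assumed here), a block timestamped 31s ahead of local time is rejected,
        // while one 29s ahead is queued for later processing.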
  1577  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1578  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1579  	if block.Time() > max {
  1580  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1581  	}
  1582  	if block.Difficulty().Cmp(common.Big0) == 0 {
  1583  		// Never add PoS blocks into the future queue
  1584  		return nil
  1585  	}
  1586  	bc.futureBlocks.Add(block.Hash(), block)
  1587  	return nil
  1588  }
  1589  
  1590  // InsertChain attempts to insert the given batch of blocks into the canonical
  1591  // chain or, otherwise, to create a fork. On failure, it returns the index
  1592  // number of the failing block as well as an error describing what went
  1593  // wrong. After insertion is done, all accumulated events will be fired.
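        //
        // Example (hypothetical caller; the block source is assumed):
        //
        //	if idx, err := bc.InsertChain(blocks); err != nil {
        //		log.Error("Chain import failed", "index", idx, "err", err)
        //	}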
  1594  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1595  	// Sanity check that we have something meaningful to import
  1596  	if len(chain) == 0 {
  1597  		return 0, nil
  1598  	}
  1599  	bc.blockProcFeed.Send(true)
  1600  	defer bc.blockProcFeed.Send(false)
  1601  
  1602  	// Do a sanity check that the provided chain is actually ordered and linked.
  1603  	for i := 1; i < len(chain); i++ {
  1604  		block, prev := chain[i], chain[i-1]
  1605  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1606  			log.Error("Non contiguous block insert",
  1607  				"number", block.Number(),
  1608  				"hash", block.Hash(),
  1609  				"parent", block.ParentHash(),
  1610  				"prevnumber", prev.Number(),
  1611  				"prevhash", prev.Hash(),
  1612  			)
  1613  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(),
  1614  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1615  		}
  1616  	}
  1617  	// Pre-checks passed, start the full block imports
  1618  	if !bc.chainmu.TryLock() {
  1619  		return 0, errChainStopped
  1620  	}
  1621  	defer bc.chainmu.Unlock()
  1622  	return bc.insertChain(chain, true, true)
  1623  }
  1624  
  1625  // insertChain is the internal implementation of InsertChain, which assumes that
  1626  // 1) chains are contiguous, and 2) the chain mutex is held.
  1627  //
  1628  // This method is split out so that import batches that require re-injecting
  1629  // historical blocks can do so without releasing the lock, which could lead to
  1630  // racey behaviour. If a sidechain import is in progress, and the historic state
  1631  // is imported, but then a new canon-head is added before the actual sidechain
  1632  // completes, the historic state could be pruned again.
  1633  func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) (int, error) {
  1634  	// If the chain is terminating, don't even bother starting up.
  1635  	if bc.insertStopped() {
  1636  		return 0, nil
  1637  	}
  1638  
  1639  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1640  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1641  
  1642  	var (
  1643  		stats     = insertStats{startTime: mclock.Now()}
  1644  		lastCanon *types.Block
  1645  	)
  1646  	// Fire a single chain head event if we've progressed the chain
  1647  	defer func() {
  1648  		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1649  			bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
  1650  		}
  1651  	}()
  1652  	// Start the parallel header verifier
  1653  	headers := make([]*types.Header, len(chain))
  1654  	seals := make([]bool, len(chain))
  1655  
  1656  	for i, block := range chain {
  1657  		headers[i] = block.Header()
  1658  		seals[i] = verifySeals
  1659  	}
  1660  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1661  	defer close(abort)
  1662  
  1663  	// Peek at the error for the first block to decide the direction of the import logic
  1664  	it := newInsertIterator(chain, results, bc.validator)
  1665  	block, err := it.next()
  1666  
  1667  	// Update the block import meter; it just records chains we've received
  1668  	// from other peers. (Note that the portion which actually gets imported
  1669  	// may be quite low.)
  1670  	blockImportTimer.Mark(int64(len(headers)))
  1671  
  1672  	// Check the validity of incoming chain
  1673  	isValid, err1 := bc.forker.ValidateReorg(bc.CurrentBlock().Header(), headers)
  1674  	if err1 != nil {
  1675  		return it.index, err1
  1676  	}
  1677  
  1678  	if !isValid {
  1679  		// The chain to be imported is invalid, as the blocks don't match
  1680  		// the whitelisted checkpoints.
  1681  		return it.index, whitelist.ErrCheckpointMismatch
  1682  	}
  1683  
  1684  	// Left-trim all the known blocks that don't need their snapshots rebuilt
  1685  	if bc.skipBlock(err, it) {
  1686  		// First block (and state) is known
  1687  		//   1. We did a roll-back, and should now do a re-import
  1688  		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
  1689  		//      from the canonical chain, which has not been verified.
  1690  		// Skip all known blocks that are behind us.
  1691  		var (
  1692  			reorg   bool
  1693  			current = bc.CurrentBlock()
  1694  		)
  1695  		for block != nil && bc.skipBlock(err, it) {
  1696  			reorg, err = bc.forker.ReorgNeeded(current.Header(), block.Header())
  1697  			if err != nil {
  1698  				return it.index, err
  1699  			}
  1700  			if reorg {
  1701  				// Switch to import mode if the forker says the reorg is necessary
  1702  				// and also the block is not on the canonical chain.
  1703  				// In eth2 the forker always returns true for reorg decision (blindly trusting
  1704  				// the external consensus engine), but in order to prevent the unnecessary
  1705  				// reorgs when importing known blocks, the special case is handled here.
  1706  				if block.NumberU64() > current.NumberU64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
  1707  					break
  1708  				}
  1709  			}
  1710  			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
  1711  			stats.ignored++
  1712  
  1713  			block, err = it.next()
  1714  		}
  1715  		// The remaining blocks are still known blocks, and the only scenario here is:
  1716  		// during fast sync, the pivot point was already submitted but a rollback
  1717  		// happened. The node then reset the head full block to a lower height via
  1718  		// `rollback` and left a few known blocks in the database.
  1719  		//
  1720  		// When the node runs a fast sync again, it can re-import a batch of known
  1721  		// blocks via `insertChain` while some of them have a higher total difficulty
  1722  		// than the current head full block (the new pivot point).
  1723  		for block != nil && bc.skipBlock(err, it) {
  1724  			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
  1725  			if err := bc.writeKnownBlock(block); err != nil {
  1726  				return it.index, err
  1727  			}
  1728  			lastCanon = block
  1729  
  1730  			block, err = it.next()
  1731  		}
  1732  		// Falls through to the block import
  1733  	}
  1734  	switch {
  1735  	// First block is pruned
  1736  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1737  		if setHead {
  1738  			// First block is pruned, insert as sidechain and reorg only if TD grows enough
  1739  			log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
  1740  			return bc.insertSideChain(block, it)
  1741  		} else {
  1742  			// We're post-merge and the parent is pruned, try to recover the parent state
  1743  			log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
  1744  			return it.index, bc.recoverAncestors(block)
  1745  		}
  1746  	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
  1747  	case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
  1748  		for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
  1749  			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
  1750  			if err := bc.addFutureBlock(block); err != nil {
  1751  				return it.index, err
  1752  			}
  1753  			block, err = it.next()
  1754  		}
  1755  		stats.queued += it.processed()
  1756  		stats.ignored += it.remaining()
  1757  
  1758  		// If there are any still remaining, mark as ignored
  1759  		return it.index, err
  1760  
  1761  	// Some other error (except ErrKnownBlock) occurred, abort.
  1762  	// ErrKnownBlock is allowed here since some known blocks
  1763  	// still need re-execution to generate snapshots that are missing
  1764  	case err != nil && !errors.Is(err, ErrKnownBlock):
  1765  		bc.futureBlocks.Remove(block.Hash())
  1766  		stats.ignored += len(it.chain)
  1767  		bc.reportBlock(block, nil, err)
  1768  		return it.index, err
  1769  	}
  1770  	// No validation errors for the first block (or chain prefix skipped)
  1771  	var activeState *state.StateDB
  1772  	defer func() {
  1773  		// The chain importer is starting and stopping trie prefetchers. If a bad
  1774  		// block or other error is hit however, an early return may not properly
  1775  		// terminate the background threads. This defer ensures that we clean up
  1776  		// any dangling prefetcher, without deferring each one and holding on to live refs.
  1777  		if activeState != nil {
  1778  			activeState.StopPrefetcher()
  1779  		}
  1780  	}()
  1781  
  1782  	// accumulator for canonical blocks
  1783  	var canonAccum []*types.Block
  1784  
  1785  	emitAccum := func() {
  1786  		size := len(canonAccum)
  1787  		if size == 0 || size > 5 {
  1788  			// avoid reporting events during large sync operations
  1789  			return
  1790  		}
  1791  		bc.chain2HeadFeed.Send(Chain2HeadEvent{
  1792  			Type:     Chain2HeadCanonicalEvent,
  1793  			NewChain: canonAccum,
  1794  		})
  1795  		canonAccum = canonAccum[:0]
  1796  	}
  1797  
  1798  	for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
  1799  		// If the chain is terminating, stop processing blocks
  1800  		if bc.insertStopped() {
  1801  			log.Debug("Abort during block processing")
  1802  			break
  1803  		}
  1804  		// If the header is a banned one, straight out abort
  1805  		if BadHashes[block.Hash()] {
  1806  			bc.reportBlock(block, nil, ErrBannedHash)
  1807  			return it.index, ErrBannedHash
  1808  		}
  1809  		// If the block is known (in the middle of the chain), it's a special case for
  1810  		// Clique blocks where they can share state among each other, so importing an
  1811  		// older block might complete the state of the subsequent one. In this case,
  1812  		// just skip the block (we already validated it once fully (and crashed), since
  1813  		// its header and body were already in the database). But if the corresponding
  1814  		// snapshot layer is missing, forcibly rerun the execution to build it.
  1815  		if bc.skipBlock(err, it) {
  1816  			logger := log.Debug
  1817  			if bc.chainConfig.Clique == nil {
  1818  				logger = log.Warn
  1819  			}
  1820  			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
  1821  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1822  				"root", block.Root())
  1823  
  1824  			// Special case. Commit the empty receipt slice if we meet the known
  1825  			// block in the middle. It can only happen in a Clique chain. Whenever
  1826  			// we insert blocks via `insertSideChain`, we only commit `td`, `header`
  1827  			// and `body` if they are non-existent. Since we don't have receipts
  1828  			// without re-execution, there is nothing to commit. But if the sidechain
  1829  			// is eventually adopted as the canonical chain, it needs to be re-executed
  1830  			// for the missing state; in this special case (skipped re-execution) we
  1831  			// would lose the empty receipt entry.
  1832  			if len(block.Transactions()) == 0 {
  1833  				rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
  1834  			} else {
  1835  				log.Error("Please file an issue, skip known block execution without receipt",
  1836  					"hash", block.Hash(), "number", block.NumberU64())
  1837  			}
  1838  			if err := bc.writeKnownBlock(block); err != nil {
  1839  				return it.index, err
  1840  			}
  1841  			stats.processed++
  1842  
  1843  			// We can assume that logs are empty here, since the only way for consecutive
  1844  			// Clique blocks to have the same state is if there are no transactions.
  1845  			lastCanon = block
  1846  			continue
  1847  		}
  1848  
  1849  		// Retrieve the parent block and its state to execute on top of
  1850  		start := time.Now()
  1851  		parent := it.previous()
  1852  		if parent == nil {
  1853  			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1854  		}
  1855  
  1856  		// If we have a followup block, run that against the current state to pre-cache
  1857  		// transactions and probabilistically some of the account/storage trie nodes.
  1858  		var followupInterrupt uint32
  1859  		if !bc.cacheConfig.TrieCleanNoPrefetch {
  1860  			if followup, err := it.peek(); followup != nil && err == nil {
  1861  				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
  1862  
  1863  				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
  1864  					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
  1865  
  1866  					blockPrefetchExecuteTimer.Update(time.Since(start))
  1867  					if atomic.LoadUint32(interrupt) == 1 {
  1868  						blockPrefetchInterruptMeter.Mark(1)
  1869  					}
  1870  				}(time.Now(), followup, throwaway, &followupInterrupt)
  1871  			}
  1872  		}
  1873  
  1874  		// Process block using the parent state as reference point
  1875  		substart := time.Now()
  1876  		receipts, logs, usedGas, statedb, err := bc.ProcessBlock(block, parent)
  1877  		activeState = statedb
  1878  		if err != nil {
  1879  			bc.reportBlock(block, receipts, err)
  1880  			atomic.StoreUint32(&followupInterrupt, 1)
  1881  			return it.index, err
  1882  		}
  1883  		// BOR state sync feed related changes
  1884  		for _, data := range bc.stateSyncData {
  1885  			bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
  1886  		}
  1887  		// BOR
  1888  
  1889  		// Update the metrics touched during block processing
  1890  		accountReadTimer.Update(statedb.AccountReads)                 // Account reads are complete, we can mark them
  1891  		storageReadTimer.Update(statedb.StorageReads)                 // Storage reads are complete, we can mark them
  1892  		accountUpdateTimer.Update(statedb.AccountUpdates)             // Account updates are complete, we can mark them
  1893  		storageUpdateTimer.Update(statedb.StorageUpdates)             // Storage updates are complete, we can mark them
  1894  		snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
  1895  		snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
  1896  		triehash := statedb.AccountHashes + statedb.StorageHashes     // Save to not double count in validation
  1897  		trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
  1898  		trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
  1899  
  1900  		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
  1901  
  1902  		// Validate the state using the default validator
  1903  		substart = time.Now()
  1904  		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1905  			bc.reportBlock(block, receipts, err)
  1906  			atomic.StoreUint32(&followupInterrupt, 1)
  1907  			return it.index, err
  1908  		}
  1909  		proctime := time.Since(start)
  1910  
  1911  		// Update the metrics touched during block validation
  1912  		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
  1913  		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
  1914  		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
  1915  
  1916  		// Write the block to the chain and get the status.
  1917  		substart = time.Now()
  1918  		var status WriteStatus
  1919  		if !setHead {
  1920  			// Don't set the head, only insert the block
  1921  			_, err = bc.writeBlockWithState(block, receipts, logs, statedb)
  1922  		} else {
  1923  			status, err = bc.writeBlockAndSetHead(context.Background(), block, receipts, logs, statedb, false)
  1924  		}
  1925  		atomic.StoreUint32(&followupInterrupt, 1)
  1926  		if err != nil {
  1927  			return it.index, err
  1928  		}
  1929  		// Update the metrics touched during block commit
  1930  		accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
  1931  		storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
  1932  		snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
  1933  
  1934  		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
  1935  		blockInsertTimer.UpdateSince(start)
  1936  
  1937  		// Report the import stats before returning the various results
  1938  		stats.processed++
  1939  		stats.usedGas += usedGas
  1940  
  1941  		dirty, _ := bc.stateCache.TrieDB().Size()
  1942  		stats.report(chain, it.index, dirty, setHead)
  1943  
  1944  		if !setHead {
  1945  			return it.index, nil // Direct block insertion of a single block
  1946  		}
  1947  
  1948  		// BOR
  1949  		if status == CanonStatTy {
  1950  			canonAccum = append(canonAccum, block)
  1951  		} else {
  1952  			emitAccum()
  1953  		}
  1954  		// BOR
  1955  
  1956  		switch status {
  1957  		case CanonStatTy:
  1958  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1959  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1960  				"elapsed", common.PrettyDuration(time.Since(start)),
  1961  				"root", block.Root())
  1962  
  1963  			lastCanon = block
  1964  
  1965  			// Only count canonical blocks for GC processing time
  1966  			bc.gcproc += proctime
  1967  
  1968  		case SideStatTy:
  1969  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1970  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1971  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1972  				"root", block.Root())
  1973  
  1974  		default:
  1975  			// This in theory is impossible, but let's be nice to our future selves and
  1976  			// leave a log, instead of trying to track down block imports that don't emit logs.
  1977  			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
  1978  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1979  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1980  				"root", block.Root())
  1981  		}
  1982  	}
  1983  
  1984  	// BOR
  1985  	emitAccum()
  1986  	// BOR
  1987  
  1988  	// Any blocks remaining here? The only ones we care about are the future ones
  1989  	if block != nil && errors.Is(err, consensus.ErrFutureBlock) {
  1990  		if err := bc.addFutureBlock(block); err != nil {
  1991  			return it.index, err
  1992  		}
  1993  		block, err = it.next()
  1994  
  1995  		for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() {
  1996  			if err := bc.addFutureBlock(block); err != nil {
  1997  				return it.index, err
  1998  			}
  1999  			stats.queued++
  2000  		}
  2001  	}
  2002  	stats.ignored += it.remaining()
  2003  
  2004  	return it.index, err
  2005  }
  2006  
  2007  // insertSideChain is called when an import batch hits upon a pruned ancestor
  2008  // error, which happens when a sidechain with a sufficiently old fork-block is
  2009  // found.
  2010  //
  2011  // The method writes all (header-and-body-valid) blocks to disk, then tries to
  2012  // switch over to the new chain if the TD exceeds that of the current chain.
  2013  // insertSideChain is only used pre-merge.
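        //
        // For example (illustrative): if blocks 100..110 arrive on a fork whose
        // common ancestor state was pruned, their headers and bodies are persisted
        // without state, and only if the fork wins the TD comparison are the
        // pruned ancestors re-imported to regenerate the state.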
  2014  func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
  2015  	var (
  2016  		externTd  *big.Int
  2017  		lastBlock = block
  2018  		current   = bc.CurrentBlock()
  2019  		headers   []*types.Header
  2020  	)
  2021  	// The first sidechain block error is already verified to be ErrPrunedAncestor.
  2022  	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
  2023  	// ones. Any other error means that the block is invalid, and should not be written
  2024  	// to disk.
  2025  	err := consensus.ErrPrunedAncestor
  2026  	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
  2027  		headers = append(headers, block.Header())
  2028  		// Check the canonical state root for that number
  2029  		if number := block.NumberU64(); current.NumberU64() >= number {
  2030  			canonical := bc.GetBlockByNumber(number)
  2031  			if canonical != nil && canonical.Hash() == block.Hash() {
  2032  				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
  2033  
  2034  				// Collect the TD of the block. Since we know it's a canon one,
  2035  				// we can get it directly, and not (like further below) use
  2036  				// the parent and then add the block on top
  2037  				externTd = bc.GetTd(block.Hash(), block.NumberU64())
  2038  				continue
  2039  			}
  2040  			if canonical != nil && canonical.Root() == block.Root() {
  2041  				// This is most likely a shadow-state attack. When a fork is imported into the
  2042  				// database, and it eventually reaches a block height which is not pruned, we
  2043  				// find that the state already exists! This means that the sidechain block
  2044  				// refers to a state which already exists in our canon chain.
  2045  				//
  2046  				// If left unchecked, we would now proceed importing the blocks, without actually
  2047  				// having verified the state of the previous blocks.
  2048  				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  2049  
  2050  				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
  2051  				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
  2052  				// mechanism.
  2053  				return it.index, errors.New("sidechain ghost-state attack")
  2054  			}
  2055  		}
  2056  		if externTd == nil {
  2057  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  2058  		}
  2059  		externTd = new(big.Int).Add(externTd, block.Difficulty())
  2060  
  2061  		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  2062  			start := time.Now()
  2063  			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
  2064  				return it.index, err
  2065  			}
  2066  			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  2067  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  2068  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  2069  				"root", block.Root())
  2070  		}
  2071  		lastBlock = block
  2072  	}
  2073  	// At this point, we've written all sidechain blocks to the database. The loop
  2074  	// ended either on some other error or once all were processed. If there was
  2075  	// some other error, we can ignore the rest of those blocks.
  2076  	//
  2077  	// If the externTd was larger than our local TD, we now need to reimport the
  2078  	// previous blocks to regenerate the required state.
  2079  	reorg, err := bc.forker.ReorgNeeded(current.Header(), lastBlock.Header())
  2080  	if err != nil {
  2081  		return it.index, err
  2082  	}
  2083  
  2084  	isValid, err := bc.forker.ValidateReorg(current.Header(), headers)
  2085  	if err != nil {
  2086  		return it.index, err
  2087  	}
  2088  
  2089  	if !reorg || !isValid {
  2090  		localTd := bc.GetTd(current.Hash(), current.NumberU64())
  2091  		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  2092  		return it.index, err
  2093  	}
  2094  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2095  	var (
  2096  		hashes  []common.Hash
  2097  		numbers []uint64
  2098  	)
  2099  	parent := it.previous()
  2100  	for parent != nil && !bc.HasState(parent.Root) {
  2101  		hashes = append(hashes, parent.Hash())
  2102  		numbers = append(numbers, parent.Number.Uint64())
  2103  
  2104  		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  2105  	}
  2106  	if parent == nil {
  2107  		return it.index, errors.New("missing parent")
  2108  	}
  2109  	// Import all the pruned blocks to make the state available
  2110  	var (
  2111  		blocks []*types.Block
  2112  		memory common.StorageSize
  2113  	)
  2114  	for i := len(hashes) - 1; i >= 0; i-- {
  2115  		// Append the next block to our batch
  2116  		block := bc.GetBlock(hashes[i], numbers[i])
  2117  
  2118  		blocks = append(blocks, block)
  2119  		memory += block.Size()
  2120  
  2121  		// If memory use grew too large, import and continue. Sadly we need to discard
  2122  		// all raised events and logs from notifications since we're too heavy on the
  2123  		// memory here.
  2124  		if len(blocks) >= 2048 || memory > 64*1024*1024 {
  2125  			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  2126  			if _, err := bc.insertChain(blocks, false, true); err != nil {
  2127  				return 0, err
  2128  			}
  2129  			blocks, memory = blocks[:0], 0
  2130  
  2131  			// If the chain is terminating, stop processing blocks
  2132  			if bc.insertStopped() {
  2133  				log.Debug("Abort during blocks processing")
  2134  				return 0, nil
  2135  			}
  2136  		}
  2137  	}
  2138  	if len(blocks) > 0 {
  2139  		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  2140  		return bc.insertChain(blocks, false, true)
  2141  	}
  2142  	return 0, nil
  2143  }
  2144  
  2145  // recoverAncestors finds the closest ancestor with available state and re-executes
  2146  // all the ancestor blocks since that one.
  2147  // recoverAncestors is only used post-merge.
  2148  func (bc *BlockChain) recoverAncestors(block *types.Block) error {
  2149  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  2150  	var (
  2151  		hashes  []common.Hash
  2152  		numbers []uint64
  2153  		parent  = block
  2154  	)
  2155  	for parent != nil && !bc.HasState(parent.Root()) {
  2156  		hashes = append(hashes, parent.Hash())
  2157  		numbers = append(numbers, parent.NumberU64())
  2158  		parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  2159  
  2160  		// If the chain is terminating, stop iteration
  2161  		if bc.insertStopped() {
  2162  			log.Debug("Abort during blocks iteration")
  2163  			return errInsertionInterrupted
  2164  		}
  2165  	}
  2166  	if parent == nil {
  2167  		return errors.New("missing parent")
  2168  	}
  2169  	// Import all the pruned blocks to make the state available
  2170  	for i := len(hashes) - 1; i >= 0; i-- {
  2171  		// If the chain is terminating, stop processing blocks
  2172  		if bc.insertStopped() {
  2173  			log.Debug("Abort during blocks processing")
  2174  			return errInsertionInterrupted
  2175  		}
  2176  		var b *types.Block
  2177  		if i == 0 {
  2178  			b = block
  2179  		} else {
  2180  			b = bc.GetBlock(hashes[i], numbers[i])
  2181  		}
  2182  		if _, err := bc.insertChain(types.Blocks{b}, false, false); err != nil {
  2183  			return err
  2184  		}
  2185  	}
  2186  	return nil
  2187  }
  2188  
  2189  // collectLogs collects the logs that were generated or removed during
  2190  // the processing of the block that corresponds with the given hash.
  2191  // These logs are later announced as deleted or reborn.
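        //
        // For example (illustrative): during a reorg, collectLogs(oldBlockHash, true)
        // returns the dropped block's logs with Removed set to true, ready to be
        // sent on the removed-logs feed.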
  2192  func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log {
  2193  	number := bc.hc.GetBlockNumber(hash)
  2194  	if number == nil {
  2195  		return []*types.Log{}
  2196  	}
  2197  	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
  2198  
  2199  	// Append bor receipt
  2200  	borReceipt := rawdb.ReadBorReceipt(bc.db, hash, *number, bc.chainConfig)
  2201  	if borReceipt != nil {
  2202  		receipts = append(receipts, borReceipt)
  2203  	}
  2204  
  2205  	var logs []*types.Log
  2206  	for _, receipt := range receipts {
  2207  		for _, log := range receipt.Logs {
  2208  			l := *log
  2209  			if removed {
  2210  				l.Removed = true
  2211  			}
  2212  			logs = append(logs, &l)
  2213  		}
  2214  	}
  2215  	return logs
  2216  }
  2217  
  2218  // mergeLogs returns a merged log slice with specified sort order.
  2219  func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log {
  2220  	var ret []*types.Log
  2221  	if reverse {
  2222  		for i := len(logs) - 1; i >= 0; i-- {
  2223  			ret = append(ret, logs[i]...)
  2224  		}
  2225  	} else {
  2226  		for i := 0; i < len(logs); i++ {
  2227  			ret = append(ret, logs[i]...)
  2228  		}
  2229  	}
  2230  	return ret
  2231  }
  2232  
  2233  // reorg takes two blocks, an old chain and a new chain, reconstructs the blocks,
  2234  // inserts them to be part of the new canonical chain, and accumulates potentially
  2235  // missing transactions, posting events about them.
  2236  // Note the new head block won't be processed here; callers need to handle it
  2237  // externally.
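        //
        // For example (illustrative): reorging from A<-B<-C to A<-B'<-C'<-D'
        // finds common ancestor A, collects oldChain=[C, B] and newChain=[D', C', B'],
        // and re-writes B' and C' as canonical; D', the new head, is left to the
        // caller.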
  2238  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  2239  	var (
  2240  		newChain    types.Blocks
  2241  		oldChain    types.Blocks
  2242  		commonBlock *types.Block
  2243  
  2244  		deletedTxs types.Transactions
  2245  		addedTxs   types.Transactions
  2246  
  2247  		deletedLogs [][]*types.Log
  2248  		rebirthLogs [][]*types.Log
  2249  	)
  2250  	// Reduce the longer chain to the same number as the shorter one
  2251  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  2252  		// Old chain is longer, gather all transactions and logs as deleted ones
  2253  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  2254  			oldChain = append(oldChain, oldBlock)
  2255  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  2256  
  2257  			// Collect deleted logs for notification
  2258  			logs := bc.collectLogs(oldBlock.Hash(), true)
  2259  			if len(logs) > 0 {
  2260  				deletedLogs = append(deletedLogs, logs)
  2261  			}
  2262  		}
  2263  	} else {
  2264  		// New chain is longer, stash all blocks away for subsequent insertion
  2265  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  2266  			newChain = append(newChain, newBlock)
  2267  		}
  2268  	}
  2269  	if oldBlock == nil {
  2270  		return fmt.Errorf("invalid old chain")
  2271  	}
  2272  	if newBlock == nil {
  2273  		return fmt.Errorf("invalid new chain")
  2274  	}
  2275  	// Both sides of the reorg are at the same number, reduce both until the common
  2276  	// ancestor is found
  2277  	for {
  2278  		// If the common ancestor was found, bail out
  2279  		if oldBlock.Hash() == newBlock.Hash() {
  2280  			commonBlock = oldBlock
  2281  			break
  2282  		}
  2283  		// Remove an old block as well as stash away a new block
  2284  		oldChain = append(oldChain, oldBlock)
  2285  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  2286  
  2287  		// Collect deleted logs for notification
  2288  		logs := bc.collectLogs(oldBlock.Hash(), true)
  2289  		if len(logs) > 0 {
  2290  			deletedLogs = append(deletedLogs, logs)
  2291  		}
  2292  		newChain = append(newChain, newBlock)
  2293  
  2294  		// Step back with both chains
  2295  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  2296  		if oldBlock == nil {
  2297  			return fmt.Errorf("invalid old chain")
  2298  		}
  2299  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  2300  		if newBlock == nil {
  2301  			return fmt.Errorf("invalid new chain")
  2302  		}
  2303  	}
  2304  	// Ensure the user sees large reorgs
  2305  	if len(oldChain) > 0 && len(newChain) > 0 {
  2306  
  2307  		bc.chain2HeadFeed.Send(Chain2HeadEvent{
  2308  			Type:     Chain2HeadReorgEvent,
  2309  			NewChain: newChain,
  2310  			OldChain: oldChain,
  2311  		})
  2312  
  2313  		logFn := log.Info
  2314  		msg := "Chain reorg detected"
  2315  		if len(oldChain) > 63 {
  2316  			msg = "Large chain reorg detected"
  2317  			logFn = log.Warn
  2318  		}
  2319  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  2320  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  2321  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2322  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  2323  		blockReorgMeter.Mark(1)
  2324  	} else if len(newChain) > 0 {
  2325  		// A special case happens in the post-merge stage when the current head is
  2326  		// the ancestor of the new head while these two blocks are not consecutive.
  2327  		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].NumberU64(), "hash", newChain[0].Hash())
  2328  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2329  	} else {
  2330  		// len(newChain) == 0 && len(oldChain) > 0
  2331  		// rewind the canonical chain to a lower point.
  2332  
  2333  		home, err := os.UserHomeDir()
  2334  		if err != nil {
  2335  			log.Error("Impossible reorg : Unable to get user home dir", "Error", err)
  2336  		}
  2337  		outPath := filepath.Join(home, "impossible-reorgs", fmt.Sprintf("%v-impossibleReorg", time.Now().Format(time.RFC3339)))
  2338  
  2339  		// outPath is timestamped and will typically not exist yet; create it first, then export the conflicting chains for offline analysis.
  2340  		if _, err := os.Stat(outPath); errors.Is(err, os.ErrNotExist) {
  2341  			if err := os.MkdirAll(outPath, os.ModePerm); err != nil {
  2342  				log.Error("Impossible reorg : Unable to create dir", "Error", err)
  2343  			}
  2344  		}
  2345  
  2346  		err = ExportBlocks(oldChain, filepath.Join(outPath, "oldChain.gz"))
  2347  		if err != nil {
  2348  			log.Error("Impossible reorg : Unable to export oldChain", "Error", err)
  2349  		}
  2350  
  2351  		err = ExportBlocks([]*types.Block{oldBlock}, filepath.Join(outPath, "oldBlock.gz"))
  2352  		if err != nil {
  2353  			log.Error("Impossible reorg : Unable to export oldBlock", "Error", err)
  2354  		}
  2355  
  2356  		err = ExportBlocks([]*types.Block{newBlock}, filepath.Join(outPath, "newBlock.gz"))
  2357  		if err != nil {
  2358  			log.Error("Impossible reorg : Unable to export newBlock", "Error", err)
  2359  		}
  2360  
  2361  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
  2362  	}
  2363  	// Insert the new chain (except the head block) by walking the slice in
  2364  	// reverse, taking care of the proper incremental order.
  2365  	for i := len(newChain) - 1; i >= 1; i-- {
  2366  		// Insert the block in the canonical way, re-writing history
  2367  		bc.writeHeadBlock(newChain[i])
  2368  
  2369  		// Collect reborn logs due to chain reorg
  2370  		logs := bc.collectLogs(newChain[i].Hash(), false)
  2371  		if len(logs) > 0 {
  2372  			rebirthLogs = append(rebirthLogs, logs)
  2373  		}
  2374  		// Collect the new added transactions.
  2375  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  2376  	}
  2377  	// Delete useless indexes right now, which include the non-canonical
  2378  	// transaction indexes and canonical chain indexes which are above the head.
  2379  	indexesBatch := bc.db.NewBatch()
  2380  	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
  2381  		rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
  2382  	}
  2383  	// Delete any canonical number assignments above the new head
  2384  	number := bc.CurrentBlock().NumberU64()
  2385  	for i := number + 1; ; i++ {
  2386  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  2387  		if hash == (common.Hash{}) {
  2388  			break
  2389  		}
  2390  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  2391  	}
  2392  	if err := indexesBatch.Write(); err != nil {
  2393  		log.Crit("Failed to delete useless indexes", "err", err)
  2394  	}
  2395  	// If any logs need to be fired, do it now. In theory we could avoid creating
  2396  	// this goroutine if there are no events to fire, but realistically that only
  2397  	// ever happens if we're reorging empty blocks, which will only happen on idle
  2398  	// networks where performance is not an issue either way.
  2399  	if len(deletedLogs) > 0 {
  2400  		bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
  2401  	}
  2402  	if len(rebirthLogs) > 0 {
  2403  		bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
  2404  	}
  2405  	if len(oldChain) > 0 {
  2406  		for i := len(oldChain) - 1; i >= 0; i-- {
  2407  			bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  2408  		}
  2409  	}
  2410  	return nil
  2411  }
  2412  
  2413  // ExportBlocks exports blocks into the specified file, truncating any data
  2414  // already present in the file.
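        //
        // Example (illustrative file name; a ".gz" suffix enables gzip compression):
        //
        //	err := ExportBlocks(blocks, "/tmp/oldChain.gz")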
  2415  func ExportBlocks(blocks []*types.Block, fn string) error {
  2416  	log.Info("Exporting blockchain", "file", fn)
  2417  
  2418  	// Open the file handle and potentially wrap with a gzip stream
  2419  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
  2420  	if err != nil {
  2421  		return err
  2422  	}
  2423  	defer fh.Close()
  2424  
  2425  	var writer io.Writer = fh
  2426  	if strings.HasSuffix(fn, ".gz") {
  2427  		writer = gzip.NewWriter(writer)
  2428  		defer writer.(*gzip.Writer).Close()
  2429  	}
  2430  	// Iterate over the blocks and export them
  2431  	if err := ExportN(writer, blocks); err != nil {
  2432  		return err
  2433  	}
  2434  
  2435  	log.Info("Exported blocks", "file", fn)
  2436  
  2437  	return nil
  2438  }
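
        // A hedged usage sketch for ExportBlocks (names and paths illustrative
        // only); the debug path in reorg above calls it per block, but any
        // slice of blocks works:
        //
        //	blocks := []*types.Block{blockA, blockB} // hypothetical blocks
        //	if err := ExportBlocks(blocks, "/tmp/blocks.gz"); err != nil {
        //		log.Error("Block export failed", "err", err)
        //	}
        //
        // A ".gz" suffix on the file name transparently enables gzip compression.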
  2439  
  2440  // ExportN writes the given blocks to the writer w, RLP-encoded one after another.
  2441  func ExportN(w io.Writer, blocks []*types.Block) error {
  2442  	for _, block := range blocks {
  2443  		if err := block.EncodeRLP(w); err != nil {
  2444  			return err
  2445  		}
  2446  	}
  2447  
  2448  	return nil
  2449  }
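
        // A hedged sketch of reading an export back (the mirror image of
        // ExportN), assuming the input is a sequence of consecutively
        // RLP-encoded blocks as written above; rlp.NewStream is from
        // github.com/ethereum/go-ethereum/rlp and reader is a hypothetical
        // io.Reader over the exported data:
        //
        //	stream := rlp.NewStream(reader, 0)
        //	for {
        //		block := new(types.Block)
        //		if err := stream.Decode(block); err == io.EOF {
        //			break
        //		} else if err != nil {
        //			return err
        //		}
        //		// ...process block...
        //	}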
  2450  
  2451  // InsertBlockWithoutSetHead executes the block, runs the necessary verification
  2452  // upon it and then persists the block and the associated state into the database.
  2453  // The key difference from InsertChain is that it won't update the canonical
  2454  // chain; it relies on an additional SetChainHead call to finalize the entire
  2455  // procedure.
  2456  func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
  2457  	if !bc.chainmu.TryLock() {
  2458  		return errChainStopped
  2459  	}
  2460  	defer bc.chainmu.Unlock()
  2461  
  2462  	_, err := bc.insertChain(types.Blocks{block}, true, false)
  2463  	return err
  2464  }
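
        // A hedged usage sketch: callers typically pair the two calls, first
        // persisting the block and its state, then promoting it to head once
        // it has been chosen as canonical:
        //
        //	if err := bc.InsertBlockWithoutSetHead(block); err != nil {
        //		return err
        //	}
        //	// ...later, once the block is selected as the canonical head:
        //	if err := bc.SetChainHead(block); err != nil {
        //		return err
        //	}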
  2465  
  2466  // SetChainHead sets the specified block as the new head block of the chain,
  2467  // running a reorg if necessary. It's possible that the relevant state of the
  2468  // head is missing after the reorg; this can be fixed by inserting a new block,
  2469  // which triggers re-execution.
  2470  func (bc *BlockChain) SetChainHead(head *types.Block) error {
  2471  	if !bc.chainmu.TryLock() {
  2472  		return errChainStopped
  2473  	}
  2474  	defer bc.chainmu.Unlock()
  2475  
  2476  	// Run the reorg if necessary and set the given block as new head.
  2477  	start := time.Now()
  2478  	if head.ParentHash() != bc.CurrentBlock().Hash() {
  2479  		if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
  2480  			return err
  2481  		}
  2482  	}
  2483  	bc.writeHeadBlock(head)
  2484  
  2485  	// Emit events
  2486  	logs := bc.collectLogs(head.Hash(), false)
  2487  	bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
  2488  	if len(logs) > 0 {
  2489  		bc.logsFeed.Send(logs)
  2490  	}
  2491  	bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
  2492  
  2493  	context := []interface{}{
  2494  		"number", head.Number(),
  2495  		"hash", head.Hash(),
  2496  		"root", head.Root(),
  2497  		"elapsed", time.Since(start),
  2498  	}
  2499  	if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
  2500  		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
  2501  	}
  2502  	log.Info("Chain head was updated", context...)
  2503  	return nil
  2504  }
  2505  
  2506  func (bc *BlockChain) updateFutureBlocks() {
  2507  	futureTimer := time.NewTicker(5 * time.Second)
  2508  	defer futureTimer.Stop()
  2509  	defer bc.wg.Done()
  2510  	for {
  2511  		select {
  2512  		case <-futureTimer.C:
  2513  			bc.procFutureBlocks()
  2514  		case <-bc.quit:
  2515  			return
  2516  		}
  2517  	}
  2518  }
  2519  
  2520  // skipBlock returns 'true' if the block being imported can be skipped over,
  2521  // meaning that it does not need to be processed and can be considered already fully 'done'.
  2522  func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
  2523  	// We can only ever bypass processing if the only error returned by the validator
  2524  	// is ErrKnownBlock, which means all checks passed, but we already have the block
  2525  	// and state.
  2526  	if !errors.Is(err, ErrKnownBlock) {
  2527  		return false
  2528  	}
  2529  	// If we're not using snapshots, we can skip this, since we have both block
  2530  	// and (trie-) state
  2531  	if bc.snaps == nil {
  2532  		return true
  2533  	}
  2534  	var (
  2535  		header     = it.current() // header can't be nil
  2536  		parentRoot common.Hash
  2537  	)
  2538  	// If we also have the snapshot-state, we can skip the processing.
  2539  	if bc.snaps.Snapshot(header.Root) != nil {
  2540  		return true
  2541  	}
  2542  	// In this case, we have the trie-state but not the snapshot-state. If the
  2543  	// parent snapshot-state exists, we need to process this block so as not to
  2544  	// leave a gap in the snapshot layers.
  2545  	// Resolve the parent block.
  2546  	if parent := it.previous(); parent != nil {
  2547  		parentRoot = parent.Root
  2548  	} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
  2549  		parentRoot = parent.Root
  2550  	}
  2551  	if parentRoot == (common.Hash{}) {
  2552  		return false // Theoretically impossible case
  2553  	}
  2554  	// If the parent is also missing its snapshot, we can skip this block; otherwise, process it.
  2555  	if bc.snaps.Snapshot(parentRoot) == nil {
  2556  		return true
  2557  	}
  2558  	return false
  2559  }
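
        // Informal decision table for skipBlock above, assuming the validator
        // returned ErrKnownBlock:
        //
        //	snapshots disabled                    -> skip (block and trie-state known)
        //	snapshot for header.Root exists       -> skip (nothing left to produce)
        //	parent snapshot exists, ours missing  -> process (avoid a snapshot-layer gap)
        //	neither snapshot exists               -> skip (the gap already exists upstream)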
  2560  
  2561  // maintainTxIndex is responsible for the construction and deletion of the
  2562  // transaction index.
  2563  //
  2564  // The user can use the flag `txlookuplimit` to specify a "recentness" block,
  2565  // below which ancient tx indices get deleted. If `txlookuplimit` is 0, all
  2566  // tx indices will be preserved.
  2567  //
  2568  // The user can adjust the txlookuplimit value at each launch after fast
  2569  // sync; Geth will automatically construct the missing indices and delete
  2570  // the extra ones.
  2571  func (bc *BlockChain) maintainTxIndex(ancients uint64) {
  2572  	defer bc.wg.Done()
  2573  
  2574  	// Before starting the actual maintenance, we need to handle a special case:
  2575  	// the user might have initialized Geth with an external ancient database. If
  2576  	// so, we need to reindex all necessary transactions before starting to
  2577  	// process any pruning requests.
  2578  	if ancients > 0 {
  2579  		var from = uint64(0)
  2580  		if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit {
  2581  			from = ancients - bc.txLookupLimit
  2582  		}
  2583  		rawdb.IndexTransactions(bc.db, from, ancients, bc.quit)
  2584  	}
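        	// Worked example (hedged, numbers illustrative): with ancients =
        	// 1_000_000 and txLookupLimit = 128, from = 999_872 and the call
        	// above indexes only the 128 most recent ancient blocks, i.e. the
        	// range [999_872, 1_000_000).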
  2585  
  2586  	// indexBlocks reindexes or unindexes transactions depending on user configuration
  2587  	indexBlocks := func(tail *uint64, head uint64, done chan struct{}) {
  2588  		defer func() { done <- struct{}{} }()
  2589  
  2590  		// If the user just upgraded Geth to a new version which supports transaction
  2591  		// index pruning, write the new tail and remove anything older.
  2592  		if tail == nil {
  2593  			if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
  2594  				// Nothing to delete, write the tail and return
  2595  				rawdb.WriteTxIndexTail(bc.db, 0)
  2596  			} else {
  2597  				// Prune all stale tx indices and record the tx index tail
  2598  				rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1, bc.quit)
  2599  			}
  2600  			return
  2601  		}
  2602  		// If a previous indexing existed, make sure that we fill in any missing entries
  2603  		if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
  2604  			if *tail > 0 {
  2605  				// This can happen when the chain is rewound to a historical point
  2606  				// even lower than the index tail; cap the indexing target to the
  2607  				// new head to avoid reading non-existent block bodies.
  2608  				end := *tail
  2609  				if end > head+1 {
  2610  					end = head + 1
  2611  				}
  2612  				rawdb.IndexTransactions(bc.db, 0, end, bc.quit)
  2613  			}
  2614  			return
  2615  		}
  2616  		// Update the transaction index to the new chain state
  2617  		if head-bc.txLookupLimit+1 < *tail {
  2618  			// Reindex a part of missing indices and rewind index tail to HEAD-limit
  2619  			rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit)
  2620  		} else {
  2621  			// Unindex a part of stale indices and forward index tail to HEAD-limit
  2622  			rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit)
  2623  		}
  2624  	}
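        	// Worked example (hedged, numbers illustrative): head = 10_000,
        	// txLookupLimit = 2_000 and *tail = 7_000 give a target tail of
        	// head-txLookupLimit+1 = 8_001 > *tail, so the else branch above
        	// unindexes [7_000, 8_001) and moves the tail forward to 8_001.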
  2625  
  2626  	// With any required reindexing done, start listening to chain events and move the index window.
  2627  	var (
  2628  		done   chan struct{}                  // Non-nil if background unindexing or reindexing routine is active.
  2629  		headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
  2630  	)
  2631  	sub := bc.SubscribeChainHeadEvent(headCh)
  2632  	if sub == nil {
  2633  		return
  2634  	}
  2635  	defer sub.Unsubscribe()
  2636  
  2637  	for {
  2638  		select {
  2639  		case head := <-headCh:
  2640  			if done == nil {
  2641  				done = make(chan struct{})
  2642  				go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
  2643  			}
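        			// Head events arriving while an indexing job is still running
        			// are deliberately dropped; the next event after the job
        			// completes picks up the then-latest head.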
  2644  		case <-done:
  2645  			done = nil
  2646  		case <-bc.quit:
  2647  			if done != nil {
  2648  				log.Info("Waiting for background transaction indexer to exit")
  2649  				<-done
  2650  			}
  2651  			return
  2652  		}
  2653  	}
  2654  }
  2655  
  2656  // reportBlock logs a bad block error.
  2657  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2658  	rawdb.WriteBadBlock(bc.db, block)
  2659  
  2660  	var receiptString string
  2661  	for i, receipt := range receipts {
  2662  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  2663  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2664  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2665  	}
  2666  	log.Error(fmt.Sprintf(`
  2667  ########## BAD BLOCK #########
  2668  Chain config: %v
  2669  
  2670  Number: %v
  2671  Hash: 0x%x
  2672  %v
  2673  
  2674  Error: %v
  2675  ##############################
  2676  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  2677  }
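
        // Hedged note: bad blocks persisted via rawdb.WriteBadBlock above are
        // kept in the database for later inspection; in upstream geth they back
        // the debug_getBadBlocks RPC.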
  2678  
  2679  // InsertHeaderChain attempts to insert the given header chain into the local
  2680  // chain, possibly creating a reorg. If an error is returned, it will return the
  2681  // index number of the failing header as well as an error describing what went wrong.
  2682  //
  2683  // The checkFreq parameter can be used to fine-tune whether nonce verification
  2684  // should be done or not. The reason the check is optional is that some of the
  2685  // header retrieval mechanisms already need to verify nonces, and nonces can be
  2686  // verified sparsely, without needing to check each one.
  2687  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  2688  	if len(chain) == 0 {
  2689  		return 0, nil
  2690  	}
  2691  	start := time.Now()
  2692  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  2693  		return i, err
  2694  	}
  2695  
  2696  	if !bc.chainmu.TryLock() {
  2697  		return 0, errChainStopped
  2698  	}
  2699  	defer bc.chainmu.Unlock()
  2700  	_, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
  2701  	return 0, err
  2702  }
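
        // A hedged usage sketch for InsertHeaderChain; header-sync callers
        // insert batches and spot-check seals via checkFreq (value illustrative):
        //
        //	if failed, err := bc.InsertHeaderChain(headers, 100); err != nil {
        //		log.Warn("Header import failed", "index", failed, "err", err)
        //	}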