github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/core/blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	lru "github.com/hashicorp/golang-lru"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/mclock"
	"github.com/scroll-tech/go-ethereum/common/prque"
	"github.com/scroll-tech/go-ethereum/consensus"
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/core/state"
	"github.com/scroll-tech/go-ethereum/core/state/snapshot"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/core/vm"
	"github.com/scroll-tech/go-ethereum/ethdb"
	"github.com/scroll-tech/go-ethereum/event"
	"github.com/scroll-tech/go-ethereum/internal/syncx"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/metrics"
	"github.com/scroll-tech/go-ethereum/params"
	"github.com/scroll-tech/go-ethereum/trie"
	"github.com/scroll-tech/go-ethereum/trie/zkproof"
)

var (
	headBlockGauge     = metrics.NewRegisteredGauge("chain/head/block", nil)
	headHeaderGauge    = metrics.NewRegisteredGauge("chain/head/header", nil)
	headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)

	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)

	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	blockReorgMeter         = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
	blockReorgAddMeter      = metrics.NewRegisteredMeter("chain/reorg/add", nil)
	blockReorgDropMeter     = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
	blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)

	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)

	errInsertionInterrupted = errors.New("insertion is interrupted")
	errChainStopped         = errors.New("blockchain is stopped")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	receiptsCacheLimit  = 32
	txLookupCacheLimit  = 1024
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	TriesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//   * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//   * the `Bloom` field of receipt is deleted
	//   * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//   The following incompatible database changes were added:
	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//   * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//     receipts' corresponding block
	// - Version 6
	//   The following incompatible database changes were added:
	//   * Transaction lookup information stores the corresponding block number instead of block hash
	// - Version 7
	//   The following incompatible database changes were added:
	//   * Use freezer as the ancient database to maintain all ancient data
	// - Version 8
	//   The following incompatible database changes were added:
	//   * New scheme for contract code in order to separate the codes and trie nodes
	BlockChainVersion uint64 = 8
)

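// The version check itself happens outside this file; the helper below is a
// minimal sketch of how a stored version would be compared against
// BlockChainVersion, assuming rawdb.ReadDatabaseVersion and
// rawdb.WriteDatabaseVersion behave as in upstream go-ethereum.
// checkBlockChainVersion is hypothetical and not part of this file.
func checkBlockChainVersion(db ethdb.Database) error {
	// A mismatching stored version signals an incompatible database layout,
	// which forces a resync from scratch.
	if stored := rawdb.ReadDatabaseVersion(db); stored != nil && *stored != BlockChainVersion {
		return fmt.Errorf("database version mismatch: have %d, want %d (resync required)", *stored, BlockChainVersion)
	}
	rawdb.WriteDatabaseVersion(db, BlockChainVersion)
	return nil
}
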
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanJournal    string        // Disk journal for saving clean cache entries.
	TrieCleanRejournal  time.Duration // Time interval to dump clean cache to disk periodically
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory
	Preimages           bool          // Whether to store preimage of trie key to the disk
	MPTWitness          int           // How to generate witness data for mpt circuit, 0: nothing, 1: natural

	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}

// defaultCacheConfig is the default caching configuration used if none is
// specified by the user (it is also used during testing).
var defaultCacheConfig = &CacheConfig{
	TrieCleanLimit: 256,
	TrieDirtyLimit: 256,
	TrieTimeLimit:  5 * time.Minute,
	SnapshotLimit:  256,
	SnapshotWait:   true,
	MPTWitness:     int(zkproof.MPTWitnessNothing),
}

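// As a point of comparison, here is a minimal sketch (not a preset defined in
// this fork) of an archive-style configuration: TrieDirtyDisabled turns off
// trie write caching and garbage collection so every state root is persisted.
// The variable name is hypothetical.
var archiveCacheConfig = &CacheConfig{
	TrieCleanLimit:    256,
	TrieDirtyDisabled: true, // persist every state, never garbage collect tries
	TrieTimeLimit:     5 * time.Minute,
	SnapshotLimit:     256,
	SnapshotWait:      true,
	Preimages:         true,
	MPTWitness:        int(zkproof.MPTWitnessNothing),
}
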
// BlockChain represents the canonical chain given a database with a genesis
// block. The BlockChain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the blockchain happens according to the set of rules
// defined by the two-stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the
// state is done in the second part of the Validator. Failures result in the
// import being aborted.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical chain, whereas GetBlockByNumber always represents
// the canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	snaps  *snapshot.Tree // Snapshot tree for fast trie leaf access
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	// txLookupLimit is the maximum number of blocks from head whose tx indices
	// are reserved:
	//  * 0:   means no limit and regenerate any missing indexes
	//  * N:   means N block limit [HEAD-N+1, HEAD] and delete extra indexes
	//  * nil: disable tx reindexer/deleter, but still index new blocks
	txLookupLimit uint64

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	// This mutex synchronizes chain write operations.
	// Readers don't need to take it, they can just read the database.
	chainmu *syncx.ClosableMutex

	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	wg            sync.WaitGroup // chain processing wait group for shutting down
	quit          chan struct{}  // shutdown signal, closed in Stop.
	running       int32          // 0 if chain is running, 1 when stopped
	procInterrupt int32          // interrupt signaler for block processing

	engine     consensus.Engine
	validator  Validator // Block and state validator interface
	prefetcher Prefetcher
	processor  Processor // Block transaction processor interface
	vmConfig   vm.Config

	shouldPreserve func(*types.Block) bool // Function used to determine whether to preserve the given block.
}

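// To make the distinction drawn in the type comment above concrete, here is a
// minimal sketch of a hypothetical helper (not part of this file) that checks
// whether a block fetched from any chain in the database sits on the canonical
// chain. It assumes the GetBlock and GetBlockByNumber accessors defined
// elsewhere in this package.
func isCanonical(bc *BlockChain, hash common.Hash, number uint64) bool {
	block := bc.GetBlock(hash, number) // may come from any side chain
	if block == nil {
		return false
	}
	canon := bc.GetBlockByNumber(number) // always resolves via the canonical number index
	return canon != nil && canon.Hash() == block.Hash()
}
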
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64, checkCircuitCapacity bool) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = defaultCacheConfig
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	txLookupCache, _ := lru.New(txLookupCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	// override snapshot setting
	if chainConfig.Scroll.ZktrieEnabled() && cacheConfig.SnapshotLimit > 0 {
		log.Warn("Snapshot has been disabled by zktrie")
		cacheConfig.SnapshotLimit = 0
	}

	if chainConfig.Scroll.FeeVaultEnabled() {
		log.Warn("Using fee vault address", "FeeVaultAddress", *chainConfig.Scroll.FeeVaultAddress)
	}

	bc := &BlockChain{
		chainConfig: chainConfig,
		cacheConfig: cacheConfig,
		db:          db,
		triegc:      prque.New(nil),
		stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
			Cache:     cacheConfig.TrieCleanLimit,
			Journal:   cacheConfig.TrieCleanJournal,
			Preimages: cacheConfig.Preimages,
			Zktrie:    chainConfig.Scroll.ZktrieEnabled(),
		}),
		quit:           make(chan struct{}),
		chainmu:        syncx.NewClosableMutex(),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		txLookupCache:  txLookupCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine, db, checkCircuitCapacity)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}

	// initialize L1 message index for genesis block
	rawdb.WriteFirstQueueIndexNotInL2Block(db, bc.genesisBlock.Hash(), 0)

	var nilBlock *types.Block
	bc.currentBlock.Store(nilBlock)
	bc.currentFastBlock.Store(nilBlock)

	// Initialize the chain with ancient data if it isn't empty.
	var txIndexBlock uint64

	if bc.empty() {
		rawdb.InitDatabaseFromFreezer(bc.db)
		// If ancient database is not empty, reconstruct all missing
		// indices in the background.
		frozen, _ := bc.db.Ancients()
		if frozen > 0 {
			txIndexBlock = frozen
		}
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}

	// Make sure the state associated with the block is available
	head := bc.CurrentBlock()
	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
		// Head state is missing, before the state recovery, find out the
		// disk layer point of the snapshot (if it's enabled). Make sure the
		// rewound point is lower than the disk layer.
		var diskRoot common.Hash
		if bc.cacheConfig.SnapshotLimit > 0 {
			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
		}
		if diskRoot != (common.Hash{}) {
			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)

			snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
			if err != nil {
				return nil, err
			}
			// Chain rewound, persist old snapshot number to indicate recovery procedure
			if snapDisk != 0 {
				rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
			}
		} else {
			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
			if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
				return nil, err
			}
		}
	}

	// Ensure that a previous crash in SetHead doesn't leave extra ancients
	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
		var (
			needRewind bool
			low        uint64
		)
		// The head full block may be rolled back to a very low height due to
		// blockchain repair. If the head full block is even lower than the ancient
		// chain, truncate the ancient store.
		fullBlock := bc.CurrentBlock()
		if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
			needRewind = true
			low = fullBlock.NumberU64()
		}
		// In fast sync, it may happen that ancient data has been written to the
		// ancient store, but the LastFastBlock has not been updated, truncate the
		// extra data here.
		fastBlock := bc.CurrentFastBlock()
		if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
			needRewind = true
			if fastBlock.NumberU64() < low || low == 0 {
				low = fastBlock.NumberU64()
			}
		}
		if needRewind {
			log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
			if err := bc.SetHead(low); err != nil {
				return nil, err
			}
		}
	}
	// The first thing the node will do is reconstruct the verification data for
	// the head block (ethash cache or clique voting snapshot). Might as well do
	// it in advance.
	bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
					return nil, err
				}
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}

	// Load any existing snapshot, regenerating it if loading failed
	if bc.cacheConfig.SnapshotLimit > 0 {
		// If the chain was rewound past the snapshot persistent layer (causing
		// a recovery block number to be persisted to disk), check if we're still
		// in recovery mode and in that case, don't invalidate the snapshot on a
		// head mismatch.
		var recover bool

		head := bc.CurrentBlock()
		if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
			log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
			recover = true
		}
		bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
	}

	// Start future block processor.
	bc.wg.Add(1)
	go bc.futureBlocksLoop()

	// Start tx indexer/unindexer.
	if txLookupLimit != nil {
		bc.txLookupLimit = *txLookupLimit

		bc.wg.Add(1)
		go bc.maintainTxIndex(txIndexBlock)
	}

	// If periodic cache journal is required, spin it up.
	if bc.cacheConfig.TrieCleanRejournal > 0 {
		if bc.cacheConfig.TrieCleanRejournal < time.Minute {
			log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
			bc.cacheConfig.TrieCleanRejournal = time.Minute
		}
		triedb := bc.stateCache.TrieDB()
		bc.wg.Add(1)
		go func() {
			defer bc.wg.Done()
			triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
		}()
	}
	return bc, nil
}

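// A minimal construction sketch, under the assumption that
// params.TestChainConfig in this fork carries a valid Scroll configuration:
// a nil cacheConfig selects defaultCacheConfig, a nil txLookupLimit disables
// the tx index maintainer, and checkCircuitCapacity is off for plain nodes.
// newTestBlockChain is a hypothetical helper, not part of this file.
func newTestBlockChain(db ethdb.Database, engine consensus.Engine) (*BlockChain, error) {
	return NewBlockChain(db, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil, false)
}
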
// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plug the ancient
// database into the node seamlessly.
func (bc *BlockChain) empty() bool {
	genesis := bc.genesisBlock.Hash()
	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
		if hash != genesis {
			return false
		}
	}
	return true
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)
	headBlockGauge.Update(int64(currentBlock.NumberU64()))

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	headFastBlockGauge.Update(int64(currentBlock.NumberU64()))

	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
			headFastBlockGauge.Update(int64(block.NumberU64()))
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
	if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
		log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
	}
	return nil
}

// SetHead rewinds the local chain to a new head. Depending on whether the node
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
	_, err := bc.setHeadBeyondRoot(head, common.Hash{}, false)
	return err
}

// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
// the persistent disk layer. Depending on whether the node was fast synced or full,
// and in which state, the method will try to delete minimal data from disk whilst
// retaining chain consistency.
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
	if !bc.chainmu.TryLock() {
		return 0, errChainStopped
	}
	defer bc.chainmu.Unlock()

	// Track the block number of the requested root hash
	var rootNumber uint64 // (no root == always 0)

	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
	// current freezer limit to start nuking if underflown
	pivot := rawdb.ReadLastPivotNumber(bc.db)
	frozen, _ := bc.db.Ancients()

	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
		// Rewind the blockchain, ensuring we don't end up with a stateless head
		// block. Note, depth equality is permitted to allow using SetHead as a
		// chain reparation mechanism without deleting any data!
		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			if newHeadBlock == nil {
				log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
				newHeadBlock = bc.genesisBlock
			} else {
				// Block exists, keep rewinding until we find one with state,
				// and keep rewinding further until we exceed the optional
				// threshold root hash
				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)

				for {
					// If a root threshold was requested but not yet crossed, check
					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
					}
					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
						log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
						if pivot == nil || newHeadBlock.NumberU64() > *pivot {
							parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
							if parent != nil {
								newHeadBlock = parent
								continue
							}
							log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
							newHeadBlock = bc.genesisBlock
						} else {
							log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
							newHeadBlock = bc.genesisBlock
						}
					}
					if beyondRoot || newHeadBlock.NumberU64() == 0 {
						log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
						break
					}
					log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
				}
			}
			rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update the in-memory markers directly.
			bc.currentBlock.Store(newHeadBlock)
			headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
		}
		// Rewind the fast block in a simpleton way to the target head
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
			// If the target block is missing, reset to the genesis state
			if newHeadFastBlock == nil {
				newHeadFastBlock = bc.genesisBlock
			}
			rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())

			// Degrade the chain markers if they are explicitly reverted.
			// In theory we should update all in-memory markers in the
			// last step, however the direction of SetHead is from high
			// to low, so it's safe to update the in-memory markers directly.
			bc.currentFastBlock.Store(newHeadFastBlock)
			headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
		}
		head := bc.CurrentBlock().NumberU64()

		// If SetHead underflowed the freezer threshold and the block processing
		// intent afterwards is full block importing, delete the chain segment
		// between the stateful-block and the sethead target.
		var wipe bool
		if head+1 < frozen {
			wipe = pivot == nil || head >= *pivot
		}
		return head, wipe // Only force wipe if full synced
	}
	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
		// Ignore the error here since light client won't hit this path
		frozen, _ := bc.db.Ancients()
		if num+1 <= frozen {
			// Truncate all related data (header, total difficulty, body, receipt
			// and canonical hash) from the ancient store.
			if err := bc.db.TruncateAncients(num); err != nil {
				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
			}
			// Remove the hash <-> number mapping from the active store.
			rawdb.DeleteHeaderNumber(db, hash)
		} else {
			// Remove the related body and receipts from the active store.
			// The header, total difficulty and canonical hash will be
			// removed in the hc.SetHead function.
			rawdb.DeleteBody(db, hash, num)
			rawdb.DeleteReceipts(db, hash, num)
		}
		// Todo(rjl493456442) txlookup, bloombits, etc
	}
	// If SetHead was only called as a chain reparation method, try to skip
	// touching the header chain altogether, unless the freezer is broken
	if repair {
		if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
			bc.hc.SetHead(target, updateFn, delFn)
		}
	} else {
		// Rewind the chain to the requested head and keep going backwards until a
		// block with a state is found or the fast sync pivot is passed
		log.Warn("Rewinding blockchain", "target", head)
		bc.hc.SetHead(head, updateFn, delFn)
	}
	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.txLookupCache.Purge()
	bc.futureBlocks.Purge()

	return rootNumber, bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrespective of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exist
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x..]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
		return err
	}

	// If everything checks out, manually set the head block.
	if !bc.chainmu.TryLock() {
		return errChainStopped
	}
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
	bc.chainmu.Unlock()

	// Destroy any existing state snapshot and regenerate it in the background,
	// also resuming the normal maintenance of any previously paused snapshot.
	if bc.snaps != nil {
		bc.snaps.Rebuild(block.Root())
	}
	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	if !bc.chainmu.TryLock() {
		return errChainStopped
	}
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	batch := bc.db.NewBatch()
	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
	rawdb.WriteBlock(batch, genesis)
	rawdb.WriteFirstQueueIndexNotInL2Block(batch, genesis.Hash(), 0)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write genesis block", "err", err)
	}
	bc.writeHeadBlock(genesis)

	// Last update all in-memory chain markers
	bc.genesisBlock = genesis
	bc.currentBlock.Store(bc.genesisBlock)
	headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)
	headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
	return nil
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	if !bc.chainmu.TryLock() {
		return errChainStopped
	}
	defer bc.chainmu.Unlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}

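// Usage sketch (hypothetical; upstream geth drives export similarly from its
// CLI): stream the whole chain as consecutive RLP-encoded blocks to a file.
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := bc.Export(f); err != nil {
//		return err
//	}
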
// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the chain mutex is held!
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	batch := bc.db.NewBatch()
	rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
	rawdb.WriteTxLookupEntriesByBlock(batch, block)
	rawdb.WriteHeadBlockHash(batch, block.Hash())

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		rawdb.WriteHeadHeaderHash(batch, block.Hash())
		rawdb.WriteHeadFastBlockHash(batch, block.Hash())
	}
	// Flush the whole batch into the disk, exit the node if failed
	if err := batch.Write(); err != nil {
		log.Crit("Failed to update chain indexes and markers", "err", err)
	}
	// Update all in-memory chain markers in the last step
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		bc.currentFastBlock.Store(block)
		headFastBlockGauge.Update(int64(block.NumberU64()))
	}
	bc.currentBlock.Store(block)
	headBlockGauge.Update(int64(block.NumberU64()))
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}

	// Unsubscribe all subscriptions registered from blockchain.
	bc.scope.Close()

	// Signal shutdown to all goroutines.
	close(bc.quit)
	bc.StopInsert()

	// Now wait for all chain modifications to end and persistent goroutines to exit.
	//
	// Note: Close waits for the mutex to become available, i.e. any running chain
	// modification will have exited when Close returns. Since we also called StopInsert,
	// the mutex should become available quickly. It cannot be taken again after Close has
	// returned.
	bc.chainmu.Close()
	bc.wg.Wait()

	// Ensure that the entirety of the state snapshot is journalled to disk.
	var snapBase common.Hash
	if bc.snaps != nil {
		var err error
		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
			log.Error("Failed to journal state snapshot", "err", err)
		}
	}

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true, nil); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		if snapBase != (common.Hash{}) {
			log.Info("Writing snapshot state to disk", "root", snapBase)
			if err := triedb.Commit(snapBase, true, nil); err != nil {
				log.Error("Failed to commit recent state trie", "err", err)
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	// Ensure all live cached entries are saved to disk, so that we can skip
	// the cache warmup when the node restarts.
	if bc.cacheConfig.TrieCleanJournal != "" {
		triedb := bc.stateCache.TrieDB()
		triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
	}
	log.Info("Blockchain stopped")
}

// StopInsert interrupts all insertion methods, causing them to return
// errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
// calling this method.
func (bc *BlockChain) StopInsert() {
	atomic.StoreInt32(&bc.procInterrupt, 1)
}

// insertStopped returns true after StopInsert has been called.
func (bc *BlockChain) insertStopped() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

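// The interrupt pattern used by the insertion paths in this file (a sketch,
// not a verbatim call site): check insertStopped between units of work and
// bail out with errInsertionInterrupted.
//
//	for _, block := range blocks {
//		if bc.insertStopped() {
//			return errInsertionInterrupted
//		}
//		// ... process block ...
//	}
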
func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		sort.Slice(blocks, func(i, j int) bool {
			return blocks[i].NumberU64() < blocks[j].NumberU64()
		})
		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status returned when writing a block to the chain.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

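// How callers typically interpret the status (a sketch; the actual event
// emission lives in the insertion code further down the file):
//
//	switch status {
//	case CanonStatTy:
//		// block joined the canonical chain; emit chain/head events
//	case SideStatTy:
//		// block was accepted on a side fork; emit a side-chain event
//	case NonStatTy:
//		// block was not written
//	}
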
// numberHash is just a container for a number and a hash, to represent a block
type numberHash struct {
	number uint64
	hash   common.Hash
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
	// We don't require the chainMu here since we want to maximize the
	// concurrency of header insertion and receipt insertion.
	bc.wg.Add(1)
	defer bc.wg.Done()

	var (
		ancientBlocks, liveBlocks     types.Blocks
		ancientReceipts, liveReceipts []types.Receipts
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 0; i < len(blockChain); i++ {
		if i != 0 {
			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(),
					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
			}
		}
		if blockChain[i].NumberU64() <= ancientLimit {
			ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i])
		} else {
			liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		size  = int64(0)
	)

	// updateHead updates the head fast sync block if the inserted blocks are better
	// and returns an indicator whether the inserted blocks are canonical.
	updateHead := func(head *types.Block) bool {
		if !bc.chainmu.TryLock() {
			return false
		}
		defer bc.chainmu.Unlock()

		// Rewind may have occurred, skip in that case.
		if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
			currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64())
			if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
				rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
				bc.currentFastBlock.Store(head)
				headFastBlockGauge.Update(int64(head.NumberU64()))
				return true
			}
		}
		return false
	}

	// writeAncient writes the blockchain and corresponding receipt chain into the ancient store.
	//
	// This function only accepts canonical chain data. All side chains will be
	// reverted eventually.
	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
		first := blockChain[0]
		last := blockChain[len(blockChain)-1]

		// Ensure genesis is in ancients.
		if first.NumberU64() == 1 {
			if frozen, _ := bc.db.Ancients(); frozen == 0 {
				b := bc.genesisBlock
				td := bc.genesisBlock.Difficulty()
				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td)
				size += writeSize
				if err != nil {
					log.Error("Error writing genesis to ancients", "err", err)
					return 0, err
				}
				log.Info("Wrote genesis to ancients")
			}
		}
		// Before writing the blocks to the ancients, we need to ensure that
		// they correspond to what the headerchain 'expects'.
		// We only check the last block/header, since it's a contiguous chain.
		if !bc.HasHeader(last.Hash(), last.NumberU64()) {
			return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
		}

		// Write all chain data to ancients.
		td := bc.GetTd(first.Hash(), first.NumberU64())
		writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
		size += writeSize
		if err != nil {
			log.Error("Error importing chain data to ancients", "err", err)
			return 0, err
		}

		// Write tx indices if any of these conditions is satisfied:
		// * the user wants all tx indices to be reserved (txlookuplimit = 0)
		// * all ancient tx indices need to be reserved (txlookuplimit is even higher than ancientlimit)
		// * the block number is large enough to be regarded as a recent block
		// It means blocks below ancientLimit-txlookupLimit won't be indexed.
		//
		// But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
		// an external ancient database, during the setup, blockchain will start
		// a background routine to re-index all indices in the range
		// [ancients - txlookupLimit, ancients). In this case, all tx indices of
		// newly imported blocks should be generated.
		var batch = bc.db.NewBatch()
		for _, block := range blockChain {
			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
				rawdb.WriteTxLookupEntriesByBlock(batch, block)
			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
				rawdb.WriteTxLookupEntriesByBlock(batch, block)
			}
			stats.processed++
		}

		// Flush all tx-lookup index data.
		size += int64(batch.ValueSize())
		if err := batch.Write(); err != nil {
			// The tx index data could not be written.
			// Roll back the ancient store update.
			fastBlock := bc.CurrentFastBlock().NumberU64()
			if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
				log.Error("Can't truncate ancient store after failed insert", "err", err)
			}
			return 0, err
		}

		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
		if err := bc.db.Sync(); err != nil {
			return 0, err
		}

		// Update the current fast block because all block data is now present in DB.
		previousFastBlock := bc.CurrentFastBlock().NumberU64()
		if !updateHead(blockChain[len(blockChain)-1]) {
			// We end up here if the header chain has reorg'ed, and the blocks/receipts
			// don't match the canonical chain.
			if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
				log.Error("Can't truncate ancient store after failed insert", "err", err)
			}
			return 0, errSideChainReceipts
		}

		// Delete block data from the main database.
		batch.Reset()
		canonHashes := make(map[common.Hash]struct{})
		for _, block := range blockChain {
			canonHashes[block.Hash()] = struct{}{}
			if block.NumberU64() == 0 {
				continue
			}
			rawdb.DeleteCanonicalHash(batch, block.NumberU64())
			rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
		}
		// Delete side chain hash-to-number mappings.
		for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
			if _, canon := canonHashes[nh.Hash]; !canon {
				rawdb.DeleteHeader(batch, nh.Hash, nh.Number)
			}
		}
		if err := batch.Write(); err != nil {
			return 0, err
		}
		return 0, nil
	}

	// writeLive writes the blockchain and corresponding receipt chain into the active store.
	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
		skipPresenceCheck := false
		batch := bc.db.NewBatch()
		for i, block := range blockChain {
			// Short circuit insertion if shutting down or processing failed
			if bc.insertStopped() {
				return 0, errInsertionInterrupted
			}
			// Short circuit if the owner header is unknown
			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
			}
			if !skipPresenceCheck {
				// Ignore if the entire data is already known
				if bc.HasBlock(block.Hash(), block.NumberU64()) {
					stats.ignored++
					continue
				} else {
					// If block N is not present, neither are the later blocks.
					// This should be true, but if we are mistaken, the shortcut
					// here will only cause overwriting of some existing data
					skipPresenceCheck = true
				}
			}
			// Write all the data out into the database
			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
			rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed

			// Write everything that belongs to the blocks into the database, so that
			// we can ensure all components of the body are complete (body, receipts,
			// tx indexes)
			if batch.ValueSize() >= ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return 0, err
				}
				size += int64(batch.ValueSize())
				batch.Reset()
			}
			stats.processed++
		}
		// Write everything that belongs to the blocks into the database, so that
		// we can ensure all components of the body are complete (body, receipts,
		// tx indexes)
		if batch.ValueSize() > 0 {
			size += int64(batch.ValueSize())
			if err := batch.Write(); err != nil {
				return 0, err
			}
		}
		updateHead(blockChain[len(blockChain)-1])
		return 0, nil
	}

	// Write downloaded chain data and corresponding receipt chain data
	if len(ancientBlocks) > 0 {
		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}
	// Write the tx index tail (block number from where we index) before writing any live blocks
	if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
		// The tx index tail can only be one of the following two options:
		// * 0: all ancient blocks have been indexed
		// * ancient-limit: the indices of blocks before ancient-limit are ignored
		if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
				rawdb.WriteTxIndexTail(bc.db, 0)
			} else {
				rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
			}
		}
	}
	if len(liveBlocks) > 0 {
		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
			if err == errInsertionInterrupted {
				return 0, nil
			}
			return n, err
		}
	}

	head := blockChain[len(blockChain)-1]
	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
		"size", common.StorageSize(size),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

var lastWrite uint64

  1172  // writeBlockWithoutState writes only the block and its metadata to the database,
  1173  // but does not write any state. This is used to construct competing side forks
  1174  // up to the point where they exceed the canonical total difficulty.
  1175  func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
  1176  	if bc.insertStopped() {
  1177  		return errInsertionInterrupted
  1178  	}
  1179  
  1180  	batch := bc.db.NewBatch()
  1181  	rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
  1182  	rawdb.WriteBlock(batch, block)
  1183  
  1184  	queueIndex := rawdb.ReadFirstQueueIndexNotInL2Block(bc.db, block.ParentHash())
  1185  
  1186  	// note: we can insert blocks with header-only ancestors here,
  1187  	// so queueIndex might not yet be available in DB.
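        	// In that case we leave the index for this block unset rather than
        	// guessing a value.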
  1188  	if queueIndex != nil {
  1189  		numProcessed := uint64(block.NumL1MessagesProcessed(*queueIndex))
  1190  		// do not overwrite the index written by the miner worker
  1191  		if index := rawdb.ReadFirstQueueIndexNotInL2Block(bc.db, block.Hash()); index == nil {
  1192  			newIndex := *queueIndex + numProcessed
  1193  			log.Trace(
  1194  				"Blockchain.writeBlockWithoutState WriteFirstQueueIndexNotInL2Block",
  1195  				"number", block.Number(),
  1196  				"hash", block.Hash().String(),
  1197  				"queueIndex", *queueIndex,
  1198  				"numProcessed", numProcessed,
  1199  				"newIndex", newIndex,
  1200  			)
  1201  			rawdb.WriteFirstQueueIndexNotInL2Block(batch, block.Hash(), newIndex)
  1202  		} else {
  1203  			log.Trace(
  1204  				"Blockchain.writeBlockWithoutState WriteFirstQueueIndexNotInL2Block: not overwriting existing index",
  1205  				"number", block.Number(),
  1206  				"hash", block.Hash().String(),
  1207  				"queueIndex", *queueIndex,
  1208  				"numProcessed", numProcessed,
  1209  				"index", *index,
  1210  			)
  1211  		}
  1212  	}
  1213  
  1214  	if err := batch.Write(); err != nil {
  1215  		log.Crit("Failed to write block into disk", "err", err)
  1216  	}
  1217  	return nil
  1218  }
  1219  
  1220  // writeKnownBlock updates the head block flag with a known block
  1221  // and introduces a chain reorg if necessary.
  1222  func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
  1223  	current := bc.CurrentBlock()
  1224  	if block.ParentHash() != current.Hash() {
  1225  		if err := bc.reorg(current, block); err != nil {
  1226  			return err
  1227  		}
  1228  	}
  1229  	bc.writeHeadBlock(block)
  1230  	return nil
  1231  }
  1232  
  1233  // WriteBlockWithState writes the block and all associated state to the database.
  1234  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1235  	if !bc.chainmu.TryLock() {
  1236  		return NonStatTy, errInsertionInterrupted
  1237  	}
  1238  	defer bc.chainmu.Unlock()
  1239  	return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
  1240  }
  1241  
  1242  // writeBlockWithState writes the block and all associated state to the database,
  1243  // but expects the chain mutex to be held.
  1244  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
  1245  	if bc.insertStopped() {
  1246  		return NonStatTy, errInsertionInterrupted
  1247  	}
  1248  
  1249  	// Calculate the total difficulty of the block
  1250  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1251  	if ptd == nil {
  1252  		return NonStatTy, consensus.ErrUnknownAncestor
  1253  	}
  1254  	// Make sure no inconsistent state is leaked during insertion
  1255  	currentBlock := bc.CurrentBlock()
  1256  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1257  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
  1258  
  1259  	// Irrespective of the canonical status, write the block itself to the database.
  1260  	//
  1261  	// Note: all the components of the block (td, hash->number map, header, body,
  1262  	// receipts) should be written atomically. BlockBatch is used to contain all components.
  1263  	blockBatch := bc.db.NewBatch()
  1264  	rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
  1265  	rawdb.WriteBlock(blockBatch, block)
  1266  	rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
  1267  	rawdb.WritePreimages(blockBatch, state.Preimages())
  1268  
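        	// Scroll-specific bookkeeping: each block records the first L1 message queue
        	// index that is *not* yet included as of that block. If the parent recorded
        	// index k and this block processes n L1 messages, this block records k+n
        	// (illustrative: parent index 5, 3 messages processed => new index 8).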
  1269  	queueIndex := rawdb.ReadFirstQueueIndexNotInL2Block(bc.db, block.ParentHash())
  1270  	if queueIndex == nil {
  1271  		// We expect that we only insert contiguous chain segments,
  1272  		// so the parent will always be inserted first.
  1273  		log.Crit("Queue index in DB is nil", "parent", block.ParentHash(), "hash", block.Hash())
  1274  	}
  1275  	numProcessed := uint64(block.NumL1MessagesProcessed(*queueIndex))
  1276  	// do not overwrite the index written by the miner worker
  1277  	if index := rawdb.ReadFirstQueueIndexNotInL2Block(bc.db, block.Hash()); index == nil {
  1278  		newIndex := *queueIndex + numProcessed
  1279  		log.Trace(
  1280  			"Blockchain.writeBlockWithState WriteFirstQueueIndexNotInL2Block",
  1281  			"number", block.Number(),
  1282  			"hash", block.Hash().String(),
  1283  			"queueIndex", *queueIndex,
  1284  			"numProcessed", numProcessed,
  1285  			"newIndex", newIndex,
  1286  		)
  1287  		rawdb.WriteFirstQueueIndexNotInL2Block(blockBatch, block.Hash(), newIndex)
  1288  	} else {
  1289  		log.Trace(
  1290  			"Blockchain.writeBlockWithState WriteFirstQueueIndexNotInL2Block: not overwriting existing index",
  1291  			"number", block.Number(),
  1292  			"hash", block.Hash().String(),
  1293  			"queueIndex", *queueIndex,
  1294  			"numProcessed", numProcessed,
  1295  			"index", *index,
  1296  		)
  1297  	}
  1298  
  1299  	if err := blockBatch.Write(); err != nil {
  1300  		log.Crit("Failed to write block into disk", "err", err)
  1301  	}
  1302  	// Commit all cached state changes into underlying memory database.
  1303  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
  1304  	if err != nil {
  1305  		return NonStatTy, err
  1306  	}
  1307  	triedb := bc.stateCache.TrieDB()
  1308  
  1309  	// If we're running an archive node, always flush
  1310  	if bc.cacheConfig.TrieDirtyDisabled {
  1311  		if err := triedb.Commit(root, false, nil); err != nil {
  1312  			return NonStatTy, err
  1313  		}
  1314  	} else {
  1315  		// Full but not archive node, do proper garbage collection
  1316  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
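        		// triegc pops the highest priority first, so pushing the negated block
        		// number makes the oldest (lowest-numbered) roots surface first in the
        		// garbage collection pass further below.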
  1317  		bc.triegc.Push(root, -int64(block.NumberU64()))
  1318  
  1319  		if current := block.NumberU64(); current > TriesInMemory {
  1320  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1321  			var (
  1322  				nodes, imgs = triedb.Size()
  1323  				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
  1324  			)
  1325  			if nodes > limit || imgs > 4*1024*1024 {
  1326  				triedb.Cap(limit - ethdb.IdealBatchSize)
  1327  			}
  1328  			// Find the next state trie we need to commit
  1329  			chosen := current - TriesInMemory
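        			// chosen trails the head by TriesInMemory blocks; e.g. with the default
        			// TriesInMemory = 128, importing block 1000 commits, at the earliest,
        			// the trie of block 872.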
  1330  
  1331  			// If we exceeded our time allowance, flush an entire trie to disk
  1332  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
  1333  				// If the header is missing (canonical chain behind), we're reorging a low
  1334  				// diff sidechain. Suspend committing until this operation is completed.
  1335  				header := bc.GetHeaderByNumber(chosen)
  1336  				if header == nil {
  1337  					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
  1338  				} else {
  1339  					// If we're exceeding limits but haven't reached a large enough memory gap,
  1340  					// warn the user that the system is becoming unstable.
  1341  					if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
  1342  						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
  1343  					}
  1344  					// Flush an entire trie and restart the counters
  1345  					triedb.Commit(header.Root, true, nil)
  1346  					lastWrite = chosen
  1347  					bc.gcproc = 0
  1348  				}
  1349  			}
  1350  			// Garbage collect anything below our required write retention
  1351  			for !bc.triegc.Empty() {
  1352  				root, number := bc.triegc.Pop()
  1353  				if uint64(-number) > chosen {
  1354  					bc.triegc.Push(root, number)
  1355  					break
  1356  				}
  1357  				triedb.Dereference(root.(common.Hash))
  1358  			}
  1359  		}
  1360  	}
  1361  	// If the total difficulty is higher than our known TD, add the block to the canonical chain.
  1362  	// The second clause in the if statement reduces the vulnerability to selfish mining.
  1363  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
  1364  	reorg := externTd.Cmp(localTd) > 0
  1365  	currentBlock = bc.CurrentBlock()
  1366  	if !reorg && externTd.Cmp(localTd) == 0 {
  1367  		// Split same-difficulty blocks by number, then preferentially select
  1368  		// the block generated by the local miner as the canonical block.
  1369  		if block.NumberU64() < currentBlock.NumberU64() {
  1370  			reorg = true
  1371  		} else if block.NumberU64() == currentBlock.NumberU64() {
  1372  			var currentPreserve, blockPreserve bool
  1373  			if bc.shouldPreserve != nil {
  1374  				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
  1375  			}
  1376  			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
  1377  		}
  1378  	}
  1379  	if reorg {
  1380  		// Reorganise the chain if the parent is not the head block
  1381  		if block.ParentHash() != currentBlock.Hash() {
  1382  			if err := bc.reorg(currentBlock, block); err != nil {
  1383  				return NonStatTy, err
  1384  			}
  1385  		}
  1386  		status = CanonStatTy
  1387  	} else {
  1388  		status = SideStatTy
  1389  	}
  1390  	// Set new head.
  1391  	if status == CanonStatTy {
  1392  		bc.writeHeadBlock(block)
  1393  	}
  1394  	bc.futureBlocks.Remove(block.Hash())
  1395  
  1396  	if status == CanonStatTy {
  1397  		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
  1398  		if len(logs) > 0 {
  1399  			bc.logsFeed.Send(logs)
  1400  		}
  1401  		// In theory we should fire a ChainHeadEvent when we inject
  1402  		// a canonical block, but sometimes we can insert a batch of
  1403  		// canonical blocks. To avoid firing too many ChainHeadEvents,
  1404  		// we fire one accumulated ChainHeadEvent at the end and skip
  1405  		// firing the event here.
  1406  		if emitHeadEvent {
  1407  			bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
  1408  		}
  1409  	} else {
  1410  		bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1411  	}
  1412  	return status, nil
  1413  }
  1414  
  1415  // addFutureBlock checks if the block is within the max allowed window to get
  1416  // accepted for future processing, and returns an error if the block is too far
  1417  // ahead and was not added.
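        //
        // Illustrative: with maxTimeFutureBlocks = 30 (seconds, the upstream default),
        // a block whose timestamp is 31s ahead of local wall-clock time is rejected,
        // while one 30s ahead is queued for later import.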
  1418  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1419  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1420  	if block.Time() > max {
  1421  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1422  	}
  1423  	bc.futureBlocks.Add(block.Hash(), block)
  1424  	return nil
  1425  }
  1426  
  1427  // InsertChain attempts to insert the given batch of blocks into the canonical
  1428  // chain or, otherwise, create a fork. If an error is returned, it will return
  1429  // the index number of the failing block as well as an error describing what
  1430  // went wrong.
  1431  //
  1432  // After insertion is done, all accumulated events will be fired.
  1433  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1434  	// Sanity check that we have something meaningful to import
  1435  	if len(chain) == 0 {
  1436  		return 0, nil
  1437  	}
  1438  
  1439  	bc.blockProcFeed.Send(true)
  1440  	defer bc.blockProcFeed.Send(false)
  1441  
  1442  	// Do a sanity check that the provided chain is actually ordered and linked.
  1443  	for i := 1; i < len(chain); i++ {
  1444  		block, prev := chain[i], chain[i-1]
  1445  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1446  			log.Error("Non contiguous block insert",
  1447  				"number", block.Number(),
  1448  				"hash", block.Hash(),
  1449  				"parent", block.ParentHash(),
  1450  				"prevnumber", prev.Number(),
  1451  				"prevhash", prev.Hash(),
  1452  			)
  1453  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(),
  1454  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1455  		}
  1456  	}
  1457  
  1458  	// Pre-check passed, start the full block imports.
  1459  	if !bc.chainmu.TryLock() {
  1460  		return 0, errChainStopped
  1461  	}
  1462  	defer bc.chainmu.Unlock()
  1463  	return bc.insertChain(chain, true)
  1464  }
  1465  
  1466  // InsertChainWithoutSealVerification works exactly the same as InsertChain,
  1467  // except that seal verification is omitted.
  1468  func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (int, error) {
  1469  	bc.blockProcFeed.Send(true)
  1470  	defer bc.blockProcFeed.Send(false)
  1471  
  1472  	if !bc.chainmu.TryLock() {
  1473  		return 0, errChainStopped
  1474  	}
  1475  	defer bc.chainmu.Unlock()
  1476  	return bc.insertChain(types.Blocks([]*types.Block{block}), false)
  1477  }
  1478  
  1479  // insertChain is the internal implementation of InsertChain, which assumes that
  1480  // 1) chains are contiguous, and 2) the chain mutex is held.
  1481  //
  1482  // This method is split out so that import batches that require re-injecting
  1483  // historical blocks can do so without releasing the lock, which could lead to
  1484  // racy behaviour. If a sidechain import is in progress, and the historic state
  1485  // is imported, but then a new canon-head is added before the actual sidechain
  1486  // completes, then the historic state could be pruned again.
  1487  func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
  1488  	// If the chain is terminating, don't even bother starting up.
  1489  	if bc.insertStopped() {
  1490  		return 0, nil
  1491  	}
  1492  
  1493  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1494  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1495  
  1496  	var (
  1497  		stats     = insertStats{startTime: mclock.Now()}
  1498  		lastCanon *types.Block
  1499  	)
  1500  	// Fire a single chain head event if we've progressed the chain
  1501  	defer func() {
  1502  		if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1503  			bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
  1504  		}
  1505  	}()
  1506  	// Start the parallel header verifier
  1507  	headers := make([]*types.Header, len(chain))
  1508  	seals := make([]bool, len(chain))
  1509  
  1510  	for i, block := range chain {
  1511  		headers[i] = block.Header()
  1512  		seals[i] = verifySeals
  1513  	}
  1514  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1515  	defer close(abort)
  1516  
  1517  	// Peek at the error for the first block to decide which import logic to use
  1518  	it := newInsertIterator(chain, results, bc.validator)
  1519  	block, err := it.next()
  1520  
  1521  	// Left-trim all the known blocks that don't need their snapshots rebuilt
  1522  	if bc.skipBlock(err, it) {
  1523  		// First block (and state) is known
  1524  		//   1. We did a roll-back, and should now do a re-import
  1525  		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
  1526  		//      from the canonical chain, which has not been verified.
  1527  		// Skip all known blocks that are behind us.
  1528  		var (
  1529  			current  = bc.CurrentBlock()
  1530  			localTd  = bc.GetTd(current.Hash(), current.NumberU64())
  1531  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
  1532  		)
  1533  		for block != nil && bc.skipBlock(err, it) {
  1534  			externTd = new(big.Int).Add(externTd, block.Difficulty())
  1535  			if localTd.Cmp(externTd) < 0 {
  1536  				break
  1537  			}
  1538  			log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
  1539  			stats.ignored++
  1540  
  1541  			block, err = it.next()
  1542  		}
  1543  		// The remaining blocks are still known blocks, and the only scenario here is:
  1544  		// during fast sync, the pivot point is already submitted but a rollback
  1545  		// happens. The node then resets the head full block to a lower height via
  1546  		// `rollback` and leaves a few known blocks in the database.
  1547  		//
  1548  		// When the node runs a fast sync again, it can re-import a batch of known
  1549  		// blocks via `insertChain`, while some of them have a higher total difficulty
  1550  		// than the current head full block (new pivot point).
  1551  		for block != nil && bc.skipBlock(err, it) {
  1552  			log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
  1553  			if err := bc.writeKnownBlock(block); err != nil {
  1554  				return it.index, err
  1555  			}
  1556  			lastCanon = block
  1557  
  1558  			block, err = it.next()
  1559  		}
  1560  		// Falls through to the block import
  1561  	}
  1562  	switch {
  1563  	// First block is pruned, insert as sidechain and reorg only if TD grows enough
  1564  	case errors.Is(err, consensus.ErrPrunedAncestor):
  1565  		log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
  1566  		return bc.insertSideChain(block, it)
  1567  
  1568  	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
  1569  	case errors.Is(err, consensus.ErrFutureBlock) || errors.Is(err, consensus.ErrMissingL1MessageData) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())):
  1570  		for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) {
  1571  			log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
  1572  			if err := bc.addFutureBlock(block); err != nil {
  1573  				return it.index, err
  1574  			}
  1575  			block, err = it.next()
  1576  		}
  1577  		stats.queued += it.processed()
  1578  		stats.ignored += it.remaining()
  1579  
  1580  		// If there are any still remaining, mark as ignored
  1581  		return it.index, err
  1582  
  1583  	// Some other error (except ErrKnownBlock) occurred, abort.
  1584  	// ErrKnownBlock is allowed here since some known blocks
  1585  	// still need re-execution to generate snapshots that are missing.
  1586  	case err != nil && !errors.Is(err, ErrKnownBlock):
  1587  		bc.futureBlocks.Remove(block.Hash())
  1588  		stats.ignored += len(it.chain)
  1589  		bc.reportBlock(block, nil, err)
  1590  		return it.index, err
  1591  	}
  1592  	// No validation errors for the first block (or chain prefix skipped)
  1593  	var activeState *state.StateDB
  1594  	defer func() {
  1595  		// The chain importer is starting and stopping trie prefetchers. If a bad
  1596  		// block or other error is hit however, an early return may not properly
  1597  		// terminate the background threads. This defer ensures that we clean up
  1598  		// any dangling prefetchers, without deferring each and holding on to live refs.
  1599  		if activeState != nil {
  1600  			activeState.StopPrefetcher()
  1601  		}
  1602  	}()
  1603  
  1604  	for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
  1605  		// If the chain is terminating, stop processing blocks
  1606  		if bc.insertStopped() {
  1607  			log.Debug("Abort during block processing")
  1608  			break
  1609  		}
  1610  		// If the header is a banned one, straight out abort
  1611  		if BadHashes[block.Hash()] {
  1612  			bc.reportBlock(block, nil, ErrBannedHash)
  1613  			return it.index, ErrBannedHash
  1614  		}
  1615  		// If the block is known (in the middle of the chain), it's a special case for
  1616  		// Clique blocks where they can share state among each other, so importing an
  1617  		// older block might complete the state of the subsequent one. In this case,
  1618  		// just skip the block (we already validated it once fully (and crashed), since
  1619  		// its header and body were already in the database). But if the corresponding
  1620  		// snapshot layer is missing, forcibly rerun the execution to build it.
  1621  		if bc.skipBlock(err, it) {
  1622  			logger := log.Debug
  1623  			if bc.chainConfig.Clique == nil {
  1624  				logger = log.Warn
  1625  			}
  1626  			logger("Inserted known block", "number", block.Number(), "hash", block.Hash(),
  1627  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1628  				"root", block.Root())
  1629  
  1630  			// Special case. Commit the empty receipt slice if we meet the known
  1631  			// block in the middle. It can only happen in the clique chain. Whenever
  1632  			// we insert blocks via `insertSideChain`, we only commit `td`, `header`
  1633  			// and `body` if they are non-existent. Since we don't have receipts
  1634  			// without re-execution, there is nothing to commit. But if the sidechain
  1635  			// is eventually adopted as the canonical chain, it needs to be re-executed
  1636  			// for the missing state; in this special case (skipped re-execution) we
  1637  			// would lose the empty receipt entry.
  1638  			if len(block.Transactions()) == 0 {
  1639  				rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
  1640  			} else {
  1641  				log.Error("Please file an issue, skip known block execution without receipt",
  1642  					"hash", block.Hash(), "number", block.NumberU64())
  1643  			}
  1644  			if err := bc.writeKnownBlock(block); err != nil {
  1645  				return it.index, err
  1646  			}
  1647  			stats.processed++
  1648  
  1649  			// We can assume that logs are empty here, since the only way for consecutive
  1650  			// Clique blocks to have the same state is if there are no transactions.
  1651  			lastCanon = block
  1652  			continue
  1653  		}
  1654  
  1655  		// Retrieve the parent block and its state to execute on top
  1656  		start := time.Now()
  1657  		parent := it.previous()
  1658  		if parent == nil {
  1659  			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1660  		}
  1661  		statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
  1662  		if err != nil {
  1663  			return it.index, err
  1664  		}
  1665  
  1666  		// Enable prefetching to pull in trie node paths while processing transactions
  1667  		statedb.StartPrefetcher("chain")
  1668  		activeState = statedb
  1669  
  1670  		// If we have a followup block, run that against the current state to pre-cache
  1671  		// transactions and probabilistically some of the account/storage trie nodes.
  1672  		var followupInterrupt uint32
  1673  		if !bc.cacheConfig.TrieCleanNoPrefetch {
  1674  			if followup, err := it.peek(); followup != nil && err == nil {
  1675  				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
  1676  
  1677  				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
  1678  					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
  1679  
  1680  					blockPrefetchExecuteTimer.Update(time.Since(start))
  1681  					if atomic.LoadUint32(interrupt) == 1 {
  1682  						blockPrefetchInterruptMeter.Mark(1)
  1683  					}
  1684  				}(time.Now(), followup, throwaway, &followupInterrupt)
  1685  			}
  1686  		}
  1687  
  1688  		// Process block using the parent state as reference point
  1689  		substart := time.Now()
  1690  		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
  1691  		if err != nil {
  1692  			bc.reportBlock(block, receipts, err)
  1693  			atomic.StoreUint32(&followupInterrupt, 1)
  1694  			return it.index, err
  1695  		}
  1696  
  1697  		// Update the metrics touched during block processing
  1698  		accountReadTimer.Update(statedb.AccountReads)                 // Account reads are complete, we can mark them
  1699  		storageReadTimer.Update(statedb.StorageReads)                 // Storage reads are complete, we can mark them
  1700  		accountUpdateTimer.Update(statedb.AccountUpdates)             // Account updates are complete, we can mark them
  1701  		storageUpdateTimer.Update(statedb.StorageUpdates)             // Storage updates are complete, we can mark them
  1702  		snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
  1703  		snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
  1704  		triehash := statedb.AccountHashes + statedb.StorageHashes     // Save to not double count in validation
  1705  		trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
  1706  		trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
  1707  
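        		// Subtract trie access and hashing time so this timer reflects pure
        		// EVM execution rather than state-access overhead.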
  1708  		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
  1709  
  1710  		// Validate the state using the default validator
  1711  		substart = time.Now()
  1712  		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1713  			bc.reportBlock(block, receipts, err)
  1714  			atomic.StoreUint32(&followupInterrupt, 1)
  1715  			return it.index, err
  1716  		}
  1717  		proctime := time.Since(start)
  1718  
  1719  		// Update the metrics touched during block validation
  1720  		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
  1721  		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
  1722  
  1723  		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
  1724  
  1725  		// Write the block to the chain and get the status.
  1726  		substart = time.Now()
  1727  		// EvmTraces & StorageTrace being nil is safe because l2geth's p2p server is stopped and the code will not execute there.
  1728  		status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
  1729  		atomic.StoreUint32(&followupInterrupt, 1)
  1730  		if err != nil {
  1731  			return it.index, err
  1732  		}
  1733  		// Update the metrics touched during block commit
  1734  		accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
  1735  		storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
  1736  		snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
  1737  
  1738  		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
  1739  		blockInsertTimer.UpdateSince(start)
  1740  
  1741  		switch status {
  1742  		case CanonStatTy:
  1743  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1744  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1745  				"elapsed", common.PrettyDuration(time.Since(start)),
  1746  				"root", block.Root())
  1747  
  1748  			lastCanon = block
  1749  
  1750  			// Only count canonical blocks for GC processing time
  1751  			bc.gcproc += proctime
  1752  
  1753  		case SideStatTy:
  1754  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1755  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1756  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1757  				"root", block.Root())
  1758  
  1759  		default:
  1760  			// This in theory is impossible, but let's be nice to our future selves and leave
  1761  			// a log, instead of trying to track down block imports that don't emit logs.
  1762  			log.Warn("Inserted block with unknown status", "number", block.Number(), "hash", block.Hash(),
  1763  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1764  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1765  				"root", block.Root())
  1766  		}
  1767  		stats.processed++
  1768  		stats.usedGas += usedGas
  1769  
  1770  		dirty, _ := bc.stateCache.TrieDB().Size()
  1771  		stats.report(chain, it.index, dirty)
  1772  	}
  1773  
  1774  	// Any blocks remaining here? The only ones we care about are the future ones
  1775  	if block != nil && errors.Is(err, consensus.ErrFutureBlock) {
  1776  		if err := bc.addFutureBlock(block); err != nil {
  1777  			return it.index, err
  1778  		}
  1779  		block, err = it.next()
  1780  
  1781  		for ; block != nil && errors.Is(err, consensus.ErrUnknownAncestor); block, err = it.next() {
  1782  			if err := bc.addFutureBlock(block); err != nil {
  1783  				return it.index, err
  1784  			}
  1785  			stats.queued++
  1786  		}
  1787  	}
  1788  	stats.ignored += it.remaining()
  1789  
  1790  	return it.index, err
  1791  }
  1792  
  1793  // insertSideChain is called when an import batch hits upon a pruned ancestor
  1794  // error, which happens when a sidechain with a sufficiently old fork-block is
  1795  // found.
  1796  //
  1797  // The method writes all (header-and-body-valid) blocks to disk, then tries to
  1798  // switch over to the new chain if the TD exceeds that of the current chain.
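        //
        // A typical trigger: state below some height has been pruned, and a peer
        // feeds us a fork branching off below that height. Bodies are written to
        // disk immediately, while state is regenerated later only if the fork's
        // TD wins out.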
  1799  func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
  1800  	var (
  1801  		externTd *big.Int
  1802  		current  = bc.CurrentBlock()
  1803  	)
  1804  	// The first sidechain block error is already verified to be ErrPrunedAncestor.
  1805  	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
  1806  	// ones. Any other error means that the block is invalid, and should not be written
  1807  	// to disk.
  1808  	err := consensus.ErrPrunedAncestor
  1809  	for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
  1810  		// Check the canonical state root for that number
  1811  		if number := block.NumberU64(); current.NumberU64() >= number {
  1812  			canonical := bc.GetBlockByNumber(number)
  1813  			if canonical != nil && canonical.Hash() == block.Hash() {
  1814  				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
  1815  
  1816  				// Collect the TD of the block. Since we know it's a canon one,
  1817  				// we can get it directly, and not (like further below) use
  1818  				// the parent and then add the block on top
  1819  				externTd = bc.GetTd(block.Hash(), block.NumberU64())
  1820  				continue
  1821  			}
  1822  			if canonical != nil && canonical.Root() == block.Root() {
  1823  				// This is most likely a shadow-state attack. When a fork is imported into the
  1824  				// database, and it eventually reaches a block height which is not pruned, we
  1825  				// just found that the state already exists! This means that the sidechain block
  1826  				// refers to a state which already exists in our canon chain.
  1827  				//
  1828  				// If left unchecked, we would now proceed importing the blocks, without actually
  1829  				// having verified the state of the previous blocks.
  1830  				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  1831  
  1832  				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
  1833  				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
  1834  				// mechanism.
  1835  				return it.index, errors.New("sidechain ghost-state attack")
  1836  			}
  1837  		}
  1838  		if externTd == nil {
  1839  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1840  		}
  1841  		externTd = new(big.Int).Add(externTd, block.Difficulty())
  1842  
  1843  		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  1844  			start := time.Now()
  1845  			if err := bc.writeBlockWithoutState(block, externTd); err != nil {
  1846  				return it.index, err
  1847  			}
  1848  			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  1849  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1850  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1851  				"root", block.Root())
  1852  		}
  1853  	}
  1854  	// At this point, we've written all sidechain blocks to the database. The loop ended
  1855  	// either on some other error or all were processed. If there was some other
  1856  	// error, we can ignore the rest of those blocks.
  1857  	//
  1858  	// If the externTd was larger than our local TD, we now need to reimport the previous
  1859  	// blocks to regenerate the required state.
  1860  	localTd := bc.GetTd(current.Hash(), current.NumberU64())
  1861  	if localTd.Cmp(externTd) > 0 {
  1862  		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  1863  		return it.index, err
  1864  	}
  1865  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  1866  	var (
  1867  		hashes  []common.Hash
  1868  		numbers []uint64
  1869  	)
  1870  	parent := it.previous()
  1871  	for parent != nil && !bc.HasState(parent.Root) {
  1872  		hashes = append(hashes, parent.Hash())
  1873  		numbers = append(numbers, parent.Number.Uint64())
  1874  
  1875  		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  1876  	}
  1877  	if parent == nil {
  1878  		return it.index, errors.New("missing parent")
  1879  	}
  1880  	// Import all the pruned blocks to make the state available
  1881  	var (
  1882  		blocks []*types.Block
  1883  		memory common.StorageSize
  1884  	)
  1885  	for i := len(hashes) - 1; i >= 0; i-- {
  1886  		// Append the next block to our batch
  1887  		block := bc.GetBlock(hashes[i], numbers[i])
  1888  
  1889  		blocks = append(blocks, block)
  1890  		memory += block.Size()
  1891  
  1892  		// If memory use grew too large, import and continue. Sadly we need to discard
  1893  		// all raised events and logs from notifications since we're too heavy on the
  1894  		// memory here.
  1895  		if len(blocks) >= 2048 || memory > 64*1024*1024 {
  1896  			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  1897  			if _, err := bc.insertChain(blocks, false); err != nil {
  1898  				return 0, err
  1899  			}
  1900  			blocks, memory = blocks[:0], 0
  1901  
  1902  			// If the chain is terminating, stop processing blocks
  1903  			if bc.insertStopped() {
  1904  				log.Debug("Abort during blocks processing")
  1905  				return 0, nil
  1906  			}
  1907  		}
  1908  	}
  1909  	if len(blocks) > 0 {
  1910  		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  1911  		return bc.insertChain(blocks, false)
  1912  	}
  1913  	return 0, nil
  1914  }
  1915  
  1916  // reorg takes two blocks, an old chain and a new chain, reconstructs the
  1917  // blocks, inserts them as part of the new canonical chain, accumulates
  1918  // potential missing transactions, and posts events about them.
  1919  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1920  	var (
  1921  		newChain    types.Blocks
  1922  		oldChain    types.Blocks
  1923  		commonBlock *types.Block
  1924  
  1925  		deletedTxs types.Transactions
  1926  		addedTxs   types.Transactions
  1927  
  1928  		deletedLogs [][]*types.Log
  1929  		rebirthLogs [][]*types.Log
  1930  
  1931  		// collectLogs collects the logs that were generated or removed during
  1932  		// the processing of the block that corresponds with the given hash.
  1933  		// These logs are later announced as deleted or reborn.
  1934  		collectLogs = func(hash common.Hash, removed bool) {
  1935  			number := bc.hc.GetBlockNumber(hash)
  1936  			if number == nil {
  1937  				return
  1938  			}
  1939  			receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
  1940  
  1941  			var logs []*types.Log
  1942  			for _, receipt := range receipts {
  1943  				for _, log := range receipt.Logs {
  1944  					l := *log
  1945  					if removed {
  1946  						l.Removed = true
  1947  					}
  1948  					logs = append(logs, &l)
  1949  				}
  1950  			}
  1951  			if len(logs) > 0 {
  1952  				if removed {
  1953  					deletedLogs = append(deletedLogs, logs)
  1954  				} else {
  1955  					rebirthLogs = append(rebirthLogs, logs)
  1956  				}
  1957  			}
  1958  		}
  1959  		// mergeLogs returns a merged log slice with the specified sort order.
  1960  		mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
  1961  			var ret []*types.Log
  1962  			if reverse {
  1963  				for i := len(logs) - 1; i >= 0; i-- {
  1964  					ret = append(ret, logs[i]...)
  1965  				}
  1966  			} else {
  1967  				for i := 0; i < len(logs); i++ {
  1968  					ret = append(ret, logs[i]...)
  1969  				}
  1970  			}
  1971  			return ret
  1972  		}
  1973  	)
  1974  	// Reduce the longer chain to the same number as the shorter one
  1975  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1976  		// Old chain is longer, gather all transactions and logs as deleted ones
  1977  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1978  			oldChain = append(oldChain, oldBlock)
  1979  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1980  			collectLogs(oldBlock.Hash(), true)
  1981  		}
  1982  	} else {
  1983  		// New chain is longer, stash all blocks away for subsequent insertion
  1984  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1985  			newChain = append(newChain, newBlock)
  1986  		}
  1987  	}
  1988  	if oldBlock == nil {
  1989  		return fmt.Errorf("invalid old chain")
  1990  	}
  1991  	if newBlock == nil {
  1992  		return fmt.Errorf("invalid new chain")
  1993  	}
  1994  	// Both sides of the reorg are at the same number, reduce both until the common
  1995  	// ancestor is found
  1996  	for {
  1997  		// If the common ancestor was found, bail out
  1998  		if oldBlock.Hash() == newBlock.Hash() {
  1999  			commonBlock = oldBlock
  2000  			break
  2001  		}
  2002  		// Remove an old block as well as stash away a new block
  2003  		oldChain = append(oldChain, oldBlock)
  2004  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  2005  		collectLogs(oldBlock.Hash(), true)
  2006  
  2007  		newChain = append(newChain, newBlock)
  2008  
  2009  		// Step back with both chains
  2010  		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  2011  		if oldBlock == nil {
  2012  			return fmt.Errorf("invalid old chain")
  2013  		}
  2014  		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  2015  		if newBlock == nil {
  2016  			return fmt.Errorf("invalid new chain")
  2017  		}
  2018  	}
  2019  	// Ensure the user sees large reorgs
  2020  	if len(oldChain) > 0 && len(newChain) > 0 {
  2021  		logFn := log.Info
  2022  		msg := "Chain reorg detected"
  2023  		if len(oldChain) > 63 {
  2024  			msg = "Large chain reorg detected"
  2025  			logFn = log.Warn
  2026  		}
  2027  		logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  2028  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  2029  		blockReorgAddMeter.Mark(int64(len(newChain)))
  2030  		blockReorgDropMeter.Mark(int64(len(oldChain)))
  2031  		blockReorgMeter.Mark(1)
  2032  	} else {
  2033  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  2034  	}
  2035  	// Insert the new chain (except the head block, which the caller writes via
  2036  	// writeHeadBlock), in reverse order, taking care of the proper incremental order.
  2037  	for i := len(newChain) - 1; i >= 1; i-- {
  2038  		// Insert the block in the canonical way, re-writing history
  2039  		bc.writeHeadBlock(newChain[i])
  2040  
  2041  		// Collect reborn logs due to chain reorg
  2042  		collectLogs(newChain[i].Hash(), false)
  2043  
  2044  		// Collect the new added transactions.
  2045  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  2046  	}
  2047  	// Delete useless indexes right now, which include the non-canonical
  2048  	// transaction indexes and canonical chain indexes above the new head.
  2049  	indexesBatch := bc.db.NewBatch()
  2050  	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
  2051  		rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
  2052  	}
  2053  	// Delete any canonical number assignments above the new head
  2054  	number := bc.CurrentBlock().NumberU64()
  2055  	for i := number + 1; ; i++ {
  2056  		hash := rawdb.ReadCanonicalHash(bc.db, i)
  2057  		if hash == (common.Hash{}) {
  2058  			break
  2059  		}
  2060  		rawdb.DeleteCanonicalHash(indexesBatch, i)
  2061  	}
  2062  	if err := indexesBatch.Write(); err != nil {
  2063  		log.Crit("Failed to delete useless indexes", "err", err)
  2064  	}
  2065  	// If any logs need to be fired, do it now. In theory we could skip these
  2066  	// sends when there are no events to fire, but realistically that only
  2067  	// ever happens if we're reorging empty blocks, which will only happen on idle
  2068  	// networks where performance is not an issue either way.
  2069  	if len(deletedLogs) > 0 {
  2070  		bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
  2071  	}
  2072  	if len(rebirthLogs) > 0 {
  2073  		bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
  2074  	}
  2075  	if len(oldChain) > 0 {
  2076  		for i := len(oldChain) - 1; i >= 0; i-- {
  2077  			bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
  2078  		}
  2079  	}
  2080  	return nil
  2081  }
  2082  
  2083  // futureBlocksLoop processes the 'future block' queue.
  2084  func (bc *BlockChain) futureBlocksLoop() {
  2085  	defer bc.wg.Done()
  2086  
  2087  	futureTimer := time.NewTicker(5 * time.Second)
  2088  	defer futureTimer.Stop()
  2089  	for {
  2090  		select {
  2091  		case <-futureTimer.C:
  2092  			bc.procFutureBlocks()
  2093  		case <-bc.quit:
  2094  			return
  2095  		}
  2096  	}
  2097  }
  2098  
  2099  // skipBlock returns 'true' if the block being imported can be skipped over, meaning
  2100  // that the block does not need to be processed but can be considered already fully 'done'.
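        //
        // In short: skip when the block and its state are already known, and either
        // snapshots are disabled, the block's snapshot layer exists, or the parent's
        // snapshot layer is missing too (so skipping creates no gap in the layers).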
  2101  func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
  2102  	// We can only ever bypass processing if the only error returned by the validator
  2103  	// is ErrKnownBlock, which means all checks passed, but we already have the block
  2104  	// and state.
  2105  	if !errors.Is(err, ErrKnownBlock) {
  2106  		return false
  2107  	}
  2108  	// If we're not using snapshots, we can skip this, since we have both block
  2109  	// and (trie-) state
  2110  	if bc.snaps == nil {
  2111  		return true
  2112  	}
  2113  	var (
  2114  		header     = it.current() // header can't be nil
  2115  		parentRoot common.Hash
  2116  	)
  2117  	// If we also have the snapshot-state, we can skip the processing.
  2118  	if bc.snaps.Snapshot(header.Root) != nil {
  2119  		return true
  2120  	}
  2121  	// In this case, we have the trie-state but not snapshot-state. If the parent
  2122  	// snapshot-state exists, we need to process this in order to not get a gap
  2123  	// in the snapshot layers.
  2124  	// Resolve parent block
  2125  	if parent := it.previous(); parent != nil {
  2126  		parentRoot = parent.Root
  2127  	} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
  2128  		parentRoot = parent.Root
  2129  	}
  2130  	if parentRoot == (common.Hash{}) {
  2131  		return false // Theoretically impossible case
  2132  	}
  2133  	// Parent is also missing snapshot: we can skip this. Otherwise process.
  2134  	if bc.snaps.Snapshot(parentRoot) == nil {
  2135  		return true
  2136  	}
  2137  	return false
  2138  }
  2139  
  2140  // maintainTxIndex is responsible for the construction and deletion of the
  2141  // transaction index.
  2142  //
  2143  // The user can use the flag `txlookuplimit` to specify a "recentness" threshold,
  2144  // below which ancient tx indices get deleted. If `txlookuplimit` is 0, it means
  2145  // all tx indices will be retained.
  2146  //
  2147  // The user can adjust the txlookuplimit value on each launch after fast
  2148  // sync; Geth will automatically construct the missing indices and delete
  2149  // the extra indices.
  2150  func (bc *BlockChain) maintainTxIndex(ancients uint64) {
  2151  	defer bc.wg.Done()
  2152  
  2153  	// Before starting the actual maintenance, we need to handle a special case,
  2154  	// where the user might init Geth with an external ancient database. If so, we
  2155  	// need to reindex all necessary transactions before starting to process any
  2156  	// pruning requests.
  2157  	if ancients > 0 {
  2158  		var from = uint64(0)
  2159  		if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit {
  2160  			from = ancients - bc.txLookupLimit
  2161  		}
  2162  		rawdb.IndexTransactions(bc.db, from, ancients, bc.quit)
  2163  	}
  2164  
  2165  	// indexBlocks reindexes or unindexes transactions depending on user configuration
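        	// (illustrative: head = 1000 and txLookupLimit = 100 keeps indices for the
        	// most recent 100 blocks, i.e. blocks [901, 1000], so the tail becomes
        	// head-txLookupLimit+1 = 901)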
  2166  	indexBlocks := func(tail *uint64, head uint64, done chan struct{}) {
  2167  		defer func() { done <- struct{}{} }()
  2168  
  2169  		// If the user just upgraded Geth to a new version which supports transaction
  2170  		// index pruning, write the new tail and remove anything older.
  2171  		if tail == nil {
  2172  			if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
  2173  				// Nothing to delete, write the tail and return
  2174  				rawdb.WriteTxIndexTail(bc.db, 0)
  2175  			} else {
  2176  				// Prune all stale tx indices and record the tx index tail
  2177  				rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1, bc.quit)
  2178  			}
  2179  			return
  2180  		}
  2181  		// If a previous indexing existed, make sure that we fill in any missing entries
  2182  		if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
  2183  			if *tail > 0 {
  2184  				rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit)
  2185  			}
  2186  			return
  2187  		}
  2188  		// Update the transaction index to the new chain state
  2189  		if head-bc.txLookupLimit+1 < *tail {
  2190  			// Reindex a part of missing indices and rewind index tail to HEAD-limit
  2191  			rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit)
  2192  		} else {
  2193  			// Unindex a part of stale indices and forward index tail to HEAD-limit
  2194  			rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit)
  2195  		}
  2196  	}
  2197  
  2198  	// With any reindexing done, start listening to chain events and moving the index window
  2199  	var (
  2200  		done   chan struct{}                  // Non-nil if background unindexing or reindexing routine is active.
  2201  		headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
  2202  	)
  2203  	sub := bc.SubscribeChainHeadEvent(headCh)
  2204  	if sub == nil {
  2205  		return
  2206  	}
  2207  	defer sub.Unsubscribe()
  2208  
  2209  	for {
  2210  		select {
  2211  		case head := <-headCh:
  2212  			if done == nil {
  2213  				done = make(chan struct{})
  2214  				go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
  2215  			}
  2216  		case <-done:
  2217  			done = nil
  2218  		case <-bc.quit:
  2219  			if done != nil {
  2220  				log.Info("Waiting for background transaction indexer to exit")
  2221  				<-done
  2222  			}
  2223  			return
  2224  		}
  2225  	}
  2226  }
  2227  
  2228  // reportBlock logs a bad block error.
  2229  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2230  	rawdb.WriteBadBlock(bc.db, block)
  2231  
  2232  	var receiptString string
  2233  	for i, receipt := range receipts {
  2234  		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  2235  			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2236  			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  2237  	}
  2238  	log.Error(fmt.Sprintf(`
  2239  ########## BAD BLOCK #########
  2240  Chain config: %v
  2241  
  2242  Number: %v
  2243  Hash: 0x%x
  2244  ParentHash: 0x%x
  2245  %v
  2246  
  2247  Error: %v
  2248  ##############################
  2249  `, bc.chainConfig, block.Number(), block.Hash(), block.ParentHash(), receiptString, err))
  2250  }
  2251  
  2252  // InsertHeaderChain attempts to insert the given header chain into the local
  2253  // chain, possibly creating a reorg. If an error is returned, it will return the
  2254  // index number of the failing header as well as an error describing what went wrong.
  2255  //
  2256  // The checkFreq parameter can be used to fine-tune whether nonce verification
  2257  // should be done or not. The reason behind the optional check is that some
  2258  // of the header retrieval mechanisms already need to verify nonces, and
  2259  // nonces can be verified sparsely, not needing to check each one.
  2260  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  2261  	start := time.Now()
  2262  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  2263  		return i, err
  2264  	}
  2265  
  2266  	if !bc.chainmu.TryLock() {
  2267  		return 0, errChainStopped
  2268  	}
  2269  	defer bc.chainmu.Unlock()
  2270  	_, err := bc.hc.InsertHeaderChain(chain, start)
  2271  	return 0, err
  2272  }