github.com/klaytn/klaytn@v1.12.1/blockchain/blockchain.go

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from core/blockchain.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package blockchain
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"io"
    27  	"math/big"
    28  	mrand "math/rand"
    29  	"reflect"
    30  	"runtime"
    31  	"strconv"
    32  	"sync"
    33  	"sync/atomic"
    34  	"time"
    35  
    36  	"github.com/klaytn/klaytn/snapshot"
    37  
    38  	"github.com/go-redis/redis/v7"
    39  	lru "github.com/hashicorp/golang-lru"
    40  	"github.com/klaytn/klaytn/blockchain/state"
    41  	"github.com/klaytn/klaytn/blockchain/types"
    42  	"github.com/klaytn/klaytn/blockchain/vm"
    43  	"github.com/klaytn/klaytn/common"
    44  	"github.com/klaytn/klaytn/common/hexutil"
    45  	"github.com/klaytn/klaytn/common/mclock"
    46  	"github.com/klaytn/klaytn/common/prque"
    47  	"github.com/klaytn/klaytn/consensus"
    48  	"github.com/klaytn/klaytn/crypto"
    49  	"github.com/klaytn/klaytn/event"
    50  	"github.com/klaytn/klaytn/fork"
    51  	"github.com/klaytn/klaytn/log"
    52  	klaytnmetrics "github.com/klaytn/klaytn/metrics"
    53  	"github.com/klaytn/klaytn/params"
    54  	"github.com/klaytn/klaytn/rlp"
    55  	"github.com/klaytn/klaytn/storage/database"
    56  	"github.com/klaytn/klaytn/storage/statedb"
    57  	"github.com/rcrowley/go-metrics"
    58  )
    59  
    60  // If the total insertion time of a block exceeds insertTimeLimit,
    61  // that time will be logged by blockLongInsertTimeGauge.
    62  const insertTimeLimit = common.PrettyDuration(time.Second)
    63  
    64  var (
    65  	accountReadTimer   = klaytnmetrics.NewRegisteredHybridTimer("state/account/reads", nil)
    66  	accountHashTimer   = klaytnmetrics.NewRegisteredHybridTimer("state/account/hashes", nil)
    67  	accountUpdateTimer = klaytnmetrics.NewRegisteredHybridTimer("state/account/updates", nil)
    68  	accountCommitTimer = klaytnmetrics.NewRegisteredHybridTimer("state/account/commits", nil)
    69  
    70  	storageReadTimer   = klaytnmetrics.NewRegisteredHybridTimer("state/storage/reads", nil)
    71  	storageHashTimer   = klaytnmetrics.NewRegisteredHybridTimer("state/storage/hashes", nil)
    72  	storageUpdateTimer = klaytnmetrics.NewRegisteredHybridTimer("state/storage/updates", nil)
    73  	storageCommitTimer = klaytnmetrics.NewRegisteredHybridTimer("state/storage/commits", nil)
    74  
    75  	snapshotAccountReadTimer = metrics.NewRegisteredTimer("state/snapshot/account/reads", nil)
    76  	snapshotStorageReadTimer = metrics.NewRegisteredTimer("state/snapshot/storage/reads", nil)
    77  	snapshotCommitTimer      = metrics.NewRegisteredTimer("state/snapshot/commits", nil)
    78  
    79  	blockBaseFee        = metrics.NewRegisteredGauge("chain/basefee", nil)
    80  	blockInsertTimer    = klaytnmetrics.NewRegisteredHybridTimer("chain/inserts", nil)
    81  	blockProcessTimer   = klaytnmetrics.NewRegisteredHybridTimer("chain/process", nil)
    82  	blockExecutionTimer = klaytnmetrics.NewRegisteredHybridTimer("chain/execution", nil)
    83  	blockFinalizeTimer  = klaytnmetrics.NewRegisteredHybridTimer("chain/finalize", nil)
    84  	blockValidateTimer  = klaytnmetrics.NewRegisteredHybridTimer("chain/validate", nil)
    85  	blockAgeTimer       = klaytnmetrics.NewRegisteredHybridTimer("chain/age", nil)
    86  
    87  	blockPrefetchExecuteTimer   = klaytnmetrics.NewRegisteredHybridTimer("chain/prefetch/executes", nil)
    88  	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
    89  
    90  	ErrNoGenesis            = errors.New("genesis not found in chain")
    91  	ErrNotExistNode         = errors.New("the node does not exist in cached node")
    92  	ErrQuitBySignal         = errors.New("quit by signal")
    93  	ErrNotInWarmUp          = errors.New("not in warm up")
    94  	logger                  = log.NewModuleLogger(log.Blockchain)
    95  	kesCachePrefixBlockLogs = []byte("blockLogs")
    96  )
    97  
    98  // Below is the list of constants for cache sizes.
    99  // TODO-Klaytn: These should be handled by ini or other configurations.
   100  const (
   101  	maxFutureBlocks     = 256
   102  	maxTimeFutureBlocks = 30
   103  	// TODO-Klaytn-Issue1911  This flag needs to be adjusted to the appropriate value.
   104  	//  Currently, this value is chosen so that all 10 million accounts can be cached
   105  	//  and should be optimized considering memory size and performance.
   106  	maxAccountForCache = 10000000
   107  )
   108  
   109  const (
   110  	DefaultTriesInMemory        = 128
   111  	DefaultBlockInterval        = 128
   112  	DefaultLivePruningRetention = 172800 // 2*params.DefaultStakeUpdateInterval
   113  	MaxPrefetchTxs              = 20000
   114  
   115  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
   116  	// Changelog:
   117  	// - Version 4
   118  	// The following incompatible database changes were added:
   119  	//   * New scheme for contract code in order to separate the codes and trie nodes
   120  	BlockChainVersion = 4
   121  )
   122  
   123  // CacheConfig contains the configuration values for the 1) stateDB caching and
   124  // 2) trie caching/pruning resident in a blockchain.
   125  type CacheConfig struct {
   126  	// TODO-Klaytn-Issue1666 Need to check the benefit of trie caching.
   127  	ArchiveMode          bool                         // If true, state trie is not pruned and always written to database
   128  	CacheSize            int                          // Size of in-memory cache of a trie (MiB) to flush matured singleton trie nodes to disk
   129  	BlockInterval        uint                         // Block interval at which the trie is flushed. Every interval, the state trie will be flushed into disk
   130  	TriesInMemory        uint64                       // Maximum number of recent state tries, by block number, to keep in memory
   131  	LivePruningRetention uint64                       // Number of blocks to retain before trie nodes with pruning marks are deleted. If zero, obsolete nodes are never deleted.
   132  	SenderTxHashIndexing bool                         // Enables saving senderTxHash to txHash mapping information to database and cache
   133  	TrieNodeCacheConfig  *statedb.TrieNodeCacheConfig // Configures trie node cache
   134  	SnapshotCacheSize    int                          // Memory allowance (MB) to use for caching snapshot entries in memory
   135  	SnapshotAsyncGen     bool                         // Enables snapshot data generation asynchronously
   136  }
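
// Editor's illustration (not part of the original source): a CacheConfig for a
// pruning full node. The values mirror the defaults that NewBlockChain applies
// when a nil config is passed; treat them as a sketch, not a recommendation.
//
//	cacheConfig := &CacheConfig{
//		ArchiveMode:          false, // prune old state tries
//		CacheSize:            512,   // MiB of in-memory trie cache
//		BlockInterval:        DefaultBlockInterval,
//		TriesInMemory:        DefaultTriesInMemory,
//		LivePruningRetention: DefaultLivePruningRetention,
//		TrieNodeCacheConfig:  statedb.GetEmptyTrieNodeCacheConfig(),
//		SnapshotCacheSize:    512, // MB of snapshot cache
//		SnapshotAsyncGen:     true,
//	}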
   137  
   138  // gcBlock is used for priority queue for GC.
   139  type gcBlock struct {
   140  	root     common.Hash
   141  	blockNum uint64
   142  }
   143  
   144  // BlockChain represents the canonical chain given a database with a genesis
   145  // block. The BlockChain manages chain imports, reverts, and chain reorganisations.
   146  //
   147  // Importing blocks into the block chain happens according to the set of rules
   148  // defined by the two-stage Validator. Processing of blocks is done using the
   149  // Processor which processes the included transactions. The validation of the state
   150  // is done in the second part of the Validator. Failing results in aborting of
   151  // the import.
   152  //
   153  // The BlockChain also helps in returning blocks from **any** chain included
   154  // in the database as well as blocks that represent the canonical chain. It is
   155  // important to note that GetBlock can return any block, which need not be
   156  // included in the canonical chain, whereas GetBlockByNumber always refers to the
   157  // canonical chain.
   158  type BlockChain struct {
   159  	chainConfig *params.ChainConfig // Chain & network configuration
   160  	cacheConfig *CacheConfig        // stateDB caching and trie caching/pruning configuration
   161  
   162  	db      database.DBManager // Low level persistent database to store final content in
   163  	snaps   *snapshot.Tree     // Snapshot tree for fast trie leaf access
   164  	triegc  *prque.Prque       // Priority queue mapping block numbers to tries to gc
   165  	chBlock chan gcBlock       // chBlock is a channel for delivering the gc item to the gc loop.
   166  	chPrune chan uint64        // chPrune is a channel for delivering the current block number to the pruning loop.
   167  
   168  	hc            *HeaderChain
   169  	rmLogsFeed    event.Feed
   170  	chainFeed     event.Feed
   171  	chainSideFeed event.Feed
   172  	chainHeadFeed event.Feed
   173  	logsFeed      event.Feed
   174  	scope         event.SubscriptionScope
   175  	genesisBlock  *types.Block
   176  
   177  	mu sync.RWMutex // global mutex for locking chain operations
   178  
   179  	checkpoint       int          // checkpoint counts towards the new checkpoint
   180  	currentBlock     atomic.Value // Current head of the block chain
   181  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   182  
   183  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   184  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   185  
   186  	quit    chan struct{} // blockchain quit channel
   187  	running int32         // running must be accessed atomically
   188  	// procInterrupt must be accessed atomically
   189  	procInterrupt int32          // interrupt signaler for block processing
   190  	wg            sync.WaitGroup // chain processing wait group for shutting down
   191  
   192  	engine     consensus.Engine
   193  	processor  Processor  // block processor interface
   194  	prefetcher Prefetcher // Block state prefetcher interface
   195  	validator  Validator  // block and state validator interface
   196  	vmConfig   vm.Config
   197  
   198  	parallelDBWrite bool // TODO-Klaytn-Storage parallelDBWrite will be replaced by number of goroutines when worker pool pattern is introduced.
   199  
   200  	// State migration
   201  	prepareStateMigration bool
   202  	stopStateMigration    chan struct{}
   203  	readCnt               int
   204  	committedCnt          int
   205  	pendingCnt            int
   206  	progress              float64
   207  	migrationErr          error
   208  	testMigrationHook     func()
   209  
   210  	// Warm up
   211  	lastCommittedBlock uint64
   212  	quitWarmUp         chan struct{}
   213  
   214  	prefetchTxCh chan prefetchTx
   215  }
   216  
   217  // prefetchTx is used to prefetch transactions while the fetcher is working.
   218  type prefetchTx struct {
   219  	ti                int
   220  	block             *types.Block
   221  	followupInterrupt *uint32
   222  }
   223  
   224  // NewBlockChain returns a fully initialised block chain using information
   225  // available in the database. It initialises the default Klaytn validator and
   226  // Processor.
   227  func NewBlockChain(db database.DBManager, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
   228  	if cacheConfig == nil {
   229  		cacheConfig = &CacheConfig{
   230  			ArchiveMode:          false,
   231  			CacheSize:            512,
   232  			BlockInterval:        DefaultBlockInterval,
   233  			TriesInMemory:        DefaultTriesInMemory,
   234  			LivePruningRetention: DefaultLivePruningRetention,
   235  			TrieNodeCacheConfig:  statedb.GetEmptyTrieNodeCacheConfig(),
   236  			SnapshotCacheSize:    512,
   237  			SnapshotAsyncGen:     true,
   238  		}
   239  	}
   240  
   241  	if cacheConfig.TrieNodeCacheConfig == nil {
   242  		cacheConfig.TrieNodeCacheConfig = statedb.GetEmptyTrieNodeCacheConfig()
   243  	}
   244  
   245  	state.EnabledExpensive = db.GetDBConfig().EnableDBPerfMetrics
   246  
   247  	futureBlocks, _ := lru.New(maxFutureBlocks)
   248  
   249  	bc := &BlockChain{
   250  		chainConfig:        chainConfig,
   251  		cacheConfig:        cacheConfig,
   252  		db:                 db,
   253  		triegc:             prque.New(),
   254  		chBlock:            make(chan gcBlock, 2048), // downloader.maxResultsProcess
   255  		chPrune:            make(chan uint64, 2048),  // downloader.maxResultsProcess
   256  		stateCache:         state.NewDatabaseWithNewCache(db, cacheConfig.TrieNodeCacheConfig),
   257  		quit:               make(chan struct{}),
   258  		futureBlocks:       futureBlocks,
   259  		engine:             engine,
   260  		vmConfig:           vmConfig,
   261  		parallelDBWrite:    db.IsParallelDBWrite(),
   262  		stopStateMigration: make(chan struct{}),
   263  		prefetchTxCh:       make(chan prefetchTx, MaxPrefetchTxs),
   264  	}
   265  
   266  	// set hardForkBlockNumberConfig which will be used as a global variable
   267  	if err := fork.SetHardForkBlockNumberConfig(bc.chainConfig); err != nil {
   268  		return nil, err
   269  	}
   270  
   271  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   272  	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
   273  	bc.processor = NewStateProcessor(chainConfig, bc, engine)
   274  
   275  	var err error
   276  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   277  	if err != nil {
   278  		return nil, err
   279  	}
   280  	bc.genesisBlock = bc.GetBlockByNumber(0)
   281  	if bc.genesisBlock == nil {
   282  		return nil, ErrNoGenesis
   283  	}
   284  	var nilBlock *types.Block
   285  	bc.currentBlock.Store(nilBlock)
   286  	bc.currentFastBlock.Store(nilBlock)
   287  
   288  	if err := bc.loadLastState(); err != nil {
   289  		return nil, err
   290  	}
   291  	// Make sure the state associated with the block is available
   292  	head := bc.CurrentBlock()
   293  	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps, nil); err != nil {
   294  		// Head state is missing. Before the state recovery, find out the
   295  		// disk layer point of the snapshot (if it's enabled). Make sure the
   296  		// rewound point is lower than the disk layer.
   297  		var diskRoot common.Hash
   298  		if bc.cacheConfig.SnapshotCacheSize > 0 {
   299  			diskRoot = bc.db.ReadSnapshotRoot()
   300  		}
   301  		if diskRoot != (common.Hash{}) {
   302  			logger.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
   303  
   304  			snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
   305  			if err != nil {
   306  				return nil, err
   307  			}
   308  
   309  			// Chain rewound, persist old snapshot number to indicate recovery procedure
   310  			if snapDisk != 0 {
   311  				bc.db.WriteSnapshotRecoveryNumber(snapDisk)
   312  			}
   313  		} else {
   314  			// Dangling block without a state associated, init from scratch
   315  			logger.Warn("Head state missing, repairing chain",
   316  				"number", head.NumberU64(), "hash", head.Hash().String())
   317  			if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
   318  				return nil, err
   319  			}
   320  		}
   321  	}
   322  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   323  	for hash := range BadHashes {
   324  		if header := bc.GetHeaderByHash(hash); header != nil {
   325  			// get the canonical block corresponding to the offending header's number
   326  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   327  			// make sure the headerByNumber (if present) is in our current canonical chain
   328  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   329  				logger.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   330  				bc.SetHead(header.Number.Uint64() - 1)
   331  				logger.Error("Chain rewind was successful, resuming normal operation")
   332  			}
   333  		}
   334  	}
   335  
   336  	// Load any existing snapshot, regenerating it if loading failed
   337  	if bc.cacheConfig.SnapshotCacheSize > 0 {
   338  		// If the chain was rewound past the snapshot persistent layer (causing
   339  		// a recovery block number to be persisted to disk), check if we're still
   340  		// in recovery mode and in that case, don't invalidate the snapshot on a
   341  		// head mismatch.
   342  		var recover bool
   343  
   344  		head := bc.CurrentBlock()
   345  		if layer := bc.db.ReadSnapshotRecoveryNumber(); layer != nil && *layer > head.NumberU64() {
   346  			logger.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
   347  			recover = true
   348  		}
   349  		bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotCacheSize, head.Root(), bc.cacheConfig.SnapshotAsyncGen, true, recover)
   350  	}
   351  
   352  	for i := 1; i <= bc.cacheConfig.TrieNodeCacheConfig.NumFetcherPrefetchWorker; i++ {
   353  		bc.wg.Add(1)
   354  		go bc.prefetchTxWorker(i)
   355  	}
   356  	logger.Info("prefetchTxWorkers are started", "num", bc.cacheConfig.TrieNodeCacheConfig.NumFetcherPrefetchWorker)
   357  
   358  	// Take ownership of this particular state
   359  	go bc.update()
   360  	bc.gcCachedNodeLoop()
   361  	bc.pruneTrieNodeLoop()
   362  	bc.restartStateMigration()
   363  
   364  	if cacheConfig.TrieNodeCacheConfig.DumpPeriodically() {
   365  		logger.Info("LocalCache is used for trie node cache, start saving cache to file periodically",
   366  			"dir", bc.cacheConfig.TrieNodeCacheConfig.FastCacheFileDir,
   367  			"period", bc.cacheConfig.TrieNodeCacheConfig.FastCacheSavePeriod)
   368  		trieDB := bc.stateCache.TrieDB()
   369  		bc.wg.Add(1)
   370  		go func() {
   371  			defer bc.wg.Done()
   372  			trieDB.SaveCachePeriodically(bc.cacheConfig.TrieNodeCacheConfig, bc.quit)
   373  		}()
   374  	}
   375  
   376  	return bc, nil
   377  }
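
// Usage sketch (editor's addition): db, chainConfig and engine are assumed to
// be initialised elsewhere; a nil *CacheConfig selects the defaults shown at
// the top of NewBlockChain. Stop flushes cached state, so defer it.
//
//	bc, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{})
//	if err != nil {
//		return err
//	}
//	defer bc.Stop()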
   378  
   379  // prefetchTxWorker receives a block and a transaction index, which it pre-executes
   380  // to retrieve and cache the data for the actual block processing.
   381  func (bc *BlockChain) prefetchTxWorker(index int) {
   382  	defer bc.wg.Done()
   383  
   384  	logger.Debug("prefetchTxWorker is started", "index", index)
   385  	var snaps *snapshot.Tree
   386  	if bc.cacheConfig.TrieNodeCacheConfig.UseSnapshotForPrefetch {
   387  		snaps = bc.snaps
   388  	}
   389  	for followup := range bc.prefetchTxCh {
   390  		stateDB, err := state.New(bc.CurrentBlock().Root(), bc.stateCache, snaps,
   391  			&statedb.TrieOpts{Prefetching: true})
   392  		if err != nil {
   393  			logger.Debug("failed to retrieve stateDB for prefetchTxWorker", "err", err)
   394  			continue
   395  		}
   396  		vmCfg := bc.vmConfig
   397  		vmCfg.Prefetching = true
   398  		bc.prefetcher.PrefetchTx(followup.block, followup.ti, stateDB, vmCfg, followup.followupInterrupt)
   399  	}
   400  	logger.Debug("prefetchTxWorker is terminated", "index", index)
   401  }
   402  
   403  // SetCanonicalBlock resets the canonical chain head to the block with the given block number.
   404  // It works like rewinding the head block to an earlier one, but does not delete the data.
   405  func (bc *BlockChain) SetCanonicalBlock(blockNum uint64) {
   406  	// If the given block number is zero (it is zero by default), it does nothing
   407  	if blockNum == 0 {
   408  		return
   409  	}
   410  	// Read the block with the given block number and set it as canonical block
   411  	targetBlock := bc.db.ReadBlockByNumber(blockNum)
   412  	if targetBlock == nil {
   413  		logger.Error("failed to retrieve the block", "blockNum", blockNum)
   414  		return
   415  	}
   416  	bc.insert(targetBlock)
   417  	if err := bc.loadLastState(); err != nil {
   418  		logger.Error("failed to load last state after setting the canonical block", "err", err)
   419  		return
   420  	}
   421  	// Make sure the state associated with the block is available
   422  	head := bc.CurrentBlock()
   423  	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps, nil); err != nil {
   424  		// Dangling block without a state associated, init from scratch
   425  		logger.Warn("Head state missing, repairing chain",
   426  			"number", head.NumberU64(), "hash", head.Hash().String())
   427  		if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
   428  			logger.Error("Failed to repair the chain", "number", head.NumberU64(), "hash", head.Hash().String(), "err", err)
   429  			return
   430  		}
   431  	}
   432  	logger.Info("successfully set the canonical block", "blockNum", blockNum)
   433  }
   434  
   435  func (bc *BlockChain) UseGiniCoeff() bool {
   436  	return bc.chainConfig.Governance.Reward.UseGiniCoeff
   437  }
   438  
   439  func (bc *BlockChain) ProposerPolicy() uint64 {
   440  	return bc.chainConfig.Istanbul.ProposerPolicy
   441  }
   442  
   443  func (bc *BlockChain) getProcInterrupt() bool {
   444  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   445  }
   446  
   447  // loadLastState loads the last known chain state from the database. This method
   448  // assumes that the chain manager mutex is held.
   449  func (bc *BlockChain) loadLastState() error {
   450  	// Restore the last known head block
   451  	head := bc.db.ReadHeadBlockHash()
   452  	if head == (common.Hash{}) {
   453  		// Corrupt or empty database, init from scratch
   454  		logger.Info("Empty database, resetting chain")
   455  		return bc.Reset()
   456  	}
   457  	// Make sure the entire head block is available
   458  	currentBlock := bc.GetBlockByHash(head)
   459  	if currentBlock == nil {
   460  		head = bc.db.ReadHeadBlockBackupHash()
   461  		if head == (common.Hash{}) {
   462  			// Corrupt or empty database, init from scratch
   463  			logger.Info("Empty database, resetting chain")
   464  			return bc.Reset()
   465  		}
   466  
   467  		currentBlock = bc.GetBlockByHash(head)
   468  		if currentBlock == nil {
   469  			// Corrupt or empty database, init from scratch
   470  			logger.Error("Head block missing, resetting chain", "hash", head.String())
   471  			return bc.Reset()
   472  		}
   473  	}
   474  	// Everything seems to be fine, set as the head block
   475  	bc.currentBlock.Store(currentBlock)
   476  	bc.lastCommittedBlock = currentBlock.NumberU64()
   477  
   478  	// Restore the last known head header
   479  	currentHeader := currentBlock.Header()
   480  	if head := bc.db.ReadHeadHeaderHash(); head != (common.Hash{}) {
   481  		if header := bc.GetHeaderByHash(head); header != nil {
   482  			currentHeader = header
   483  		}
   484  	}
   485  	bc.hc.SetCurrentHeader(currentHeader)
   486  
   487  	// Restore the last known head fast block
   488  	bc.currentFastBlock.Store(currentBlock)
   489  	if head := bc.db.ReadHeadFastBlockHash(); head != (common.Hash{}) {
   490  		if block := bc.GetBlockByHash(head); block != nil {
   491  			bc.currentFastBlock.Store(block)
   492  		} else if head := bc.db.ReadHeadFastBlockBackupHash(); head != (common.Hash{}) {
   493  			if block := bc.GetBlockByHash(head); block != nil {
   494  				bc.currentFastBlock.Store(block)
   495  			}
   496  		}
   497  	}
   498  
   499  	// Issue a status log for the user
   500  	currentFastBlock := bc.CurrentFastBlock()
   501  
   502  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   503  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   504  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   505  
   506  	logger.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time.Uint64()), 0)))
   507  	logger.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time.Uint64()), 0)))
   508  	logger.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time.Uint64()), 0)))
   509  
   510  	return nil
   511  }
   512  
   513  // SetHead rewinds the local chain to a new head, the given block number.
   514  // The method will try to delete minimal data from disk whilst retaining
   515  // chain consistency.
   516  func (bc *BlockChain) SetHead(head uint64) error {
   517  	// With the live pruning enabled, an attempt to SetHead into a state-pruned block number
   518  	// may result in an infinite loop, trying to find the existing block (probably the genesis block).
   519  	// If the target `head` is below the surviving block numbers, SetHead early exits with an error.
   520  	if lastPruned, err := bc.db.ReadLastPrunedBlockNumber(); err == nil {
   521  		if head <= lastPruned {
   522  			return fmt.Errorf("[SetHead] Cannot rewind to a state-pruned block number. lastPrunedBlock=%d targetHead=%d",
   523  				lastPruned, head)
   524  		}
   525  	}
   526  	_, err := bc.setHeadBeyondRoot(head, common.Hash{}, false)
   527  	return err
   528  }
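
// Usage sketch (editor's addition): rewinding to a hypothetical block number.
// With live pruning enabled, the guard above makes SetHead return an error for
// targets at or below the last state-pruned block instead of looping forever.
//
//	if err := bc.SetHead(1000000); err != nil {
//		logger.Error("rewind refused", "err", err)
//	}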
   529  
   530  // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
   531  // that the rewind must pass the specified state root. This method is meant to be
   532  // used when rewinding with snapshots enabled to ensure that we go back further than
   533  // persistent disk layer. Depending on whether the node was fast synced or full, and
   534  // in which state, the method will try to delete minimal data from disk whilst
   535  // retaining chain consistency.
   536  //
   537  // The method returns the block number where the requested root cap was found.
   538  func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
   539  	bc.mu.Lock()
   540  	defer bc.mu.Unlock()
   541  
   542  	// Track the block number of the requested root hash
   543  	var rootNumber uint64 // (no root == always 0)
   544  
   545  	originLatestBlkNum := bc.CurrentBlock().Number().Uint64()
   546  
   547  	updateFn := func(header *types.Header) (uint64, error) {
   548  		// Rewind the block chain, ensuring we don't end up with a stateless head block
   549  		if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
   550  			newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
   551  			if newHeadBlock == nil {
   552  				logger.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
   553  				newHeadBlock = bc.genesisBlock
   554  			} else {
   555  				// Block exists. Keep rewinding until we find one with state,
   556  				// and keep rewinding until we cross the optional threshold
   557  				// root hash.
   558  				beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
   559  
   560  				for {
   561  					// If a root threshold was requested but not yet crossed, check
   562  					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
   563  						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
   564  					}
   565  					if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps, nil); err != nil {
   566  						// Rewound state missing, rolled back to the parent block, reset to genesis
   567  						logger.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
   568  						parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
   569  						if parent != nil {
   570  							newHeadBlock = parent
   571  							continue
   572  						}
   573  						logger.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
   574  						newHeadBlock = bc.genesisBlock
   575  					}
   576  					if beyondRoot || newHeadBlock.NumberU64() == 0 {
   577  						logger.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash().String())
   578  						break
   579  					}
   580  					// newHeadBlock has state, but the requested root has not been crossed yet; keep rewinding
   581  					logger.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash().String(), "root", newHeadBlock.Root().String())
   582  					newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
   583  				}
   584  			}
   585  			if newHeadBlock.NumberU64() == 0 {
   586  				return 0, errors.New("rewound to block number 0, but repair failed")
   587  			}
   588  			bc.db.WriteHeadBlockHash(newHeadBlock.Hash())
   589  
   590  			// Degrade the chain markers if they are explicitly reverted.
   591  			// In theory we should update all in-memory markers in the
   592  			// last step, however the direction of SetHead is from high
   593  			// to low, so it's safe to update the in-memory markers directly.
   594  			bc.currentBlock.Store(newHeadBlock)
   595  			headBlockNumberGauge.Update(int64(newHeadBlock.NumberU64()))
   596  		}
   597  
   598  		// Rewind the fast block in a simpleton way to the target head
   599  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
   600  			newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
   601  			// If the rewound block reached nil, reset to the genesis state
   602  			if newHeadFastBlock == nil {
   603  				newHeadFastBlock = bc.genesisBlock
   604  			}
   605  			bc.db.WriteHeadFastBlockHash(newHeadFastBlock.Hash())
   606  
   607  			// Degrade the chain markers if they are explicitly reverted.
   608  			// In theory we should update all in-memory markers in the
   609  			// last step, however the direction of SetHead is from high
   610  			// to low, so it's safe to update the in-memory markers directly.
   611  			bc.currentFastBlock.Store(newHeadFastBlock)
   612  		}
   613  		return bc.CurrentBlock().Number().Uint64(), nil
   614  	}
   615  
   616  	// Rewind the header chain, deleting all block bodies until then
   617  	delFn := func(hash common.Hash, num uint64) {
   618  		// Remove relative body, receipts, header-governance database,
   619  		// istanbul snapshot database, and staking info database from the active store.
   620  		// The header, total difficulty and canonical hash will be
   621  		// removed in the hc.SetHead function.
   622  		bc.db.DeleteBody(hash, num)
   623  		bc.db.DeleteReceipts(hash, num)
   624  		bc.db.DeleteGovernance(num)
   625  		if params.IsCheckpointInterval(num) {
   626  			bc.db.DeleteIstanbulSnapshot(hash)
   627  		}
   628  		if bc.Config().Istanbul.ProposerPolicy == params.WeightedRandom && params.IsStakingUpdateInterval(num) {
   629  			bc.db.DeleteStakingInfo(num)
   630  		}
   631  	}
   632  
   633  	// If SetHead was only called as a chain reparation method, try to skip
   634  	// touching the header chain altogether
   635  	if repair {
   636  		if _, err := updateFn(bc.CurrentBlock().Header()); err != nil {
   637  			return 0, err
   638  		}
   639  	} else {
   640  		// Rewind the chain to the requested head and keep going backwards until a
   641  		// block with a state is found
   642  		logger.Warn("Rewinding blockchain", "target", head)
   643  		if err := bc.hc.SetHead(head, updateFn, delFn); err != nil {
   644  			return 0, err
   645  		}
   646  
   647  		// Delete the Istanbul snapshot database back two further epochs.
   648  		// Invoked only if the sethead originated from an explicit API call.
   649  		var (
   650  			curBlkNum   = bc.CurrentBlock().Number().Uint64()
   651  			epoch       = bc.Config().Istanbul.Epoch
   652  			votingEpoch = curBlkNum - (curBlkNum % epoch)
   653  		)
   654  		if votingEpoch == 0 {
   655  			votingEpoch = 1
   656  		}
   657  		// Delete the snapshot states from the current block number down to the previous epoch boundary
   658  		for i := curBlkNum; i >= votingEpoch; i-- {
   659  			if params.IsCheckpointInterval(i) {
   660  				// delete from the sethead number down to the previous two epochs' block numbers
   661  				// to handle a block that contains non-empty vote data, making sure
   662  				// that `HandleGovernanceVote()` cannot be skipped
   663  				bc.db.DeleteIstanbulSnapshot(bc.GetBlockByNumber(i).Hash())
   664  			}
   665  		}
   666  		logger.Trace("[SetHead] Snapshot database deleted", "from", originLatestBlkNum, "to", votingEpoch)
   667  	}
   668  
   669  	// Clear out any stale content from the caches
   670  	bc.futureBlocks.Purge()
   671  	bc.db.ClearBlockChainCache()
   672  
   673  	return rootNumber, bc.loadLastState()
   674  }
   675  
   676  // FastSyncCommitHead sets the current head block to the one defined by the hash
   677  // regardless of what the chain contents were prior.
   678  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   679  	// Make sure that both the block and its state trie exist
   680  	block := bc.GetBlockByHash(hash)
   681  	if block == nil {
   682  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   683  	}
   684  	if _, err := statedb.NewSecureTrie(block.Root(), bc.stateCache.TrieDB(), nil); err != nil {
   685  		return err
   686  	}
   687  	// If all checks out, manually set the head block
   688  	bc.mu.Lock()
   689  	bc.currentBlock.Store(block)
   690  	bc.lastCommittedBlock = block.NumberU64()
   691  	bc.mu.Unlock()
   692  
   693  	// Destroy any existing state snapshot and regenerate it in the background,
   694  	// also resuming the normal maintenance of any previously paused snapshot.
   695  	if bc.snaps != nil {
   696  		bc.snaps.Rebuild(block.Root())
   697  	}
   698  	logger.Info("Committed new head block", "number", block.Number(), "hash", hash)
   699  	return nil
   700  }
   701  
   702  // CurrentBlock retrieves the current head block of the canonical chain. The
   703  // block is retrieved from the blockchain's internal cache.
   704  func (bc *BlockChain) CurrentBlock() *types.Block {
   705  	return bc.currentBlock.Load().(*types.Block)
   706  }
   707  
   708  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   709  // chain. The block is retrieved from the blockchain's internal cache.
   710  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   711  	return bc.currentFastBlock.Load().(*types.Block)
   712  }
   713  
   714  // Validator returns the current validator.
   715  func (bc *BlockChain) Validator() Validator {
   716  	return bc.validator
   717  }
   718  
   719  // Processor returns the current processor.
   720  func (bc *BlockChain) Processor() Processor {
   721  	return bc.processor
   722  }
   723  
   724  // State returns a new mutable state based on the current HEAD block.
   725  func (bc *BlockChain) State() (*state.StateDB, error) {
   726  	return bc.StateAt(bc.CurrentBlock().Root())
   727  }
   728  
   729  // StateAt returns a new mutable state based on a particular point in time.
   730  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   731  	return state.New(root, bc.stateCache, bc.snaps, nil)
   732  }
   733  
   734  // PrunableStateAt returns a new mutable state based on a particular point in time.
   735  // If live pruning is enabled on the database, and num is nonzero, the trie will mark obsolete nodes for pruning.
   736  func (bc *BlockChain) PrunableStateAt(root common.Hash, num uint64) (*state.StateDB, error) {
   737  	if bc.IsLivePruningRequired() {
   738  		return state.New(root, bc.stateCache, bc.snaps, &statedb.TrieOpts{
   739  			PruningBlockNumber: num,
   740  		})
   741  	} else {
   742  		return bc.StateAt(root)
   743  	}
   744  }
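
// Usage sketch (editor's addition): opening the state at a given block and
// reading an account balance. num and addr are assumed inputs; GetBalance is
// provided by state.StateDB.
//
//	header := bc.GetHeaderByNumber(num) // assumed non-nil here
//	stateDB, err := bc.PrunableStateAt(header.Root, header.Number.Uint64())
//	if err != nil {
//		return err
//	}
//	balance := stateDB.GetBalance(addr)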
   745  
   746  // StateAtWithPersistent returns a new mutable state based on a particular point in time with persistent trie nodes.
   747  func (bc *BlockChain) StateAtWithPersistent(root common.Hash) (*state.StateDB, error) {
   748  	exist := bc.stateCache.TrieDB().DoesExistNodeInPersistent(root.ExtendZero())
   749  	if !exist {
   750  		return nil, ErrNotExistNode
   751  	}
   752  	return state.New(root, bc.stateCache, bc.snaps, nil)
   753  }
   754  
   755  // StateAtWithGCLock returns a new mutable state based on a particular point in time, holding a read lock on the cached state nodes.
   756  func (bc *BlockChain) StateAtWithGCLock(root common.Hash) (*state.StateDB, error) {
   757  	bc.RLockGCCachedNode()
   758  
   759  	exist := bc.stateCache.TrieDB().DoesExistCachedNode(root.ExtendZero())
   760  	if !exist {
   761  		bc.RUnlockGCCachedNode()
   762  		return nil, ErrNotExistNode
   763  	}
   764  
   765  	stateDB, err := state.New(root, bc.stateCache, bc.snaps, nil)
   766  	if err != nil {
   767  		bc.RUnlockGCCachedNode()
   768  		return nil, err
   769  	}
   770  
   771  	return stateDB, nil
   772  }
   773  
   774  // StateCache returns the caching database underpinning the blockchain instance.
   775  func (bc *BlockChain) StateCache() state.Database {
   776  	return bc.stateCache
   777  }
   778  
   779  // Reset purges the entire blockchain, restoring it to its genesis state.
   780  func (bc *BlockChain) Reset() error {
   781  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   782  }
   783  
   784  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   785  // specified genesis state.
   786  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   787  	// Dump the entire block chain and purge the caches
   788  	if err := bc.SetHead(0); err != nil {
   789  		return err
   790  	}
   791  	bc.mu.Lock()
   792  	defer bc.mu.Unlock()
   793  
   794  	// Prepare the genesis block and reinitialise the chain
   795  	bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.BlockScore())
   796  	bc.db.WriteBlock(genesis)
   797  
   798  	bc.genesisBlock = genesis
   799  	bc.insert(bc.genesisBlock)
   800  	bc.currentBlock.Store(bc.genesisBlock)
   801  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   802  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   803  	bc.currentFastBlock.Store(bc.genesisBlock)
   804  
   805  	return nil
   806  }
   807  
   808  // repair tries to repair the current blockchain by rolling back the current block
   809  // until one with associated state is found. This is needed to fix incomplete db
   810  // writes caused either by crashes/power outages, or simply non-committed tries.
   811  //
   812  // This method only rolls back the current block. The current header and current
   813  // fast block are left intact.
   814  // Deprecated: to repair the chain, use the SetHead or setHeadBeyondRoot methods instead.
   815  func (bc *BlockChain) repair(head **types.Block) error {
   816  	for {
   817  		// Abort if we've rewound to a head block that does have associated state
   818  		if _, err := state.New((*head).Root(), bc.stateCache, bc.snaps, nil); err == nil {
   819  			logger.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   820  			return nil
   821  		} else {
   822  			// Should abort and return error, otherwise it will fall into infinite loop
   823  			if (*head).NumberU64() == 0 {
   824  				return errors.New("rewound to block number 0, but repair failed")
   825  			} else {
   826  				// If headBlockNumber > 0, rewind one block and recheck state availability there
   827  				block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   828  				if block == nil {
   829  					return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
   830  				}
   831  				*head = block
   832  			}
   833  		}
   834  	}
   835  }
   836  
   837  // Export writes the active chain to the given writer.
   838  func (bc *BlockChain) Export(w io.Writer) error {
   839  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   840  }
   841  
   842  // ExportN writes a subset of the active chain to the given writer.
   843  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   844  	bc.mu.RLock()
   845  	defer bc.mu.RUnlock()
   846  
   847  	if first > last {
   848  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   849  	}
   850  	logger.Info("Exporting batch of blocks", "count", last-first+1)
   851  
   852  	start, reported := time.Now(), time.Now()
   853  	for nr := first; nr <= last; nr++ {
   854  		block := bc.GetBlockByNumber(nr)
   855  		if block == nil {
   856  			return fmt.Errorf("export failed on #%d: not found", nr)
   857  		}
   858  		if err := block.EncodeRLP(w); err != nil {
   859  			return err
   860  		}
   861  		if time.Since(reported) >= log.StatsReportLimit {
   862  			logger.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   863  			reported = time.Now()
   864  		}
   865  	}
   866  
   867  	return nil
   868  }
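
// Usage sketch (editor's addition): streaming the first thousand blocks to a
// file as consecutive RLP-encoded blocks. The file path is hypothetical.
//
//	f, err := os.Create("chain-0-1000.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := bc.ExportN(f, 0, 1000); err != nil {
//		return err
//	}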
   869  
   870  // insert injects a new head block into the current block chain. This method
   871  // assumes that the block is indeed a true head. It will also reset the head
   872  // header and the head fast sync block to this very same block if they are older
   873  // or if they are on a different side chain.
   874  //
   875  // Note, this function assumes that the `mu` mutex is held!
   876  func (bc *BlockChain) insert(block *types.Block) {
   877  	// If the block is on a side chain or an unknown one, force other heads onto it too
   878  	updateHeads := bc.db.ReadCanonicalHash(block.NumberU64()) != block.Hash()
   879  
   880  	// Add the block to the canonical chain number scheme and mark as the head
   881  	bc.db.WriteCanonicalHash(block.Hash(), block.NumberU64())
   882  	bc.db.WriteHeadBlockHash(block.Hash())
   883  
   884  	bc.currentBlock.Store(block)
   885  
   886  	// If the block is better than our head or is on a different chain, force update heads
   887  	if updateHeads {
   888  		bc.hc.SetCurrentHeader(block.Header())
   889  		bc.db.WriteHeadFastBlockHash(block.Hash())
   890  
   891  		bc.currentFastBlock.Store(block)
   892  	}
   893  }
   894  
   895  // Genesis retrieves the chain's genesis block.
   896  func (bc *BlockChain) Genesis() *types.Block {
   897  	return bc.genesisBlock
   898  }
   899  
   900  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   901  // caching it if found.
   902  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   903  	return bc.db.ReadBodyRLPByHash(hash)
   904  }
   905  
   906  // HasBlock checks if a block is fully present in the database or not.
   907  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   908  	return bc.db.HasBlock(hash, number)
   909  }
   910  
   911  // HasState checks if state trie is fully present in the database or not.
   912  func (bc *BlockChain) HasState(hash common.Hash) bool {
   913  	_, err := bc.stateCache.OpenTrie(hash, nil)
   914  	return err == nil
   915  }
   916  
   917  // HasBlockAndState checks if a block and its associated state trie are fully
   918  // present in the database or not, caching them if present.
   919  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   920  	// Check first that the block itself is known
   921  	block := bc.GetBlock(hash, number)
   922  	if block == nil {
   923  		return false
   924  	}
   925  	return bc.HasState(block.Root())
   926  }
   927  
   928  // ShouldTryInserting returns whether the given block should be inserted.
   929  // If the node doesn't have the given block, or the block's number is higher than the node's head block, the node can try inserting the block.
   930  func (bc *BlockChain) ShouldTryInserting(block *types.Block) bool {
   931  	return !bc.HasBlockAndState(block.Hash(), block.NumberU64()) || bc.CurrentBlock().NumberU64() < block.NumberU64()
   932  }
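
// Usage sketch (editor's addition): a typical guard before attempting an
// insert, e.g. from a block fetcher.
//
//	if bc.ShouldTryInserting(block) {
//		if _, err := bc.InsertChain(types.Blocks{block}); err != nil {
//			logger.Error("failed to insert block", "hash", block.Hash(), "err", err)
//		}
//	}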
   933  
   934  // GetBlock retrieves a block from the database by hash and number,
   935  // caching it if found.
   936  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   937  	return bc.db.ReadBlock(hash, number)
   938  }
   939  
   940  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   941  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   942  	return bc.db.ReadBlockByHash(hash)
   943  }
   944  
   945  // GetBlockNumber retrieves a blockNumber from the database by hash, caching it if found.
   946  func (bc *BlockChain) GetBlockNumber(hash common.Hash) *uint64 {
   947  	return bc.hc.GetBlockNumber(hash)
   948  }
   949  
   950  // GetBlockByNumber retrieves a block from the database by number, caching it
   951  // (associated with its hash) if found.
   952  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   953  	return bc.db.ReadBlockByNumber(number)
   954  }
   955  
   956  // GetTxAndLookupInfo retrieves a tx and lookup info for a given transaction hash.
   957  func (bc *BlockChain) GetTxAndLookupInfo(txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
   958  	tx, blockHash, blockNumber, index := bc.GetTxAndLookupInfoInCache(txHash)
   959  	if tx == nil {
   960  		tx, blockHash, blockNumber, index = bc.db.ReadTxAndLookupInfo(txHash)
   961  	}
   962  	return tx, blockHash, blockNumber, index
   963  }
   964  
   965  // GetTxLookupInfoAndReceipt retrieves a tx with its lookup info and receipt for a given transaction hash.
   966  func (bc *BlockChain) GetTxLookupInfoAndReceipt(txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, *types.Receipt) {
   967  	tx, blockHash, blockNumber, index := bc.GetTxAndLookupInfo(txHash)
   968  	if tx == nil {
   969  		return nil, common.Hash{}, 0, 0, nil
   970  	}
   971  
   972  	receipt := bc.GetReceiptByTxHash(txHash)
   973  	if receipt == nil {
   974  		return nil, common.Hash{}, 0, 0, nil
   975  	}
   976  
   977  	return tx, blockHash, blockNumber, index, receipt
   978  }
   979  
   980  // GetTxLookupInfoAndReceiptInCache retrieves a tx with its lookup info and receipt for a given transaction hash from the cache.
   981  func (bc *BlockChain) GetTxLookupInfoAndReceiptInCache(txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, *types.Receipt) {
   982  	tx, blockHash, blockNumber, index := bc.GetTxAndLookupInfoInCache(txHash)
   983  	if tx == nil {
   984  		return nil, common.Hash{}, 0, 0, nil
   985  	}
   986  
   987  	receipt := bc.GetTxReceiptInCache(txHash)
   988  	if receipt == nil {
   989  		return nil, common.Hash{}, 0, 0, nil
   990  	}
   991  
   992  	return tx, blockHash, blockNumber, index, receipt
   993  }
   994  
   995  // GetReceiptsByBlockHash retrieves the receipts for all transactions with given block hash.
   996  func (bc *BlockChain) GetReceiptsByBlockHash(blockHash common.Hash) types.Receipts {
   997  	return bc.db.ReadReceiptsByBlockHash(blockHash)
   998  }
   999  
  1000  // GetReceiptByTxHash retrieves a receipt for a given transaction hash.
  1001  func (bc *BlockChain) GetReceiptByTxHash(txHash common.Hash) *types.Receipt {
  1002  	receipt := bc.GetTxReceiptInCache(txHash)
  1003  	if receipt != nil {
  1004  		return receipt
  1005  	}
  1006  
  1007  	tx, blockHash, _, index := bc.GetTxAndLookupInfo(txHash)
  1008  	if tx == nil {
  1009  		return nil
  1010  	}
  1011  
  1012  	receipts := bc.GetReceiptsByBlockHash(blockHash)
  1013  	if len(receipts) <= int(index) {
  1014  		logger.Error("receipt index exceeds the size of receipts", "receiptIndex", index, "receiptsSize", len(receipts))
  1015  		return nil
  1016  	}
  1017  	return receipts[index]
  1018  }
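
// Usage sketch (editor's addition): checking the outcome of a mined
// transaction; types.ReceiptStatusSuccessful is assumed to be the success
// constant defined in the types package.
//
//	if receipt := bc.GetReceiptByTxHash(txHash); receipt == nil {
//		logger.Warn("transaction not found", "tx", txHash.String())
//	} else if receipt.Status != types.ReceiptStatusSuccessful {
//		logger.Warn("transaction did not succeed", "tx", txHash.String(), "status", receipt.Status)
//	}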
  1019  
  1020  // GetLogsByHash retrieves the logs for all receipts in a given block.
  1021  func (bc *BlockChain) GetLogsByHash(hash common.Hash) [][]*types.Log {
  1022  	receipts := bc.GetReceiptsByBlockHash(hash)
  1023  	if receipts == nil {
  1024  		return nil
  1025  	}
  1026  
  1027  	logs := make([][]*types.Log, len(receipts))
  1028  	for i, receipt := range receipts {
  1029  		logs[i] = receipt.Logs
  1030  	}
  1031  	return logs
  1032  }
  1033  
  1034  // TrieNode retrieves a blob of data associated with a trie node
  1035  // either from ephemeral in-memory cache, or from persistent storage.
  1036  // It cannot retrieve nodes keyed with an ExtHash.
  1037  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
  1038  	return bc.stateCache.TrieDB().Node(hash.ExtendZero())
  1039  }
  1040  
  1041  // ContractCode retrieves a blob of data associated with a contract hash
  1042  // either from ephemeral in-memory cache, or from persistent storage.
  1043  func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
  1044  	return bc.stateCache.ContractCode(hash)
  1045  }
  1046  
  1047  // ContractCodeWithPrefix retrieves a blob of data associated with a contract
  1048  // hash either from ephemeral in-memory cache, or from persistent storage.
  1049  //
  1050  // If the code doesn't exist in the in-memory cache, check the storage with
  1051  // new code scheme.
  1052  func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
  1053  	type codeReader interface {
  1054  		ContractCodeWithPrefix(codeHash common.Hash) ([]byte, error)
  1055  	}
  1056  	return bc.stateCache.(codeReader).ContractCodeWithPrefix(hash)
  1057  }
  1058  
  1059  // Stop stops the blockchain service. If any imports are currently in progress
  1060  // it will abort them using the procInterrupt.
  1061  func (bc *BlockChain) Stop() {
  1062  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
  1063  		return
  1064  	}
  1065  	// Unsubscribe all subscriptions registered from blockchain
  1066  	bc.scope.Close()
  1067  	if bc.cacheConfig.TrieNodeCacheConfig.RedisSubscribeBlockEnable {
  1068  		bc.CloseBlockSubscriptionLoop()
  1069  	}
  1070  
  1071  	close(bc.prefetchTxCh)
  1072  	close(bc.quit)
  1073  	atomic.StoreInt32(&bc.procInterrupt, 1)
  1074  
  1075  	bc.wg.Wait()
  1076  
  1077  	// Ensure that the entirety of the state snapshot is journalled to disk.
  1078  	var snapBase common.Hash
  1079  	if bc.snaps != nil {
  1080  		var err error
  1081  		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
  1082  			logger.Error("Failed to journal state snapshot", "err", err)
  1083  		}
  1084  	}
  1085  
  1086  	triedb := bc.stateCache.TrieDB()
  1087  	if !bc.isArchiveMode() {
  1088  		number := bc.CurrentBlock().NumberU64()
  1089  		recent := bc.GetBlockByNumber(number)
  1090  		if recent == nil {
  1091  			logger.Error("Failed to find recent block from persistent", "blockNumber", number)
  1092  			return
  1093  		}
  1094  
  1095  		logger.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
  1096  		if err := triedb.Commit(recent.Root(), true, number); err != nil {
  1097  			logger.Error("Failed to commit recent state trie", "err", err)
  1098  		}
  1099  		if snapBase != (common.Hash{}) {
  1100  			logger.Info("Writing snapshot state to disk", "root", snapBase)
  1101  			if err := triedb.Commit(snapBase, true, number); err != nil {
  1102  				logger.Error("Failed to commit recent state trie", "err", err)
  1103  			}
  1104  		}
  1105  		for !bc.triegc.Empty() {
  1106  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
  1107  		}
  1108  		if size, _, _ := triedb.Size(); size != 0 {
  1109  			logger.Error("Dangling trie nodes after full cleanup")
  1110  		}
  1111  	}
  1112  	if triedb.TrieNodeCache() != nil {
  1113  		_ = triedb.TrieNodeCache().Close()
  1114  	}
  1115  
  1116  	if bc.vmConfig.EnableOpDebug {
  1117  		vm.PrintOpCodeExecTime()
  1118  	}
  1119  
  1120  	logger.Info("Blockchain manager stopped")
  1121  }
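
// Usage sketch (editor's addition): Stop is guarded by an atomic
// compare-and-swap on bc.running, so it is safe to call from several shutdown
// paths; only the first call performs the teardown. shutdownSignal below is a
// hypothetical channel closed by an OS signal handler.
//
//	go func() {
//		<-shutdownSignal
//		bc.Stop()
//	}()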
  1122  
  1123  func (bc *BlockChain) procFutureBlocks() {
  1124  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
  1125  	for _, hash := range bc.futureBlocks.Keys() {
  1126  		hashKey, ok := hash.(common.CacheKey)
  1127  		if !ok {
  1128  			logger.Error("invalid key type", "expect", "common.CacheKey", "actual", reflect.TypeOf(hash))
  1129  			continue
  1130  		}
  1131  
  1132  		if block, exist := bc.futureBlocks.Peek(hashKey); exist {
  1133  			cacheGetFutureBlockHitMeter.Mark(1)
  1134  			blocks = append(blocks, block.(*types.Block))
  1135  		} else {
  1136  			cacheGetFutureBlockMissMeter.Mark(1)
  1137  		}
  1138  	}
  1139  	if len(blocks) > 0 {
  1140  		types.BlockBy(types.Number).Sort(blocks)
  1141  
  1142  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
  1143  		for i := range blocks {
  1144  			bc.InsertChain(blocks[i : i+1])
  1145  		}
  1146  	}
  1147  }
  1148  
  1149  // WriteStatus is the status of a block write.
  1150  type WriteStatus byte
  1151  
  1152  // TODO-Klaytn-Issue264 If we are using Istanbul BFT, then we always have a
  1153  // canonical chain.
  1154  // Later we may be able to remove SideStatTy.
  1155  const (
  1156  	NonStatTy WriteStatus = iota
  1157  	CanonStatTy
  1158  	SideStatTy
  1159  )
  1160  
  1161  // WriteResult includes the block write status and related statistics.
  1162  type WriteResult struct {
  1163  	Status         WriteStatus
  1164  	TotalWriteTime time.Duration
  1165  	TrieWriteTime  time.Duration
  1166  }
  1167  
  1168  // Rollback is designed to remove a chain of links from the database that aren't
  1169  // certain enough to be valid.
  1170  func (bc *BlockChain) Rollback(chain []common.Hash) {
  1171  	bc.mu.Lock()
  1172  	defer bc.mu.Unlock()
  1173  
  1174  	for i := len(chain) - 1; i >= 0; i-- {
  1175  		hash := chain[i]
  1176  
  1177  		currentHeader := bc.CurrentHeader()
  1178  		if currentHeader.Hash() == hash {
  1179  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
  1180  		}
  1181  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
  1182  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
  1183  			bc.currentFastBlock.Store(newFastBlock)
  1184  			bc.db.WriteHeadFastBlockHash(newFastBlock.Hash())
  1185  		}
  1186  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
  1187  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
  1188  			bc.currentBlock.Store(newBlock)
  1189  			bc.db.WriteHeadBlockHash(newBlock.Hash())
  1190  		}
  1191  	}
  1192  }
  1193  
  1194  // SetReceiptsData computes all the non-consensus fields of the receipts
  1195  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
  1196  	signer := types.MakeSigner(config, block.Number())
  1197  
  1198  	transactions, logIndex := block.Transactions(), uint(0)
  1199  	if len(transactions) != len(receipts) {
  1200  		return errors.New("transaction and receipt count mismatch")
  1201  	}
  1202  
  1203  	for j := 0; j < len(receipts); j++ {
  1204  		// The transaction hash can be retrieved from the transaction itself
  1205  		receipts[j].TxHash = transactions[j].Hash()
  1206  
  1207  		// The contract address can be derived from the transaction itself
  1208  		if transactions[j].To() == nil {
  1209  			// Deriving the signer is expensive, only do if it's actually needed
  1210  			from, _ := types.Sender(signer, transactions[j])
  1211  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
  1212  		}
  1213  		// The derived log fields can simply be set from the block and transaction
  1214  		for k := 0; k < len(receipts[j].Logs); k++ {
  1215  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
  1216  			receipts[j].Logs[k].BlockHash = block.Hash()
  1217  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
  1218  			receipts[j].Logs[k].TxIndex = uint(j)
  1219  			receipts[j].Logs[k].Index = logIndex
  1220  			logIndex++
  1221  		}
  1222  	}
  1223  	return nil
  1224  }
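
        // Illustrative sketch (not part of the original source): the derived fields
        // above are not persisted, so a caller that loads raw receipts from the
        // database can recompute them. blockHash, blockNumber and block are
        // hypothetical variables.
        //
        //	receipts := bc.db.ReadReceipts(blockHash, blockNumber)
        //	if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
        //		logger.Error("failed to derive receipt fields", "err", err)
        //	}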
  1225  
  1226  // InsertReceiptChain attempts to complete an already existing header chain with
  1227  // transaction and receipt data.
  1228  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
  1229  	bc.wg.Add(1)
  1230  	defer bc.wg.Done()
  1231  
  1232  	// Do a sanity check that the provided chain is actually ordered and linked
  1233  	for i := 1; i < len(blockChain); i++ {
  1234  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
  1235  			logger.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
  1236  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
  1237  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
  1238  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
  1239  		}
  1240  	}
  1241  
  1242  	var (
  1243  		stats = struct{ processed, ignored int32 }{}
  1244  		start = time.Now()
  1245  		bytes = 0
  1246  
  1247  		// TODO-Klaytn Needs to roll back if any one of batches fails
  1248  		bodyBatch            = bc.db.NewBatch(database.BodyDB)
  1249  		receiptsBatch        = bc.db.NewBatch(database.ReceiptsDB)
  1250  		txLookupEntriesBatch = bc.db.NewBatch(database.TxLookUpEntryDB)
  1251  	)
  1252  
  1253  	defer bodyBatch.Release()
  1254  	defer receiptsBatch.Release()
  1255  	defer txLookupEntriesBatch.Release()
  1256  
  1257  	for i, block := range blockChain {
  1258  		receipts := receiptChain[i]
  1259  		// Short circuit insertion if shutting down or processing failed
  1260  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1261  			return 0, nil
  1262  		}
  1263  		// Short circuit if the owner header is unknown
  1264  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
  1265  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
  1266  		}
  1267  		// Skip if the entire data is already known
  1268  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
  1269  			stats.ignored++
  1270  			continue
  1271  		}
  1272  		// Compute all the non-consensus fields of the receipts
  1273  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
  1274  			return i, fmt.Errorf("failed to set receipts data: %v", err)
  1275  		}
  1276  		// Write all the data out into the database
  1277  		bc.db.PutBodyToBatch(bodyBatch, block.Hash(), block.NumberU64(), block.Body())
  1278  		bc.db.PutReceiptsToBatch(receiptsBatch, block.Hash(), block.NumberU64(), receipts)
  1279  		bc.db.PutTxLookupEntriesToBatch(txLookupEntriesBatch, block)
  1280  
  1281  		stats.processed++
  1282  
  1283  		totalBytes, err := database.WriteBatchesOverThreshold(bodyBatch, receiptsBatch, txLookupEntriesBatch)
  1284  		if err != nil {
  1285  			return 0, err
  1286  		}
  1287  		bytes += totalBytes
  1288  	}
  1290  
  1291  	totalBytes, err := database.WriteBatches(bodyBatch, receiptsBatch, txLookupEntriesBatch)
  1292  	if err != nil {
  1293  		return 0, err
  1294  	}
  1295  	bytes += totalBytes
  1297  
  1298  	// Update the head fast sync block if better
  1299  	bc.mu.Lock()
  1300  	head := blockChain[len(blockChain)-1]
  1301  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
  1302  		currentFastBlock := bc.CurrentFastBlock()
  1303  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
  1304  			bc.db.WriteHeadFastBlockHash(head.Hash())
  1305  			bc.currentFastBlock.Store(head)
  1306  		}
  1307  	}
  1308  	bc.mu.Unlock()
  1309  
  1310  	logger.Info("Imported new block receipts",
  1311  		"count", stats.processed,
  1312  		"elapsed", common.PrettyDuration(time.Since(start)),
  1313  		"number", head.Number(),
  1314  		"hash", head.Hash(),
  1315  		"size", common.StorageSize(bytes),
  1316  		"ignored", stats.ignored)
  1317  	return 0, nil
  1318  }
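
        // Illustrative sketch (assumed caller, not from the original source): a
        // fast-sync importer that has already written the header chain feeds
        // matching slices of blocks and receipts; on failure, n is the index of the
        // offending block within the batch.
        //
        //	if n, err := bc.InsertReceiptChain(blocks, receipts); err != nil {
        //		logger.Error("receipt chain import failed", "failedIndex", n, "err", err)
        //	}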
  1319  
  1320  // WriteBlockWithoutState writes only the block and its metadata to the database,
  1321  // but does not write any state. This is used to construct competing side forks
  1322  // up to the point where they exceed the canonical total blockscore.
  1323  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) {
  1324  	bc.wg.Add(1)
  1325  	defer bc.wg.Done()
  1326  
  1327  	bc.hc.WriteTd(block.Hash(), block.NumberU64(), td)
  1328  	bc.writeBlock(block)
  1329  }
  1330  
  1331  type TransactionLookup struct {
  1332  	Tx *types.Transaction
  1333  	*database.TxLookupEntry
  1334  }
  1335  
  1336  // writeBlock writes block to persistent database.
  1337  // If write-through caching is enabled, it also writes the block to the cache.
  1338  func (bc *BlockChain) writeBlock(block *types.Block) {
  1339  	bc.db.WriteBlock(block)
  1340  }
  1341  
  1342  // writeReceipts writes receipts to persistent database.
  1343  // If write-through caching is enabled, it also writes blockReceipts to the cache.
  1344  func (bc *BlockChain) writeReceipts(hash common.Hash, number uint64, receipts types.Receipts) {
  1345  	bc.db.WriteReceipts(hash, number, receipts)
  1346  }
  1347  
  1348  // writeStateTrie writes state trie to database if possible.
  1349  // If an archiving node is running, it always flushes state trie to DB.
  1350  // If not, it flushes state trie to DB periodically. (period = bc.cacheConfig.BlockInterval)
  1351  func (bc *BlockChain) writeStateTrie(block *types.Block, state *state.StateDB) error {
  1352  	state.LockGCCachedNode()
  1353  	defer state.UnlockGCCachedNode()
  1354  
  1355  	root, err := state.Commit(true)
  1356  	if err != nil {
  1357  		return err
  1358  	}
  1359  	trieDB := bc.stateCache.TrieDB()
  1360  	trieDB.UpdateMetricNodes()
  1361  
  1362  	// If we're running an archive node, always flush
  1363  	if bc.isArchiveMode() {
  1364  		if err := trieDB.Commit(root, false, block.NumberU64()); err != nil {
  1365  			return err
  1366  		}
  1367  
  1368  		bc.checkStartStateMigration(block.NumberU64(), root)
  1369  		bc.lastCommittedBlock = block.NumberU64()
  1370  
  1371  		if bc.IsLivePruningRequired() {
  1372  			bc.chPrune <- block.NumberU64()
  1373  		}
  1374  	} else {
  1375  		// Full but not archive node, do proper garbage collection
  1376  		trieDB.ReferenceRoot(root) // metadata reference to keep trie alive
  1377  
  1378  		// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1379  		var (
  1380  			nodesSize, _, preimagesSize = trieDB.Size()
  1381  			nodesSizeLimit              = common.StorageSize(bc.cacheConfig.CacheSize) * 1024 * 1024
  1382  		)
  1383  
  1384  		trieDBNodesSizeBytesGauge.Update(int64(nodesSize))
  1385  		trieDBPreimagesSizeGauge.Update(int64(preimagesSize))
  1386  
  1387  		if nodesSize > nodesSizeLimit || preimagesSize > 4*1024*1024 {
  1388  			// NOTE-Klaytn To preserve the original behavior, the error is not returned.
  1389  			// The error should be returned once doing so is deemed safe.
  1390  			if err := trieDB.Cap(nodesSizeLimit - database.IdealBatchSize); err != nil {
  1391  				logger.Error("Error from trieDB.Cap", "err", err, "limit", nodesSizeLimit-database.IdealBatchSize)
  1392  			}
  1393  		}
  1394  
  1395  		if isCommitTrieRequired(bc, block.NumberU64()) {
  1396  			if err := trieDB.Commit(block.Header().Root, true, block.NumberU64()); err != nil {
  1397  				return err
  1398  			}
  1399  			logger.Trace("Committed the state trie into the disk", "blocknum", block.NumberU64())
  1400  
  1401  			if bc.checkStartStateMigration(block.NumberU64(), root) {
  1402  				// flush referenced trie nodes out to new stateTrieDB
  1403  				if err := trieDB.Cap(0); err != nil {
  1404  					logger.Error("Error from trieDB.Cap by state migration", "err", err)
  1405  				}
  1406  			}
  1407  			bc.lastCommittedBlock = block.NumberU64()
  1408  
  1409  			if bc.IsLivePruningRequired() {
  1410  				bc.chPrune <- block.NumberU64()
  1411  			}
  1412  		}
  1413  
  1414  		bc.chBlock <- gcBlock{root, block.NumberU64()}
  1415  	}
  1416  	return nil
  1417  }
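
        // Informal summary of the flush policy above (commit and retain are
        // placeholders, not real functions):
        //
        //	if bc.isArchiveMode() {
        //		commit(root)    // flush the state trie at every block
        //	} else if isCommitTrieRequired(bc, blockNum) {
        //		commit(root)    // periodic flush, every cacheConfig.BlockInterval blocks
        //	} else {
        //		retain(root)    // keep in memory; Cap() flushes on memory pressure
        //	}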
  1418  
  1419  // RLockGCCachedNode locks the GC lock of CachedNode.
  1420  func (bc *BlockChain) RLockGCCachedNode() {
  1421  	bc.stateCache.RLockGCCachedNode()
  1422  }
  1423  
  1424  // RUnlockGCCachedNode unlocks the GC lock of CachedNode.
  1425  func (bc *BlockChain) RUnlockGCCachedNode() {
  1426  	bc.stateCache.RUnlockGCCachedNode()
  1427  }
  1428  
  1429  // triesInMemory returns the number of tries residing in memory.
  1430  func (bc *BlockChain) triesInMemory() uint64 {
  1431  	return bc.cacheConfig.TriesInMemory
  1432  }
  1433  
  1434  // gcCachedNodeLoop runs a loop that garbage-collects cached trie roots older than the retention window.
  1435  func (bc *BlockChain) gcCachedNodeLoop() {
  1436  	trieDB := bc.stateCache.TrieDB()
  1437  
  1438  	bc.wg.Add(1)
  1439  	go func() {
  1440  		defer bc.wg.Done()
  1441  		for {
  1442  			select {
  1443  			case block := <-bc.chBlock:
  1444  				bc.triegc.Push(block.root, -int64(block.blockNum))
  1445  				logger.Trace("Push GC block", "blkNum", block.blockNum, "hash", block.root.String())
  1446  
  1447  				blkNum := block.blockNum
  1448  				if blkNum <= bc.triesInMemory() {
  1449  					continue
  1450  				}
  1451  
  1452  				// Garbage collect anything below our required write retention
  1453  				chosen := blkNum - bc.triesInMemory()
  1454  				cnt := 0
  1455  				for !bc.triegc.Empty() {
  1456  					root, number := bc.triegc.Pop()
  1457  					if uint64(-number) > chosen {
  1458  						bc.triegc.Push(root, number)
  1459  						break
  1460  					}
  1461  					trieDB.Dereference(root.(common.Hash))
  1462  					cnt++
  1463  				}
  1464  				logger.Debug("GC cached node", "currentBlk", blkNum, "chosenBlk", chosen, "dereferenceCnt", cnt)
  1465  			case <-bc.quit:
  1466  				return
  1467  			}
  1468  		}
  1469  	}()
  1470  }
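
        // Note (added for clarity): triegc is a priority queue keyed on -blockNum,
        // so Pop returns the oldest retained root first:
        //
        //	bc.triegc.Push(root, -int64(blockNum)) // smallest blockNum pops first
        //
        // For example, with TriesInMemory=4, the arrival of block 10 dereferences
        // every root at block <= 6 (chosen = 10 - 4), while the roots for blocks
        // 7..10 stay referenced in memory.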
  1471  
  1472  func (bc *BlockChain) pruneTrieNodeLoop() {
  1473  	// ReadPruningMarks(1, limit) is very slow because it iterates over most of MiscDB.
  1474  	// ReadPruningMarks(start, limit) is much faster because it only iterates a small range.
  1475  	startNum := uint64(1)
  1476  
  1477  	bc.wg.Add(1)
  1478  	go func() {
  1479  		defer bc.wg.Done()
  1480  		for {
  1481  			select {
  1482  			case num := <-bc.chPrune:
  1483  				if num <= bc.cacheConfig.LivePruningRetention {
  1484  					continue
  1485  				}
  1486  				limit := num - bc.cacheConfig.LivePruningRetention // Prune [1, latest - retention]
  1487  
  1488  				startTime := time.Now()
  1489  				marks := bc.db.ReadPruningMarks(startNum, limit+1)
  1490  				bc.db.PruneTrieNodes(marks)
  1491  				bc.db.DeletePruningMarks(marks)
  1492  				bc.db.WriteLastPrunedBlockNumber(limit)
  1493  
  1494  				logger.Info("Pruned trie nodes", "number", num, "start", startNum, "limit", limit,
  1495  					"count", len(marks), "elapsed", time.Since(startTime))
  1496  
  1497  				startNum = limit + 1
  1498  			case <-bc.quit:
  1499  				return
  1500  			}
  1501  		}
  1502  	}()
  1503  }
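
        // Worked example (illustrative numbers): with LivePruningRetention set to
        // 172800 blocks, a prune request at block 200000 removes the marks for
        // blocks [startNum, 27200]:
        //
        //	limit := uint64(200000 - 172800) // == 27200
        //	marks := bc.db.ReadPruningMarks(startNum, limit+1)
        //	bc.db.PruneTrieNodes(marks)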
  1504  
  1505  func (bc *BlockChain) IsLivePruningRequired() bool {
  1506  	return bc.db.ReadPruningEnabled() && bc.cacheConfig.LivePruningRetention != 0
  1507  }
  1508  
  1509  func isCommitTrieRequired(bc *BlockChain, blockNum uint64) bool {
  1510  	if bc.prepareStateMigration {
  1511  		return true
  1512  	}
  1513  
  1514  	// TODO-Klaytn-Issue1602 Introduce a simple and more concise way to determine commit trie requirements from governance
  1515  	if blockNum%uint64(bc.cacheConfig.BlockInterval) == 0 {
  1516  		return true
  1517  	}
  1518  
  1519  	if bc.chainConfig.Istanbul != nil {
  1520  		return bc.ProposerPolicy() == params.WeightedRandom &&
  1521  			params.IsStakingUpdateInterval(blockNum)
  1522  	}
  1523  	return false
  1524  }
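
        // For example (illustrative values): with BlockInterval=128 and no pending
        // state migration,
        //
        //	isCommitTrieRequired(bc, 128) // true: 128 % 128 == 0
        //	isCommitTrieRequired(bc, 130) // false, unless it is a staking-update
        //	                              // block under the WeightedRandom policy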
  1525  
  1526  // isReorganizationRequired reports whether reorganization is required, based on total blockscore.
  1527  func isReorganizationRequired(localTd, externTd *big.Int, currentBlock, block *types.Block) bool {
  1528  	reorg := externTd.Cmp(localTd) > 0
  1529  	if !reorg && externTd.Cmp(localTd) == 0 {
  1530  		// Split same-blockscore blocks by number, then at random
  1531  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
  1532  	}
  1533  	return reorg
  1534  }
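
        // Decision sketch, restating the rule above:
        //
        //	externTd > localTd   -> reorg
        //	externTd == localTd  -> prefer the lower block number; on an exact
        //	                        tie, pick at random (50/50)
        //	externTd < localTd   -> keep the current canonical chain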
  1535  
  1536  // WriteBlockWithState writes the block and all associated state to the database.
  1537  // It acquires bc.mu; callers that already hold the mutex should use writeBlockWithState instead.
  1538  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, stateDB *state.StateDB) (WriteResult, error) {
  1539  	bc.mu.Lock()
  1540  	defer bc.mu.Unlock()
  1541  
  1542  	return bc.writeBlockWithState(block, receipts, stateDB)
  1543  }
  1544  
  1545  // writeBlockWithState writes the block and all associated state to the database.
  1546  // If BlockChain.parallelDBWrite is true, it calls writeBlockWithStateParallel.
  1547  // If not, it calls writeBlockWithStateSerial.
  1548  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, stateDB *state.StateDB) (WriteResult, error) {
  1549  	var status WriteResult
  1550  	var err error
  1551  	if bc.parallelDBWrite {
  1552  		status, err = bc.writeBlockWithStateParallel(block, receipts, stateDB)
  1553  	} else {
  1554  		status, err = bc.writeBlockWithStateSerial(block, receipts, stateDB)
  1555  	}
  1556  
  1557  	if err != nil {
  1558  		return status, err
  1559  	}
  1560  
  1561  	// Publish the committed block to the redis cache of stateDB.
  1562  	// The cache uses the block to distinguish the latest state.
  1563  	if bc.cacheConfig.TrieNodeCacheConfig.RedisPublishBlockEnable {
  1564  		blockLogsKey := append(kesCachePrefixBlockLogs, block.Number().Bytes()...)
  1565  		bc.writeBlockLogsToRemoteCache(blockLogsKey, receipts)
  1566  
  1567  		blockRlp, err := rlp.EncodeToBytes(block)
  1568  		if err != nil {
  1569  			logger.Error("failed to encode lastCommittedBlock", "blockNumber", block.NumberU64(), "err", err)
  1570  		}
  1571  
  1572  		pubSub, ok := bc.stateCache.TrieDB().TrieNodeCache().(statedb.BlockPubSub)
  1573  		if ok {
  1574  			if err := pubSub.PublishBlock(hexutil.Encode(blockRlp)); err != nil {
  1575  				logger.Error("failed to publish block to redis", "blockNumber", block.NumberU64(), "err", err)
  1576  			}
  1577  		} else {
  1578  			logger.Error("invalid TrieNodeCache type", "trieNodeCacheConfig", bc.cacheConfig.TrieNodeCacheConfig)
  1579  		}
  1580  	}
  1581  
  1582  	return status, err
  1583  }
  1584  
  1585  // writeBlockLogsToRemoteCache writes block logs to remote cache.
  1586  // The stored logs will be used by KES service nodes to subscribe to log events.
  1587  // This method is only for KES nodes.
  1588  func (bc *BlockChain) writeBlockLogsToRemoteCache(blockLogsKey []byte, receipts []*types.Receipt) {
  1589  	var entireBlockLogs []*types.LogForStorage
  1590  	for _, receipt := range receipts {
  1591  		for _, log := range receipt.Logs {
  1592  			// convert Log to LogForStorage to encode entire data
  1593  			entireBlockLogs = append(entireBlockLogs, (*types.LogForStorage)(log))
  1594  		}
  1595  	}
  1596  	encodedBlockLogs, err := rlp.EncodeToBytes(entireBlockLogs)
  1597  	if err != nil {
  1598  		logger.Error("rlp encoding error", "err", err)
  1599  		return
  1600  	}
  1601  	// TODO-Klaytn-KES: refine this not to use trieNodeCache
  1602  	cache, ok := bc.stateCache.TrieDB().TrieNodeCache().(*statedb.HybridCache)
  1603  	if !ok {
  1604  		logger.Error("only HybridCache supports block logs writing",
  1605  			"TrieNodeCacheType", reflect.TypeOf(bc.stateCache.TrieDB().TrieNodeCache()))
  1606  	} else {
  1607  		cache.Remote().Set(blockLogsKey, encodedBlockLogs)
  1608  	}
  1609  }
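
        // Key layout note: the cache key is kesCachePrefixBlockLogs followed by the
        // big-endian block number bytes, matching the read side in
        // sendKESSubscriptionData below:
        //
        //	logKey := append(kesCachePrefixBlockLogs, block.Number().Bytes()...)
        //	encodedLogs := bc.stateCache.TrieDB().TrieNodeCache().Get(logKey)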
  1610  
  1611  // writeBlockWithStateSerial writes the block and all associated state to the database in a serial manner.
  1612  func (bc *BlockChain) writeBlockWithStateSerial(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (WriteResult, error) {
  1613  	start := time.Now()
  1614  	bc.wg.Add(1)
  1615  	defer bc.wg.Done()
  1616  
  1617  	var status WriteStatus
  1618  	// Calculate the total blockscore of the block
  1619  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1620  	if ptd == nil {
  1621  		logger.Error("unknown ancestor (writeBlockWithStateSerial)", "num", block.NumberU64(),
  1622  			"hash", block.Hash(), "parentHash", block.ParentHash())
  1623  		return WriteResult{Status: NonStatTy}, consensus.ErrUnknownAncestor
  1624  	}
  1625  
  1626  	if !bc.ShouldTryInserting(block) {
  1627  		return WriteResult{Status: NonStatTy}, ErrKnownBlock
  1628  	}
  1629  
  1630  	currentBlock := bc.CurrentBlock()
  1631  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1632  	externTd := new(big.Int).Add(block.BlockScore(), ptd)
  1633  
  1634  	// Irrespective of the canonical status, write the block itself to the database
  1635  	bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd)
  1636  
  1637  	// Write other block data.
  1638  	bc.writeBlock(block)
  1639  
  1640  	trieWriteStart := time.Now()
  1641  	if err := bc.writeStateTrie(block, state); err != nil {
  1642  		return WriteResult{Status: NonStatTy}, err
  1643  	}
  1644  	trieWriteTime := time.Since(trieWriteStart)
  1645  
  1646  	bc.writeReceipts(block.Hash(), block.NumberU64(), receipts)
  1647  
  1648  	// TODO-Klaytn-Issue264 If we are using istanbul BFT, then we always have a canonical chain.
  1649  	//         Later we may be able to refine the code below.
  1650  
  1651  	// If the total blockscore is higher than our known, add it to the canonical chain
  1652  	// Second clause in the if statement reduces the vulnerability to selfish mining.
  1653  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
  1654  	currentBlock = bc.CurrentBlock()
  1655  	reorg := isReorganizationRequired(localTd, externTd, currentBlock, block)
  1656  	if reorg {
  1657  		// Reorganise the chain if the parent is not the head block
  1658  		if block.ParentHash() != currentBlock.Hash() {
  1659  			if err := bc.reorg(currentBlock, block); err != nil {
  1660  				return WriteResult{Status: NonStatTy}, err
  1661  			}
  1662  		}
  1663  		// Write the positional metadata for transaction/receipt lookups and preimages
  1664  		if err := bc.writeTxLookupEntries(block); err != nil {
  1665  			return WriteResult{Status: NonStatTy}, err
  1666  		}
  1667  		bc.db.WritePreimages(block.NumberU64(), state.Preimages())
  1668  		status = CanonStatTy
  1669  	} else {
  1670  		status = SideStatTy
  1671  	}
  1672  
  1673  	return bc.finalizeWriteBlockWithState(block, status, start, trieWriteTime)
  1674  }
  1675  
  1676  // writeBlockWithStateParallel writes the block and all associated state to the database using goroutines.
  1677  func (bc *BlockChain) writeBlockWithStateParallel(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (WriteResult, error) {
  1678  	start := time.Now()
  1679  	bc.wg.Add(1)
  1680  	defer bc.wg.Done()
  1681  
  1682  	var status WriteStatus
  1683  	// Calculate the total blockscore of the block
  1684  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1685  	if ptd == nil {
  1686  		logger.Error("unknown ancestor (writeBlockWithStateParallel)", "num", block.NumberU64(),
  1687  			"hash", block.Hash(), "parentHash", block.ParentHash())
  1688  		return WriteResult{Status: NonStatTy}, consensus.ErrUnknownAncestor
  1689  	}
  1690  
  1691  	if !bc.ShouldTryInserting(block) {
  1692  		return WriteResult{Status: NonStatTy}, ErrKnownBlock
  1693  	}
  1694  
  1695  	currentBlock := bc.CurrentBlock()
  1696  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1697  	externTd := new(big.Int).Add(block.BlockScore(), ptd)
  1698  
  1699  	parallelDBWriteWG := sync.WaitGroup{}
  1700  	parallelDBWriteErrCh := make(chan error, 2)
  1701  	// Irrespective of the canonical status, write the block itself to the database
  1702  	// TODO-Klaytn-Storage Implementing worker pool pattern instead of generating goroutines every time.
  1703  	parallelDBWriteWG.Add(4)
  1704  	go func() {
  1705  		defer parallelDBWriteWG.Done()
  1706  		bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd)
  1707  	}()
  1708  
  1709  	// Write other block data.
  1710  	go func() {
  1711  		defer parallelDBWriteWG.Done()
  1712  		bc.writeBlock(block)
  1713  	}()
  1714  
  1715  	var trieWriteTime time.Duration
  1716  	trieWriteStart := time.Now()
  1717  	go func() {
  1718  		defer parallelDBWriteWG.Done()
  1719  		if err := bc.writeStateTrie(block, state); err != nil {
  1720  			parallelDBWriteErrCh <- err
  1721  		}
  1722  		trieWriteTime = time.Since(trieWriteStart)
  1723  	}()
  1724  
  1725  	go func() {
  1726  		defer parallelDBWriteWG.Done()
  1727  		bc.writeReceipts(block.Hash(), block.NumberU64(), receipts)
  1728  	}()
  1729  
  1730  	// Wait until all writing goroutines are terminated.
  1731  	parallelDBWriteWG.Wait()
  1732  	select {
  1733  	case err := <-parallelDBWriteErrCh:
  1734  		return WriteResult{Status: NonStatTy}, err
  1735  	default:
  1736  	}
  1737  
  1738  	// TODO-Klaytn-Issue264 If we are using istanbul BFT, then we always have a canonical chain.
  1739  	//         Later we may be able to refine the code below.
  1740  
  1741  	// If the total blockscore is higher than our known, add it to the canonical chain
  1742  	// Second clause in the if statement reduces the vulnerability to selfish mining.
  1743  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
  1744  	currentBlock = bc.CurrentBlock()
  1745  	reorg := isReorganizationRequired(localTd, externTd, currentBlock, block)
  1746  	if reorg {
  1747  		// Reorganise the chain if the parent is not the head block
  1748  		if block.ParentHash() != currentBlock.Hash() {
  1749  			if err := bc.reorg(currentBlock, block); err != nil {
  1750  				return WriteResult{Status: NonStatTy}, err
  1751  			}
  1752  		}
  1753  
  1754  		parallelDBWriteWG.Add(2)
  1755  
  1756  		go func() {
  1757  			defer parallelDBWriteWG.Done()
  1758  			// Write the positional metadata for transaction/receipt lookups
  1759  			if err := bc.writeTxLookupEntries(block); err != nil {
  1760  				parallelDBWriteErrCh <- err
  1761  			}
  1762  		}()
  1763  
  1764  		go func() {
  1765  			defer parallelDBWriteWG.Done()
  1766  			bc.db.WritePreimages(block.NumberU64(), state.Preimages())
  1767  		}()
  1768  
  1769  		// Wait until all writing goroutines are terminated.
  1770  		parallelDBWriteWG.Wait()
  1771  
  1772  		status = CanonStatTy
  1773  	} else {
  1774  		status = SideStatTy
  1775  	}
  1776  
  1777  	select {
  1778  	case err := <-parallelDBWriteErrCh:
  1779  		return WriteResult{Status: NonStatTy}, err
  1780  	default:
  1781  	}
  1782  
  1783  	return bc.finalizeWriteBlockWithState(block, status, start, trieWriteTime)
  1784  }
  1785  
  1786  // finalizeWriteBlockWithState updates metrics and inserts block when status is CanonStatTy.
  1787  func (bc *BlockChain) finalizeWriteBlockWithState(block *types.Block, status WriteStatus, startTime time.Time, trieWriteTime time.Duration) (WriteResult, error) {
  1788  	// Set new head.
  1789  	if status == CanonStatTy {
  1790  		bc.insert(block)
  1791  		headBlockNumberGauge.Update(block.Number().Int64())
  1792  		blockTxCountsGauge.Update(int64(block.Transactions().Len()))
  1793  		blockTxCountsCounter.Inc(int64(block.Transactions().Len()))
  1794  	}
  1795  	bc.futureBlocks.Remove(block.Hash())
  1796  	return WriteResult{status, time.Since(startTime), trieWriteTime}, nil
  1797  }
  1798  
  1799  func (bc *BlockChain) writeTxLookupEntries(block *types.Block) error {
  1800  	return bc.db.WriteAndCacheTxLookupEntries(block)
  1801  }
  1802  
  1803  // GetTxAndLookupInfoInCache retrieves a tx and lookup info for a given transaction hash in cache.
  1804  func (bc *BlockChain) GetTxAndLookupInfoInCache(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
  1805  	return bc.db.ReadTxAndLookupInfoInCache(hash)
  1806  }
  1807  
  1808  // GetBlockReceiptsInCache returns receipt of txHash in cache.
  1809  func (bc *BlockChain) GetBlockReceiptsInCache(blockHash common.Hash) types.Receipts {
  1810  	return bc.db.ReadBlockReceiptsInCache(blockHash)
  1811  }
  1812  
  1813  // GetTxReceiptInCache returns receipt of txHash in cache.
  1814  func (bc *BlockChain) GetTxReceiptInCache(txHash common.Hash) *types.Receipt {
  1815  	return bc.db.ReadTxReceiptInCache(txHash)
  1816  }
  1817  
  1818  // InsertChain attempts to insert the given batch of blocks into the canonical
  1819  // chain or, otherwise, create a fork. If an error is returned, it will return
  1820  // the index number of the failing block as well as an error describing what
  1821  // went wrong.
  1822  //
  1823  // After insertion is done, all accumulated events will be fired.
  1824  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1825  	n, events, logs, err := bc.insertChain(chain)
  1826  	bc.PostChainEvents(events, logs)
  1827  	return n, err
  1828  }
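
        // Illustrative call (hypothetical caller): inserting a single fetched block.
        // On error, n is the index of the failing block within the batch.
        //
        //	if n, err := bc.InsertChain(types.Blocks{block}); err != nil {
        //		logger.Error("failed to insert block", "index", n, "err", err)
        //	}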
  1829  
  1830  // insertChain will execute the actual chain insertion and event aggregation. The
  1831  // only reason this method exists as a separate one is to make locking cleaner
  1832  // with deferred statements.
  1833  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1834  	// Sanity check that we have something meaningful to import
  1835  	if len(chain) == 0 {
  1836  		return 0, nil, nil, nil
  1837  	}
  1838  	// Do a sanity check that the provided chain is actually ordered and linked
  1839  	for i := 1; i < len(chain); i++ {
  1840  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1841  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1842  			logger.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1843  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1844  
  1845  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1846  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1847  		}
  1848  	}
  1849  
  1850  	// Pre-checks passed, start the full block imports
  1851  	bc.wg.Add(1)
  1852  	defer bc.wg.Done()
  1853  
  1854  	bc.mu.Lock()
  1855  	defer bc.mu.Unlock()
  1856  
  1857  	// A queued approach to delivering events. This is generally
  1858  	// faster than direct delivery and requires much less mutex
  1859  	// acquisition.
  1860  	var (
  1861  		stats         = insertStats{startTime: mclock.Now()}
  1862  		events        = make([]interface{}, 0, len(chain))
  1863  		lastCanon     *types.Block
  1864  		coalescedLogs []*types.Log
  1865  	)
  1866  	// Start the parallel header verifier
  1867  	headers := make([]*types.Header, len(chain))
  1868  	seals := make([]bool, len(chain))
  1869  
  1870  	for i, block := range chain {
  1871  		headers[i] = block.Header()
  1872  		seals[i] = true
  1873  	}
  1874  
  1875  	var (
  1876  		abort   chan<- struct{}
  1877  		results <-chan error
  1878  	)
  1879  	if bc.engine.CanVerifyHeadersConcurrently() {
  1880  		abort, results = bc.engine.VerifyHeaders(bc, headers, seals)
  1881  	} else {
  1882  		abort, results = bc.engine.PreprocessHeaderVerification(headers)
  1883  	}
  1884  	defer close(abort)
  1885  
  1886  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1887  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1888  
  1889  	// Iterate over the blocks and insert when the verifier permits
  1890  	for i, block := range chain {
  1891  		// If the chain is terminating, stop processing blocks
  1892  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1893  			logger.Debug("Premature abort during blocks processing")
  1894  			break
  1895  		}
  1896  		// Create a new trie using the parent block and report an
  1897  		// error if it fails.
  1898  		var parent *types.Block
  1899  		if i == 0 {
  1900  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1901  		} else {
  1902  			parent = chain[i-1]
  1903  		}
  1904  
  1905  		// If we have a followup block, run that against the current state to pre-cache
  1906  		// transactions and probabilistically some of the account/storage trie nodes.
  1907  		var followupInterrupt uint32
  1908  
  1909  		if bc.cacheConfig.TrieNodeCacheConfig.NumFetcherPrefetchWorker > 0 && parent != nil {
  1910  			var snaps *snapshot.Tree
  1911  			if bc.cacheConfig.TrieNodeCacheConfig.UseSnapshotForPrefetch {
  1912  				snaps = bc.snaps
  1913  			}
  1914  
  1915  			// Tx prefetcher is enabled for all cases (both single and multiple block insertion).
  1916  			for ti := range block.Transactions() {
  1917  				select {
  1918  				case bc.prefetchTxCh <- prefetchTx{ti, block, &followupInterrupt}:
  1919  				default:
  1920  				}
  1921  			}
  1922  			if i < len(chain)-1 {
  1923  				// the current block is not the last one, so prefetch the block right after it
  1924  				followup := chain[i+1]
  1925  				go func(start time.Time) {
  1926  					defer func() {
  1927  						if err := recover(); err != nil {
  1928  							logger.Error("Got panic and recovered from prefetcher", "err", err)
  1929  						}
  1930  					}()
  1931  
  1932  					throwaway, err := state.New(parent.Root(), bc.stateCache, snaps,
  1933  						&statedb.TrieOpts{Prefetching: true})
  1934  					if throwaway == nil || err != nil {
  1935  						logger.Warn("failed to get StateDB for prefetcher", "err", err,
  1936  							"parentBlockNum", parent.NumberU64(), "currBlockNum", bc.CurrentBlock().NumberU64())
  1937  						return
  1938  					}
  1939  
  1940  					vmCfg := bc.vmConfig
  1941  					vmCfg.Prefetching = true
  1942  					bc.prefetcher.Prefetch(followup, throwaway, vmCfg, &followupInterrupt)
  1943  
  1944  					blockPrefetchExecuteTimer.Update(time.Since(start))
  1945  					if atomic.LoadUint32(&followupInterrupt) == 1 {
  1946  						blockPrefetchInterruptMeter.Mark(1)
  1947  					}
  1948  				}(time.Now())
  1949  			}
  1950  		}
  1951  		// If the header is a banned one, straight out abort
  1952  		if BadHashes[block.Hash()] {
  1953  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1954  			return i, events, coalescedLogs, ErrBlacklistedHash
  1955  		}
  1956  		// Wait for the block's verification to complete
  1957  		bstart := time.Now()
  1958  
  1959  		err := <-results
  1960  		if !bc.engine.CanVerifyHeadersConcurrently() && err == nil {
  1961  			err = bc.engine.VerifyHeader(bc, block.Header(), true)
  1962  		}
  1963  
  1964  		if err == nil {
  1965  			err = bc.validator.ValidateBody(block)
  1966  		}
  1967  
  1968  		switch {
  1969  		case err == ErrKnownBlock:
  1970  			// Block and state both already known. However if the current block is below
  1971  			// this number we did a rollback and we should reimport it nonetheless.
  1972  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1973  				stats.ignored++
  1974  				continue
  1975  			}
  1976  
  1977  		case err == consensus.ErrFutureBlock:
  1978  			// Allow blocks up to maxTimeFutureBlocks seconds in the future. If this limit
  1979  			// is exceeded, the block is discarded and processed at a later time if given.
  1980  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1981  			if block.Time().Cmp(max) > 0 {
  1982  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1983  			}
  1984  			bc.futureBlocks.Add(block.Hash(), block)
  1985  			stats.queued++
  1986  			continue
  1987  
  1988  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1989  			bc.futureBlocks.Add(block.Hash(), block)
  1990  			stats.queued++
  1991  			continue
  1992  
  1993  		case err == consensus.ErrPrunedAncestor:
  1994  			// Block competing with the canonical chain, store in the db, but don't process
  1995  			// until the competitor TD goes above the canonical TD
  1996  			currentBlock := bc.CurrentBlock()
  1997  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1998  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.BlockScore())
  1999  			if localTd.Cmp(externTd) > 0 {
  2000  				bc.WriteBlockWithoutState(block, externTd)
  2001  				continue
  2002  			}
  2003  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  2004  			var winner []*types.Block
  2005  
  2006  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  2007  			for !bc.HasState(parent.Root()) {
  2008  				winner = append(winner, parent)
  2009  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  2010  			}
  2011  			for j := 0; j < len(winner)/2; j++ {
  2012  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  2013  			}
  2014  			// Import all the pruned blocks to make the state available
  2015  			bc.mu.Unlock()
  2016  			_, evs, logs, err := bc.insertChain(winner)
  2017  			bc.mu.Lock()
  2018  			events, coalescedLogs = evs, logs
  2019  
  2020  			if err != nil {
  2021  				return i, events, coalescedLogs, err
  2022  			}
  2023  
  2024  		case err != nil:
  2025  			bc.futureBlocks.Remove(block.Hash())
  2026  			bc.reportBlock(block, nil, err)
  2027  			return i, events, coalescedLogs, err
  2028  		}
  2029  
  2030  		stateDB, err := bc.PrunableStateAt(parent.Root(), parent.NumberU64())
  2031  		if err != nil {
  2032  			return i, events, coalescedLogs, err
  2033  		}
  2034  
  2035  		// Process block using the parent state as reference point.
  2036  		receipts, logs, usedGas, internalTxTraces, procStats, err := bc.processor.Process(block, stateDB, bc.vmConfig)
  2037  		if err != nil {
  2038  			bc.reportBlock(block, receipts, err)
  2039  			atomic.StoreUint32(&followupInterrupt, 1)
  2040  			return i, events, coalescedLogs, err
  2041  		}
  2042  
  2043  		// Validate the state using the default validator
  2044  		err = bc.validator.ValidateState(block, parent, stateDB, receipts, usedGas)
  2045  		if err != nil {
  2046  			bc.reportBlock(block, receipts, err)
  2047  			atomic.StoreUint32(&followupInterrupt, 1)
  2048  			return i, events, coalescedLogs, err
  2049  		}
  2050  		afterValidate := time.Now()
  2051  
  2052  		// Write the block to the chain and get the writeResult.
  2053  		writeResult, err := bc.writeBlockWithState(block, receipts, stateDB)
  2054  		if err != nil {
  2055  			atomic.StoreUint32(&followupInterrupt, 1)
  2056  			if err == ErrKnownBlock {
  2057  				logger.Debug("Tried to insert already known block", "num", block.NumberU64(), "hash", block.Hash().String())
  2058  				continue
  2059  			}
  2060  			return i, events, coalescedLogs, err
  2061  		}
  2062  		atomic.StoreUint32(&followupInterrupt, 1)
  2063  
  2064  		// Update to-address based spam throttler when spamThrottler is enabled and a single block is fetched.
  2065  		spamThrottler := GetSpamThrottler()
  2066  		if spamThrottler != nil && len(chain) == 1 {
  2067  			spamThrottler.updateThrottlerState(block.Transactions(), receipts)
  2068  		}
  2069  
  2070  		// Update the metrics subsystem with all the measurements
  2071  		accountReadTimer.Update(stateDB.AccountReads)
  2072  		accountHashTimer.Update(stateDB.AccountHashes)
  2073  		accountUpdateTimer.Update(stateDB.AccountUpdates)
  2074  		accountCommitTimer.Update(stateDB.AccountCommits)
  2075  
  2076  		storageReadTimer.Update(stateDB.StorageReads)
  2077  		storageHashTimer.Update(stateDB.StorageHashes)
  2078  		storageUpdateTimer.Update(stateDB.StorageUpdates)
  2079  		storageCommitTimer.Update(stateDB.StorageCommits)
  2080  
  2081  		snapshotAccountReadTimer.Update(stateDB.SnapshotAccountReads)
  2082  		snapshotStorageReadTimer.Update(stateDB.SnapshotStorageReads)
  2083  		snapshotCommitTimer.Update(stateDB.SnapshotCommits)
  2084  
  2085  		trieAccess := stateDB.AccountReads + stateDB.AccountHashes + stateDB.AccountUpdates + stateDB.AccountCommits
  2086  		trieAccess += stateDB.StorageReads + stateDB.StorageHashes + stateDB.StorageUpdates + stateDB.StorageCommits
  2087  
  2088  		blockAgeTimer.Update(time.Since(time.Unix(int64(block.Time().Uint64()), 0)))
  2089  
  2090  		switch writeResult.Status {
  2091  		case CanonStatTy:
  2092  			processTxsTime := common.PrettyDuration(procStats.AfterApplyTxs.Sub(procStats.BeforeApplyTxs))
  2093  			processFinalizeTime := common.PrettyDuration(procStats.AfterFinalize.Sub(procStats.AfterApplyTxs))
  2094  			validateTime := common.PrettyDuration(afterValidate.Sub(procStats.AfterFinalize))
  2095  			totalTime := common.PrettyDuration(time.Since(bstart))
  2096  			logger.Info("Inserted a new block", "number", block.Number(), "hash", block.Hash(),
  2097  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", totalTime,
  2098  				"processTxs", processTxsTime, "finalize", processFinalizeTime, "validateState", validateTime,
  2099  				"totalWrite", writeResult.TotalWriteTime, "trieWrite", writeResult.TrieWriteTime)
  2100  
  2101  			if block.Header().BaseFee != nil {
  2102  				blockBaseFee.Update(block.Header().BaseFee.Int64() / int64(params.Ston))
  2103  			}
  2104  			blockProcessTimer.Update(time.Duration(processTxsTime))
  2105  			blockExecutionTimer.Update(time.Duration(processTxsTime) - trieAccess)
  2106  			blockFinalizeTimer.Update(time.Duration(processFinalizeTime))
  2107  			blockValidateTimer.Update(time.Duration(validateTime))
  2108  			blockInsertTimer.Update(time.Duration(totalTime))
  2109  
  2110  			coalescedLogs = append(coalescedLogs, logs...)
  2111  			events = append(events, ChainEvent{
  2112  				Block:            block,
  2113  				Hash:             block.Hash(),
  2114  				Logs:             logs,
  2115  				Receipts:         receipts,
  2116  				InternalTxTraces: internalTxTraces,
  2117  			})
  2118  			lastCanon = block
  2119  
  2120  		case SideStatTy:
  2121  			logger.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.BlockScore(), "elapsed",
  2122  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed())
  2123  
  2124  			events = append(events, ChainSideEvent{block})
  2125  		}
  2126  		stats.processed++
  2127  		stats.usedGas += usedGas
  2128  
  2129  		cache, _, _ := bc.stateCache.TrieDB().Size()
  2130  		stats.report(chain, i, cache)
  2131  
  2132  		// update governance CurrentSet if it is at an epoch block
  2133  		if err := bc.engine.CreateSnapshot(bc, block.NumberU64(), block.Hash(), nil); err != nil {
  2134  			return i, events, coalescedLogs, err
  2135  		}
  2136  
  2137  		// update governance parameters
  2138  		if istanbul, ok := bc.engine.(consensus.Istanbul); ok {
  2139  			if err = istanbul.UpdateParam(block.NumberU64()); err != nil {
  2140  				return i, events, coalescedLogs, err
  2141  			}
  2142  		}
  2143  	}
  2144  	// Append a single chain head event if we've progressed the chain
  2145  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  2146  		events = append(events, ChainHeadEvent{lastCanon})
  2147  	}
  2148  	return 0, events, coalescedLogs, nil
  2149  }
  2150  
  2151  // BlockSubscriptionLoop subscribes blocks from a redis server and processes them.
  2152  // This method is only for KES nodes.
  2153  func (bc *BlockChain) BlockSubscriptionLoop(pool *TxPool) {
  2154  	var ch <-chan *redis.Message
  2155  	logger.Info("subscribe blocks from redis cache")
  2156  
  2157  	pubSub, ok := bc.stateCache.TrieDB().TrieNodeCache().(statedb.BlockPubSub)
  2158  	if !ok || pubSub == nil {
  2159  		logger.Crit("invalid block pub/sub configure", "trieNodeCacheConfig",
  2160  			bc.stateCache.TrieDB().GetTrieNodeCacheConfig())
  2161  	}
  2162  
  2163  	ch = pubSub.SubscribeBlockCh()
  2164  	if ch == nil {
  2165  		logger.Crit("failed to create redis subscription channel")
  2166  	}
  2167  
  2168  	for msg := range ch {
  2169  		logger.Debug("msg from redis subscription channel", "msg", msg.Payload)
  2170  
  2171  		blockRlp, err := hexutil.Decode(msg.Payload)
  2172  		if err != nil {
  2173  			logger.Error("failed to decode redis subscription msg", "msg", msg.Payload)
  2174  			continue
  2175  		}
  2176  
  2177  		block := &types.Block{}
  2178  		if err := rlp.DecodeBytes(blockRlp, block); err != nil {
  2179  			logger.Error("failed to rlp decode block", "msg", msg.Payload, "block", string(blockRlp))
  2180  			continue
  2181  		}
  2182  
  2183  		oldHead := bc.CurrentHeader()
  2184  		bc.replaceCurrentBlock(block)
  2185  		pool.lockedReset(oldHead, bc.CurrentHeader())
  2186  
  2187  		// in case the block number jumped by more than one, iterate over all missed blocks
  2188  		for blockNum := oldHead.Number.Uint64() + 1; blockNum < block.Number().Uint64(); blockNum++ {
  2189  			retrievedBlock := bc.GetBlockByNumber(blockNum)
  2190  			bc.sendKESSubscriptionData(retrievedBlock)
  2191  		}
  2192  		bc.sendKESSubscriptionData(block)
  2193  	}
  2194  
  2195  	logger.Info("closed the block subscription loop")
  2196  }
  2197  
  2198  // sendKESSubscriptionData sends data to chainFeed and logsFeed.
  2199  // ChainEvent containing only Block and Hash is sent to chainFeed.
  2200  // []*types.Log containing the entire logs of a block is sent to logsFeed.
  2201  // The logs are expected to be delivered from the remote cache.
  2202  // If reading log data from the remote cache fails, it falls back to the database.
  2203  // This method is only for KES nodes.
  2204  func (bc *BlockChain) sendKESSubscriptionData(block *types.Block) {
  2205  	bc.chainFeed.Send(ChainEvent{
  2206  		Block: block,
  2207  		Hash:  block.Hash(),
  2208  		// TODO-Klaytn-KES: fill the following data if needed
  2209  		Receipts:         types.Receipts{},
  2210  		Logs:             []*types.Log{},
  2211  		InternalTxTraces: []*vm.InternalTxTrace{},
  2212  	})
  2213  
  2214  	// TODO-Klaytn-KES: refine this not to use trieNodeCache
  2215  	logKey := append(kesCachePrefixBlockLogs, block.Number().Bytes()...)
  2216  	encodedLogs := bc.stateCache.TrieDB().TrieNodeCache().Get(logKey)
  2217  	if encodedLogs == nil {
  2218  		logger.Warn("cannot get a block log from the remote cache", "blockNum", block.NumberU64())
  2219  
  2220  		// read log data from database and send it
  2221  		logsList := bc.GetLogsByHash(block.Header().Hash())
  2222  		var logs []*types.Log
  2223  		for _, list := range logsList {
  2224  			logs = append(logs, list...)
  2225  		}
  2226  		bc.logsFeed.Send(logs)
  2227  		return
  2228  	}
  2229  
  2230  	entireLogs := []*types.LogForStorage{}
  2231  	if err := rlp.DecodeBytes(encodedLogs, &entireLogs); err != nil {
  2232  		logger.Warn("failed to decode a block log", "blockNum", block.NumberU64(), "err", err)
  2233  
  2234  		// read log data from database and send it
  2235  		logsList := bc.GetLogsByHash(block.Header().Hash())
  2236  		var logs []*types.Log
  2237  		for _, list := range logsList {
  2238  			logs = append(logs, list...)
  2239  		}
  2240  		bc.logsFeed.Send(logs)
  2241  		return
  2242  	}
  2243  
  2244  	// convert LogForStorage to Log
  2245  	logs := make([]*types.Log, len(entireLogs))
  2246  	for i, log := range entireLogs {
  2247  		logs[i] = (*types.Log)(log)
  2248  	}
  2249  	bc.logsFeed.Send(logs)
  2250  }
  2251  
  2252  // CloseBlockSubscriptionLoop closes BlockSubscriptionLoop.
  2253  func (bc *BlockChain) CloseBlockSubscriptionLoop() {
  2254  	pubSub, ok := bc.stateCache.TrieDB().TrieNodeCache().(statedb.BlockPubSub)
  2255  	if ok {
  2256  		if err := pubSub.UnsubscribeBlock(); err != nil {
  2257  			logger.Error("failed to unsubscribe blocks", "err", err, "trieNodeCacheConfig",
  2258  				bc.stateCache.TrieDB().GetTrieNodeCacheConfig())
  2259  		}
  2260  	}
  2261  }
  2262  
  2263  // CurrentBlockUpdateLoop updates the current block in the chain for updating read-only node.
  2264  func (bc *BlockChain) CurrentBlockUpdateLoop(pool *TxPool) {
  2265  	bc.wg.Add(1)
  2266  	defer bc.wg.Done()
  2267  
  2268  	refresher := time.NewTicker(1 * time.Second)
  2269  	defer refresher.Stop()
  2270  
  2271  	for {
  2272  		select {
  2273  		case <-refresher.C:
  2274  			if err := bc.db.TryCatchUpWithPrimary(); err != nil {
  2275  				logger.Error("Failed to catch up with primary", "err", err)
  2276  				continue
  2277  			}
  2278  
  2279  			// Restore the last known head block
  2280  			head := bc.db.ReadHeadBlockHash()
  2281  			if head == (common.Hash{}) {
  2282  				logger.Error("Failed to read head block hash")
  2283  				continue
  2284  			}
  2285  
  2286  			block := bc.db.ReadBlockByHash(head)
  2287  			if block == nil {
  2288  				head = bc.db.ReadHeadBlockBackupHash()
  2289  				if head == (common.Hash{}) {
  2290  					logger.Error("There is no block backup hash")
  2291  					continue
  2292  				}
  2293  
  2294  				block = bc.GetBlockByHash(head)
  2295  				if block == nil {
  2296  					logger.Error("Failed to read head block from database", "hash", head.String())
  2297  					continue
  2298  				}
  2299  			}
  2300  
  2301  			oldHead := bc.CurrentHeader()
  2302  			bc.replaceCurrentBlock(block)
  2303  			pool.lockedReset(oldHead, bc.CurrentHeader())
  2304  
  2305  			// TODO-Klaytn-RocksDB: update logic for subscription API. check BlockSubscriptionLoop method.
  2306  		case <-bc.quit:
  2307  			logger.Info("Closed current block update loop")
  2308  			return
  2309  		}
  2310  	}
  2311  }
  2312  
  2313  // replaceCurrentBlock replaces bc.currentBlock with the given block.
  2314  func (bc *BlockChain) replaceCurrentBlock(latestBlock *types.Block) {
  2315  	bc.mu.Lock()
  2316  	defer bc.mu.Unlock()
  2317  
  2318  	if latestBlock == nil {
  2319  		logger.Error("no latest block")
  2320  		return
  2321  	}
  2322  
  2323  	// Don't update the current block if the latest block is not newer than the current block.
  2324  	currentBlock := bc.CurrentBlock()
  2325  	if currentBlock.NumberU64() >= latestBlock.NumberU64() {
  2326  		logger.Debug("ignore an old block", "currentBlockNumber", currentBlock.NumberU64(), "oldBlockNumber",
  2327  			latestBlock.NumberU64())
  2328  		return
  2329  	}
  2330  
  2331  	// Insert a new block and update metrics
  2332  	bc.insert(latestBlock)
  2333  	bc.hc.SetCurrentHeader(latestBlock.Header())
  2334  
  2335  	headBlockNumberGauge.Update(latestBlock.Number().Int64())
  2336  	blockTxCountsGauge.Update(int64(latestBlock.Transactions().Len()))
  2337  	blockTxCountsCounter.Inc(int64(latestBlock.Transactions().Len()))
  2338  	bc.stateCache.TrieDB().UpdateMetricNodes()
  2339  
  2340  	logger.Info("Replaced the current block",
  2341  		"blkNum", latestBlock.NumberU64(), "blkHash", latestBlock.Hash().String())
  2342  }
  2343  
  2344  // insertStats tracks and reports on block insertion.
  2345  type insertStats struct {
  2346  	queued, processed, ignored int
  2347  	usedGas                    uint64
  2348  	lastIndex                  int
  2349  	startTime                  mclock.AbsTime
  2350  }
  2351  
  2352  // report prints statistics if some number of blocks have been processed
  2353  // or more than a few seconds have passed since the last message.
  2354  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  2355  	// report leaves a log only when inserting two or more blocks at once
  2356  	if len(chain) <= 1 {
  2357  		return
  2358  	}
  2359  	// Fetch the timings for the batch
  2360  	var (
  2361  		now     = mclock.Now()
  2362  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  2363  	)
  2364  	// If we're at the last block of the batch or report period reached, log
  2365  	if index == len(chain)-1 || elapsed >= log.StatsReportLimit {
  2366  		var (
  2367  			end = chain[index]
  2368  			txs = countTransactions(chain[st.lastIndex : index+1])
  2369  		)
  2370  		context := []interface{}{
  2371  			"number", end.Number(), "hash", end.Hash(), "blocks", st.processed, "txs", txs, "elapsed", common.PrettyDuration(elapsed),
  2372  			"trieDBSize", cache, "mgas", float64(st.usedGas) / 1000000, "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  2373  		}
  2374  
  2375  		timestamp := time.Unix(int64(end.Time().Uint64()), 0)
  2376  		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
  2377  
  2378  		if st.queued > 0 {
  2379  			context = append(context, []interface{}{"queued", st.queued}...)
  2380  		}
  2381  		if st.ignored > 0 {
  2382  			context = append(context, []interface{}{"ignored", st.ignored}...)
  2383  		}
  2384  		logger.Info("Imported new chain segment", context...)
  2385  
  2386  		*st = insertStats{startTime: now, lastIndex: index + 1}
  2387  	}
  2388  }
  2389  
  2390  func countTransactions(chain []*types.Block) (c int) {
  2391  	for _, b := range chain {
  2392  		c += len(b.Transactions())
  2393  	}
  2394  	return c
  2395  }
  2396  
  2397  // reorg takes two blocks, an old chain and a new chain, and will reconstruct the
  2398  // blocks and insert them to be part of the new canonical chain, accumulating
  2399  // potential missing transactions and posting an event about them.
  2400  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  2401  	var (
  2402  		newChain    types.Blocks
  2403  		oldChain    types.Blocks
  2404  		commonBlock *types.Block
  2405  		deletedTxs  types.Transactions
  2406  		deletedLogs []*types.Log
  2407  		// collectLogs collects the logs that were generated during the
  2408  		// processing of the block that corresponds with the given hash.
  2409  		// These logs are later announced as deleted.
  2410  		collectLogs = func(hash common.Hash) {
  2411  			// Coalesce logs and set 'Removed'.
  2412  			number := bc.GetBlockNumber(hash)
  2413  			if number == nil {
  2414  				return
  2415  			}
  2416  			receipts := bc.db.ReadReceipts(hash, *number)
  2417  			for _, receipt := range receipts {
  2418  				for _, log := range receipt.Logs {
  2419  					del := *log
  2420  					del.Removed = true
  2421  					deletedLogs = append(deletedLogs, &del)
  2422  				}
  2423  			}
  2424  		}
  2425  	)
  2426  
  2427  	// first reduce whichever chain has the higher bound
  2428  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  2429  		// reduce old chain
  2430  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  2431  			oldChain = append(oldChain, oldBlock)
  2432  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  2433  
  2434  			collectLogs(oldBlock.Hash())
  2435  		}
  2436  	} else {
  2437  		// reduce new chain and append new chain blocks for inserting later on
  2438  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  2439  			newChain = append(newChain, newBlock)
  2440  		}
  2441  	}
  2442  	if oldBlock == nil {
  2443  		return errors.New("invalid old chain")
  2444  	}
  2445  	if newBlock == nil {
  2446  		return errors.New("invalid new chain")
  2447  	}
  2448  
  2449  	for {
  2450  		if oldBlock.Hash() == newBlock.Hash() {
  2451  			commonBlock = oldBlock
  2452  			break
  2453  		}
  2454  
  2455  		oldChain = append(oldChain, oldBlock)
  2456  		newChain = append(newChain, newBlock)
  2457  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  2458  		collectLogs(oldBlock.Hash())
  2459  
  2460  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  2461  		if oldBlock == nil {
  2462  			return errors.New("invalid old chain")
  2463  		}
  2464  		if newBlock == nil {
  2465  			return errors.New("invalid new chain")
  2466  		}
  2467  	}
  2468  	// Ensure the user sees large reorgs
  2469  	if len(oldChain) > 0 && len(newChain) > 0 {
  2470  		logFn := logger.Debug
  2471  		if len(oldChain) > 63 {
  2472  			logFn = logger.Warn
  2473  		}
  2474  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  2475  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  2476  	} else {
  2477  		logger.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  2478  	}
  2479  	// Insert the new chain, taking care of the proper incremental order
  2480  	var addedTxs types.Transactions
  2481  	for i := len(newChain) - 1; i >= 0; i-- {
  2482  		// insert the block in the canonical way, re-writing history
  2483  		bc.insert(newChain[i])
  2484  		// write lookup entries for hash based transaction/receipt searches
  2485  		bc.db.WriteTxLookupEntries(newChain[i])
  2486  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  2487  	}
  2488  	// calculate the difference between deleted and added transactions
  2489  	diff := types.TxDifference(deletedTxs, addedTxs)
  2490  	// When transactions get deleted from the database that means the
  2491  	// receipts that were created in the fork must also be deleted
  2492  	for _, tx := range diff {
  2493  		bc.db.DeleteTxLookupEntry(tx.Hash())
  2494  	}
  2495  	if len(deletedLogs) > 0 {
  2496  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  2497  	}
  2498  	if len(oldChain) > 0 {
  2499  		go func() {
  2500  			for _, block := range oldChain {
  2501  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  2502  			}
  2503  		}()
  2504  	}
  2505  
  2506  	return nil
  2507  }
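
        // Reorg geometry (informal sketch): given old head O3 and new head N4 with
        // common ancestor A,
        //
        //	A -> O1 -> O2 -> O3        // old canonical chain, rolled back
        //	A -> N1 -> N2 -> N3 -> N4  // new canonical chain, inserted oldest-first
        //
        // Lookup entries for transactions in O1..O3 that do not reappear in N1..N4
        // are deleted, and their logs are re-emitted with Removed=true.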
  2508  
  2509  // PostChainEvents iterates over the events generated by a chain insertion and
  2510  // posts them into the event feed.
  2511  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  2512  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  2513  	// post event logs for further processing
  2514  	if logs != nil {
  2515  		bc.logsFeed.Send(logs)
  2516  	}
  2517  	for _, event := range events {
  2518  		switch ev := event.(type) {
  2519  		case ChainEvent:
  2520  			bc.chainFeed.Send(ev)
  2521  
  2522  		case ChainHeadEvent:
  2523  			bc.chainHeadFeed.Send(ev)
  2524  
  2525  		case ChainSideEvent:
  2526  			bc.chainSideFeed.Send(ev)
  2527  		}
  2528  	}
  2529  }
  2530  
  2531  func (bc *BlockChain) update() {
  2532  	futureTimer := time.NewTicker(5 * time.Second)
  2533  	defer futureTimer.Stop()
  2534  	for {
  2535  		select {
  2536  		case <-futureTimer.C:
  2537  			bc.procFutureBlocks()
  2538  		case <-bc.quit:
  2539  			return
  2540  		}
  2541  	}
  2542  }
  2543  
  2544  // BadBlockArgs represents the entries in the list returned when bad blocks are queried.
  2545  type BadBlockArgs struct {
  2546  	Hash  common.Hash  `json:"hash"`
  2547  	Block *types.Block `json:"block"`
  2548  }
  2549  
  2550  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network.
  2551  func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
  2552  	blocks, err := bc.db.ReadAllBadBlocks()
  2553  	if err != nil {
  2554  		return nil, err
  2555  	}
  2556  	badBlockArgs := make([]BadBlockArgs, len(blocks))
  2557  	for i, block := range blocks {
  2558  		hash := block.Hash()
  2559  		badBlockArgs[i] = BadBlockArgs{Hash: hash, Block: block}
  2560  	}
  2561  	return badBlockArgs, nil
  2562  }
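
// A hypothetical query sketch (not part of the original file), e.g. from a
// debug RPC handler:
//
//	args, err := bc.BadBlocks()
//	if err != nil {
//		return err
//	}
//	for _, bad := range args {
//		logger.Warn("bad block seen", "hash", bad.Hash, "number", bad.Block.NumberU64())
//	}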
  2563  
  2564  // istanbul BFT
  2565  // HasBadBlock reports whether the block with the given hash is recorded as a bad block.
  2566  func (bc *BlockChain) HasBadBlock(hash common.Hash) bool {
  2567  	return bc.db.ReadBadBlock(hash) != nil
  2568  }
  2572  
  2573  // reportBlock logs a bad block error.
  2574  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  2575  	badBlockCounter.Inc(1)
  2576  	bc.db.WriteBadBlock(block)
  2577  
  2578  	var receiptString string
  2579  	for i, receipt := range receipts {
  2580  		receiptString += fmt.Sprintf("\t %d: tx: %v status: %v gas: %v contract: %v bloom: %x logs: %v\n",
  2581  			i, receipt.TxHash.Hex(), receipt.Status, receipt.GasUsed, receipt.ContractAddress.Hex(),
  2582  			receipt.Bloom, receipt.Logs)
  2583  	}
  2584  	logger.Error(fmt.Sprintf(`########## BAD BLOCK ######### Chain config: %v Number: %v Hash: 0x%x Receipt: %v Error: %v`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  2585  }
  2586  
  2587  // InsertHeaderChain attempts to insert the given header chain into the local
  2588  // chain, possibly creating a reorg. If an error is returned, it will return the
  2589  // index number of the failing header as well as an error describing what went wrong.
  2590  //
  2591  // The checkFreq parameter can be used to fine-tune how sparsely nonce (seal)
  2592  // verification is done. The reason behind the optional check is that some of
  2593  // the header retrieval mechanisms already need to verify nonces, and nonces
  2594  // can be verified sparsely, without needing to check each one.
  2595  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  2596  	start := time.Now()
  2597  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  2598  		return i, err
  2599  	}
  2600  
  2601  	// Make sure only one thread manipulates the chain at once
  2602  	bc.mu.Lock()
  2603  	defer bc.mu.Unlock()
  2604  
  2605  	bc.wg.Add(1)
  2606  	defer bc.wg.Done()
  2607  
  2608  	whFunc := func(header *types.Header) error {
  2609  		_, err := bc.hc.WriteHeader(header)
  2610  		return err
  2611  	}
  2612  
  2613  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  2614  }
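
// A hypothetical caller sketch (not part of the original file), assuming the
// headers were fetched by a downloader; a checkFreq of 1 requests verification
// of every header, while larger values verify only sparsely.
//
//	if idx, err := bc.InsertHeaderChain(headers, 1); err != nil {
//		logger.Error("header chain rejected", "index", idx, "err", err)
//	}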
  2615  
  2616  // CurrentHeader retrieves the current head header of the canonical chain. The
  2617  // header is retrieved from the HeaderChain's internal cache.
  2618  func (bc *BlockChain) CurrentHeader() *types.Header {
  2619  	return bc.hc.CurrentHeader()
  2620  }
  2621  
  2622  // GetTd retrieves a block's total blockscore in the canonical chain from the
  2623  // database by hash and number, caching it if found.
  2624  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  2625  	return bc.hc.GetTd(hash, number)
  2626  }
  2627  
  2628  // GetTdByHash retrieves a block's total blockscore in the canonical chain from the
  2629  // database by hash, caching it if found.
  2630  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  2631  	return bc.hc.GetTdByHash(hash)
  2632  }
  2633  
  2634  // GetHeader retrieves a block header from the database by hash and number,
  2635  // caching it if found.
  2636  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  2637  	return bc.hc.GetHeader(hash, number)
  2638  }
  2639  
  2640  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  2641  // found.
  2642  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  2643  	return bc.hc.GetHeaderByHash(hash)
  2644  }
  2645  
  2646  // HasHeader checks if a block header is present in the database or not, caching
  2647  // it if present.
  2648  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  2649  	return bc.hc.HasHeader(hash, number)
  2650  }
  2651  
  2652  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  2653  // hash, fetching towards the genesis block.
  2654  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  2655  	return bc.hc.GetBlockHashesFromHash(hash, max)
  2656  }
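
// For example (hypothetical sketch, not part of the original file), collecting
// the hashes of up to 16 blocks below the current head, walking towards genesis:
//
//	head := bc.CurrentHeader()
//	ancestors := bc.GetBlockHashesFromHash(head.Hash(), 16)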
  2657  
  2658  // GetHeaderByNumber retrieves a block header from the database by number,
  2659  // caching it (associated with its hash) if found.
  2660  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  2661  	return bc.hc.GetHeaderByNumber(number)
  2662  }
  2663  
  2664  // Config retrieves the blockchain's chain configuration.
  2665  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  2666  
  2667  // Engine retrieves the blockchain's consensus engine.
  2668  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  2669  
  2670  // Snapshots returns the blockchain snapshot tree.
  2671  func (bc *BlockChain) Snapshots() *snapshot.Tree {
  2672  	return bc.snaps
  2673  }
  2674  
  2675  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  2676  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  2677  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  2678  }
  2679  
  2680  // SubscribeChainEvent registers a subscription of ChainEvent.
  2681  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  2682  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  2683  }
  2684  
  2685  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  2686  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  2687  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  2688  }
  2689  
  2690  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  2691  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  2692  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  2693  }
  2694  
  2695  // SubscribeLogsEvent registers a subscription of []*types.Log.
  2696  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  2697  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  2698  }
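
// A typical consumer sketch for the subscriptions above (hypothetical, not
// part of the original file): subscribe with a buffered channel and drain both
// the event channel and the subscription's error channel.
//
//	ch := make(chan ChainHeadEvent, 16)
//	sub := bc.SubscribeChainHeadEvent(ch)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case ev := <-ch:
//			logger.Info("new head", "number", ev.Block.NumberU64())
//		case err := <-sub.Err():
//			logger.Error("subscription ended", "err", err)
//			return
//		}
//	}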
  2699  
  2700  // isArchiveMode returns whether the current blockchain is in archive mode or not.
  2701  // If cacheConfig.ArchiveMode is true, trie caching is disabled.
  2702  func (bc *BlockChain) isArchiveMode() bool {
  2703  	return bc.cacheConfig.ArchiveMode
  2704  }
  2705  
  2706  // IsParallelDBWrite returns whether parallel write is enabled or not.
  2707  // If enabled, data written in WriteBlockWithState is written in a parallel manner.
  2708  func (bc *BlockChain) IsParallelDBWrite() bool {
  2709  	return bc.parallelDBWrite
  2710  }
  2711  
  2712  // IsSenderTxHashIndexingEnabled returns whether storing senderTxHash-to-txHash
  2713  // mapping information is enabled or not.
  2714  func (bc *BlockChain) IsSenderTxHashIndexingEnabled() bool {
  2715  	return bc.cacheConfig.SenderTxHashIndexing
  2716  }
  2717  
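// SaveTrieNodeCacheToDisk asynchronously saves the in-memory trie node cache
// to the configured FastCacheFileDir, if saving is currently allowed.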
  2718  func (bc *BlockChain) SaveTrieNodeCacheToDisk() error {
  2719  	if err := bc.stateCache.TrieDB().CanSaveTrieNodeCacheToFile(); err != nil {
  2720  		return err
  2721  	}
  2722  	go bc.stateCache.TrieDB().SaveTrieNodeCacheToFile(bc.cacheConfig.TrieNodeCacheConfig.FastCacheFileDir, runtime.NumCPU()/2)
  2723  	return nil
  2724  }
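
// A hypothetical trigger sketch (not part of the original file), e.g. from an
// admin API before shutdown; the dump itself runs in the background:
//
//	if err := bc.SaveTrieNodeCacheToDisk(); err != nil {
//		logger.Warn("trie node cache dump not started", "err", err)
//	}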
  2725  
  2726  // ApplyTransaction attempts to apply a transaction to the given state database
  2727  // and uses the input parameters for its environment. It returns the receipt for
  2728  // the transaction, an internal transaction trace if tracing is enabled, and an
  2729  // error if the transaction failed, indicating the block was invalid.
  2730  func (bc *BlockChain) ApplyTransaction(chainConfig *params.ChainConfig, author *common.Address, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, vmConfig *vm.Config) (*types.Receipt, *vm.InternalTxTrace, error) {
  2731  	// TODO-Klaytn We reject transactions with unexpected gasPrice and do not put the transaction into TxPool.
  2732  	//         And we run transactions regardless of gasPrice if we push transactions in the TxPool.
  2733  	/*
  2734  		// istanbul BFT
  2735  		if tx.GasPrice() != nil && tx.GasPrice().Cmp(common.Big0) > 0 {
  2736  			return nil, uint64(0), ErrInvalidGasPrice
  2737  		}
  2738  	*/
  2739  
  2740  	blockNumber := header.Number.Uint64()
  2741  
  2742  	// validation for each transaction before execution
  2743  	if err := tx.Validate(statedb, blockNumber); err != nil {
  2744  		return nil, nil, err
  2745  	}
  2746  
  2747  	msg, err := tx.AsMessageWithAccountKeyPicker(types.MakeSigner(chainConfig, header.Number), statedb, blockNumber)
  2748  	if err != nil {
  2749  		return nil, nil, err
  2750  	}
  2751  	// Create a new context to be used in the EVM environment
  2752  	blockContext := NewEVMBlockContext(header, bc, author)
  2753  	txContext := NewEVMTxContext(msg, header)
  2754  	// Create a new environment which holds all relevant information
  2755  	// about the transaction and calling mechanisms.
  2756  	vmenv := vm.NewEVM(blockContext, txContext, statedb, chainConfig, vmConfig)
  2757  	// Apply the transaction to the current state (included in the env)
  2758  	result, err := ApplyMessage(vmenv, msg)
  2759  	if err != nil {
  2760  		return nil, nil, err
  2761  	}
  2762  
  2763  	var internalTrace *vm.InternalTxTrace
  2764  	if vmConfig.EnableInternalTxTracing {
  2765  		internalTrace, err = GetInternalTxTrace(vmConfig.Tracer)
  2766  		if err != nil {
  2767  			logger.Error("failed to get tracing result from a transaction", "txHash", tx.Hash().String(), "err", err)
  2768  		}
  2769  	}
  2770  	// Update the state with pending changes
  2771  	statedb.Finalise(true, false)
  2772  	*usedGas += result.UsedGas
  2773  
  2774  	receipt := types.NewReceipt(result.VmExecutionStatus, tx.Hash(), result.UsedGas)
  2775  	// if the transaction created a contract, store the creation address in the receipt.
  2776  	msg.FillContractAddress(vmenv.Origin, receipt)
  2777  	// Set the receipt logs and create a bloom for filtering
  2778  	receipt.Logs = statedb.GetLogs(tx.Hash())
  2779  	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
  2780  
  2781  	return receipt, internalTrace, err
  2782  }
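
// A hypothetical block-processing sketch (not part of the original file),
// assuming statedb, header, author and txs were prepared by the caller:
//
//	var (
//		usedGas  uint64
//		receipts types.Receipts
//	)
//	vmConfig := &vm.Config{}
//	for _, tx := range txs {
//		receipt, _, err := bc.ApplyTransaction(bc.Config(), &author, statedb, header, tx, &usedGas, vmConfig)
//		if err != nil {
//			return err
//		}
//		receipts = append(receipts, receipt)
//	}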
  2783  
  2784  func GetInternalTxTrace(tracer vm.Tracer) (*vm.InternalTxTrace, error) {
  2785  	var (
  2786  		internalTxTrace *vm.InternalTxTrace
  2787  		err             error
  2788  	)
  2789  	switch tracer := tracer.(type) {
  2790  	case *vm.InternalTxTracer:
  2791  		internalTxTrace, err = tracer.GetResult()
  2792  		if err != nil {
  2793  			return nil, err
  2794  		}
  2795  	default:
  2796  		logger.Error("To trace internal transactions, the VM tracer type should be vm.InternalTxTracer", "actualType", reflect.TypeOf(tracer).String())
  2797  		return nil, ErrInvalidTracer
  2798  	}
  2799  	return internalTxTrace, nil
  2800  }
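
// To obtain such a trace (hypothetical sketch, not part of the original file;
// vm.NewInternalTxTracer is assumed to be the tracer constructor), wire an
// InternalTxTracer into the vm.Config used for execution, then collect the
// result after the transaction has run:
//
//	tracer := vm.NewInternalTxTracer()
//	vmConfig := &vm.Config{EnableInternalTxTracing: true, Tracer: tracer}
//	// ... execute the transaction with vmConfig ...
//	trace, err := GetInternalTxTrace(tracer)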
  2801  
  2802  // CheckBlockChainVersion checks the version of the current database and upgrades it if possible.
  2803  func CheckBlockChainVersion(chainDB database.DBManager) error {
  2804  	bcVersion := chainDB.ReadDatabaseVersion()
  2805  	if bcVersion != nil && *bcVersion > BlockChainVersion {
  2806  		return fmt.Errorf("database version is v%d, Klaytn %s only supports v%d", *bcVersion, params.Version, BlockChainVersion)
  2807  	} else if bcVersion == nil || *bcVersion < BlockChainVersion {
  2808  		bcVersionStr := "N/A"
  2809  		if bcVersion != nil {
  2810  			bcVersionStr = strconv.Itoa(int(*bcVersion))
  2811  		}
  2812  		logger.Warn("Upgrade database version", "from", bcVersionStr, "to", BlockChainVersion)
  2813  		chainDB.WriteDatabaseVersion(BlockChainVersion)
  2814  	}
  2815  	return nil
  2816  }
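
// A hypothetical startup sketch (not part of the original file): reject an
// unsupported database, or upgrade an older one, before constructing the chain.
//
//	if err := CheckBlockChainVersion(chainDB); err != nil {
//		return nil, err
//	}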