github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"github.com/intfoundation/intchain/core/rawdb"
    24  	"io"
    25  	"math/big"
    26  	mrand "math/rand"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/hashicorp/golang-lru"
    32  	"github.com/intfoundation/intchain/common"
    33  	"github.com/intfoundation/intchain/common/mclock"
    34  	"github.com/intfoundation/intchain/common/prque"
    35  	"github.com/intfoundation/intchain/consensus"
    36  	"github.com/intfoundation/intchain/core/state"
    37  	"github.com/intfoundation/intchain/core/types"
    38  	"github.com/intfoundation/intchain/core/vm"
    39  	"github.com/intfoundation/intchain/crypto"
    40  	"github.com/intfoundation/intchain/event"
    41  	"github.com/intfoundation/intchain/intdb"
    42  	"github.com/intfoundation/intchain/log"
    43  	"github.com/intfoundation/intchain/metrics"
    44  	"github.com/intfoundation/intchain/params"
    45  	"github.com/intfoundation/intchain/rlp"
    46  	"github.com/intfoundation/intchain/trie"
    47  )
    48  
    49  var (
    50  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    51  
    52  	ErrNoGenesis = errors.New("Genesis not found in chain")
    53  )
    54  
    55  const (
    56  	bodyCacheLimit      = 256
    57  	blockCacheLimit     = 256
    58  	receiptsCacheLimit  = 32
    59  	maxFutureBlocks     = 256
    60  	maxTimeFutureBlocks = 30
    61  	badBlockLimit       = 10
    62  	triesInMemory       = 128
    63  
    64  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    65  	BlockChainVersion = 3
    66  )
    67  
    68  // CacheConfig contains the configuration values for the trie caching/pruning
    69  // that's resident in a blockchain.
    70  type CacheConfig struct {
    71  	TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
    72  
    73  	TrieDirtyLimit    int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
    74  	TrieDirtyDisabled bool          // Whether to disable trie write caching and GC altogether (archive node)
    75  	TrieTimeLimit     time.Duration // Time limit after which to flush the current in-memory trie to disk
    76  }
    77  
    78  // BlockChain represents the canonical chain given a database with a genesis
    79  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
    80  //
    81  // Importing blocks into the blockchain happens according to the set of rules
    82  // defined by the two stage Validator. Processing of blocks is done using the
    83  // Processor which processes the included transactions. The validation of the
    84  // state is done in the second part of the Validator. A failure at either stage
    85  // aborts the import.
    86  //
    87  // The BlockChain also helps in returning blocks from **any** chain included
    88  // in the database as well as blocks that represent the canonical chain. It is
    89  // important to note that GetBlock can return any block and does not need to be
    90  // included in the canonical chain, whereas GetBlockByNumber always represents the
    91  // canonical chain.
    92  type BlockChain struct {
    93  	chainConfig *params.ChainConfig // Chain & network configuration
    94  	cacheConfig *CacheConfig        // Cache configuration for pruning
    95  
    96  	db     intdb.Database // Low level persistent database to store final content in
    97  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
    98  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
    99  
   100  	hc                   *HeaderChain
   101  	rmLogsFeed           event.Feed
   102  	chainFeed            event.Feed
   103  	chainSideFeed        event.Feed
   104  	chainHeadFeed        event.Feed
   105  	logsFeed             event.Feed
   106  	createChildChainFeed event.Feed
   107  	startMiningFeed      event.Feed
   108  	stopMiningFeed       event.Feed
   109  
   110  	scope        event.SubscriptionScope
   111  	genesisBlock *types.Block
   112  
   113  	chainmu sync.RWMutex // blockchain insertion lock
   114  
   115  	currentBlock     atomic.Value // Current head of the block chain
   116  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   117  
   118  	stateCache    state.Database // State database to reuse between imports (contains state cache)
   119  	bodyCache     *lru.Cache     // Cache for the most recent block bodies
   120  	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   121  	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
   122  	blockCache    *lru.Cache     // Cache for the most recent entire blocks
   123  	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing
   124  
   125  	quit    chan struct{} // blockchain quit channel
   126  	running int32         // running must be accessed atomically
   127  	// procInterrupt must be accessed atomically
   128  	procInterrupt int32          // interrupt signaler for block processing
   129  	wg            sync.WaitGroup // chain processing wait group for shutting down
   130  
   131  	engine    consensus.Engine
   132  	validator Validator // Block and state validator interface
   133  	processor Processor // Block transaction processor interface
   134  	vmConfig  vm.Config
   135  
   136  	badBlocks *lru.Cache // Bad block cache
   137  
   138  	cch    CrossChainHelper
   139  	logger log.Logger
   140  }
   141  
   142  // NewBlockChain returns a fully initialised block chain using information
   143  // available in the database. It initialises the default Ethereum Validator and
   144  // Processor.
   145  func NewBlockChain(db intdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, cch CrossChainHelper) (*BlockChain, error) {
   146  	if cacheConfig == nil {
   147  		cacheConfig = &CacheConfig{
   148  			TrieCleanLimit: 256,
   149  			TrieDirtyLimit: 256,
   150  			TrieTimeLimit:  5 * time.Minute,
   151  		}
   152  	}
   153  	bodyCache, _ := lru.New(bodyCacheLimit)
   154  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   155  	receiptsCache, _ := lru.New(receiptsCacheLimit)
   156  	blockCache, _ := lru.New(blockCacheLimit)
   157  	futureBlocks, _ := lru.New(maxFutureBlocks)
   158  	badBlocks, _ := lru.New(badBlockLimit)
   159  
   160  	bc := &BlockChain{
   161  		chainConfig:   chainConfig,
   162  		cacheConfig:   cacheConfig,
   163  		db:            db,
   164  		triegc:        prque.New(nil),
   165  		stateCache:    state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
   166  		quit:          make(chan struct{}),
   167  		bodyCache:     bodyCache,
   168  		bodyRLPCache:  bodyRLPCache,
   169  		receiptsCache: receiptsCache,
   170  		blockCache:    blockCache,
   171  		futureBlocks:  futureBlocks,
   172  		engine:        engine,
   173  		vmConfig:      vmConfig,
   174  		badBlocks:     badBlocks,
   175  		cch:           cch,
   176  		logger:        chainConfig.ChainLogger,
   177  	}
   178  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   179  	bc.processor = NewStateProcessor(chainConfig, bc, engine, cch)
   180  
   181  	var err error
   182  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   183  	if err != nil {
   184  		return nil, err
   185  	}
   186  	bc.genesisBlock = bc.GetBlockByNumber(0)
   187  	if bc.genesisBlock == nil {
   188  		return nil, ErrNoGenesis
   189  	}
   190  	if err := bc.loadLastState(); err != nil {
   191  		return nil, err
   192  	}
   193  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   194  	for hash := range BadHashes {
   195  		if header := bc.GetHeaderByHash(hash); header != nil {
   196  			// get the canonical block corresponding to the offending header's number
   197  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   198  			// make sure the headerByNumber (if present) is in our current canonical chain
   199  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   200  				bc.logger.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   201  				bc.SetHead(header.Number.Uint64() - 1)
   202  				bc.logger.Error("Chain rewind was successful, resuming normal operation")
   203  			}
   204  		}
   205  	}
   206  	// Take ownership of this particular state
   207  	go bc.update()
   208  	return bc, nil
   209  }
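// A minimal construction sketch (illustrative only, not taken from this repository):
// `db`, `engine` and `cch` are assumed to be supplied by the caller, e.g. an intdb
// instance, an IPBFT engine and a cross-chain helper. Passing a nil CacheConfig
// selects the defaults shown above.
//
//	chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, cch)
//	if err != nil {
//		log.Error("failed to initialise blockchain", "err", err)
//		return
//	}
//	defer chain.Stop()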
   210  
   211  func (bc *BlockChain) getProcInterrupt() bool {
   212  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   213  }
   214  
   215  // loadLastState loads the last known chain state from the database. This method
   216  // assumes that the chain manager mutex is held.
   217  func (bc *BlockChain) loadLastState() error {
   218  	// Restore the last known head block
   219  	head := rawdb.ReadHeadBlockHash(bc.db)
   220  	if head == (common.Hash{}) {
   221  		// Corrupt or empty database, init from scratch
   222  		bc.logger.Warn("Empty database, resetting chain")
   223  		return bc.Reset()
   224  	}
   225  	// Make sure the entire head block is available
   226  	currentBlock := bc.GetBlockByHash(head)
   227  	if currentBlock == nil {
   228  		// Corrupt or empty database, init from scratch
   229  		bc.logger.Warn("Head block missing, resetting chain", "hash", head)
   230  		return bc.Reset()
   231  	}
   232  	// Make sure the state associated with the block is available
   233  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   234  		// Dangling block without a state associated, init from scratch
   235  		bc.logger.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "err", err)
   236  		if err := bc.repair(&currentBlock); err != nil {
   237  			return err
   238  		}
   239  	}
   240  	// Everything seems to be fine, set as the head block
   241  	bc.currentBlock.Store(currentBlock)
   242  
   243  	// Restore the last known head header
   244  	currentHeader := currentBlock.Header()
   245  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   246  		if header := bc.GetHeaderByHash(head); header != nil {
   247  			currentHeader = header
   248  		}
   249  	}
   250  	bc.hc.SetCurrentHeader(currentHeader)
   251  
   252  	// Restore the last known head fast block
   253  	bc.currentFastBlock.Store(currentBlock)
   254  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   255  		if block := bc.GetBlockByHash(head); block != nil {
   256  			bc.currentFastBlock.Store(block)
   257  		}
   258  	}
   259  
   260  	// Issue a status log for the user
   261  	currentFastBlock := bc.CurrentFastBlock()
   262  
   263  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   264  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   265  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   266  
   267  	bc.logger.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   268  	bc.logger.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   269  	bc.logger.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   270  
   271  	return nil
   272  }
   273  
   274  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   275  // above the new head will be deleted and the new one set. In the case of blocks
   276  // though, the head may be further rewound if block bodies are missing (non-archive
   277  // nodes after a fast sync).
   278  func (bc *BlockChain) SetHead(head uint64) error {
   279  	bc.logger.Warn("Rewinding blockchain", "target", head)
   280  
   281  	bc.chainmu.Lock()
   282  	defer bc.chainmu.Unlock()
   283  
   284  	// Rewind the header chain, deleting all block bodies until then
   285  	delFn := func(db intdb.Writer, hash common.Hash, num uint64) {
   286  		rawdb.DeleteBody(db, hash, num)
   287  	}
   288  	bc.hc.SetHead(head, delFn)
   289  	currentHeader := bc.hc.CurrentHeader()
   290  
   291  	// Clear out any stale content from the caches
   292  	bc.bodyCache.Purge()
   293  	bc.bodyRLPCache.Purge()
   294  	bc.receiptsCache.Purge()
   295  	bc.blockCache.Purge()
   296  	bc.futureBlocks.Purge()
   297  
   298  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   299  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   300  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   301  	}
   302  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   303  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   304  			// Rewound state missing, rolled back to before pivot, reset to genesis
   305  			bc.currentBlock.Store(bc.genesisBlock)
   306  		}
   307  	}
   308  	// Rewind the fast block in a simpleton way to the target head
   309  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   310  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   311  	}
   312  	// If either block reached nil, reset to the genesis state
   313  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   314  		bc.currentBlock.Store(bc.genesisBlock)
   315  	}
   316  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   317  		bc.currentFastBlock.Store(bc.genesisBlock)
   318  	}
   319  	currentBlock := bc.CurrentBlock()
   320  	currentFastBlock := bc.CurrentFastBlock()
   321  
   322  	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
   323  	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())
   324  
   325  	return bc.loadLastState()
   326  }
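// Usage sketch: SetHead is how an operator (or the bad-hash check in NewBlockChain
// above) rewinds the chain; the target number here is a placeholder.
//
//	if err := bc.SetHead(targetNumber); err != nil {
//		bc.logger.Error("rewind failed", "err", err)
//	}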
   327  
   328  // FastSyncCommitHead sets the current head block to the one defined by the hash
   329  // irrespective of what the chain contents were prior.
   330  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   331  	// Make sure that both the block and its state trie exist
   332  	block := bc.GetBlockByHash(hash)
   333  	if block == nil {
   334  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   335  	}
   336  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
   337  		return err
   338  	}
   339  	// If all checks out, manually set the head block
   340  	bc.chainmu.Lock()
   341  	bc.currentBlock.Store(block)
   342  	bc.chainmu.Unlock()
   343  
   344  	bc.logger.Info("Committed new head block", "number", block.Number(), "hash", hash)
   345  	return nil
   346  }
   347  
   348  // GasLimit returns the gas limit of the current HEAD block.
   349  func (bc *BlockChain) GasLimit() uint64 {
   350  	return bc.CurrentBlock().GasLimit()
   351  }
   352  
   353  // CurrentBlock retrieves the current head block of the canonical chain. The
   354  // block is retrieved from the blockchain's internal cache.
   355  func (bc *BlockChain) CurrentBlock() *types.Block {
   356  	return bc.currentBlock.Load().(*types.Block)
   357  }
   358  
   359  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   360  // chain. The block is retrieved from the blockchain's internal cache.
   361  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   362  	return bc.currentFastBlock.Load().(*types.Block)
   363  }
   364  
   365  // Validator returns the current validator.
   366  func (bc *BlockChain) Validator() Validator {
   367  	return bc.validator
   368  }
   369  
   370  // Processor returns the current processor.
   371  func (bc *BlockChain) Processor() Processor {
   372  	return bc.processor
   373  }
   374  
   375  // State returns a new mutable state based on the current HEAD block.
   376  func (bc *BlockChain) State() (*state.StateDB, error) {
   377  	return bc.StateAt(bc.CurrentBlock().Root())
   378  }
   379  
   380  // StateAt returns a new mutable state based on a particular point in time.
   381  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   382  	return state.New(root, bc.stateCache)
   383  }
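// Sketch of reading historical state via StateAt; the block number and address are
// placeholders, and the header is assumed to be canonical with its state still
// present (otherwise state.New returns an error).
//
//	header := bc.GetHeaderByNumber(n)
//	statedb, err := bc.StateAt(header.Root)
//	if err != nil {
//		return err
//	}
//	balance := statedb.GetBalance(addr)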
   384  
   385  // StateCache returns the caching database underpinning the blockchain instance.
   386  func (bc *BlockChain) StateCache() state.Database {
   387  	return bc.stateCache
   388  }
   389  
   390  // Reset purges the entire blockchain, restoring it to its genesis state.
   391  func (bc *BlockChain) Reset() error {
   392  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   393  }
   394  
   395  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   396  // specified genesis state.
   397  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   398  	// Dump the entire block chain and purge the caches
   399  	if err := bc.SetHead(0); err != nil {
   400  		return err
   401  	}
   402  	bc.chainmu.Lock()
   403  	defer bc.chainmu.Unlock()
   404  
   405  	// Prepare the genesis block and reinitialise the chain
   406  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   407  		bc.logger.Crit("Failed to write genesis block TD", "err", err)
   408  	}
   409  	rawdb.WriteBlock(bc.db, genesis)
   410  
   411  	bc.genesisBlock = genesis
   412  	bc.insert(bc.genesisBlock)
   413  	bc.currentBlock.Store(bc.genesisBlock)
   414  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   415  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   416  	bc.currentFastBlock.Store(bc.genesisBlock)
   417  
   418  	return nil
   419  }
   420  
   421  // repair tries to repair the current blockchain by rolling back the current block
   422  // until one with associated state is found. This is needed to fix incomplete db
   423  // writes caused either by crashes/power outages, or simply non-committed tries.
   424  //
   425  // This method only rolls back the current block. The current header and current
   426  // fast block are left intact.
   427  func (bc *BlockChain) repair(head **types.Block) error {
   428  	for {
   429  		// Abort if we've rewound to a head block that does have associated state
   430  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   431  			bc.logger.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   432  			return nil
   433  		}
   434  		// Otherwise rewind one block and recheck state availability there
   435  		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   436  		if block == nil {
   437  			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
   438  		}
   439  		*head = block
   440  	}
   441  }
   442  
   443  // Export writes the active chain to the given writer.
   444  func (bc *BlockChain) Export(w io.Writer) error {
   445  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   446  }
   447  
   448  // ExportN writes a subset of the active chain to the given writer.
   449  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   450  	bc.chainmu.RLock()
   451  	defer bc.chainmu.RUnlock()
   452  
   453  	if first > last {
   454  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   455  	}
   456  	bc.logger.Info("Exporting batch of blocks", "count", last-first+1)
   457  
   458  	for nr := first; nr <= last; nr++ {
   459  		block := bc.GetBlockByNumber(nr)
   460  		if block == nil {
   461  			return fmt.Errorf("export failed on #%d: not found", nr)
   462  		}
   463  
   464  		if err := block.EncodeRLP(w); err != nil {
   465  			return err
   466  		}
   467  	}
   468  
   469  	return nil
   470  }
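// Illustrative export of a block range to a file; the path is hypothetical, and any
// io.Writer works since blocks are written as consecutive RLP values.
//
//	f, err := os.Create("chain-segment.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	return bc.ExportN(f, 0, 1000)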
   471  
   472  // insert injects a new head block into the current block chain. This method
   473  // assumes that the block is indeed a true head. It will also reset the head
   474  // header and the head fast sync block to this very same block if they are older
   475  // or if they are on a different side chain.
   476  //
   477  // Note, this function assumes that the `chainmu` mutex is held!
   478  func (bc *BlockChain) insert(block *types.Block) {
   479  	// If the block is on a side chain or an unknown one, force other heads onto it too
   480  	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   481  
   482  	// Add the block to the canonical chain number scheme and mark as the head
   483  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
   484  	rawdb.WriteHeadBlockHash(bc.db, block.Hash())
   485  
   486  	bc.currentBlock.Store(block)
   487  
   488  	// If the block is better than our head or is on a different chain, force update heads
   489  	if updateHeads {
   490  		bc.hc.SetCurrentHeader(block.Header())
   491  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
   492  
   493  		bc.currentFastBlock.Store(block)
   494  	}
   495  
   496  	bc.logger.Info(fmt.Sprintf("BlockChain insert block number %v, hash: %x", block.NumberU64(), block.Hash()))
   497  	ibCbMap := GetInsertBlockCbMap()
   498  	for _, cb := range ibCbMap {
   499  		cb(bc, block)
   500  	}
   501  }
   502  
   503  // Genesis retrieves the chain's genesis block.
   504  func (bc *BlockChain) Genesis() *types.Block {
   505  	return bc.genesisBlock
   506  }
   507  
   508  // GetBody retrieves a block body (transactions and uncles) from the database by
   509  // hash, caching it if found.
   510  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   511  	// Short circuit if the body's already in the cache, retrieve otherwise
   512  	if cached, ok := bc.bodyCache.Get(hash); ok {
   513  		body := cached.(*types.Body)
   514  		return body
   515  	}
   516  	number := bc.hc.GetBlockNumber(hash)
   517  	if number == nil {
   518  		return nil
   519  	}
   520  	body := rawdb.ReadBody(bc.db, hash, *number)
   521  	if body == nil {
   522  		return nil
   523  	}
   524  	// Cache the found body for next time and return
   525  	bc.bodyCache.Add(hash, body)
   526  	return body
   527  }
   528  
   529  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   530  // caching it if found.
   531  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   532  	// Short circuit if the body's already in the cache, retrieve otherwise
   533  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   534  		return cached.(rlp.RawValue)
   535  	}
   536  	number := bc.hc.GetBlockNumber(hash)
   537  	if number == nil {
   538  		return nil
   539  	}
   540  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   541  	if len(body) == 0 {
   542  		return nil
   543  	}
   544  	// Cache the found body for next time and return
   545  	bc.bodyRLPCache.Add(hash, body)
   546  	return body
   547  }
   548  
   549  // HasBlock checks if a block is fully present in the database or not.
   550  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   551  	if bc.blockCache.Contains(hash) {
   552  		return true
   553  	}
   554  	return rawdb.HasBody(bc.db, hash, number)
   555  }
   556  
   557  // HasState checks if the state trie is fully present in the database or not.
   558  func (bc *BlockChain) HasState(hash common.Hash) bool {
   559  	_, err := bc.stateCache.OpenTrie(hash)
   560  	return err == nil
   561  }
   562  
   563  // HasBlockAndState checks if a block and its associated state trie are fully
   564  // present in the database or not, caching it if present.
   565  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   566  	// Check first that the block itself is known
   567  	block := bc.GetBlock(hash, number)
   568  	if block == nil {
   569  		return false
   570  	}
   571  	return bc.HasState(block.Root())
   572  }
   573  
   574  // GetBlock retrieves a block from the database by hash and number,
   575  // caching it if found.
   576  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   577  	// Short circuit if the block's already in the cache, retrieve otherwise
   578  	if block, ok := bc.blockCache.Get(hash); ok {
   579  		return block.(*types.Block)
   580  	}
   581  	block := rawdb.ReadBlock(bc.db, hash, number)
   582  	if block == nil {
   583  		return nil
   584  	}
   585  	// Cache the found block for next time and return
   586  	bc.blockCache.Add(block.Hash(), block)
   587  	return block
   588  }
   589  
   590  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   591  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   592  	number := bc.hc.GetBlockNumber(hash)
   593  	if number == nil {
   594  		return nil
   595  	}
   596  	return bc.GetBlock(hash, *number)
   597  }
   598  
   599  // GetBlockByNumber retrieves a block from the database by number, caching it
   600  // (associated with its hash) if found.
   601  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   602  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   603  	if hash == (common.Hash{}) {
   604  		return nil
   605  	}
   606  	return bc.GetBlock(hash, number)
   607  }
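// Lookup sketch contrasting the two accessors above: GetBlockByHash can return a
// block from any stored chain, while GetBlockByNumber only resolves the canonical
// chain; both return nil when nothing is found.
//
//	canon := bc.GetBlockByNumber(42) // nil unless block 42 is canonical
//	side := bc.GetBlockByHash(hash)  // may be a non-canonical block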
   608  
   609  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   610  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   611  	if receipts, ok := bc.receiptsCache.Get(hash); ok {
   612  		return receipts.(types.Receipts)
   613  	}
   614  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   615  	if number == nil {
   616  		return nil
   617  	}
   618  	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
   619  	if receipts == nil {
   620  		return nil
   621  	}
   622  	bc.receiptsCache.Add(hash, receipts)
   623  	return receipts
   624  }
   625  
   626  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   627  // [deprecated by intprotocol/62]
   628  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   629  	number := bc.hc.GetBlockNumber(hash)
   630  	if number == nil {
   631  		return nil
   632  	}
   633  	for i := 0; i < n; i++ {
   634  		block := bc.GetBlock(hash, *number)
   635  		if block == nil {
   636  			break
   637  		}
   638  		blocks = append(blocks, block)
   639  		hash = block.ParentHash()
   640  		*number--
   641  	}
   642  	return
   643  }
   644  
   645  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   646  // a specific distance is reached.
   647  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   648  	uncles := []*types.Header{}
   649  	for i := 0; block != nil && i < length; i++ {
   650  		uncles = append(uncles, block.Uncles()...)
   651  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   652  	}
   653  	return uncles
   654  }
   655  
   656  // ValidateBlock executes and validates the given block against the current latest block.
   657  func (bc *BlockChain) ValidateBlock(block *types.Block) (*state.StateDB, types.Receipts, *types.PendingOps, error) {
   658  	// If the header is a banned one, straight out abort
   659  	if BadHashes[block.Hash()] {
   660  		return nil, nil, nil, ErrBlacklistedHash
   661  	}
   662  
   663  	// Header verify
   664  	if err := bc.engine.(consensus.IPBFT).VerifyHeaderBeforeConsensus(bc, block.Header(), true); err != nil {
   665  		return nil, nil, nil, err
   666  	}
   667  
   668  	// Body verify
   669  	if err := bc.Validator().ValidateBody(block); err != nil {
   670  		log.Debugf("ValidateBlock-ValidateBody return with error: %v", err)
   671  		return nil, nil, nil, err
   672  	}
   673  
   674  	// Fetch the parent block; its post-state is the starting point for execution
   675  	parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   676  	state, err := state.New(parent.Root(), bc.stateCache)
   677  	if err != nil {
   678  		log.Debugf("ValidateBlock-state.New return with error: %v", err)
   679  		return nil, nil, nil, err
   680  	}
   681  
   682  	// Process block using the parent state as reference point.
   683  	receipts, _, usedGas, ops, err := bc.processor.Process(block, state, bc.vmConfig)
   684  	if err != nil {
   685  		log.Debugf("ValidateBlock-Process return with error: %v", err)
   686  		return nil, nil, nil, err
   687  	}
   688  
   689  	// Validate the state using the default validator
   690  	err = bc.Validator().ValidateState(block, state, receipts, usedGas)
   691  	if err != nil {
   692  		log.Debugf("ValidateBlock-ValidateState return with error: %v", err)
   693  		return nil, nil, nil, err
   694  	}
   695  
   696  	return state, receipts, ops, nil
   697  }
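// Call sketch: a consensus engine would typically run ValidateBlock on a proposed
// block before voting on it (the `proposal` variable is hypothetical).
//
//	if _, _, _, err := bc.ValidateBlock(proposal); err != nil {
//		// reject the proposal / vote nil
//	}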
   698  
   699  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   700  // either from ephemeral in-memory cache, or from persistent storage.
   701  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   702  	return bc.stateCache.TrieDB().Node(hash)
   703  }
   704  
   705  // Stop stops the blockchain service. If any imports are currently in progress
   706  // it will abort them using the procInterrupt.
   707  func (bc *BlockChain) Stop() {
   708  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   709  		return
   710  	}
   711  	// Unsubscribe all subscriptions registered from blockchain
   712  	bc.scope.Close()
   713  	close(bc.quit)
   714  	atomic.StoreInt32(&bc.procInterrupt, 1)
   715  
   716  	bc.wg.Wait()
   717  
   718  	// Ensure the state of a recent block is also stored to disk before exiting.
   719  	// We're writing three different states to catch different restart scenarios:
   720  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   721  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   722  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   723  	if !bc.cacheConfig.TrieDirtyDisabled {
   724  		triedb := bc.stateCache.TrieDB()
   725  
   726  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   727  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   728  				recent := bc.GetBlockByNumber(number - offset)
   729  
   730  				bc.logger.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   731  				if err := triedb.Commit(recent.Root(), true); err != nil {
   732  					bc.logger.Error("Failed to commit recent state trie", "err", err)
   733  				}
   734  			}
   735  		}
   736  		for !bc.triegc.Empty() {
   737  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
   738  		}
   739  		if size, _ := triedb.Size(); size != 0 {
   740  			bc.logger.Error("Dangling trie nodes after full cleanup")
   741  		}
   742  	}
   743  	bc.logger.Info("Blockchain manager stopped")
   744  }
   745  
   746  func (bc *BlockChain) procFutureBlocks() {
   747  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   748  	for _, hash := range bc.futureBlocks.Keys() {
   749  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   750  			blocks = append(blocks, block.(*types.Block))
   751  		}
   752  	}
   753  	if len(blocks) > 0 {
   754  		types.BlockBy(types.Number).Sort(blocks)
   755  
   756  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   757  		for i := range blocks {
   758  			bc.InsertChain(blocks[i : i+1])
   759  		}
   760  	}
   761  }
   762  
   763  // WriteStatus is the status of a block write operation.
   764  type WriteStatus byte
   765  
   766  const (
   767  	NonStatTy WriteStatus = iota
   768  	CanonStatTy
   769  	SideStatTy
   770  )
   771  
   772  // Rollback is designed to remove a chain of links from the database that aren't
   773  // certain enough to be valid.
   774  func (bc *BlockChain) Rollback(chain []common.Hash) {
   775  	bc.chainmu.Lock()
   776  	defer bc.chainmu.Unlock()
   777  
   778  	for i := len(chain) - 1; i >= 0; i-- {
   779  		hash := chain[i]
   780  
   781  		currentHeader := bc.hc.CurrentHeader()
   782  		if currentHeader.Hash() == hash {
   783  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   784  		}
   785  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   786  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   787  			bc.currentFastBlock.Store(newFastBlock)
   788  			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   789  		}
   790  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   791  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   792  			bc.currentBlock.Store(newBlock)
   793  			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
   794  		}
   795  	}
   796  }
   797  
   798  // SetReceiptsData computes all the non-consensus fields of the receipts
   799  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
   800  	signer := types.MakeSigner(config, block.Number())
   801  
   802  	transactions, logIndex := block.Transactions(), uint(0)
   803  	if len(transactions) != len(receipts) {
   804  		return errors.New("transaction and receipt count mismatch")
   805  	}
   806  
   807  	for j := 0; j < len(receipts); j++ {
   808  		// The transaction hash can be retrieved from the transaction itself
   809  		receipts[j].TxHash = transactions[j].Hash()
   810  
   811  		// block location fields
   812  		receipts[j].BlockHash = block.Hash()
   813  		receipts[j].BlockNumber = block.Number()
   814  		receipts[j].TransactionIndex = uint(j)
   815  
   816  		// The contract address can be derived from the transaction itself
   817  		if transactions[j].To() == nil {
   818  			// Deriving the signer is expensive, only do if it's actually needed
   819  			from, _ := types.Sender(signer, transactions[j])
   820  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   821  		}
   822  		// The used gas can be calculated based on previous receipts
   823  		if j == 0 {
   824  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   825  		} else {
   826  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   827  		}
   828  		// The derived log fields can simply be set from the block and transaction
   829  		for k := 0; k < len(receipts[j].Logs); k++ {
   830  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   831  			receipts[j].Logs[k].BlockHash = block.Hash()
   832  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   833  			receipts[j].Logs[k].TxIndex = uint(j)
   834  			receipts[j].Logs[k].Index = logIndex
   835  			logIndex++
   836  		}
   837  	}
   838  	return nil
   839  }
   840  
   841  // InsertReceiptChain attempts to complete an already existing header chain with
   842  // transaction and receipt data.
   843  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   844  	bc.wg.Add(1)
   845  	defer bc.wg.Done()
   846  
   847  	// Do a sanity check that the provided chain is actually ordered and linked
   848  	for i := 1; i < len(blockChain); i++ {
   849  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   850  			bc.logger.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   851  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   852  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   853  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   854  		}
   855  	}
   856  
   857  	var (
   858  		stats = struct{ processed, ignored int32 }{}
   859  		start = time.Now()
   860  		bytes = 0
   861  		batch = bc.db.NewBatch()
   862  	)
   863  	for i, block := range blockChain {
   864  		receipts := receiptChain[i]
   865  		// Short circuit insertion if shutting down or processing failed
   866  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   867  			return 0, nil
   868  		}
   869  		// Short circuit if the owner header is unknown
   870  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   871  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   872  		}
   873  		// Skip if the entire data is already known
   874  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   875  			stats.ignored++
   876  			continue
   877  		}
   878  		// Compute all the non-consensus fields of the receipts
   879  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   880  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   881  		}
   882  		// Write all the data out into the database
   883  		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
   884  		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   885  		rawdb.WriteTxLookupEntries(batch, block)
   886  
   887  		stats.processed++
   888  
   889  		if batch.ValueSize() >= intdb.IdealBatchSize {
   890  			if err := batch.Write(); err != nil {
   891  				return 0, err
   892  			}
   893  			bytes += batch.ValueSize()
   894  			batch.Reset()
   895  		}
   896  	}
   897  	if batch.ValueSize() > 0 {
   898  		bytes += batch.ValueSize()
   899  		if err := batch.Write(); err != nil {
   900  			return 0, err
   901  		}
   902  	}
   903  
   904  	// Update the head fast sync block if better
   905  	bc.chainmu.Lock()
   906  	head := blockChain[len(blockChain)-1]
   907  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   908  		currentFastBlock := bc.CurrentFastBlock()
   909  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   910  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   911  			bc.currentFastBlock.Store(head)
   912  		}
   913  	}
   914  	bc.chainmu.Unlock()
   915  
   916  	bc.logger.Info("Imported new block receipts",
   917  		"count", stats.processed,
   918  		"elapsed", common.PrettyDuration(time.Since(start)),
   919  		"number", head.Number(),
   920  		"hash", head.Hash(),
   921  		"size", common.StorageSize(bytes),
   922  		"ignored", stats.ignored)
   923  	return 0, nil
   924  }
   925  
   926  var lastWrite uint64
   927  
   928  // WriteBlockWithoutState writes only the block and its metadata to the database,
   929  // but does not write any state. This is used to construct competing side forks
   930  // up to the point where they exceed the canonical total difficulty.
   931  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   932  	bc.wg.Add(1)
   933  	defer bc.wg.Done()
   934  
   935  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   936  		return err
   937  	}
   938  	rawdb.WriteBlock(bc.db, block)
   939  
   940  	return nil
   941  }
   942  
   943  func (bc *BlockChain) MuLock() {
   944  	bc.chainmu.Lock()
   945  }
   946  
   947  func (bc *BlockChain) MuUnLock() {
   948  	bc.chainmu.Unlock()
   949  }
   950  
   951  // WriteBlockWithState writes the block and all associated state to the database.
   952  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   953  
   954  	return bc.writeBlockWithState(block, receipts, state)
   955  }
   956  
   957  // writeBlockWithState writes the block and all associated state to the database,
   958  // but it expects the chain mutex to be held.
   959  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   960  	bc.wg.Add(1)
   961  	defer bc.wg.Done()
   962  
   963  	// To avoid rewriting the block, just refresh the head.
   964  	// Set new head.
   965  	if bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
   966  		bc.insert(block)
   967  		bc.futureBlocks.Remove(block.Hash())
   968  		return CanonStatTy, nil
   969  	}
   970  
   971  	// Calculate the total difficulty of the block
   972  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   973  	if ptd == nil {
   974  		return NonStatTy, consensus.ErrUnknownAncestor
   975  	}
   976  	// Make sure no inconsistent state is leaked during insertion
   977  	currentBlock := bc.CurrentBlock()
   978  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   979  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   980  
   981  	// Irrelevant of the canonical status, write the block itself to the database
   982  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   983  		return NonStatTy, err
   984  	}
   985  	rawdb.WriteBlock(bc.db, block)
   986  
   987  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   988  	if err != nil {
   989  		return NonStatTy, err
   990  	}
   991  	triedb := bc.stateCache.TrieDB()
   992  
   993  	// We flush the db within 5 blocks before/after an epoch switch to avoid rollback issues
   994  	tdm := bc.Engine().(consensus.IPBFT)
   995  	FORCEFLUSHWINDOW := uint64(5)
   996  	curBlockNumber := block.NumberU64()
   997  	curEpoch := tdm.GetEpoch().GetEpochByBlockNumber(curBlockNumber)
   998  	withinEpochSwitchWindow := curBlockNumber < curEpoch.StartBlock+FORCEFLUSHWINDOW || curBlockNumber > curEpoch.EndBlock-FORCEFLUSHWINDOW
   999  
  1000  	FLUSHBLOCKSINTERVAL := uint64(5000) // flush at this interval to reduce the catch-up effort when a rollback occurs
  1001  	meetFlushBlockInterval := curBlockNumber%FLUSHBLOCKSINTERVAL == 0
  1002  
  1003  	// Flush immediately if we're inside the epoch-switch window, running an archive node, or at a periodic flush height
  1004  	if withinEpochSwitchWindow || bc.cacheConfig.TrieDirtyDisabled || meetFlushBlockInterval {
  1005  		if err := triedb.Commit(root, false); err != nil {
  1006  			return NonStatTy, err
  1007  		}
  1008  	} else {
  1009  		// Full but not archive node, do proper garbage collection
  1010  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
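		// Note: the priority queue pops the highest priority first, so pushing the
		// negated block number makes the oldest (lowest-numbered) tries surface first
		// in the garbage collection loop below.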
  1011  		bc.triegc.Push(root, -int64(block.NumberU64()))
  1012  
  1013  		if current := block.NumberU64(); current > triesInMemory {
  1014  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1015  			var (
  1016  				nodes, imgs = triedb.Size()
  1017  				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
  1018  			)
  1019  			if nodes > limit || imgs > 4*1024*1024 {
  1020  				triedb.Cap(limit - intdb.IdealBatchSize)
  1021  			}
  1022  			// Find the next state trie we need to commit
  1023  			chosen := current - triesInMemory
  1024  
  1025  			// If we exceeded our time allowance, flush an entire trie to disk
  1026  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
  1027  				// If the header is missing (canonical chain behind), we're reorging a low
  1028  				// diff sidechain. Suspend committing until this operation is completed.
  1029  				header := bc.GetHeaderByNumber(chosen)
  1030  				if header == nil {
  1031  					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
  1032  				} else {
  1033  					// If we're exceeding limits but haven't reached a large enough memory gap,
  1034  					// warn the user that the system is becoming unstable.
  1035  					if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
  1036  						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
  1037  					}
  1038  					// Flush an entire trie and restart the counters
  1039  					triedb.Commit(header.Root, true)
  1040  					lastWrite = chosen
  1041  					bc.gcproc = 0
  1042  				}
  1043  			}
  1044  			// Garbage collect anything below our required write retention
  1045  			for !bc.triegc.Empty() {
  1046  				root, number := bc.triegc.Pop()
  1047  				if uint64(-number) > chosen {
  1048  					bc.triegc.Push(root, number)
  1049  					break
  1050  				}
  1051  				triedb.Dereference(root.(common.Hash))
  1052  			}
  1053  		}
  1054  	}
  1055  
  1056  	// Write other block data using a batch.
  1057  	batch := bc.db.NewBatch()
  1058  	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
  1059  
  1060  	var reorg bool
  1061  	if _, ok := bc.engine.(consensus.IPBFT); ok {
  1062  		// The IPBFT engine always treats its blocks as canonical, so insert the block into the chain
  1063  		reorg = true
  1064  	} else {
  1065  		// If the total difficulty is higher than our known, add it to the canonical chain
  1066  		// Second clause in the if statement reduces the vulnerability to selfish mining.
  1067  		// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
  1068  		reorg = externTd.Cmp(localTd) > 0
  1069  		currentBlock = bc.CurrentBlock()
  1070  		if !reorg && externTd.Cmp(localTd) == 0 {
  1071  			// Split same-difficulty blocks by number, then at random
  1072  			reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
  1073  		}
  1074  	}
  1075  	if reorg {
  1076  		// Reorganise the chain if the parent is not the head block
  1077  		if block.ParentHash() != currentBlock.Hash() {
  1078  			if err := bc.reorg(currentBlock, block); err != nil {
  1079  				return NonStatTy, err
  1080  			}
  1081  		}
  1082  		// Write the positional metadata for transaction/receipt lookups and preimages
  1083  		rawdb.WriteTxLookupEntries(batch, block)
  1084  		rawdb.WritePreimages(batch, state.Preimages())
  1085  
  1086  		status = CanonStatTy
  1087  	} else {
  1088  		status = SideStatTy
  1089  	}
  1090  	if err := batch.Write(); err != nil {
  1091  		return NonStatTy, err
  1092  	}
  1093  
  1094  	// Set new head.
  1095  	if status == CanonStatTy {
  1096  		bc.insert(block)
  1097  	}
  1098  	bc.futureBlocks.Remove(block.Hash())
  1099  	return status, nil
  1100  }
  1101  
  1102  // addFutureBlock checks if the block is within the max allowed window to get
  1103  // accepted for future processing, and returns an error if the block is too far
  1104  // ahead and was not added.
  1105  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1106  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1107  	if block.Time() > max {
  1108  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1109  	}
  1110  	bc.futureBlocks.Add(block.Hash(), block)
  1111  	return nil
  1112  }
  1113  
  1114  // InsertChain attempts to insert the given batch of blocks in to the canonical
  1115  // chain or, otherwise, create a fork. If an error is returned it will return
  1116  // the index number of the failing block as well as an error describing what went
  1117  // wrong.
  1118  //
  1119  // After insertion is done, all accumulated events will be fired.
  1120  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1121  	// Sanity check that we have something meaningful to import
  1122  	if len(chain) == 0 {
  1123  		return 0, nil
  1124  	}
  1125  
  1126  	// Remove already known canon-blocks
  1127  	var (
  1128  		block, prev *types.Block
  1129  	)
  1130  	// Do a sanity check that the provided chain is actually ordered and linked
  1131  	for i := 1; i < len(chain); i++ {
  1132  		block = chain[i]
  1133  		prev = chain[i-1]
  1134  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1135  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1136  			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
  1137  				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())
  1138  
  1139  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
  1140  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1141  		}
  1142  	}
  1143  
  1144  	// Pre-checks passed, start the full block imports
  1145  	bc.wg.Add(1)
  1146  	bc.chainmu.Lock()
  1147  	n, events, logs, err := bc.insertChain(chain, true)
  1148  	bc.chainmu.Unlock()
  1149  	bc.wg.Done()
  1150  
  1151  	bc.PostChainEvents(events, logs)
  1152  	return n, err
  1153  }
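// Import sketch: `blocks` is assumed to be a contiguous, parent-linked batch, e.g.
// from a downloader or a test fixture; the returned index points at the block that
// failed to import.
//
//	if n, err := bc.InsertChain(blocks); err != nil {
//		bc.logger.Error("chain import failed", "index", n, "err", err)
//	}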
  1154  
  1155  // insertChain is the internal implementation of InsertChain, which assumes that
  1156  // 1) chains are contiguous, and 2) The chain mutex is held.
  1157  //
  1158  // This method is split out so that import batches that require re-injecting
  1159  // historical blocks can do so without releasing the lock, which could lead to
  1160  // racey behaviour. If a sidechain import is in progress, and the historic state
  1161  // is imported, but then new canon-head is added before the actual sidechain
  1162  // completes, then the historic state could be pruned again.
  1163  func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
  1164  	// If the chain is terminating, don't even bother starting up
  1165  	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1166  		return 0, nil, nil, nil
  1167  	}
  1168  
  1169  	// A queued approach to delivering events. This is generally
  1170  	// faster than direct delivery and requires much less mutex
  1171  	// acquiring.
  1172  	var (
  1173  		stats         = insertStats{startTime: mclock.Now()}
  1174  		events        = make([]interface{}, 0, len(chain))
  1175  		lastCanon     *types.Block
  1176  		coalescedLogs []*types.Log
  1177  	)
  1178  	// Start the parallel header verifier
  1179  	headers := make([]*types.Header, len(chain))
  1180  	seals := make([]bool, len(chain))
  1181  
  1182  	for i, block := range chain {
  1183  		headers[i] = block.Header()
  1184  		seals[i] = verifySeals
  1185  	}
  1186  
  1187  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1188  	defer close(abort)
  1189  
  1190  	// Peek the error for the first block to decide the directing import logic
  1191  	it := newInsertIterator(chain, results, bc.validator)
  1192  
  1193  	block, err := it.next()
  1194  
  1195  	// Left-trim all the known blocks
  1196  	if err == ErrKnownBlock {
  1197  		// First block (and state) is known
  1198  		//   1. We did a roll-back, and should now do a re-import
  1199  		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
  1200  		// 	    from the canonical chain, which has not been verified.
  1201  		// Skip all known blocks that are behind us
  1202  		current := bc.CurrentBlock().NumberU64()
  1203  		for block != nil && err == ErrKnownBlock {
  1204  			if current >= block.NumberU64() {
  1205  				stats.ignored++
  1206  				block, err = it.next()
  1207  			} else {
  1208  				log.Infof("this block has been written, but head not refreshed. hash %x, number %v\n",
  1209  					block.Hash(), block.NumberU64())
  1210  				//make it continue to refresh head
  1211  				err = nil
  1212  				break
  1213  			}
  1214  		}
  1215  		// Falls through to the block import
  1216  	}
  1217  
  1218  	switch {
  1219  	// First block is pruned, insert as sidechain and reorg only if TD grows enough
  1220  	case err == consensus.ErrPrunedAncestor:
  1221  		return bc.insertSidechain(block, it)
  1222  
  1223  	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
  1224  	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
  1225  		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
  1226  			if err := bc.addFutureBlock(block); err != nil {
  1227  				return it.index, events, coalescedLogs, err
  1228  			}
  1229  			block, err = it.next()
  1230  		}
  1231  		stats.queued += it.processed()
  1232  		stats.ignored += it.remaining()
  1233  
  1234  		// If there are any still remaining, mark as ignored
  1235  		return it.index, events, coalescedLogs, err
  1236  
  1237  	// Some other error occurred, abort
  1238  	case err != nil:
  1239  		stats.ignored += len(it.chain)
  1240  		bc.reportBlock(block, nil, err)
  1241  		return it.index, events, coalescedLogs, err
  1242  	}
  1243  
  1244  	// No validation errors for the first block (or chain prefix skipped)
  1245  	for ; block != nil && err == nil; block, err = it.next() {
  1246  		// If the chain is terminating, stop processing blocks
  1247  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1248  			bc.logger.Debug("Premature abort during blocks processing")
  1249  			break
  1250  		}
  1251  		// If the header is a banned one, straight out abort
  1252  		if BadHashes[block.Hash()] {
  1253  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1254  			return it.index, events, coalescedLogs, ErrBlacklistedHash
  1255  		}
  1256  
  1257  		// Retrieve the parent block and its state to execute on top
  1258  		start := time.Now()
  1259  
  1260  		parent := it.previous()
  1261  		if parent == nil {
  1262  			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1263  		}
  1264  		statedb, err := state.New(parent.Root, bc.stateCache)
  1265  		if err != nil {
  1266  			return it.index, events, coalescedLogs, err
  1267  		}
  1268  		// Process block using the parent state as reference point.
  1269  		receipts, logs, usedGas, ops, err := bc.processor.Process(block, statedb, bc.vmConfig)
  1270  		if err != nil {
  1271  			bc.reportBlock(block, receipts, err)
  1272  			return it.index, events, coalescedLogs, err
  1273  		}
  1274  
  1275  		// Validate the state using the default validator
  1276  		err = bc.Validator().ValidateState(block, statedb, receipts, usedGas)
  1277  		if err != nil {
  1278  			bc.reportBlock(block, receipts, err)
  1279  			return it.index, events, coalescedLogs, err
  1280  		}
  1281  		proctime := time.Since(start)
  1282  
  1283  		//err = bc.UpdateForbiddenState(block.Header(), statedb)
  1284  		//if err != nil {
  1285  		//	bc.logger.Error("Block chain failed to update forbidden state", "err", err)
  1286  		//}
  1287  
  1288  		// Write the block to the chain and get the status.
  1289  		status, err := bc.writeBlockWithState(block, receipts, statedb)
  1290  		if err != nil {
  1291  			return it.index, events, coalescedLogs, err
  1292  		}
  1293  		// Execute the pending ops returned by the processor (e.g. cross-chain operations handled via the CrossChainHelper).
  1294  		for _, op := range ops.Ops() {
  1295  			if err := ApplyOp(op, bc, bc.cch); err != nil {
  1296  				bc.logger.Error("Failed executing op", "op", op, "err", err)
  1297  			}
  1298  		}
  1299  
  1300  		blockInsertTimer.UpdateSince(start)
  1301  
  1302  		switch status {
  1303  		case CanonStatTy:
  1304  			bc.logger.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1305  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1306  				"elapsed", common.PrettyDuration(time.Since(start)),
  1307  				"root", block.Root())
  1308  
  1309  			coalescedLogs = append(coalescedLogs, logs...)
  1310  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1311  			lastCanon = block
  1312  
  1313  			// Only count canonical blocks for GC processing time
  1314  			bc.gcproc += proctime
  1315  
  1316  		case SideStatTy:
  1317  			bc.logger.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1318  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1319  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1320  				"root", block.Root())
  1321  			events = append(events, ChainSideEvent{block})
  1322  		}
  1323  		stats.processed++
  1324  		stats.usedGas += usedGas
  1325  
  1326  		dirty, _ := bc.stateCache.TrieDB().Size()
  1327  		stats.report(chain, it.index, dirty)
  1328  	}
  1329  	// Any blocks remaining here? The only ones we care about are the future ones
  1330  	if block != nil && err == consensus.ErrFutureBlock {
  1331  		if err := bc.addFutureBlock(block); err != nil {
  1332  			return it.index, events, coalescedLogs, err
  1333  		}
  1334  		block, err = it.next()
  1335  
  1336  		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
  1337  			if err := bc.addFutureBlock(block); err != nil {
  1338  				return it.index, events, coalescedLogs, err
  1339  			}
  1340  			stats.queued++
  1341  		}
  1342  	}
  1343  	stats.ignored += it.remaining()
  1344  
  1345  	// Append a single chain head event if we've progressed the chain
  1346  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1347  		events = append(events, ChainHeadEvent{lastCanon})
  1348  	}
  1349  
  1350  	return it.index, events, coalescedLogs, err
  1351  }
  1352  
  1353  // insertSidechain is called when an import batch hits upon a pruned ancestor
  1354  // error, which happens when a sidechain with a sufficiently old fork-block is
  1355  // found.
  1356  //
  1357  // The method writes all (header-and-body-valid) blocks to disk, then tries to
  1358  // switch over to the new chain if the TD exceeded the current chain.
  1359  func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
  1360  	var (
  1361  		externTd *big.Int
  1362  		current  = bc.CurrentBlock()
  1363  	)
  1364  	// The first sidechain block error is already verified to be ErrPrunedAncestor.
  1365  	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
  1366  // ones. Any other error means that the block is invalid and should not be written
  1367  	// to disk.
  1368  	err := consensus.ErrPrunedAncestor
  1369  	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
  1370  		// Check the canonical state root for that number
  1371  		if number := block.NumberU64(); current.NumberU64() >= number {
  1372  			canonical := bc.GetBlockByNumber(number)
  1373  			if canonical != nil && canonical.Hash() == block.Hash() {
  1374  				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
  1375  				continue
  1376  			}
  1377  			if canonical != nil && canonical.Root() == block.Root() {
  1378  				// This is most likely a shadow-state attack. When a fork is imported into the
  1379  				// database, and it eventually reaches a block height which is not pruned, we
  1380  				// just found that the state already exists! This means that the sidechain block
  1381  				// refers to a state which already exists in our canon chain.
  1382  				//
  1383  				// If left unchecked, we would now proceed importing the blocks, without actually
  1384  				// having verified the state of the previous blocks.
  1385  				bc.logger.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  1386  
  1387  				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
  1388  				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
  1389  				// mechanism.
  1390  				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
  1391  			}
  1392  		}
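        		// Accumulate the sidechain's total difficulty, seeding it from the parent's
        		// TD already stored on disk for the first block of the batch.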
  1393  		if externTd == nil {
  1394  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1395  		}
  1396  		externTd = new(big.Int).Add(externTd, block.Difficulty())
  1397  
  1398  		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  1399  			start := time.Now()
  1400  			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
  1401  				return it.index, nil, nil, err
  1402  			}
  1403  			bc.logger.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  1404  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1405  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1406  				"root", block.Root())
  1407  		}
  1408  	}
  1409  	// At this point, we've written all sidechain blocks to database. Loop ended
  1410  	// either on some other error or all were processed. If there was some other
  1411  	// error, we can ignore the rest of those blocks.
  1412  	//
  1413  	// If the externTd was larger than our local TD, we now need to reimport the previous
  1414  	// blocks to regenerate the required state
  1415  	localTd := bc.GetTd(current.Hash(), current.NumberU64())
  1416  	if localTd.Cmp(externTd) > 0 {
  1417  		bc.logger.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  1418  		return it.index, nil, nil, err
  1419  	}
  1420  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  1421  	var (
  1422  		hashes  []common.Hash
  1423  		numbers []uint64
  1424  	)
  1425  	parent := it.previous()
  1426  	for parent != nil && !bc.HasState(parent.Root) {
  1427  		hashes = append(hashes, parent.Hash())
  1428  		numbers = append(numbers, parent.Number.Uint64())
  1429  
  1430  		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  1431  	}
  1432  	if parent == nil {
  1433  		return it.index, nil, nil, errors.New("missing parent")
  1434  	}
  1435  	// Import all the pruned blocks to make the state available
  1436  	var (
  1437  		blocks []*types.Block
  1438  		memory common.StorageSize
  1439  	)
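        	// The hashes were collected walking from the newest block back towards the fork
        	// point, so iterate in reverse to import ancestors before their descendants.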
  1440  	for i := len(hashes) - 1; i >= 0; i-- {
  1441  		// Append the next block to our batch
  1442  		block := bc.GetBlock(hashes[i], numbers[i])
  1443  
  1444  		blocks = append(blocks, block)
  1445  		memory += block.Size()
  1446  
  1447  		// If memory use grew too large, import and continue. Sadly we need to discard
  1448  		// all raised events and logs from notifications since we're too heavy on the
  1449  		// memory here.
  1450  		if len(blocks) >= 2048 || memory > 64*1024*1024 {
  1451  			bc.logger.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  1452  			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
  1453  				return 0, nil, nil, err
  1454  			}
  1455  			blocks, memory = blocks[:0], 0
  1456  
  1457  			// If the chain is terminating, stop processing blocks
  1458  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1459  				bc.logger.Debug("Premature abort during blocks processing")
  1460  				return 0, nil, nil, nil
  1461  			}
  1462  		}
  1463  	}
  1464  	if len(blocks) > 0 {
  1465  		bc.logger.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  1466  		return bc.insertChain(blocks, false)
  1467  	}
  1468  	return 0, nil, nil, nil
  1469  }
  1470  
  1471  // reorg takes two blocks, an old chain and a new chain, and reconstructs the blocks and inserts them
  1472  // to be part of the new canonical chain. It also accumulates potential missing transactions and posts
  1473  // events about them.
  1474  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1475  	var (
  1476  		newChain    types.Blocks
  1477  		oldChain    types.Blocks
  1478  		commonBlock *types.Block
  1479  
  1480  		deletedTxs types.Transactions
  1481  
  1482  		deletedLogs []*types.Log
  1483  
  1484  		// collectLogs collects the logs that were generated during the
  1485  		// processing of the block that corresponds with the given hash.
  1486  		// These logs are later announced as deleted.
  1487  		collectLogs = func(hash common.Hash) {
  1488  			number := bc.hc.GetBlockNumber(hash)
  1489  			if number == nil {
  1490  				return
  1491  			}
  1492  			// Coalesce logs and set 'Removed'.
  1493  			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
  1494  			for _, receipt := range receipts {
  1495  				for _, log := range receipt.Logs {
  1496  					del := *log
  1497  					del.Removed = true
  1498  					deletedLogs = append(deletedLogs, &del)
  1499  				}
  1500  			}
  1501  		}
  1502  	)
  1503  
  1504  	// First reduce whichever chain is ahead down to the height of the other
  1505  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1506  		// reduce old chain
  1507  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1508  			oldChain = append(oldChain, oldBlock)
  1509  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1510  			collectLogs(oldBlock.Hash())
  1511  		}
  1512  	} else {
  1513  		// reduce new chain and append new chain blocks for inserting later on
  1514  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1515  			newChain = append(newChain, newBlock)
  1516  		}
  1517  	}
  1518  	if oldBlock == nil {
  1519  		return errors.New("invalid old chain")
  1520  	}
  1521  	if newBlock == nil {
  1522  		return errors.New("invalid new chain")
  1523  	}
  1524  
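        	// Both chains are now at the same height; walk them back in lockstep until the
        	// common ancestor is found, collecting the blocks on both sides together with
        	// the transactions and logs dropped from the old chain.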
  1525  	for {
  1526  		if oldBlock.Hash() == newBlock.Hash() {
  1527  			commonBlock = oldBlock
  1528  			break
  1529  		}
  1530  
  1531  		oldChain = append(oldChain, oldBlock)
  1532  		newChain = append(newChain, newBlock)
  1533  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1534  		collectLogs(oldBlock.Hash())
  1535  
  1536  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1537  		if oldBlock == nil {
  1538  			return errors.New("invalid old chain")
  1539  		}
  1540  		if newBlock == nil {
  1541  			return errors.New("invalid new chain")
  1542  		}
  1543  	}
  1544  	// Ensure the user sees large reorgs
  1545  	if len(oldChain) > 0 && len(newChain) > 0 {
  1546  		logFn := log.Debug
  1547  		if len(oldChain) > 63 {
  1548  			logFn = log.Warn
  1549  		}
  1550  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1551  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1552  	} else {
  1553  		bc.logger.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1554  	}
  1555  	// Insert the new chain, taking care of the proper incremental order
  1556  	var addedTxs types.Transactions
  1557  	for i := len(newChain) - 1; i >= 0; i-- {
  1558  		// insert the block in the canonical way, re-writing history
  1559  		bc.insert(newChain[i])
  1560  
  1561  		// Write lookup entries for hash based transaction/receipt searches
  1562  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1563  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1564  	}
  1565  	// When transactions get deleted from the database, the receipts that were
  1566  	// created in the fork must also be deleted
  1567  	batch := bc.db.NewBatch()
  1568  	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
  1569  		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1570  	}
  1571  	batch.Write()
  1572  
  1573  	if len(deletedLogs) > 0 {
  1574  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1575  	}
  1576  	if len(oldChain) > 0 {
  1577  		go func() {
  1578  			for _, block := range oldChain {
  1579  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1580  			}
  1581  		}()
  1582  	}
  1583  
  1584  	return nil
  1585  }
  1586  
  1587  // PostChainEvents iterates over the events generated by a chain insertion and
  1588  // posts them into the event feed.
  1589  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
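        //
        // A minimal caller sketch (illustrative only; `blocks` stands for a batch that was
        // just imported): the events and coalesced logs returned by the insertion are
        // forwarded on the feeds in a single pass.
        //
        //	_, events, logs, err := bc.insertChain(blocks, false)
        //	if err == nil {
        //		bc.PostChainEvents(events, logs)
        //	}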
  1590  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1591  	// post event logs for further processing
  1592  	if logs != nil {
  1593  		bc.logsFeed.Send(logs)
  1594  	}
  1595  	for _, event := range events {
  1596  		switch ev := event.(type) {
  1597  		case ChainEvent:
  1598  			bc.chainFeed.Send(ev)
  1599  
  1600  		case ChainHeadEvent:
  1601  			bc.chainHeadFeed.Send(ev)
  1602  
  1603  		case ChainSideEvent:
  1604  			bc.chainSideFeed.Send(ev)
  1605  
  1606  		case CreateChildChainEvent:
  1607  			bc.createChildChainFeed.Send(ev)
  1608  
  1609  		case StartMiningEvent:
  1610  			bc.startMiningFeed.Send(ev)
  1611  
  1612  		case StopMiningEvent:
  1613  			bc.stopMiningFeed.Send(ev)
  1614  		}
  1615  	}
  1616  }
  1617  
  1618  func (bc *BlockChain) update() {
  1619  	futureTimer := time.NewTicker(5 * time.Second)
  1620  	defer futureTimer.Stop()
  1621  	for {
  1622  		select {
  1623  		case <-futureTimer.C:
  1624  			bc.procFutureBlocks()
  1625  		case <-bc.quit:
  1626  			return
  1627  		}
  1628  	}
  1629  }
  1630  
  1631  // BadBlockArgs represents the entries in the list returned when bad blocks are queried.
  1632  type BadBlockArgs struct {
  1633  	Hash   common.Hash   `json:"hash"`
  1634  	Header *types.Header `json:"header"`
  1635  }
  1636  
  1637  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1638  func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
  1639  	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
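        	// Peek rather than Get so that listing bad blocks does not disturb the LRU recency order.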
  1640  	for _, hash := range bc.badBlocks.Keys() {
  1641  		if hdr, exist := bc.badBlocks.Peek(hash); exist {
  1642  			header := hdr.(*types.Header)
  1643  			headers = append(headers, BadBlockArgs{header.Hash(), header})
  1644  		}
  1645  	}
  1646  	return headers, nil
  1647  }
  1648  
  1649  // HasBadBlock returns whether the block with the hash is a bad block
  1650  func (bc *BlockChain) HasBadBlock(hash common.Hash) bool {
  1651  	return bc.badBlocks.Contains(hash)
  1652  }
  1653  
  1654  // addBadBlock adds a bad block to the bad-block LRU cache
  1655  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1656  	bc.badBlocks.Add(block.Header().Hash(), block.Header())
  1657  }
  1658  
  1659  // reportBlock logs a bad block error.
  1660  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1661  	bc.addBadBlock(block)
  1662  
  1663  	var receiptString string
  1664  	for _, receipt := range receipts {
  1665  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1666  	}
  1667  	bc.logger.Error(fmt.Sprintf(`
  1668  ########## BAD BLOCK #########
  1669  Chain config: %v
  1670  
  1671  Number: %v
  1672  Hash: 0x%x
  1673  %v
  1674  
  1675  Error: %v
  1676  ##############################
  1677  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1678  }
  1679  
  1680  // InsertHeaderChain attempts to insert the given header chain into the local
  1681  // chain, possibly creating a reorg. If an error is returned, it will return the
  1682  // index number of the failing header as well as an error describing what went wrong.
  1683  //
  1684  // The checkFreq parameter can be used to fine-tune whether nonce verification
  1685  // should be done or not. The reason behind the optional check is that some
  1686  // of the header retrieval mechanisms already need to verify nonces, as well as
  1687  // because nonces can be verified sparsely, not needing to check each.
  1688  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1689  	start := time.Now()
  1690  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1691  		return i, err
  1692  	}
  1693  
  1694  	// Make sure only one thread manipulates the chain at once
  1695  	bc.chainmu.Lock()
  1696  	defer bc.chainmu.Unlock()
  1697  
  1698  	bc.wg.Add(1)
  1699  	defer bc.wg.Done()
  1700  
  1701  	whFunc := func(header *types.Header) error {
  1702  		_, err := bc.hc.WriteHeader(header)
  1703  		return err
  1704  	}
  1705  
  1706  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1707  }
  1708  
  1709  // CurrentHeader retrieves the current head header of the canonical chain. The
  1710  // header is retrieved from the HeaderChain's internal cache.
  1711  func (bc *BlockChain) CurrentHeader() *types.Header {
  1712  	return bc.hc.CurrentHeader()
  1713  }
  1714  
  1715  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1716  // database by hash and number, caching it if found.
  1717  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1718  	return bc.hc.GetTd(hash, number)
  1719  }
  1720  
  1721  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1722  // database by hash, caching it if found.
  1723  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1724  	return bc.hc.GetTdByHash(hash)
  1725  }
  1726  
  1727  // GetHeader retrieves a block header from the database by hash and number,
  1728  // caching it if found.
  1729  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1730  	return bc.hc.GetHeader(hash, number)
  1731  }
  1732  
  1733  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1734  // found.
  1735  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1736  	return bc.hc.GetHeaderByHash(hash)
  1737  }
  1738  
  1739  // HasHeader checks if a block header is present in the database or not, caching
  1740  // it if present.
  1741  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1742  	return bc.hc.HasHeader(hash, number)
  1743  }
  1744  
  1745  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1746  // hash, fetching towards the genesis block.
  1747  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1748  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1749  }
  1750  
  1751  // GetHeaderByNumber retrieves a block header from the database by number,
  1752  // caching it (associated with its hash) if found.
  1753  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1754  	return bc.hc.GetHeaderByNumber(number)
  1755  }
  1756  
  1757  // Config retrieves the blockchain's chain configuration.
  1758  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1759  
  1760  // Engine retrieves the blockchain's consensus engine.
  1761  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1762  
  1763  // GetCrossChainHelper retrieves the blockchain's cross-chain helper.
  1764  func (bc *BlockChain) GetCrossChainHelper() CrossChainHelper { return bc.cch }
  1765  
  1766  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1767  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1768  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1769  }
  1770  
  1771  // SubscribeChainEvent registers a subscription of ChainEvent.
  1772  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1773  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1774  }
  1775  
  1776  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
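        //
        // A minimal usage sketch (illustrative; it assumes ChainHeadEvent carries the new
        // head in its Block field and that the subscription's Err channel signals teardown):
        //
        //	headCh := make(chan ChainHeadEvent, 16)
        //	sub := bc.SubscribeChainHeadEvent(headCh)
        //	defer sub.Unsubscribe()
        //	for {
        //		select {
        //		case ev := <-headCh:
        //			fmt.Println("new head", ev.Block.NumberU64())
        //		case <-sub.Err():
        //			return
        //		}
        //	}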
  1777  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1778  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1779  }
  1780  
  1781  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1782  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1783  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1784  }
  1785  
  1786  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1787  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1788  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1789  }
  1790  
  1791  // SubscribeCreateChildChainEvent registers a subscription of CreateChildChainEvent.
  1792  func (bc *BlockChain) SubscribeCreateChildChainEvent(ch chan<- CreateChildChainEvent) event.Subscription {
  1793  	return bc.scope.Track(bc.createChildChainFeed.Subscribe(ch))
  1794  }
  1795  
  1796  // SubscribeStartMiningEvent registers a subscription of StartMiningEvent.
  1797  func (bc *BlockChain) SubscribeStartMiningEvent(ch chan<- StartMiningEvent) event.Subscription {
  1798  	return bc.scope.Track(bc.startMiningFeed.Subscribe(ch))
  1799  }
  1800  
  1801  // SubscribeStopMiningEvent registers a subscription of StopMiningEvent.
  1802  func (bc *BlockChain) SubscribeStopMiningEvent(ch chan<- StopMiningEvent) event.Subscription {
  1803  	return bc.scope.Track(bc.stopMiningFeed.Subscribe(ch))
  1804  }