github.com/Gessiux/neatchain@v1.3.1/chain/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/Gessiux/neatchain/chain/core/rawdb"
    31  
    32  	"github.com/Gessiux/neatchain/chain/consensus"
    33  	"github.com/Gessiux/neatchain/chain/core/state"
    34  	"github.com/Gessiux/neatchain/chain/core/types"
    35  	"github.com/Gessiux/neatchain/chain/core/vm"
    36  	"github.com/Gessiux/neatchain/chain/log"
    37  	"github.com/Gessiux/neatchain/chain/trie"
    38  	"github.com/Gessiux/neatchain/neatdb"
    39  	"github.com/Gessiux/neatchain/params"
    40  	"github.com/Gessiux/neatchain/utilities/common"
    41  	"github.com/Gessiux/neatchain/utilities/common/mclock"
    42  	"github.com/Gessiux/neatchain/utilities/common/prque"
    43  	"github.com/Gessiux/neatchain/utilities/crypto"
    44  	"github.com/Gessiux/neatchain/utilities/event"
    45  	"github.com/Gessiux/neatchain/utilities/metrics"
    46  	"github.com/Gessiux/neatchain/utilities/rlp"
    47  	lru "github.com/hashicorp/golang-lru"
    48  )
    49  
    50  var (
    51  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    52  
    53  	ErrNoGenesis = errors.New("Genesis not found in chain")
    54  )
    55  
    56  const (
    57  	bodyCacheLimit      = 256
    58  	blockCacheLimit     = 256
    59  	receiptsCacheLimit  = 32
    60  	maxFutureBlocks     = 256
    61  	maxTimeFutureBlocks = 30 // seconds; blocks further ahead of local time than this are not queued
    62  	badBlockLimit       = 10
    63  	triesInMemory       = 128
    64  
    65  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    66  	BlockChainVersion = 3
    67  
    68  	TimeForForbidden  = 4 * time.Hour
    69  	ForbiddenDuration = 24 * time.Hour
    70  )
    71  
    72  // CacheConfig contains the configuration values for the trie caching/pruning
    73  // that's resident in a blockchain.
    74  type CacheConfig struct {
    75  	TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
    76  
    77  	TrieDirtyLimit    int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
    78  	TrieDirtyDisabled bool          // Whether to disable trie write caching and GC altogether (archive node)
    79  	TrieTimeLimit     time.Duration // Time limit after which to flush the current in-memory trie to disk
    80  }
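        
        // A configuration sketch (not part of the original file): the literal values
        // below simply mirror the defaults that NewBlockChain applies when it is
        // handed a nil CacheConfig; setting TrieDirtyDisabled to true would instead
        // run the node in archive mode, persisting every trie to disk.
        //
        //	cfg := &CacheConfig{
        //		TrieCleanLimit:    256,             // MB of memory for clean trie nodes
        //		TrieDirtyLimit:    256,             // MB of dirty nodes before flushing
        //		TrieDirtyDisabled: false,           // true disables write caching/GC (archive node)
        //		TrieTimeLimit:     5 * time.Minute, // flush the in-memory trie after this long
        //	}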
    81  
    82  // BlockChain represents the canonical chain given a database with a genesis
    83  // block. The BlockChain manages chain imports, reverts and chain
    84  // reorganisations.
    85  //
    86  // Importing blocks into the blockchain happens according to the set of rules
    87  // defined by the two-stage Validator. Blocks are processed using the Processor,
    88  // which executes the included transactions. Validation of the state is done in
    89  // the second stage of the Validator. A failure in either stage aborts the
    90  // import.
    91  //
    92  // The BlockChain also returns blocks from any chain stored in the database, as
    93  // well as blocks on the canonical chain. Note that GetBlock may return a block
    94  // that is not on the canonical chain, whereas GetBlockByNumber always resolves
    95  // against the canonical chain.
    96  type BlockChain struct {
    97  	chainConfig *params.ChainConfig // Chain & network configuration
    98  	cacheConfig *CacheConfig        // Cache configuration for pruning
    99  
   100  	db     neatdb.Database // Low level persistent database to store final content in
   101  	triegc *prque.Prque    // Priority queue mapping block numbers to tries to gc
   102  	gcproc time.Duration   // Accumulates canonical block processing for trie dumping
   103  
   104  	hc                  *HeaderChain
   105  	rmLogsFeed          event.Feed
   106  	chainFeed           event.Feed
   107  	chainSideFeed       event.Feed
   108  	chainHeadFeed       event.Feed
   109  	logsFeed            event.Feed
   110  	createSideChainFeed event.Feed
   111  	startMiningFeed     event.Feed
   112  	stopMiningFeed      event.Feed
   113  
   114  	scope        event.SubscriptionScope
   115  	genesisBlock *types.Block
   116  
   117  	chainmu sync.RWMutex // blockchain insertion lock
   118  
   119  	currentBlock     atomic.Value // Current head of the block chain
   120  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   121  
   122  	stateCache    state.Database // State database to reuse between imports (contains state cache)
   123  	bodyCache     *lru.Cache     // Cache for the most recent block bodies
   124  	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   125  	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
   126  	blockCache    *lru.Cache     // Cache for the most recent entire blocks
   127  	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing
   128  
   129  	quit    chan struct{} // blockchain quit channel
   130  	running int32         // running must be accessed atomically
   131  	// procInterrupt must be accessed atomically
   132  	procInterrupt int32          // interrupt signaler for block processing
   133  	wg            sync.WaitGroup // chain processing wait group for shutting down
   134  
   135  	engine    consensus.Engine
   136  	validator Validator // Block and state validator interface
   137  	processor Processor // Block transaction processor interface
   138  	vmConfig  vm.Config
   139  
   140  	badBlocks *lru.Cache // Bad block cache
   141  
   142  	cch    CrossChainHelper
   143  	logger log.Logger
   144  }
   145  
   146  // NewBlockChain returns a fully initialised block chain using information
   147  // available in the database. It initialises the default Ethereum Validator and
   148  // Processor.
   149  func NewBlockChain(db neatdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, cch CrossChainHelper) (*BlockChain, error) {
   150  	if cacheConfig == nil {
   151  		cacheConfig = &CacheConfig{
   152  			TrieCleanLimit: 256,
   153  			TrieDirtyLimit: 256,
   154  			TrieTimeLimit:  5 * time.Minute,
   155  		}
   156  	}
   157  	bodyCache, _ := lru.New(bodyCacheLimit)
   158  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   159  	receiptsCache, _ := lru.New(receiptsCacheLimit)
   160  	blockCache, _ := lru.New(blockCacheLimit)
   161  	futureBlocks, _ := lru.New(maxFutureBlocks)
   162  	badBlocks, _ := lru.New(badBlockLimit)
   163  
   164  	bc := &BlockChain{
   165  		chainConfig:   chainConfig,
   166  		cacheConfig:   cacheConfig,
   167  		db:            db,
   168  		triegc:        prque.New(nil),
   169  		stateCache:    state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
   170  		quit:          make(chan struct{}),
   171  		bodyCache:     bodyCache,
   172  		bodyRLPCache:  bodyRLPCache,
   173  		receiptsCache: receiptsCache,
   174  		blockCache:    blockCache,
   175  		futureBlocks:  futureBlocks,
   176  		engine:        engine,
   177  		vmConfig:      vmConfig,
   178  		badBlocks:     badBlocks,
   179  		cch:           cch,
   180  		logger:        chainConfig.ChainLogger,
   181  	}
   182  	bc.validator = NewBlockValidator(chainConfig, bc, engine)
   183  	bc.processor = NewStateProcessor(chainConfig, bc, engine, cch)
   184  
   185  	var err error
   186  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   187  	if err != nil {
   188  		return nil, err
   189  	}
   190  	bc.genesisBlock = bc.GetBlockByNumber(0)
   191  	if bc.genesisBlock == nil {
   192  		return nil, ErrNoGenesis
   193  	}
   194  	if err := bc.loadLastState(); err != nil {
   195  		return nil, err
   196  	}
   197  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   198  	for hash := range BadHashes {
   199  		if header := bc.GetHeaderByHash(hash); header != nil {
   200  			// get the canonical block corresponding to the offending header's number
   201  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   202  			// make sure the headerByNumber (if present) is in our current canonical chain
   203  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   204  				bc.logger.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   205  				bc.SetHead(header.Number.Uint64() - 1)
   206  				bc.logger.Error("Chain rewind was successful, resuming normal operation")
   207  			}
   208  		}
   209  	}
   210  	// Take ownership of this particular state
   211  	go bc.update()
   212  	return bc, nil
   213  }
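        
        // A minimal usage sketch (illustrative only, assuming the caller already holds
        // an open database, chain config, consensus engine and CrossChainHelper):
        //
        //	bc, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, cch)
        //	if err != nil {
        //		// e.g. ErrNoGenesis when the database has no genesis block
        //	}
        //	defer bc.Stop()
        //	head := bc.CurrentBlock()
        //	_ = head.NumberU64()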
   214  
   215  func (bc *BlockChain) getProcInterrupt() bool {
   216  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   217  }
   218  
   219  // loadLastState loads the last known chain state from the database. This method
   220  // assumes that the chain manager mutex is held.
   221  func (bc *BlockChain) loadLastState() error {
   222  	// Restore the last known head block
   223  	head := rawdb.ReadHeadBlockHash(bc.db)
   224  	if head == (common.Hash{}) {
   225  		// Corrupt or empty database, init from scratch
   226  		bc.logger.Warn("Empty database, resetting chain")
   227  		return bc.Reset()
   228  	}
   229  	// Make sure the entire head block is available
   230  	currentBlock := bc.GetBlockByHash(head)
   231  	if currentBlock == nil {
   232  		// Corrupt or empty database, init from scratch
   233  		bc.logger.Warn("Head block missing, resetting chain", "hash", head)
   234  		return bc.Reset()
   235  	}
   236  	// Make sure the state associated with the block is available
   237  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   238  		// Dangling block without a state associated, init from scratch
   239  		bc.logger.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "err", err)
   240  		if err := bc.repair(&currentBlock); err != nil {
   241  			return err
   242  		}
   243  	}
   244  	// Everything seems to be fine, set as the head block
   245  	bc.currentBlock.Store(currentBlock)
   246  
   247  	// Restore the last known head header
   248  	currentHeader := currentBlock.Header()
   249  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   250  		if header := bc.GetHeaderByHash(head); header != nil {
   251  			currentHeader = header
   252  		}
   253  	}
   254  	bc.hc.SetCurrentHeader(currentHeader)
   255  
   256  	// Restore the last known head fast block
   257  	bc.currentFastBlock.Store(currentBlock)
   258  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   259  		if block := bc.GetBlockByHash(head); block != nil {
   260  			bc.currentFastBlock.Store(block)
   261  		}
   262  	}
   263  
   264  	// Issue a status log for the user
   265  	//currentFastBlock := bc.CurrentFastBlock()
   266  
   267  	//headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   268  	//blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   269  	//fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   270  
   271  	bc.logger.Info("Local chain block height is:", "Block", currentHeader.Number, "Hash", currentHeader.Hash())
   272  	//bc.logger.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   273  	//bc.logger.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   274  
   275  	return nil
   276  }
   277  
   278  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   279  // above the new head will be deleted and the new one set. In the case of blocks
   280  // though, the head may be further rewound if block bodies are missing (non-archive
   281  // nodes after a fast sync).
   282  func (bc *BlockChain) SetHead(head uint64) error {
   283  	bc.logger.Warn("Rewinding blockchain", "target", head)
   284  
   285  	bc.chainmu.Lock()
   286  	defer bc.chainmu.Unlock()
   287  
   288  	// Rewind the header chain, deleting all block bodies until then
   289  	delFn := func(db neatdb.Writer, hash common.Hash, num uint64) {
   290  		rawdb.DeleteBody(db, hash, num)
   291  	}
   292  	bc.hc.SetHead(head, delFn)
   293  	currentHeader := bc.hc.CurrentHeader()
   294  
   295  	// Clear out any stale content from the caches
   296  	bc.bodyCache.Purge()
   297  	bc.bodyRLPCache.Purge()
   298  	bc.receiptsCache.Purge()
   299  	bc.blockCache.Purge()
   300  	bc.futureBlocks.Purge()
   301  
   302  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   303  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   304  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   305  	}
   306  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   307  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   308  			// Rewound state missing, rolled back to before pivot, reset to genesis
   309  			bc.currentBlock.Store(bc.genesisBlock)
   310  		}
   311  	}
   312  	// Rewind the fast block in a simpleton way to the target head
   313  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   314  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   315  	}
   316  	// If either block reached nil, reset to the genesis state
   317  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   318  		bc.currentBlock.Store(bc.genesisBlock)
   319  	}
   320  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   321  		bc.currentFastBlock.Store(bc.genesisBlock)
   322  	}
   323  	currentBlock := bc.CurrentBlock()
   324  	currentFastBlock := bc.CurrentFastBlock()
   325  
   326  	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
   327  	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())
   328  
   329  	return bc.loadLastState()
   330  }
   331  
   332  // FastSyncCommitHead sets the current head block to the one defined by the hash,
   333  // irrespective of what the chain contents were prior.
   334  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   335  	// Make sure that both the block and its state trie exist
   336  	block := bc.GetBlockByHash(hash)
   337  	if block == nil {
   338  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   339  	}
   340  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
   341  		return err
   342  	}
   343  	// If all checks out, manually set the head block
   344  	bc.chainmu.Lock()
   345  	bc.currentBlock.Store(block)
   346  	bc.chainmu.Unlock()
   347  
   348  	bc.logger.Info("Committed new head block", "number", block.Number(), "hash", hash)
   349  	return nil
   350  }
   351  
   352  // GasLimit returns the gas limit of the current HEAD block.
   353  func (bc *BlockChain) GasLimit() uint64 {
   354  	return bc.CurrentBlock().GasLimit()
   355  }
   356  
   357  // CurrentBlock retrieves the current head block of the canonical chain. The
   358  // block is retrieved from the blockchain's internal cache.
   359  func (bc *BlockChain) CurrentBlock() *types.Block {
   360  	return bc.currentBlock.Load().(*types.Block)
   361  }
   362  
   363  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   364  // chain. The block is retrieved from the blockchain's internal cache.
   365  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   366  	return bc.currentFastBlock.Load().(*types.Block)
   367  }
   368  
   369  // Validator returns the current validator.
   370  func (bc *BlockChain) Validator() Validator {
   371  	return bc.validator
   372  }
   373  
   374  // Processor returns the current processor.
   375  func (bc *BlockChain) Processor() Processor {
   376  	return bc.processor
   377  }
   378  
   379  // State returns a new mutable state based on the current HEAD block.
   380  func (bc *BlockChain) State() (*state.StateDB, error) {
   381  	return bc.StateAt(bc.CurrentBlock().Root())
   382  }
   383  
   384  // StateAt returns a new mutable state based on a particular point in time.
   385  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   386  	return state.New(root, bc.stateCache)
   387  }
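        
        // A read-only sketch: obtaining a mutable state at the current head and querying
        // it. GetBalance is assumed from the geth-derived state.StateDB API; the address
        // is whatever account the caller is interested in.
        //
        //	st, err := bc.State()
        //	if err == nil {
        //		balance := st.GetBalance(common.Address{}) // zero address used purely as a placeholder
        //		_ = balance
        //	}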
   388  
   389  // StateCache returns the caching database underpinning the blockchain instance.
   390  func (bc *BlockChain) StateCache() state.Database {
   391  	return bc.stateCache
   392  }
   393  
   394  // Reset purges the entire blockchain, restoring it to its genesis state.
   395  func (bc *BlockChain) Reset() error {
   396  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   397  }
   398  
   399  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   400  // specified genesis state.
   401  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   402  	// Dump the entire block chain and purge the caches
   403  	if err := bc.SetHead(0); err != nil {
   404  		return err
   405  	}
   406  	bc.chainmu.Lock()
   407  	defer bc.chainmu.Unlock()
   408  
   409  	// Prepare the genesis block and reinitialise the chain
   410  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   411  		bc.logger.Crit("Failed to write genesis block TD", "err", err)
   412  	}
   413  	rawdb.WriteBlock(bc.db, genesis)
   414  
   415  	bc.genesisBlock = genesis
   416  	bc.insert(bc.genesisBlock)
   417  	bc.currentBlock.Store(bc.genesisBlock)
   418  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   419  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   420  	bc.currentFastBlock.Store(bc.genesisBlock)
   421  
   422  	return nil
   423  }
   424  
   425  // repair tries to repair the current blockchain by rolling back the current block
   426  // until one with associated state is found. This is needed to fix incomplete db
   427  // writes caused either by crashes/power outages, or simply non-committed tries.
   428  //
   429  // This method only rolls back the current block. The current header and current
   430  // fast block are left intact.
   431  func (bc *BlockChain) repair(head **types.Block) error {
   432  	for {
   433  		// Abort if we've rewound to a head block that does have associated state
   434  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   435  			bc.logger.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   436  			return nil
   437  		}
   438  		// Otherwise rewind one block and recheck state availability there
   439  		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   440  		if block == nil {
   441  			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
   442  		}
   443  		*head = block
   444  	}
   445  }
   446  
   447  // Export writes the active chain to the given writer.
   448  func (bc *BlockChain) Export(w io.Writer) error {
   449  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   450  }
   451  
   452  // ExportN writes a subset of the active chain to the given writer.
   453  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   454  	bc.chainmu.RLock()
   455  	defer bc.chainmu.RUnlock()
   456  
   457  	if first > last {
   458  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   459  	}
   460  	bc.logger.Info("Exporting batch of blocks", "count", last-first+1)
   461  
   462  	for nr := first; nr <= last; nr++ {
   463  		block := bc.GetBlockByNumber(nr)
   464  		if block == nil {
   465  			return fmt.Errorf("export failed on #%d: not found", nr)
   466  		}
   467  
   468  		if err := block.EncodeRLP(w); err != nil {
   469  			return err
   470  		}
   471  	}
   472  
   473  	return nil
   474  }
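        
        // An export sketch: Export/ExportN accept any io.Writer, so dumping the chain to
        // an RLP file is just (os.Create and the file name are the caller's choice, not
        // anything this package prescribes):
        //
        //	f, err := os.Create("chain.rlp")
        //	if err == nil {
        //		defer f.Close()
        //		err = bc.Export(f) // or bc.ExportN(f, first, last) for a sub-range
        //	}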
   475  
   476  // insert injects a new head block into the current block chain. This method
   477  // assumes that the block is indeed a true head. It will also reset the head
   478  // header and the head fast sync block to this very same block if they are older
   479  // or if they are on a different side chain.
   480  //
   481  // Note, this function assumes that the `chainmu` mutex is held!
   482  func (bc *BlockChain) insert(block *types.Block) {
   483  	// If the block is on a side chain or an unknown one, force other heads onto it too
   484  	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   485  
   486  	// Add the block to the canonical chain number scheme and mark as the head
   487  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
   488  	rawdb.WriteHeadBlockHash(bc.db, block.Hash())
   489  
   490  	bc.currentBlock.Store(block)
   491  
   492  	// If the block is better than our head or is on a different chain, force update heads
   493  	if updateHeads {
   494  		bc.hc.SetCurrentHeader(block.Header())
   495  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
   496  
   497  		bc.currentFastBlock.Store(block)
   498  	}
   499  
   500  	//bc.logger.Info(fmt.Sprintf("Inserted block number %v, hash: %x", block.NumberU64(), block.Hash()))
   501  	ibCbMap := GetInsertBlockCbMap()
   502  	for _, cb := range ibCbMap {
   503  		cb(bc, block)
   504  	}
   505  }
   506  
   507  // Genesis retrieves the chain's genesis block.
   508  func (bc *BlockChain) Genesis() *types.Block {
   509  	return bc.genesisBlock
   510  }
   511  
   512  // GetBody retrieves a block body (transactions and uncles) from the database by
   513  // hash, caching it if found.
   514  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   515  	// Short circuit if the body's already in the cache, retrieve otherwise
   516  	if cached, ok := bc.bodyCache.Get(hash); ok {
   517  		body := cached.(*types.Body)
   518  		return body
   519  	}
   520  	number := bc.hc.GetBlockNumber(hash)
   521  	if number == nil {
   522  		return nil
   523  	}
   524  	body := rawdb.ReadBody(bc.db, hash, *number)
   525  	if body == nil {
   526  		return nil
   527  	}
   528  	// Cache the found body for next time and return
   529  	bc.bodyCache.Add(hash, body)
   530  	return body
   531  }
   532  
   533  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   534  // caching it if found.
   535  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   536  	// Short circuit if the body's already in the cache, retrieve otherwise
   537  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   538  		return cached.(rlp.RawValue)
   539  	}
   540  	number := bc.hc.GetBlockNumber(hash)
   541  	if number == nil {
   542  		return nil
   543  	}
   544  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   545  	if len(body) == 0 {
   546  		return nil
   547  	}
   548  	// Cache the found body for next time and return
   549  	bc.bodyRLPCache.Add(hash, body)
   550  	return body
   551  }
   552  
   553  // HasBlock checks if a block is fully present in the database or not.
   554  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   555  	if bc.blockCache.Contains(hash) {
   556  		return true
   557  	}
   558  	return rawdb.HasBody(bc.db, hash, number)
   559  }
   560  
   561  // HasState checks if the state trie is fully present in the database or not.
   562  func (bc *BlockChain) HasState(hash common.Hash) bool {
   563  	_, err := bc.stateCache.OpenTrie(hash)
   564  	return err == nil
   565  }
   566  
   567  // HasBlockAndState checks if a block and its associated state trie are fully
   568  // present in the database or not, caching the block if present.
   569  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   570  	// Check first that the block itself is known
   571  	block := bc.GetBlock(hash, number)
   572  	if block == nil {
   573  		return false
   574  	}
   575  	return bc.HasState(block.Root())
   576  }
   577  
   578  // GetBlock retrieves a block from the database by hash and number,
   579  // caching it if found.
   580  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   581  	// Short circuit if the block's already in the cache, retrieve otherwise
   582  	if block, ok := bc.blockCache.Get(hash); ok {
   583  		return block.(*types.Block)
   584  	}
   585  	block := rawdb.ReadBlock(bc.db, hash, number)
   586  	if block == nil {
   587  		return nil
   588  	}
   589  	// Cache the found block for next time and return
   590  	bc.blockCache.Add(block.Hash(), block)
   591  	return block
   592  }
   593  
   594  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   595  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   596  	number := bc.hc.GetBlockNumber(hash)
   597  	if number == nil {
   598  		return nil
   599  	}
   600  	return bc.GetBlock(hash, *number)
   601  }
   602  
   603  // GetBlockByNumber retrieves a block from the database by number, caching it
   604  // (associated with its hash) if found.
   605  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   606  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   607  	if hash == (common.Hash{}) {
   608  		return nil
   609  	}
   610  	return bc.GetBlock(hash, number)
   611  }
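        
        // As the BlockChain doc comment notes, hash-based lookups may return blocks from
        // any chain stored in the database, while number-based lookups only resolve the
        // canonical chain:
        //
        //	byHash := bc.GetBlockByHash(h)     // h may belong to a side chain
        //	byNumber := bc.GetBlockByNumber(n) // nil unless n maps to a canonical hash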
   612  
   613  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   614  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   615  	if receipts, ok := bc.receiptsCache.Get(hash); ok {
   616  		return receipts.(types.Receipts)
   617  	}
   618  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   619  	if number == nil {
   620  		return nil
   621  	}
   622  	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
   623  	if receipts == nil {
   624  		return nil
   625  	}
   626  	bc.receiptsCache.Add(hash, receipts)
   627  	return receipts
   628  }
   629  
   630  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   631  // [deprecated by neatptc/62]
   632  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   633  	number := bc.hc.GetBlockNumber(hash)
   634  	if number == nil {
   635  		return nil
   636  	}
   637  	for i := 0; i < n; i++ {
   638  		block := bc.GetBlock(hash, *number)
   639  		if block == nil {
   640  			break
   641  		}
   642  		blocks = append(blocks, block)
   643  		hash = block.ParentHash()
   644  		*number--
   645  	}
   646  	return
   647  }
   648  
   649  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   650  // a specific distance is reached.
   651  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   652  	uncles := []*types.Header{}
   653  	for i := 0; block != nil && i < length; i++ {
   654  		uncles = append(uncles, block.Uncles()...)
   655  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   656  	}
   657  	return uncles
   658  }
   659  
   660  // ValidateBlock executes and validates the given block against the current latest block.
   661  func (bc *BlockChain) ValidateBlock(block *types.Block) (*state.StateDB, types.Receipts, *types.PendingOps, error) {
   662  	// If the header is a banned one, straight out abort
   663  	if BadHashes[block.Hash()] {
   664  		return nil, nil, nil, ErrBlacklistedHash
   665  	}
   666  
   667  	// Header verify
   668  	if err := bc.engine.(consensus.NeatCon).VerifyHeaderBeforeConsensus(bc, block.Header(), true); err != nil {
   669  		return nil, nil, nil, err
   670  	}
   671  
   672  	// Body verify
   673  	if err := bc.Validator().ValidateBody(block); err != nil {
   674  		log.Debugf("ValidateBlock-ValidateBody return with error: %v", err)
   675  		return nil, nil, nil, err
   676  	}
   677  
   678  	// Look up the parent block and build the state on top of its root
   679  	parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   680  	state, err := state.New(parent.Root(), bc.stateCache)
   681  	if err != nil {
   682  		log.Debugf("ValidateBlock-state.New return with error: %v", err)
   683  		return nil, nil, nil, err
   684  	}
   685  
   686  	// Process block using the parent state as reference point.
   687  	receipts, _, usedGas, ops, err := bc.processor.Process(block, state, bc.vmConfig)
   688  	if err != nil {
   689  		log.Debugf("ValidateBlock-Process return with error: %v", err)
   690  		return nil, nil, nil, err
   691  	}
   692  
   693  	// Validate the state using the default validator
   694  	err = bc.Validator().ValidateState(block, state, receipts, usedGas)
   695  	if err != nil {
   696  		log.Debugf("ValidateBlock-ValidateState return with error: %v", err)
   697  		return nil, nil, nil, err
   698  	}
   699  
   700  	return state, receipts, ops, nil
   701  }
   702  
   703  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   704  // either from ephemeral in-memory cache, or from persistent storage.
   705  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   706  	return bc.stateCache.TrieDB().Node(hash)
   707  }
   708  
   709  // Stop stops the blockchain service. If any imports are currently in progress
   710  // it will abort them using the procInterrupt.
   711  func (bc *BlockChain) Stop() {
   712  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   713  		return
   714  	}
   715  	// Unsubscribe all subscriptions registered from blockchain
   716  	bc.scope.Close()
   717  	close(bc.quit)
   718  	atomic.StoreInt32(&bc.procInterrupt, 1)
   719  
   720  	bc.wg.Wait()
   721  
   722  	// Ensure the state of a recent block is also stored to disk before exiting.
   723  	// We're writing three different states to catch different restart scenarios:
   724  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   725  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   726  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   727  	if !bc.cacheConfig.TrieDirtyDisabled {
   728  		triedb := bc.stateCache.TrieDB()
   729  
   730  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   731  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   732  				recent := bc.GetBlockByNumber(number - offset)
   733  
   734  				bc.logger.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   735  				if err := triedb.Commit(recent.Root(), true); err != nil {
   736  					bc.logger.Error("Failed to commit recent state trie", "err", err)
   737  				}
   738  			}
   739  		}
   740  		for !bc.triegc.Empty() {
   741  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
   742  		}
   743  		if size, _ := triedb.Size(); size != 0 {
   744  			bc.logger.Error("Dangling trie nodes after full cleanup")
   745  		}
   746  	}
   747  	bc.logger.Info("Blockchain manager stopped")
   748  }
   749  
   750  func (bc *BlockChain) procFutureBlocks() {
   751  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   752  	for _, hash := range bc.futureBlocks.Keys() {
   753  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   754  			blocks = append(blocks, block.(*types.Block))
   755  		}
   756  	}
   757  	if len(blocks) > 0 {
   758  		types.BlockBy(types.Number).Sort(blocks)
   759  
   760  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   761  		for i := range blocks {
   762  			bc.InsertChain(blocks[i : i+1])
   763  		}
   764  	}
   765  }
   766  
   767  // WriteStatus is the status returned when writing a block to the chain.
   768  type WriteStatus byte
   769  
   770  const (
   771  	NonStatTy WriteStatus = iota // block was not written (an error occurred)
   772  	CanonStatTy                  // block was written as part of the canonical chain
   773  	SideStatTy                   // block was written to a side chain
   774  )
   775  
   776  // Rollback is designed to remove a chain of links from the database that aren't
   777  // certain enough to be valid.
   778  func (bc *BlockChain) Rollback(chain []common.Hash) {
   779  	bc.chainmu.Lock()
   780  	defer bc.chainmu.Unlock()
   781  
   782  	for i := len(chain) - 1; i >= 0; i-- {
   783  		hash := chain[i]
   784  
   785  		currentHeader := bc.hc.CurrentHeader()
   786  		if currentHeader.Hash() == hash {
   787  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   788  		}
   789  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   790  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   791  			bc.currentFastBlock.Store(newFastBlock)
   792  			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   793  		}
   794  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   795  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   796  			bc.currentBlock.Store(newBlock)
   797  			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
   798  		}
   799  	}
   800  }
   801  
   802  // SetReceiptsData computes all the non-consensus fields of the receipts
   803  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
   804  	signer := types.MakeSigner(config, block.Number())
   805  
   806  	transactions, logIndex := block.Transactions(), uint(0)
   807  	if len(transactions) != len(receipts) {
   808  		return errors.New("transaction and receipt count mismatch")
   809  	}
   810  
   811  	for j := 0; j < len(receipts); j++ {
   812  		// The transaction hash can be retrieved from the transaction itself
   813  		receipts[j].TxHash = transactions[j].Hash()
   814  
   815  		// block location fields
   816  		receipts[j].BlockHash = block.Hash()
   817  		receipts[j].BlockNumber = block.Number()
   818  		receipts[j].TransactionIndex = uint(j)
   819  
   820  		// The contract address can be derived from the transaction itself
   821  		if transactions[j].To() == nil {
   822  			// Deriving the signer is expensive, only do if it's actually needed
   823  			from, _ := types.Sender(signer, transactions[j])
   824  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   825  		}
   826  		// The used gas can be calculated based on previous receipts
   827  		if j == 0 {
   828  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   829  		} else {
   830  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   831  		}
   832  		// The derived log fields can simply be set from the block and transaction
   833  		for k := 0; k < len(receipts[j].Logs); k++ {
   834  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   835  			receipts[j].Logs[k].BlockHash = block.Hash()
   836  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   837  			receipts[j].Logs[k].TxIndex = uint(j)
   838  			receipts[j].Logs[k].Index = logIndex
   839  			logIndex++
   840  		}
   841  	}
   842  	return nil
   843  }
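        
        // A worked example of the GasUsed derivation above: for three receipts whose
        // CumulativeGasUsed values are 21000, 63000 and 90000, the per-transaction
        // GasUsed comes out as 21000, 63000-21000 = 42000 and 90000-63000 = 27000.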
   844  
   845  // InsertReceiptChain attempts to complete an already existing header chain with
   846  // transaction and receipt data.
   847  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   848  	bc.wg.Add(1)
   849  	defer bc.wg.Done()
   850  
   851  	// Do a sanity check that the provided chain is actually ordered and linked
   852  	for i := 1; i < len(blockChain); i++ {
   853  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   854  			bc.logger.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   855  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   856  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   857  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   858  		}
   859  	}
   860  
   861  	var (
   862  		stats = struct{ processed, ignored int32 }{}
   863  		start = time.Now()
   864  		bytes = 0
   865  		batch = bc.db.NewBatch()
   866  	)
   867  	for i, block := range blockChain {
   868  		receipts := receiptChain[i]
   869  		// Short circuit insertion if shutting down or processing failed
   870  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   871  			return 0, nil
   872  		}
   873  		// Short circuit if the owner header is unknown
   874  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   875  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   876  		}
   877  		// Skip if the entire data is already known
   878  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   879  			stats.ignored++
   880  			continue
   881  		}
   882  		// Compute all the non-consensus fields of the receipts
   883  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   884  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   885  		}
   886  		// Write all the data out into the database
   887  		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
   888  		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   889  		rawdb.WriteTxLookupEntries(batch, block)
   890  
   891  		stats.processed++
   892  
   893  		if batch.ValueSize() >= neatdb.IdealBatchSize {
   894  			if err := batch.Write(); err != nil {
   895  				return 0, err
   896  			}
   897  			bytes += batch.ValueSize()
   898  			batch.Reset()
   899  		}
   900  	}
   901  	if batch.ValueSize() > 0 {
   902  		bytes += batch.ValueSize()
   903  		if err := batch.Write(); err != nil {
   904  			return 0, err
   905  		}
   906  	}
   907  
   908  	// Update the head fast sync block if better
   909  	bc.chainmu.Lock()
   910  	head := blockChain[len(blockChain)-1]
   911  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   912  		currentFastBlock := bc.CurrentFastBlock()
   913  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   914  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   915  			bc.currentFastBlock.Store(head)
   916  		}
   917  	}
   918  	bc.chainmu.Unlock()
   919  
   920  	bc.logger.Info("Imported new block receipts",
   921  		"count", stats.processed,
   922  		"elapsed", common.PrettyDuration(time.Since(start)),
   923  		"number", head.Number(),
   924  		"hash", head.Hash(),
   925  		"size", common.StorageSize(bytes),
   926  		"ignored", stats.ignored)
   927  	return 0, nil
   928  }
   929  
   930  var lastWrite uint64
   931  
   932  // WriteBlockWithoutState writes only the block and its metadata to the database,
   933  // but does not write any state. This is used to construct competing side forks
   934  // up to the point where they exceed the canonical total difficulty.
   935  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   936  	bc.wg.Add(1)
   937  	defer bc.wg.Done()
   938  
   939  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   940  		return err
   941  	}
   942  	rawdb.WriteBlock(bc.db, block)
   943  
   944  	return nil
   945  }
   946  
   947  func (bc *BlockChain) MuLock() {
   948  	bc.chainmu.Lock()
   949  }
   950  
   951  func (bc *BlockChain) MuUnLock() {
   952  	bc.chainmu.Unlock()
   953  }
   954  
   955  // WriteBlockWithState writes the block and all associated state to the database.
   956  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   957  
   958  	return bc.writeBlockWithState(block, receipts, state)
   959  }
   960  
   961  // writeBlockWithState writes the block and all associated state to the database,
   962  // but expects the chain mutex to be held.
   963  func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   964  	bc.wg.Add(1)
   965  	defer bc.wg.Done()
   966  
   967  	// The block (and its state) is already known, so avoid rewriting it and
   968  	// simply refresh the head to point at it.
   969  	if bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
   970  		bc.insert(block)
   971  		bc.futureBlocks.Remove(block.Hash())
   972  		return CanonStatTy, nil
   973  	}
   974  
   975  	// Calculate the total difficulty of the block
   976  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   977  	if ptd == nil {
   978  		return NonStatTy, consensus.ErrUnknownAncestor
   979  	}
   980  	// Make sure no inconsistent state is leaked during insertion
   981  	currentBlock := bc.CurrentBlock()
   982  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   983  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   984  
   985  	// Irrelevant of the canonical status, write the block itself to the database
   986  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   987  		return NonStatTy, err
   988  	}
   989  	rawdb.WriteBlock(bc.db, block)
   990  
   991  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   992  	if err != nil {
   993  		return NonStatTy, err
   994  	}
   995  	triedb := bc.stateCache.TrieDB()
   996  
   997  	// Flush the database within 5 blocks before/after an epoch switch to avoid rollback issues
   998  	nc := bc.Engine().(consensus.NeatCon)
   999  	forceFlushWindow := uint64(5)
  1000  	curBlockNumber := block.NumberU64()
  1001  	curEpoch := nc.GetEpoch().GetEpochByBlockNumber(curBlockNumber)
  1002  	withinEpochSwitchWindow := curBlockNumber < curEpoch.StartBlock+forceFlushWindow || curBlockNumber > curEpoch.EndBlock-forceFlushWindow
  1003  
  1004  	flushBlocksInterval := uint64(5000) // flush at this interval to reduce the catch-up effort when a rollback occurs
  1005  	meetFlushBlockInterval := curBlockNumber%flushBlocksInterval == 0
  1006  
  1007  	// Flush inside the epoch-switch window, on the periodic interval, or always when running an archive node
  1008  	if withinEpochSwitchWindow || bc.cacheConfig.TrieDirtyDisabled || meetFlushBlockInterval {
  1009  		if err := triedb.Commit(root, false); err != nil {
  1010  			return NonStatTy, err
  1011  		}
  1012  	} else {
  1013  		// Full but not archive node, do proper garbage collection
  1014  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
  1015  		bc.triegc.Push(root, -int64(block.NumberU64()))
  1016  
  1017  		if current := block.NumberU64(); current > triesInMemory {
  1018  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
  1019  			var (
  1020  				nodes, imgs = triedb.Size()
  1021  				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
  1022  			)
  1023  			if nodes > limit || imgs > 4*1024*1024 {
  1024  				triedb.Cap(limit - neatdb.IdealBatchSize)
  1025  			}
  1026  			// Find the next state trie we need to commit
  1027  			chosen := current - triesInMemory
  1028  
  1029  			// If we exceeded our time allowance, flush an entire trie to disk
  1030  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
  1031  				// If the header is missing (canonical chain behind), we're reorging a low
  1032  				// diff sidechain. Suspend committing until this operation is completed.
  1033  				header := bc.GetHeaderByNumber(chosen)
  1034  				if header == nil {
  1035  					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
  1036  				} else {
  1037  					// If we're exceeding limits but haven't reached a large enough memory gap,
  1038  					// warn the user that the system is becoming unstable.
  1039  					if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
  1040  						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
  1041  					}
  1042  					// Flush an entire trie and restart the counters
  1043  					triedb.Commit(header.Root, true)
  1044  					lastWrite = chosen
  1045  					bc.gcproc = 0
  1046  				}
  1047  			}
  1048  			// Garbage collect anything below our required write retention
  1049  			for !bc.triegc.Empty() {
  1050  				root, number := bc.triegc.Pop()
  1051  				if uint64(-number) > chosen {
  1052  					bc.triegc.Push(root, number)
  1053  					break
  1054  				}
  1055  				triedb.Dereference(root.(common.Hash))
  1056  			}
  1057  		}
  1058  	}
  1059  
  1060  	// Write other block data using a batch.
  1061  	batch := bc.db.NewBatch()
  1062  	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
  1063  
  1064  	var reorg bool
  1065  	if _, ok := bc.engine.(consensus.NeatCon); ok {
  1066  		// The NeatCon engine always treats the block as canonical, so insert it into the chain
  1067  		reorg = true
  1068  	} else {
  1069  		// If the total difficulty is higher than our known, add it to the canonical chain
  1070  		// Second clause in the if statement reduces the vulnerability to selfish mining.
  1071  		// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
  1072  		reorg = externTd.Cmp(localTd) > 0 // assign (not redeclare) so the outer reorg flag is actually updated
  1073  		currentBlock = bc.CurrentBlock()
  1074  		if !reorg && externTd.Cmp(localTd) == 0 {
  1075  			// Split same-difficulty blocks by number, then at random
  1076  			reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
  1077  		}
  1078  	}
  1079  	if reorg {
  1080  		// Reorganise the chain if the parent is not the head block
  1081  		if block.ParentHash() != currentBlock.Hash() {
  1082  			if err := bc.reorg(currentBlock, block); err != nil {
  1083  				return NonStatTy, err
  1084  			}
  1085  		}
  1086  		// Write the positional metadata for transaction/receipt lookups and preimages
  1087  		rawdb.WriteTxLookupEntries(batch, block)
  1088  		rawdb.WritePreimages(batch, state.Preimages())
  1089  
  1090  		status = CanonStatTy
  1091  	} else {
  1092  		status = SideStatTy
  1093  	}
  1094  	if err := batch.Write(); err != nil {
  1095  		return NonStatTy, err
  1096  	}
  1097  
  1098  	// Set new head.
  1099  	if status == CanonStatTy {
  1100  		bc.insert(block)
  1101  	}
  1102  	bc.futureBlocks.Remove(block.Hash())
  1103  	return status, nil
  1104  }
  1105  
  1106  // addFutureBlock checks if the block is within the max allowed window to get
  1107  // accepted for future processing, and returns an error if the block is too far
  1108  // ahead and was not added.
  1109  func (bc *BlockChain) addFutureBlock(block *types.Block) error {
  1110  	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
  1111  	if block.Time() > max {
  1112  		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
  1113  	}
  1114  	bc.futureBlocks.Add(block.Hash(), block)
  1115  	return nil
  1116  }
  1117  
  1118  // InsertChain attempts to insert the given batch of blocks into the canonical
  1119  // chain or, otherwise, create a fork. If an error is returned, it will return
  1120  // the index number of the failing block as well as an error describing what
  1121  // went wrong.
  1122  //
  1123  // After insertion is done, all accumulated events will be fired.
  1124  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1125  	// Sanity check that we have something meaningful to import
  1126  	if len(chain) == 0 {
  1127  		return 0, nil
  1128  	}
  1129  
  1130  	// Blocks used by the ancestry sanity check below
  1131  	var (
  1132  		block, prev *types.Block
  1133  	)
  1134  	// Do a sanity check that the provided chain is actually ordered and linked
  1135  	for i := 1; i < len(chain); i++ {
  1136  		block = chain[i]
  1137  		prev = chain[i-1]
  1138  		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
  1139  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1140  			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
  1141  				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())
  1142  
  1143  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
  1144  				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
  1145  		}
  1146  	}
  1147  
  1148  	// Pre-checks passed, start the full block imports
  1149  	bc.wg.Add(1)
  1150  	bc.chainmu.Lock()
  1151  	n, events, logs, err := bc.insertChain(chain, true)
  1152  	bc.chainmu.Unlock()
  1153  	bc.wg.Done()
  1154  
  1155  	bc.PostChainEvents(events, logs)
  1156  	return n, err
  1157  }
  1158  
  1159  // insertChain is the internal implementation of InsertChain, which assumes that
  1160  // 1) chains are contiguous, and 2) The chain mutex is held.
  1161  //
  1162  // This method is split out so that import batches that require re-injecting
  1163  // historical blocks can do so without releasing the lock, which could lead to
  1164  // racey behaviour. If a sidechain import is in progress, and the historic state
  1165  // is imported, but then new canon-head is added before the actual sidechain
  1166  // completes, then the historic state could be pruned again
  1167  func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
  1168  	// If the chain is terminating, don't even bother starting up
  1169  	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1170  		return 0, nil, nil, nil
  1171  	}
  1172  
  1173  	// A queued approach to delivering events. This is generally
  1174  	// faster than direct delivery and requires much less mutex
  1175  	// acquiring.
  1176  	var (
  1177  		stats         = insertStats{startTime: mclock.Now()}
  1178  		events        = make([]interface{}, 0, len(chain))
  1179  		lastCanon     *types.Block
  1180  		coalescedLogs []*types.Log
  1181  	)
  1182  	// Start the parallel header verifier
  1183  	headers := make([]*types.Header, len(chain))
  1184  	seals := make([]bool, len(chain))
  1185  
  1186  	for i, block := range chain {
  1187  		headers[i] = block.Header()
  1188  		seals[i] = verifySeals
  1189  	}
  1190  
  1191  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1192  	defer close(abort)
  1193  
  1194  	// Peek the error for the first block to decide how to direct the import logic
  1195  	it := newInsertIterator(chain, results, bc.validator)
  1196  
  1197  	block, err := it.next()
  1198  
  1199  	// Left-trim all the known blocks
  1200  	if err == ErrKnownBlock {
  1201  		// First block (and state) is known
  1202  		//   1. We did a roll-back, and should now do a re-import
  1203  	//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
  1204  		// 	    from the canonical chain, which has not been verified.
  1205  		// Skip all known blocks that are behind us
  1206  		current := bc.CurrentBlock().NumberU64()
  1207  		for block != nil && err == ErrKnownBlock {
  1208  			if current >= block.NumberU64() {
  1209  				stats.ignored++
  1210  				block, err = it.next()
  1211  			} else {
  1212  				log.Infof("block already written but head not refreshed, hash %x, number %v",
  1213  					block.Hash(), block.NumberU64())
  1214  				//make it continue to refresh head
  1215  				err = nil
  1216  				break
  1217  			}
  1218  		}
  1219  		// Falls through to the block import
  1220  	}
  1221  
  1222  	switch {
  1223  	// First block is pruned, insert as sidechain and reorg only if TD grows enough
  1224  	case err == consensus.ErrPrunedAncestor:
  1225  		return bc.insertSidechain(block, it)
  1226  
  1227  	// First block is in the future, shove it (and all children) to the future queue (unknown ancestor)
  1228  	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
  1229  		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
  1230  			if err := bc.addFutureBlock(block); err != nil {
  1231  				return it.index, events, coalescedLogs, err
  1232  			}
  1233  			block, err = it.next()
  1234  		}
  1235  		stats.queued += it.processed()
  1236  		stats.ignored += it.remaining()
  1237  
  1238  		// If there are any still remaining, mark as ignored
  1239  		return it.index, events, coalescedLogs, err
  1240  
  1241  	// Some other error occurred, abort
  1242  	case err != nil:
  1243  		stats.ignored += len(it.chain)
  1244  		bc.reportBlock(block, nil, err)
  1245  		return it.index, events, coalescedLogs, err
  1246  	}
  1247  
  1248  	// No validation errors for the first block (or chain prefix skipped)
  1249  	for ; block != nil && err == nil; block, err = it.next() {
  1250  		// If the chain is terminating, stop processing blocks
  1251  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1252  			bc.logger.Debug("Premature abort during blocks processing")
  1253  			break
  1254  		}
  1255  		// If the header is a banned one, straight out abort
  1256  		if BadHashes[block.Hash()] {
  1257  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1258  			return it.index, events, coalescedLogs, ErrBlacklistedHash
  1259  		}
  1260  
  1261  		// Retrieve the parent block and its state to execute on top
  1262  		start := time.Now()
  1263  
  1264  		parent := it.previous()
  1265  		if parent == nil {
  1266  			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1267  		}
  1268  		statedb, err := state.New(parent.Root, bc.stateCache)
  1269  		if err != nil {
  1270  			return it.index, events, coalescedLogs, err
  1271  		}
  1272  		// Process block using the parent state as reference point.
  1273  		receipts, logs, usedGas, ops, err := bc.processor.Process(block, statedb, bc.vmConfig)
  1274  		if err != nil {
  1275  			bc.reportBlock(block, receipts, err)
  1276  			return it.index, events, coalescedLogs, err
  1277  		}
  1278  
  1279  		// Validate the state using the default validator
  1280  		err = bc.Validator().ValidateState(block, statedb, receipts, usedGas)
  1281  		if err != nil {
  1282  			bc.reportBlock(block, receipts, err)
  1283  			return it.index, events, coalescedLogs, err
  1284  		}
  1285  		proctime := time.Since(start)
  1286  
  1287  		//err = bc.UpdateForbiddenState(block.Header(), statedb)
  1288  		//if err != nil {
  1289  		//	bc.logger.Error("Block chain failed to update forbidden state", "err", err)
  1290  		//}
  1291  
  1292  		// Write the block to the chain and get the status.
  1293  		status, err := bc.writeBlockWithState(block, receipts, statedb)
  1294  		if err != nil {
  1295  			return it.index, events, coalescedLogs, err
  1296  		}
  1297  		// execute the pending ops.
  1298  		for _, op := range ops.Ops() {
  1299  			if err := ApplyOp(op, bc, bc.cch); err != nil {
  1300  				bc.logger.Error("Failed executing op", "op", op, "err", err)
  1301  			}
  1302  		}
  1303  
  1304  		blockInsertTimer.UpdateSince(start)
  1305  
  1306  		switch status {
  1307  		case CanonStatTy:
  1308  			bc.logger.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1309  				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1310  				"elapsed", common.PrettyDuration(time.Since(start)),
  1311  				"root", block.Root())
  1312  
  1313  			coalescedLogs = append(coalescedLogs, logs...)
  1314  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1315  			lastCanon = block
  1316  
  1317  			// Only count canonical blocks for GC processing time
  1318  			bc.gcproc += proctime
  1319  
  1320  		case SideStatTy:
  1321  			bc.logger.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1322  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1323  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1324  				"root", block.Root())
  1325  			events = append(events, ChainSideEvent{block})
  1326  		}
  1327  		stats.processed++
  1328  		stats.usedGas += usedGas
  1329  
  1330  		dirty, _ := bc.stateCache.TrieDB().Size()
  1331  		stats.report(chain, it.index, dirty)
  1332  	}
  1333  	// Any blocks remaining here? The only ones we care about are the future ones
  1334  	if block != nil && err == consensus.ErrFutureBlock {
  1335  		if err := bc.addFutureBlock(block); err != nil {
  1336  			return it.index, events, coalescedLogs, err
  1337  		}
  1338  		block, err = it.next()
  1339  
  1340  		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
  1341  			if err := bc.addFutureBlock(block); err != nil {
  1342  				return it.index, events, coalescedLogs, err
  1343  			}
  1344  			stats.queued++
  1345  		}
  1346  	}
  1347  	stats.ignored += it.remaining()
  1348  
  1349  	// Append a single chain head event if we've progressed the chain
  1350  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1351  		events = append(events, ChainHeadEvent{lastCanon})
  1352  	}
  1353  
  1354  	return it.index, events, coalescedLogs, err
  1355  }
  1356  
  1357  // insertSidechain is called when an import batch hits upon a pruned ancestor
  1358  // error, which happens when a sidechain with a sufficiently old fork-block is
  1359  // found.
  1360  //
  1361  // The method writes all (header-and-body-valid) blocks to disk, then tries to
  1362  // switch over to the new chain if the sidechain's TD exceeds that of the current chain.
  1363  func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
  1364  	var (
  1365  		externTd *big.Int
  1366  		current  = bc.CurrentBlock()
  1367  	)
  1368  	// The first sidechain block error is already verified to be ErrPrunedAncestor.
  1369  	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
  1370  	// ones. Any other error means that the block is invalid and should not be written
  1371  	// to disk.
  1372  	err := consensus.ErrPrunedAncestor
  1373  	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
  1374  		// Check the canonical state root for that number
  1375  		if number := block.NumberU64(); current.NumberU64() >= number {
  1376  			canonical := bc.GetBlockByNumber(number)
  1377  			if canonical != nil && canonical.Hash() == block.Hash() {
  1378  				// Not a sidechain block, this is a re-import of a canon block whose state has been pruned
  1379  				continue
  1380  			}
  1381  			if canonical != nil && canonical.Root() == block.Root() {
  1382  				// This is most likely a shadow-state attack. When a fork is imported into the
  1383  				// database, and it eventually reaches a block height which is not pruned, we
  1384  				// just found that the state already exists! This means that the sidechain block
  1385  				// refers to a state which already exists in our canon chain.
  1386  				//
  1387  				// If left unchecked, we would now proceed importing the blocks, without actually
  1388  				// having verified the state of the previous blocks.
  1389  				bc.logger.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  1390  
  1391  				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
  1392  				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
  1393  				// mechanism.
  1394  				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
  1395  			}
  1396  		}
  1397  		if externTd == nil {
  1398  			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1399  		}
  1400  		externTd = new(big.Int).Add(externTd, block.Difficulty())
  1401  
  1402  		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  1403  			start := time.Now()
  1404  			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
  1405  				return it.index, nil, nil, err
  1406  			}
  1407  			bc.logger.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  1408  				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1409  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1410  				"root", block.Root())
  1411  		}
  1412  	}
  1413  	// At this point, we've written all sidechain blocks to the database. The loop ended
  1414  	// either on some other error or because all blocks were processed. If there was some
  1415  	// other error, we can ignore the rest of those blocks.
  1416  	//
  1417  	// If the externTd was larger than our local TD, we now need to reimport the previous
  1418  	// blocks to regenerate the required state
  1419  	localTd := bc.GetTd(current.Hash(), current.NumberU64())
  1420  	if localTd.Cmp(externTd) > 0 {
  1421  		bc.logger.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  1422  		return it.index, nil, nil, err
  1423  	}
  1424  	// Gather all the sidechain hashes (full blocks may be memory heavy)
  1425  	var (
  1426  		hashes  []common.Hash
  1427  		numbers []uint64
  1428  	)
  1429  	parent := it.previous()
  1430  	for parent != nil && !bc.HasState(parent.Root) {
  1431  		hashes = append(hashes, parent.Hash())
  1432  		numbers = append(numbers, parent.Number.Uint64())
  1433  
  1434  		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  1435  	}
  1436  	if parent == nil {
  1437  		return it.index, nil, nil, errors.New("missing parent")
  1438  	}
  1439  	// Import all the pruned blocks to make the state available
  1440  	var (
  1441  		blocks []*types.Block
  1442  		memory common.StorageSize
  1443  	)
  1444  	for i := len(hashes) - 1; i >= 0; i-- {
  1445  		// Append the next block to our batch
  1446  		block := bc.GetBlock(hashes[i], numbers[i])
  1447  
  1448  		blocks = append(blocks, block)
  1449  		memory += block.Size()
  1450  
  1451  		// If memory use grew too large, import and continue. Sadly we need to discard
  1452  		// all raised events and logs from notifications since we're too heavy on the
  1453  		// memory here.
  1454  		if len(blocks) >= 2048 || memory > 64*1024*1024 {
  1455  			bc.logger.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  1456  			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
  1457  				return 0, nil, nil, err
  1458  			}
  1459  			blocks, memory = blocks[:0], 0
  1460  
  1461  			// If the chain is terminating, stop processing blocks
  1462  			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1463  				bc.logger.Debug("Premature abort during blocks processing")
  1464  				return 0, nil, nil, nil
  1465  			}
  1466  		}
  1467  	}
  1468  	if len(blocks) > 0 {
  1469  		bc.logger.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  1470  		return bc.insertChain(blocks, false)
  1471  	}
  1472  	return 0, nil, nil, nil
  1473  }
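
        // Illustrative sketch only (not part of the import path above): the total-difficulty
        // bookkeeping that decides whether a sidechain written by insertSidechain triggers a
        // reorg. parentHash, parentNumber and sideBlocks are placeholders for the fork point
        // and the sidechain segment.
        //
        //	externTd := bc.GetTd(parentHash, parentNumber) // TD of the chain up to the fork point
        //	for _, b := range sideBlocks {
        //		externTd = new(big.Int).Add(externTd, b.Difficulty())
        //	}
        //	current := bc.CurrentBlock()
        //	if bc.GetTd(current.Hash(), current.NumberU64()).Cmp(externTd) > 0 {
        //		// The local chain is still heavier: the sidechain stays on disk, no reorg.
        //	} else {
        //		// Pruned ancestors are re-imported to regenerate their state, then the
        //		// sidechain segment is inserted and may become the canonical chain.
        //	}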
  1474  
  1475  // reorg takes the heads of an old chain and a new chain, reconstructs the blocks down to
  1476  // their common ancestor, inserts the new blocks to become part of the new canonical chain,
  1477  // and accumulates the transactions and logs that were dropped so events can be posted about them.
  1478  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1479  	var (
  1480  		newChain    types.Blocks
  1481  		oldChain    types.Blocks
  1482  		commonBlock *types.Block
  1483  
  1484  		deletedTxs types.Transactions
  1485  
  1486  		deletedLogs []*types.Log
  1487  
  1488  		// collectLogs collects the logs that were generated during the
  1489  		// processing of the block that corresponds with the given hash.
  1490  		// These logs are later announced as deleted.
  1491  		collectLogs = func(hash common.Hash) {
  1492  			number := bc.hc.GetBlockNumber(hash)
  1493  			if number == nil {
  1494  				return
  1495  			}
  1496  			// Coalesce logs and set 'Removed'.
  1497  			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
  1498  			for _, receipt := range receipts {
  1499  				for _, log := range receipt.Logs {
  1500  					del := *log
  1501  					del.Removed = true
  1502  					deletedLogs = append(deletedLogs, &del)
  1503  				}
  1504  			}
  1505  		}
  1506  	)
  1507  
  1508  	// First reduce whichever chain is higher down to the height of the other
  1509  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1510  		// reduce old chain
  1511  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1512  			oldChain = append(oldChain, oldBlock)
  1513  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1514  			collectLogs(oldBlock.Hash())
  1515  		}
  1516  	} else {
  1517  		// reduce new chain and append new chain blocks for inserting later on
  1518  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1519  			newChain = append(newChain, newBlock)
  1520  		}
  1521  	}
  1522  	if oldBlock == nil {
  1523  		return fmt.Errorf("Invalid old chain")
  1524  	}
  1525  	if newBlock == nil {
  1526  		return fmt.Errorf("Invalid new chain")
  1527  	}
  1528  
  1529  	for {
  1530  		if oldBlock.Hash() == newBlock.Hash() {
  1531  			commonBlock = oldBlock
  1532  			break
  1533  		}
  1534  
  1535  		oldChain = append(oldChain, oldBlock)
  1536  		newChain = append(newChain, newBlock)
  1537  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1538  		collectLogs(oldBlock.Hash())
  1539  
  1540  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1541  		if oldBlock == nil {
  1542  			return fmt.Errorf("Invalid old chain")
  1543  		}
  1544  		if newBlock == nil {
  1545  			return fmt.Errorf("Invalid new chain")
  1546  		}
  1547  	}
  1548  	// Ensure the user sees large reorgs
  1549  	if len(oldChain) > 0 && len(newChain) > 0 {
  1550  		logFn := log.Debug
  1551  		if len(oldChain) > 63 {
  1552  			logFn = log.Warn
  1553  		}
  1554  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1555  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1556  	} else {
  1557  		bc.logger.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1558  	}
  1559  	// Insert the new chain, taking care of the proper incremental order
  1560  	var addedTxs types.Transactions
  1561  	for i := len(newChain) - 1; i >= 0; i-- {
  1562  		// insert the block in the canonical way, re-writing history
  1563  		bc.insert(newChain[i])
  1564  
  1565  		// Write lookup entries for hash based transaction/receipt searches
  1566  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1567  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1568  	}
  1569  	// When transactions get deleted from the database, the receipts that were
  1570  	// created in the fork must also be deleted
  1571  	batch := bc.db.NewBatch()
  1572  	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
  1573  		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1574  	}
  1575  	batch.Write()
  1576  
  1577  	if len(deletedLogs) > 0 {
  1578  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1579  	}
  1580  	if len(oldChain) > 0 {
  1581  		go func() {
  1582  			for _, block := range oldChain {
  1583  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1584  			}
  1585  		}()
  1586  	}
  1587  
  1588  	return nil
  1589  }
  1590  
  1591  // PostChainEvents iterates over the events generated by a chain insertion and
  1592  // posts them into the event feed.
  1593  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1594  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1595  	// post event logs for further processing
  1596  	if logs != nil {
  1597  		bc.logsFeed.Send(logs)
  1598  	}
  1599  	for _, event := range events {
  1600  		switch ev := event.(type) {
  1601  		case ChainEvent:
  1602  			bc.chainFeed.Send(ev)
  1603  
  1604  		case ChainHeadEvent:
  1605  			bc.chainHeadFeed.Send(ev)
  1606  
  1607  		case ChainSideEvent:
  1608  			bc.chainSideFeed.Send(ev)
  1609  
  1610  		case CreateSideChainEvent:
  1611  			bc.createSideChainFeed.Send(ev)
  1612  
  1613  		case StartMiningEvent:
  1614  			bc.startMiningFeed.Send(ev)
  1615  
  1616  		case StopMiningEvent:
  1617  			bc.stopMiningFeed.Send(ev)
  1618  		}
  1619  	}
  1620  }
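
        // Minimal call-pattern sketch (illustrative; the call site shown is an assumption,
        // not the only caller): the events and coalesced logs produced by a block insert are
        // handed to PostChainEvents so that subscribers of the feeds below receive them.
        //
        //	n, events, logs, err := bc.insertChain(blocks, true)
        //	bc.PostChainEvents(events, logs)
        //	if err != nil {
        //		bc.logger.Error("chain insert aborted", "index", n, "err", err)
        //	}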
  1621  
  1622  func (bc *BlockChain) update() {
  1623  	futureTimer := time.NewTicker(5 * time.Second)
  1624  	defer futureTimer.Stop()
  1625  	for {
  1626  		select {
  1627  		case <-futureTimer.C:
  1628  			bc.procFutureBlocks()
  1629  		case <-bc.quit:
  1630  			return
  1631  		}
  1632  	}
  1633  }
  1634  
  1635  // BadBlockArgs represents the entries in the list returned when bad blocks are queried.
  1636  type BadBlockArgs struct {
  1637  	Hash   common.Hash   `json:"hash"`
  1638  	Header *types.Header `json:"header"`
  1639  }
  1640  
  1641  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1642  func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
  1643  	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
  1644  	for _, hash := range bc.badBlocks.Keys() {
  1645  		if hdr, exist := bc.badBlocks.Peek(hash); exist {
  1646  			header := hdr.(*types.Header)
  1647  			headers = append(headers, BadBlockArgs{header.Hash(), header})
  1648  		}
  1649  	}
  1650  	return headers, nil
  1651  }
  1652  
  1653  // HasBadBlock returns whether the block with the hash is a bad block
  1654  func (bc *BlockChain) HasBadBlock(hash common.Hash) bool {
  1655  	return bc.badBlocks.Contains(hash)
  1656  }
  1657  
  1658  // addBadBlock adds a bad block to the bad-block LRU cache
  1659  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1660  	bc.badBlocks.Add(block.Header().Hash(), block.Header())
  1661  }
  1662  
  1663  // reportBlock logs a bad block error.
  1664  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1665  	bc.addBadBlock(block)
  1666  
  1667  	var receiptString string
  1668  	for _, receipt := range receipts {
  1669  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1670  	}
  1671  	bc.logger.Error(fmt.Sprintf(`
  1672  ########## BAD BLOCK #########
  1673  Chain config: %v
  1674  
  1675  Number: %v
  1676  Hash: 0x%x
  1677  %v
  1678  
  1679  Error: %v
  1680  ##############################
  1681  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1682  }
  1683  
  1684  // InsertHeaderChain attempts to insert the given header chain into the local
  1685  // chain, possibly creating a reorg. If an error is returned, it will return the
  1686  // index number of the failing header as well as an error describing what went wrong.
  1687  //
  1688  // The checkFreq parameter can be used to fine-tune whether nonce verification
  1689  // should be done or not. The reason the check is optional is that some of the
  1690  // header retrieval mechanisms already need to verify nonces, and nonces can be
  1691  // verified sparsely, without needing to check each one.
  1692  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1693  	start := time.Now()
  1694  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1695  		return i, err
  1696  	}
  1697  
  1698  	// Make sure only one thread manipulates the chain at once
  1699  	bc.chainmu.Lock()
  1700  	defer bc.chainmu.Unlock()
  1701  
  1702  	bc.wg.Add(1)
  1703  	defer bc.wg.Done()
  1704  
  1705  	whFunc := func(header *types.Header) error {
  1706  		_, err := bc.hc.WriteHeader(header)
  1707  		return err
  1708  	}
  1709  
  1710  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1711  }
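
        // Minimal usage sketch (assumed caller, e.g. a header-only synchronisation path).
        // The checkFreq value of 100 is only illustrative; it controls how sparsely seals
        // are verified, as described above.
        //
        //	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
        //		bc.logger.Error("header import failed", "index", n, "err", err)
        //	}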
  1712  
  1713  // CurrentHeader retrieves the current head header of the canonical chain. The
  1714  // header is retrieved from the HeaderChain's internal cache.
  1715  func (bc *BlockChain) CurrentHeader() *types.Header {
  1716  	return bc.hc.CurrentHeader()
  1717  }
  1718  
  1719  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1720  // database by hash and number, caching it if found.
  1721  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1722  	return bc.hc.GetTd(hash, number)
  1723  }
  1724  
  1725  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1726  // database by hash, caching it if found.
  1727  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1728  	return bc.hc.GetTdByHash(hash)
  1729  }
  1730  
  1731  // GetHeader retrieves a block header from the database by hash and number,
  1732  // caching it if found.
  1733  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1734  	return bc.hc.GetHeader(hash, number)
  1735  }
  1736  
  1737  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1738  // found.
  1739  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1740  	return bc.hc.GetHeaderByHash(hash)
  1741  }
  1742  
  1743  // HasHeader checks if a block header is present in the database or not, caching
  1744  // it if present.
  1745  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1746  	return bc.hc.HasHeader(hash, number)
  1747  }
  1748  
  1749  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1750  // hash, fetching towards the genesis block.
  1751  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1752  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1753  }
  1754  
  1755  // GetHeaderByNumber retrieves a block header from the database by number,
  1756  // caching it (associated with its hash) if found.
  1757  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1758  	return bc.hc.GetHeaderByNumber(number)
  1759  }
  1760  
  1761  // Config retrieves the blockchain's chain configuration.
  1762  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1763  
  1764  // Engine retrieves the blockchain's consensus engine.
  1765  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1766  
  1767  // GetCrossChainHelper retrieves the blockchain's cross chain helper.
  1768  func (bc *BlockChain) GetCrossChainHelper() CrossChainHelper { return bc.cch }
  1769  
  1770  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1771  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1772  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1773  }
  1774  
  1775  // SubscribeChainEvent registers a subscription of ChainEvent.
  1776  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1777  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1778  }
  1779  
  1780  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1781  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1782  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1783  }
  1784  
  1785  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1786  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1787  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1788  }
  1789  
  1790  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1791  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1792  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1793  }
  1794  
  1795  // SubscribeCreateSideChainEvent registers a subscription of CreateSideChainEvent.
  1796  func (bc *BlockChain) SubscribeCreateSideChainEvent(ch chan<- CreateSideChainEvent) event.Subscription {
  1797  	return bc.scope.Track(bc.createSideChainFeed.Subscribe(ch))
  1798  }
  1799  
  1800  // SubscribeStartMiningEvent registers a subscription of StartMiningEvent.
  1801  func (bc *BlockChain) SubscribeStartMiningEvent(ch chan<- StartMiningEvent) event.Subscription {
  1802  	return bc.scope.Track(bc.startMiningFeed.Subscribe(ch))
  1803  }
  1804  
  1805  // SubscribeStopMiningEvent registers a subscription of StopMiningEvent.
  1806  func (bc *BlockChain) SubscribeStopMiningEvent(ch chan<- StopMiningEvent) event.Subscription {
  1807  	return bc.scope.Track(bc.stopMiningFeed.Subscribe(ch))
  1808  }
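
        // Illustrative subscriber sketch (assumed consumer code, not part of this package):
        // tracking new canonical heads through the ChainHeadEvent feed exposed above.
        // process is a placeholder for the consumer's own logic.
        //
        //	heads := make(chan ChainHeadEvent, 16)
        //	sub := bc.SubscribeChainHeadEvent(heads)
        //	defer sub.Unsubscribe()
        //	for {
        //		select {
        //		case ev := <-heads:
        //			process(ev.Block)
        //		case err := <-sub.Err():
        //			return err
        //		}
        //	}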
  1809  
  1810  //func (bc *BlockChain) GetForbiddenDuration() time.Duration {
  1811  //	return ForbiddenDuration
  1812  //}
  1813  //
  1814  //// Update validator block time and mark it forbidden if this validator did not participate in consensus for more than 4 hours
  1815  //func (bc *BlockChain) UpdateForbiddenState(header *types.Header, state *state.StateDB) error {
  1816  //	bc.wg.Add(1)
  1817  //	defer bc.wg.Done()
  1818  //
  1819  //	ep := bc.engine.(consensus.NeatCon).GetEpoch()
  1820  //	validators := ep.Validators.Validators
  1821  //	height := header.Number.Uint64()
  1822  //	blockTime := header.Time
  1823  //
  1824  //	bc.logger.Infof("update validator status height %v", height)
  1825  //
  1826  //	if height <= 1 {
  1827  //		return nil
  1828  //	}
  1829  //
  1830  //	extra, err := ntcTypes.ExtractNeatConExtra(header)
  1831  //	if err != nil {
  1832  //		bc.logger.Debugf("update validator status decode extra data error %v", err)
  1833  //		return err
  1834  //	}
  1835  //
  1836  //	if extra.SeenCommit == nil || extra.SeenCommit.BitArray == nil {
  1837  //		bc.logger.Debugf("update validator status seenCommit %v", extra.SeenCommit)
  1838  //		return fmt.Errorf("seen commit is nil")
  1839  //	}
  1840  //
  1841  //	bitMap := extra.SeenCommit.BitArray
  1842  //	for i := uint64(0); i < bitMap.Size(); i++ {
  1843  //		addr := common.BytesToAddress(validators[i].Address)
  1844  //		vObj := state.GetOrNewStateObject(addr)
  1845  //		if bitMap.GetIndex(i) {
  1846  //			vObj.SetBlockTime(blockTime)
  1847  //
  1848  //			bc.logger.Debugf("Update validator status, block time %v, current height %v", blockTime, height)
  1849  //		} else {
  1850  //			lastBlockTime := vObj.BlockTime()
  1851  //			durationTime := new(big.Int).Sub(blockTime, lastBlockTime)
  1852  //			bc.logger.Debugf("Update validator last block time, duration time %v, default forbidden time %v", durationTime, TimeForForbidden)
  1853  //			if durationTime.Cmp(big.NewInt(int64(TimeForForbidden.Seconds()))) >= 0 {
  1854  //				bc.logger.Debugf("update validator status forbidden true")
  1855  //				vObj.SetForbidden(true)
  1856  //				vObj.SetBlockTime(big.NewInt(time.Now().Unix()))
  1857  //				state.MarkAddressForbidden(addr)
  1858  //			}
  1859  //		}
  1860  //	}
  1861  //
  1862  //	return nil
  1863  //
  1864  //}