github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/core/blockchain.go

     1  // Package core implements the Ethereum consensus protocol.
     2  package core
     3  
     4  import (
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"math/big"
     9  	mrand "math/rand"
    10  	"sync"
    11  	"sync/atomic"
    12  	"time"
    13  
    14  	"github.com/hashicorp/golang-lru"
    15  	"github.com/quickchainproject/quickchain/common"
    16  	"github.com/quickchainproject/quickchain/common/mclock"
    17  	"github.com/quickchainproject/quickchain/consensus"
    18  	"github.com/quickchainproject/quickchain/consensus/dpos"
    19  	//"github.com/quickchainproject/quickchain/consensus/bft"
    20  	//"github.com/quickchainproject/quickchain/consensus/dbft"
    21  	"github.com/quickchainproject/quickchain/core/state"
    22  	"github.com/quickchainproject/quickchain/core/types"
    23  	"github.com/quickchainproject/quickchain/core/vm"
    24  	"github.com/quickchainproject/quickchain/crypto"
    25  	"github.com/quickchainproject/quickchain/event"
    26  	"github.com/quickchainproject/quickchain/qctdb"
    27  	"github.com/quickchainproject/quickchain/log"
    28  	"github.com/quickchainproject/quickchain/metrics"
    29  	"github.com/quickchainproject/quickchain/params"
    30  	"github.com/quickchainproject/quickchain/rlp"
    31  	"github.com/quickchainproject/quickchain/trie"
    32  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    33  )
    34  
    35  var (
    36  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    37  
    38  	ErrNoGenesis = errors.New("Genesis not found in chain")
    39  )
    40  
    41  const (
    42  	bodyCacheLimit      = 256 // Maximum number of block bodies kept in the cache
    43  	blockCacheLimit     = 256 // Maximum number of entire blocks kept in the cache
    44  	maxFutureBlocks     = 256 // Maximum number of future blocks queued for later processing
    45  	maxTimeFutureBlocks = 30  // Max seconds a block may be ahead of the local clock before rejection
    46  	badBlockLimit       = 10  // Maximum number of recently seen bad blocks kept in memory
    47  	triesInMemory       = 128 // Number of recent state tries kept in memory before garbage collection
    48  
    49  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    50  	BlockChainVersion = 3
    51  )
    52  
    53  // CacheConfig contains the configuration values for the trie caching/pruning
    54  // that's resident in a blockchain.
    55  type CacheConfig struct {
    56  	Disabled      bool          // Whether to disable trie write caching (archive node)
    57  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    58  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    59  }
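
        // A minimal configuration sketch (values are illustrative; the pruning values
        // below mirror the defaults applied by NewBlockChain when no CacheConfig is
        // supplied):
        //
        //	archive := &CacheConfig{Disabled: true} // write every trie to disk, never prune
        //	pruning := &CacheConfig{
        //		TrieNodeLimit: 256 * 1024 * 1024,
        //		TrieTimeLimit: 5 * time.Minute,
        //	}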
    60  
    61  // BlockChain represents the canonical chain given a database with a genesis
    62  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
    63  //
    64  // Importing blocks into the block chain happens according to the set of rules
    65  // defined by the two stage Validator. Processing of blocks is done using the
    66  // Processor which processes the included transactions. The validation of the state
    67  // is done in the second part of the Validator. A failure results in aborting of
    68  // the import.
    69  //
    70  // The BlockChain also helps in returning blocks from **any** chain included
    71  // in the database as well as blocks that represent the canonical chain. It's
    72  // important to note that GetBlock can return any block and does not need to be
    73  // included in the canonical chain, whereas GetBlockByNumber always represents the
    74  // canonical chain.
    75  type BlockChain struct {
    76  	chainConfig *params.ChainConfig // Chain & network configuration
    77  	cacheConfig *CacheConfig        // Cache configuration for pruning
    78  
    79  	db     qctdb.Database // Low level persistent database to store final content in
    80  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
    81  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
    82  
    83  	hc            *HeaderChain
    84  	rmLogsFeed    event.Feed
    85  	chainFeed     event.Feed
    86  	chainSideFeed event.Feed
    87  	chainHeadFeed event.Feed
    88  	logsFeed      event.Feed
    89  	scope         event.SubscriptionScope
    90  	genesisBlock  *types.Block
    91  
    92  	mu      sync.RWMutex // global mutex for locking chain operations
    93  	chainmu sync.RWMutex // blockchain insertion lock
    94  	procmu  sync.RWMutex // block processor lock
    95  
    96  	checkpoint       int          // checkpoint counts towards the new checkpoint
    97  	currentBlock     atomic.Value // Current head of the block chain
    98  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
    99  
   100  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   101  	bodyCache    *lru.Cache     // Cache for the most recent block bodies
   102  	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   103  	blockCache   *lru.Cache     // Cache for the most recent entire blocks
   104  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   105  
   106  	quit    chan struct{} // blockchain quit channel
   107  	running int32         // running must be accessed atomically
   108  	// procInterrupt must be accessed atomically
   109  	procInterrupt int32          // interrupt signaler for block processing
   110  	wg            sync.WaitGroup // chain processing wait group for shutting down
   111  
   112  	engine    consensus.Engine
   113  	processor Processor // block processor interface
   114  	validator Validator // block and state validator interface
   115  	vmConfig  vm.Config
   116  
   117  	badBlocks *lru.Cache // Bad block cache
   118  }
   119  
   120  // NewBlockChain returns a fully initialised block chain using information
   121  // available in the database. It initialises the default Ethereum Validator and
   122  // Processor.
   123  func NewBlockChain(db qctdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
   124  	if cacheConfig == nil {
   125  		cacheConfig = &CacheConfig{
   126  			TrieNodeLimit: 256 * 1024 * 1024,
   127  			TrieTimeLimit: 5 * time.Minute,
   128  		}
   129  	}
   130  	bodyCache, _ := lru.New(bodyCacheLimit)
   131  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   132  	blockCache, _ := lru.New(blockCacheLimit)
   133  	futureBlocks, _ := lru.New(maxFutureBlocks)
   134  	badBlocks, _ := lru.New(badBlockLimit)
   135  
   136  	bc := &BlockChain{
   137  		chainConfig:  chainConfig,
   138  		cacheConfig:  cacheConfig,
   139  		db:           db,
   140  		triegc:       prque.New(),
   141  		stateCache:   state.NewDatabase(db),
   142  		quit:         make(chan struct{}),
   143  		bodyCache:    bodyCache,
   144  		bodyRLPCache: bodyRLPCache,
   145  		blockCache:   blockCache,
   146  		futureBlocks: futureBlocks,
   147  		engine:       engine,
   148  		vmConfig:     vmConfig,
   149  		badBlocks:    badBlocks,
   150  	}
   151  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   152  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   153  
   154  	var err error
   155  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   156  	if err != nil {
   157  		return nil, err
   158  	}
   159  	bc.genesisBlock = bc.GetBlockByNumber(0)
   160  	if bc.genesisBlock == nil {
   161  		return nil, ErrNoGenesis
   162  	}
   163  	if err := bc.loadLastState(); err != nil {
   164  		return nil, err
   165  	}
   166  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   167  	for hash := range BadHashes {
   168  		if header := bc.GetHeaderByHash(hash); header != nil {
   169  			// get the canonical block corresponding to the offending header's number
   170  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   171  			// make sure the headerByNumber (if present) is in our current canonical chain
   172  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   173  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   174  				bc.SetHead(header.Number.Uint64() - 1)
   175  				log.Error("Chain rewind was successful, resuming normal operation")
   176  			}
   177  		}
   178  	}
   179  	// Take ownership of this particular state
   180  	go bc.update()
   181  	return bc, nil
   182  }
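
        // A minimal usage sketch, assuming db, chainConfig and engine have been set up
        // elsewhere (e.g. by the node's service constructor); passing a nil CacheConfig
        // selects the pruning defaults above, and error handling is abbreviated:
        //
        //	bc, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{})
        //	if err != nil {
        //		log.Crit("Failed to initialise chain", "err", err)
        //	}
        //	defer bc.Stop()
        //	head := bc.CurrentBlock()
        //	log.Info("Chain ready", "number", head.Number(), "hash", head.Hash())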
   183  
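        // getProcInterrupt reports whether block processing has been interrupted and
        // any in-flight imports should abort.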
   184  func (bc *BlockChain) getProcInterrupt() bool {
   185  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   186  }
   187  
   188  // loadLastState loads the last known chain state from the database. This method
   189  // assumes that the chain manager mutex is held.
   190  func (bc *BlockChain) loadLastState() error {
   191  	// Restore the last known head block
   192  	head := GetHeadBlockHash(bc.db)
   193  	if head == (common.Hash{}) {
   194  		// Corrupt or empty database, init from scratch
   195  		log.Warn("Empty database, resetting chain")
   196  		return bc.Reset()
   197  	}
   198  	// Make sure the entire head block is available
   199  	currentBlock := bc.GetBlockByHash(head)
   200  	if currentBlock == nil {
   201  		// Corrupt or empty database, init from scratch
   202  		log.Warn("Head block missing, resetting chain", "hash", head)
   203  		return bc.Reset()
   204  	}
   205  	// Make sure the state associated with the block is available
   206  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   207  		// Dangling block without a state associated, init from scratch
   208  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   209  		if err := bc.repair(&currentBlock); err != nil {
   210  			return err
   211  		}
   212  	}
   213  	// Everything seems to be fine, set as the head block
   214  	bc.currentBlock.Store(currentBlock)
   215  
   216  	// Restore the last known head header
   217  	currentHeader := currentBlock.Header()
   218  	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
   219  		if header := bc.GetHeaderByHash(head); header != nil {
   220  			currentHeader = header
   221  		}
   222  	}
   223  	bc.hc.SetCurrentHeader(currentHeader)
   224  
   225  	// Restore the last known head fast block
   226  	bc.currentFastBlock.Store(currentBlock)
   227  	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   228  		if block := bc.GetBlockByHash(head); block != nil {
   229  			bc.currentFastBlock.Store(block)
   230  		}
   231  	}
   232  
   233  	// Issue a status log for the user
   234  	currentFastBlock := bc.CurrentFastBlock()
   235  
   236  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   237  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   238  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   239  
   240  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   241  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   242  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   243  
   244  	return nil
   245  }
   246  
   247  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   248  // above the new head will be deleted and the new one set. In the case of blocks
   249  // though, the head may be further rewound if block bodies are missing (non-archive
   250  // nodes after a fast sync).
   251  func (bc *BlockChain) SetHead(head uint64) error {
   252  	log.Warn("Rewinding blockchain", "target", head)
   253  
   254  	bc.mu.Lock()
   255  	defer bc.mu.Unlock()
   256  
   257  	// Rewind the header chain, deleting all block bodies until then
   258  	delFn := func(hash common.Hash, num uint64) {
   259  		DeleteBody(bc.db, hash, num)
   260  	}
   261  	bc.hc.SetHead(head, delFn)
   262  	currentHeader := bc.hc.CurrentHeader()
   263  
   264  	// Clear out any stale content from the caches
   265  	bc.bodyCache.Purge()
   266  	bc.bodyRLPCache.Purge()
   267  	bc.blockCache.Purge()
   268  	bc.futureBlocks.Purge()
   269  
   270  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   271  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   272  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   273  	}
   274  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   275  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   276  			// Rewound state missing, rolled back to before pivot, reset to genesis
   277  			bc.currentBlock.Store(bc.genesisBlock)
   278  		}
   279  	}
   280  	// Rewind the fast block in a simple way to the target head
   281  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   282  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   283  	}
   284  	// If either block reached nil, reset to the genesis state
   285  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   286  		bc.currentBlock.Store(bc.genesisBlock)
   287  	}
   288  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   289  		bc.currentFastBlock.Store(bc.genesisBlock)
   290  	}
   291  	currentBlock := bc.CurrentBlock()
   292  	currentFastBlock := bc.CurrentFastBlock()
   293  	if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
   294  		log.Crit("Failed to reset head full block", "err", err)
   295  	}
   296  	if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
   297  		log.Crit("Failed to reset head fast block", "err", err)
   298  	}
   299  	return bc.loadLastState()
   300  }
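
        // A minimal usage sketch (the target height 1024 is illustrative only):
        //
        //	// Drop all blocks, bodies and headers above height 1024 and repair the heads.
        //	if err := bc.SetHead(1024); err != nil {
        //		log.Error("Failed to rewind chain", "err", err)
        //	}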
   301  
   302  // FastSyncCommitHead sets the current head block to the one defined by the hash,
   303  // regardless of what the chain contents were prior.
   304  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   305  	// Make sure that both the block as well as its state trie exist
   306  	block := bc.GetBlockByHash(hash)
   307  	if block == nil {
   308  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   309  	}
   310  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   311  		return err
   312  	}
   313  	// If all checks out, manually set the head block
   314  	bc.mu.Lock()
   315  	bc.currentBlock.Store(block)
   316  	bc.mu.Unlock()
   317  
   318  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   319  	return nil
   320  }
   321  
   322  // GasLimit returns the gas limit of the current HEAD block.
   323  func (bc *BlockChain) GasLimit() uint64 {
   324  	return bc.CurrentBlock().GasLimit()
   325  }
   326  
   327  // CurrentBlock retrieves the current head block of the canonical chain. The
   328  // block is retrieved from the blockchain's internal cache.
   329  func (bc *BlockChain) CurrentBlock() *types.Block {
   330  	return bc.currentBlock.Load().(*types.Block)
   331  }
   332  
   333  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   334  // chain. The block is retrieved from the blockchain's internal cache.
   335  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   336  	return bc.currentFastBlock.Load().(*types.Block)
   337  }
   338  
   339  // SetProcessor sets the processor required for making state modifications.
   340  func (bc *BlockChain) SetProcessor(processor Processor) {
   341  	bc.procmu.Lock()
   342  	defer bc.procmu.Unlock()
   343  	bc.processor = processor
   344  }
   345  
   346  // SetValidator sets the validator which is used to validate incoming blocks.
   347  func (bc *BlockChain) SetValidator(validator Validator) {
   348  	bc.procmu.Lock()
   349  	defer bc.procmu.Unlock()
   350  	bc.validator = validator
   351  }
   352  
   353  // Validator returns the current validator.
   354  func (bc *BlockChain) Validator() Validator {
   355  	bc.procmu.RLock()
   356  	defer bc.procmu.RUnlock()
   357  	return bc.validator
   358  }
   359  
   360  // Processor returns the current processor.
   361  func (bc *BlockChain) Processor() Processor {
   362  	bc.procmu.RLock()
   363  	defer bc.procmu.RUnlock()
   364  	return bc.processor
   365  }
   366  
   367  // State returns a new mutable state based on the current HEAD block.
   368  func (bc *BlockChain) State() (*state.StateDB, error) {
   369  	return bc.StateAt(bc.CurrentBlock().Root())
   370  }
   371  
   372  // StateAt returns a new mutable state based on a particular point in time.
   373  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   374  	return state.New(root, bc.stateCache)
   375  }
   376  
   377  // Reset purges the entire blockchain, restoring it to its genesis state.
   378  func (bc *BlockChain) Reset() error {
   379  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   380  }
   381  
   382  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   383  // specified genesis state.
   384  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   385  	// Dump the entire block chain and purge the caches
   386  	if err := bc.SetHead(0); err != nil {
   387  		return err
   388  	}
   389  	bc.mu.Lock()
   390  	defer bc.mu.Unlock()
   391  
   392  	// Prepare the genesis block and reinitialise the chain
   393  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   394  		log.Crit("Failed to write genesis block TD", "err", err)
   395  	}
   396  	if err := WriteBlock(bc.db, genesis); err != nil {
   397  		log.Crit("Failed to write genesis block", "err", err)
   398  	}
   399  	bc.genesisBlock = genesis
   400  	bc.insert(bc.genesisBlock)
   401  	bc.currentBlock.Store(bc.genesisBlock)
   402  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   403  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   404  	bc.currentFastBlock.Store(bc.genesisBlock)
   405  
   406  	return nil
   407  }
   408  
   409  // repair tries to repair the current blockchain by rolling back the current block
   410  // until one with associated state is found. This is needed to fix incomplete db
   411  // writes caused either by crashes/power outages, or simply non-committed tries.
   412  //
   413  // This method only rolls back the current block. The current header and current
   414  // fast block are left intact.
   415  func (bc *BlockChain) repair(head **types.Block) error {
   416  	for {
   417  		// Abort if we've rewound to a head block that does have associated state
   418  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   419  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   420  			return nil
   421  		}
   422  		// Otherwise rewind one block and recheck state availability there
   423  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   424  	}
   425  }
   426  
   427  // Export writes the active chain to the given writer.
   428  func (bc *BlockChain) Export(w io.Writer) error {
   429  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   430  }
   431  
   432  // ExportN writes a subset of the active chain to the given writer.
   433  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   434  	bc.mu.RLock()
   435  	defer bc.mu.RUnlock()
   436  
   437  	if first > last {
   438  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   439  	}
   440  	log.Info("Exporting batch of blocks", "count", last-first+1)
   441  
   442  	for nr := first; nr <= last; nr++ {
   443  		block := bc.GetBlockByNumber(nr)
   444  		if block == nil {
   445  			return fmt.Errorf("export failed on #%d: not found", nr)
   446  		}
   447  
   448  		if err := block.EncodeRLP(w); err != nil {
   449  			return err
   450  		}
   451  	}
   452  
   453  	return nil
   454  }
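
        // A minimal usage sketch for dumping the chain to disk (assumes the standard
        // library os package; file name and error handling are illustrative):
        //
        //	f, err := os.Create("chain.rlp")
        //	if err != nil {
        //		return err
        //	}
        //	defer f.Close()
        //	if err := bc.Export(f); err != nil { // or bc.ExportN(f, 0, 1024) for a prefix
        //		return err
        //	}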
   455  
   456  // insert injects a new head block into the current block chain. This method
   457  // assumes that the block is indeed a true head. It will also reset the head
   458  // header and the head fast sync block to this very same block if they are older
   459  // or if they are on a different side chain.
   460  //
   461  // Note, this function assumes that the `mu` mutex is held!
   462  func (bc *BlockChain) insert(block *types.Block) {
   463  	// If the block is on a side chain or an unknown one, force other heads onto it too
   464  	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   465  
   466  	// Add the block to the canonical chain number scheme and mark as the head
   467  	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
   468  		log.Crit("Failed to insert block number", "err", err)
   469  	}
   470  	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
   471  		log.Crit("Failed to insert head block hash", "err", err)
   472  	}
   473  	bc.currentBlock.Store(block)
   474  
   475  	// If the block is better than our head or is on a different chain, force update heads
   476  	if updateHeads {
   477  		bc.hc.SetCurrentHeader(block.Header())
   478  
   479  		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
   480  			log.Crit("Failed to insert head fast block hash", "err", err)
   481  		}
   482  		bc.currentFastBlock.Store(block)
   483  	}
   484  }
   485  
   486  // Genesis retrieves the chain's genesis block.
   487  func (bc *BlockChain) Genesis() *types.Block {
   488  	return bc.genesisBlock
   489  }
   490  
   491  // GetBody retrieves a block body (transactions and uncles) from the database by
   492  // hash, caching it if found.
   493  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   494  	// Short circuit if the body's already in the cache, retrieve otherwise
   495  	if cached, ok := bc.bodyCache.Get(hash); ok {
   496  		body := cached.(*types.Body)
   497  		return body
   498  	}
   499  	body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash))
   500  	if body == nil {
   501  		return nil
   502  	}
   503  	// Cache the found body for next time and return
   504  	bc.bodyCache.Add(hash, body)
   505  	return body
   506  }
   507  
   508  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   509  // caching it if found.
   510  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   511  	// Short circuit if the body's already in the cache, retrieve otherwise
   512  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   513  		return cached.(rlp.RawValue)
   514  	}
   515  	body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
   516  	if len(body) == 0 {
   517  		return nil
   518  	}
   519  	// Cache the found body for next time and return
   520  	bc.bodyRLPCache.Add(hash, body)
   521  	return body
   522  }
   523  
   524  // HasBlock checks if a block is fully present in the database or not.
   525  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   526  	if bc.blockCache.Contains(hash) {
   527  		return true
   528  	}
   529  	ok, _ := bc.db.Has(blockBodyKey(hash, number))
   530  	return ok
   531  }
   532  
   533  // HasState checks if state trie is fully present in the database or not.
   534  func (bc *BlockChain) HasState(hash common.Hash) bool {
   535  	_, err := bc.stateCache.OpenTrie(hash)
   536  	return err == nil
   537  }
   538  
   539  // HasBlockAndState checks if a block and its associated state trie are fully
   540  // present in the database or not, caching the block if present.
   541  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   542  	// Check first that the block itself is known
   543  	block := bc.GetBlock(hash, number)
   544  	if block == nil {
   545  		return false
   546  	}
   547  	return bc.HasState(block.Root())
   548  }
   549  
   550  // GetBlock retrieves a block from the database by hash and number,
   551  // caching it if found.
   552  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   553  	// Short circuit if the block's already in the cache, retrieve otherwise
   554  	if block, ok := bc.blockCache.Get(hash); ok {
   555  		return block.(*types.Block)
   556  	}
   557  	block := GetBlock(bc.db, hash, number)
   558  	if block == nil {
   559  		return nil
   560  	}
   561  	// Cache the found block for next time and return
   562  	bc.blockCache.Add(block.Hash(), block)
   563  	return block
   564  }
   565  
   566  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   567  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   568  	return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
   569  }
   570  
   571  // GetBlockByNumber retrieves a block from the database by number, caching it
   572  // (associated with its hash) if found.
   573  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   574  	hash := GetCanonicalHash(bc.db, number)
   575  	if hash == (common.Hash{}) {
   576  		return nil
   577  	}
   578  	return bc.GetBlock(hash, number)
   579  }
   580  
   581  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   582  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   583  	return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash))
   584  }
   585  
   586  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   587  // [deprecated by eth/62]
   588  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   589  	number := bc.hc.GetBlockNumber(hash)
   590  	for i := 0; i < n; i++ {
   591  		block := bc.GetBlock(hash, number)
   592  		if block == nil {
   593  			break
   594  		}
   595  		blocks = append(blocks, block)
   596  		hash = block.ParentHash()
   597  		number--
   598  	}
   599  	return
   600  }
   601  
   602  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   603  // a specific distance is reached.
   604  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   605  	uncles := []*types.Header{}
   606  	for i := 0; block != nil && i < length; i++ {
   607  		uncles = append(uncles, block.Uncles()...)
   608  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   609  	}
   610  	return uncles
   611  }
   612  
   613  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   614  // either from ephemeral in-memory cache, or from persistent storage.
   615  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   616  	return bc.stateCache.TrieDB().Node(hash)
   617  }
   618  
   619  // Stop stops the blockchain service. If any imports are currently in progress
   620  // it will abort them using the procInterrupt.
   621  func (bc *BlockChain) Stop() {
   622  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   623  		return
   624  	}
   625  	// Unsubscribe all subscriptions registered from blockchain
   626  	bc.scope.Close()
   627  	close(bc.quit)
   628  	atomic.StoreInt32(&bc.procInterrupt, 1)
   629  
   630  	bc.wg.Wait()
   631  
   632  	// Ensure the state of a recent block is also stored to disk before exiting.
   633  	// We're writing three different states to catch different restart scenarios:
   634  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   635  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   636  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   637  	if !bc.cacheConfig.Disabled {
   638  		triedb := bc.stateCache.TrieDB()
   639  
   640  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   641  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   642  				recent := bc.GetBlockByNumber(number - offset)
   643  
   644  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   645  				if err := triedb.Commit(recent.Root(), true); err != nil {
   646  					log.Error("Failed to commit recent state trie", "err", err)
   647  				}
   648  			}
   649  		}
   650  		for !bc.triegc.Empty() {
   651  			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
   652  		}
   653  		if size := triedb.Size(); size != 0 {
   654  			log.Error("Dangling trie nodes after full cleanup")
   655  		}
   656  	}
   657  	log.Info("Blockchain manager stopped")
   658  }
   659  
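        // procFutureBlocks re-attempts insertion of the blocks that were previously
        // queued as future blocks, in ascending block number order.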
   660  func (bc *BlockChain) procFutureBlocks() {
   661  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   662  	for _, hash := range bc.futureBlocks.Keys() {
   663  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   664  			blocks = append(blocks, block.(*types.Block))
   665  		}
   666  	}
   667  	if len(blocks) > 0 {
   668  		types.BlockBy(types.Number).Sort(blocks)
   669  
   670  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   671  		for i := range blocks {
   672  			bc.InsertChain(blocks[i : i+1])
   673  		}
   674  	}
   675  }
   676  
   677  // WriteStatus is the status with which a block was written to the chain.
   678  type WriteStatus byte
   679  
   680  const (
   681  	NonStatTy   WriteStatus = iota // block was not written (e.g. an error occurred)
   682  	CanonStatTy                    // block was written and became part of the canonical chain
   683  	SideStatTy                     // block was written to a side chain
   684  )
   685  
   686  // Rollback is designed to remove a chain of links from the database that aren't
   687  // certain enough to be valid.
   688  func (bc *BlockChain) Rollback(chain []common.Hash) {
   689  	bc.mu.Lock()
   690  	defer bc.mu.Unlock()
   691  
   692  	for i := len(chain) - 1; i >= 0; i-- {
   693  		hash := chain[i]
   694  
   695  		currentHeader := bc.hc.CurrentHeader()
   696  		if currentHeader.Hash() == hash {
   697  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   698  		}
   699  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   700  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   701  			bc.currentFastBlock.Store(newFastBlock)
   702  			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   703  		}
   704  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   705  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   706  			bc.currentBlock.Store(newBlock)
   707  			WriteHeadBlockHash(bc.db, newBlock.Hash())
   708  		}
   709  	}
   710  }
   711  
   712  // SetReceiptsData computes all the non-consensus fields of the receipts
   713  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
   714  	signer := types.MakeSigner(config, block.Number())
   715  
   716  	transactions, logIndex := block.Transactions(), uint(0)
   717  	if len(transactions) != len(receipts) {
   718  		return errors.New("transaction and receipt count mismatch")
   719  	}
   720  
   721  	for j := 0; j < len(receipts); j++ {
   722  		// The transaction hash can be retrieved from the transaction itself
   723  		receipts[j].TxHash = transactions[j].Hash()
   724  
   725  		// The contract address can be derived from the transaction itself
   726  		if transactions[j].To() == nil {
   727  			// Deriving the signer is expensive, only do if it's actually needed
   728  			from, _ := types.Sender(signer, transactions[j])
   729  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   730  		}
   731  		// The used gas can be calculated based on previous receipts
   732  		if j == 0 {
   733  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   734  		} else {
   735  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   736  		}
   737  		// The derived log fields can simply be set from the block and transaction
   738  		for k := 0; k < len(receipts[j].Logs); k++ {
   739  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   740  			receipts[j].Logs[k].BlockHash = block.Hash()
   741  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   742  			receipts[j].Logs[k].TxIndex = uint(j)
   743  			receipts[j].Logs[k].Index = logIndex
   744  			logIndex++
   745  		}
   746  	}
   747  	return nil
   748  }
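
        // As a worked example of the gas derivation above: for three receipts with
        // CumulativeGasUsed values of 21000, 63000 and 90000, the derived per-transaction
        // GasUsed values are 21000, 42000 and 27000 respectively.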
   749  
   750  // InsertReceiptChain attempts to complete an already existing header chain with
   751  // transaction and receipt data.
   752  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   753  	bc.wg.Add(1)
   754  	defer bc.wg.Done()
   755  
   756  	// Do a sanity check that the provided chain is actually ordered and linked
   757  	for i := 1; i < len(blockChain); i++ {
   758  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   759  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   760  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   761  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   762  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   763  		}
   764  	}
   765  
   766  	var (
   767  		stats = struct{ processed, ignored int32 }{}
   768  		start = time.Now()
   769  		bytes = 0
   770  		batch = bc.db.NewBatch()
   771  	)
   772  	for i, block := range blockChain {
   773  		receipts := receiptChain[i]
   774  		// Short circuit insertion if shutting down or processing failed
   775  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   776  			return 0, nil
   777  		}
   778  		// Short circuit if the owner header is unknown
   779  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   780  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   781  		}
   782  		// Skip if the entire data is already known
   783  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   784  			stats.ignored++
   785  			continue
   786  		}
   787  		// Compute all the non-consensus fields of the receipts
   788  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   789  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   790  		}
   791  		// Write all the data out into the database
   792  		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
   793  			return i, fmt.Errorf("failed to write block body: %v", err)
   794  		}
   795  		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
   796  			return i, fmt.Errorf("failed to write block receipts: %v", err)
   797  		}
   798  		if err := WriteTxLookupEntries(batch, block); err != nil {
   799  			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
   800  		}
   801  		stats.processed++
   802  
   803  		if batch.ValueSize() >= qctdb.IdealBatchSize {
   804  			if err := batch.Write(); err != nil {
   805  				return 0, err
   806  			}
   807  			bytes += batch.ValueSize()
   808  			batch.Reset()
   809  		}
   810  	}
   811  	if batch.ValueSize() > 0 {
   812  		bytes += batch.ValueSize()
   813  		if err := batch.Write(); err != nil {
   814  			return 0, err
   815  		}
   816  	}
   817  
   818  	// Update the head fast sync block if better
   819  	bc.mu.Lock()
   820  	head := blockChain[len(blockChain)-1]
   821  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   822  		currentFastBlock := bc.CurrentFastBlock()
   823  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   824  			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
   825  				log.Crit("Failed to update head fast block hash", "err", err)
   826  			}
   827  			bc.currentFastBlock.Store(head)
   828  		}
   829  	}
   830  	bc.mu.Unlock()
   831  
   832  	log.Info("Imported new block receipts",
   833  		"count", stats.processed,
   834  		"elapsed", common.PrettyDuration(time.Since(start)),
   835  		"number", head.Number(),
   836  		"hash", head.Hash(),
   837  		"size", common.StorageSize(bytes),
   838  		"ignored", stats.ignored)
   839  	return 0, nil
   840  }
   841  
   842  var lastWrite uint64
   843  
   844  // WriteBlockWithoutState writes only the block and its metadata to the database,
   845  // but does not write any state. This is used to construct competing side forks
   846  // up to the point where they exceed the canonical total difficulty.
   847  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   848  	bc.wg.Add(1)
   849  	defer bc.wg.Done()
   850  
   851  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   852  		return err
   853  	}
   854  	if err := WriteBlock(bc.db, block); err != nil {
   855  		return err
   856  	}
   857  	return nil
   858  }
   859  
   860  // WriteBlockWithState writes the block and all associated state to the database.
   861  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   862  	bc.wg.Add(1)
   863  	defer bc.wg.Done()
   864  
   865  	// Calculate the total difficulty of the block
   866  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   867  	if ptd == nil {
   868  		return NonStatTy, consensus.ErrUnknownAncestor
   869  	}
   870  	// Make sure no inconsistent state is leaked during insertion
   871  	bc.mu.Lock()
   872  	defer bc.mu.Unlock()
   873  
   874  	currentBlock := bc.CurrentBlock()
   875  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   876  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   877  
   878  	// Regardless of the canonical status, write the block itself to the database
   879  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   880  		return NonStatTy, err
   881  	}
   882  	// Write other block data using a batch.
   883  	batch := bc.db.NewBatch()
   884  	if err := WriteBlock(batch, block); err != nil {
   885  		log.Error("WriteBlockWithState writeBlock", "err", err)
   886  		return NonStatTy, err
   887  	}
   888  
   889  	//FIXME:
   890  	/*
   891  	if block.DposContext != nil {
   892  		if _, err := block.DposContext.Commit(); err != nil {
   893  			log.Error("WriteBlockWithState DposContext.Commit", "err", err)
   894  			return NonStatTy, err
   895  		}
   896  	}
   897  	*/
   898  
   899  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   900  	if err != nil {
   901  		return NonStatTy, err
   902  	}
   903  	triedb := bc.stateCache.TrieDB()
   904  
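        	// Rebuild the block's DPoS context from the trie roots referenced in the
        	// header and commit it to the trie database alongside the state trie.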
   905  	dposContext, err := types.NewDposContextFromProto(bc.db, block.Header().DposContext)
   906  	if err != nil {
   907  		return NonStatTy, err
   908  	}
   909  	block.DposContext = dposContext
   910  
   911  	if _, err := block.DposContext.CommitTo(triedb); err != nil {
   912  		log.Error("WriteBlockWithState dposCommitTo", "err", err)
   913  		return NonStatTy, err
   914  	}
   915  
   916  	// If we're running an archive node, always flush
   917  	if bc.cacheConfig.Disabled {
   918  		if err := triedb.Commit(root, false); err != nil {
   919  			return NonStatTy, err
   920  		}
   921  	} else {
   922  		// Full but not archive node, do proper garbage collection
   923  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   924  		bc.triegc.Push(root, -float32(block.NumberU64()))
   925  
   926  		if current := block.NumberU64(); current > triesInMemory {
   927  			// Find the next state trie we need to commit
   928  			header := bc.GetHeaderByNumber(current - triesInMemory)
   929  			chosen := header.Number.Uint64()
   930  
   931  			// Only write to disk if we exceeded our memory allowance *and* also have at
   932  			// least a given number of tries gapped.
   933  			var (
   934  				size  = triedb.Size()
   935  				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   936  			)
   937  			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   938  				// If we're exceeding limits but haven't reached a large enough memory gap,
   939  				// warn the user that the system is becoming unstable.
   940  				if chosen < lastWrite+triesInMemory {
   941  					switch {
   942  					case size >= 2*limit:
   943  						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   944  					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
   945  						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   946  					}
   947  				}
   948  				// If optimum or critical limits reached, write to disk
   949  				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   950  					triedb.Commit(header.Root, true)
   951  					lastWrite = chosen
   952  					bc.gcproc = 0
   953  				}
   954  			}
   955  			// Garbage collect anything below our required write retention
   956  			for !bc.triegc.Empty() {
   957  				root, number := bc.triegc.Pop()
   958  				if uint64(-number) > chosen {
   959  					bc.triegc.Push(root, number)
   960  					break
   961  				}
   962  				triedb.Dereference(root.(common.Hash), common.Hash{})
   963  			}
   964  		}
   965  	}
   966  	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
   967  		return NonStatTy, err
   968  	}
   969  	// If the total difficulty is higher than our known, add it to the canonical chain
   970  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   971  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   972  	reorg := externTd.Cmp(localTd) > 0
   973  	currentBlock = bc.CurrentBlock()
   974  	if !reorg && externTd.Cmp(localTd) == 0 {
   975  		// Split same-difficulty blocks by number, then at random
   976  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
   977  	}
   978  	if reorg {
   979  		// Reorganise the chain if the parent is not the head block
   980  		if block.ParentHash() != currentBlock.Hash() {
   981  			if err := bc.reorg(currentBlock, block); err != nil {
   982  				return NonStatTy, err
   983  			}
   984  		}
   985  		// Write the positional metadata for transaction and receipt lookups
   986  		if err := WriteTxLookupEntries(batch, block); err != nil {
   987  			return NonStatTy, err
   988  		}
   989  		// Write hash preimages
   990  		if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
   991  			return NonStatTy, err
   992  		}
   993  		status = CanonStatTy
   994  	} else {
   995  		status = SideStatTy
   996  	}
   997  	if err := batch.Write(); err != nil {
   998  		return NonStatTy, err
   999  	}
  1000  
  1001  	// Set new head.
  1002  	if status == CanonStatTy {
  1003  		bc.insert(block)
  1004  	}
  1005  	bc.futureBlocks.Remove(block.Hash())
  1006  	return status, nil
  1007  }
  1008  
  1009  // InsertChain attempts to insert the given batch of blocks into the canonical
  1010  // chain or, otherwise, create a fork. If an error is returned it will return
  1011  // the index number of the failing block as well as an error describing what went
  1012  // wrong.
  1013  //
  1014  // After insertion is done, all accumulated events will be fired.
  1015  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1016  	n, events, logs, err := bc.insertChain(chain)
  1017  	bc.PostChainEvents(events, logs)
  1018  	return n, err
  1019  }
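
        // A minimal usage sketch, assuming blocks arrive ordered and linked (e.g. from
        // the downloader); on failure n is the index of the offending block:
        //
        //	if n, err := bc.InsertChain(blocks); err != nil {
        //		log.Error("Chain segment import failed", "index", n, "err", err)
        //	}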
  1020  
  1021  // insertChain will execute the actual chain insertion and event aggregation. The
  1022  // only reason this method exists as a separate one is to make locking cleaner
  1023  // with deferred statements.
  1024  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1025  	// Do a sanity check that the provided chain is actually ordered and linked
  1026  	for i := 1; i < len(chain); i++ {
  1027  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1028  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1029  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1030  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1031  
  1032  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1033  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1034  		}
  1035  	}
  1036  	// Pre-checks passed, start the full block imports
  1037  	bc.wg.Add(1)
  1038  	defer bc.wg.Done()
  1039  
  1040  	bc.chainmu.Lock()
  1041  	defer bc.chainmu.Unlock()
  1042  
  1043  	// A queued approach to delivering events. This is generally
  1044  	// faster than direct delivery and requires much less mutex
  1045  	// acquiring.
  1046  	var (
  1047  		stats         = insertStats{startTime: mclock.Now()}
  1048  		events        = make([]interface{}, 0, len(chain))
  1049  		lastCanon     *types.Block
  1050  		coalescedLogs []*types.Log
  1051  	)
  1052  	// Start the parallel header verifier
  1053  	headers := make([]*types.Header, len(chain))
  1054  	seals := make([]bool, len(chain))
  1055  
  1056  	for i, block := range chain {
  1057  		headers[i] = block.Header()
  1058  		seals[i] = true
  1059  	}
  1060  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1061  	defer close(abort)
  1062  
  1063  	// Iterate over the blocks and insert when the verifier permits
  1064  	for i, block := range chain {
  1065  		// If the chain is terminating, stop processing blocks
  1066  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1067  			log.Debug("Premature abort during blocks processing")
  1068  			break
  1069  		}
  1070  		// If the header is a banned one, straight out abort
  1071  		if BadHashes[block.Hash()] {
  1072  			log.Error("BadHashes", "block.Hash()", block.Hash())
  1073  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1074  			return i, events, coalescedLogs, ErrBlacklistedHash
  1075  		}
  1076  		// Wait for the block's verification to complete
  1077  		bstart := time.Now()
  1078  
  1079  		err := <-results
  1080  		if err == nil {
  1081  			err = bc.Validator().ValidateBody(block)
  1082  		}
  1083  		switch {
  1084  		case err == ErrKnownBlock:
  1085  			// Block and state both already known. However if the current block is below
  1086  			// this number we did a rollback and we should reimport it nonetheless.
  1087  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1088  				stats.ignored++
  1089  				continue
  1090  			}
  1091  
  1092  		case err == consensus.ErrFutureBlock:
  1093  			// Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
  1094  			// is exceeded the block is discarded and processed at a later time if given again.
  1095  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1096  			if block.Time().Cmp(max) > 0 {
  1097  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1098  			}
  1099  			bc.futureBlocks.Add(block.Hash(), block)
  1100  			stats.queued++
  1101  			continue
  1102  
  1103  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1104  			bc.futureBlocks.Add(block.Hash(), block)
  1105  			stats.queued++
  1106  			continue
  1107  
  1108  		case err == consensus.ErrPrunedAncestor:
  1109  			// Block competing with the canonical chain, store in the db, but don't process
  1110  			// until the competitor TD goes above the canonical TD
  1111  			currentBlock := bc.CurrentBlock()
  1112  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1113  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1114  			if localTd.Cmp(externTd) > 0 {
  1115  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1116  					return i, events, coalescedLogs, err
  1117  				}
  1118  				continue
  1119  			}
  1120  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1121  			var winner []*types.Block
  1122  
  1123  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1124  			for !bc.HasState(parent.Root()) {
  1125  				winner = append(winner, parent)
  1126  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1127  			}
  1128  			for j := 0; j < len(winner)/2; j++ {
  1129  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1130  			}
  1131  			// Import all the pruned blocks to make the state available
  1132  			bc.chainmu.Unlock()
  1133  			_, evs, logs, err := bc.insertChain(winner)
  1134  			bc.chainmu.Lock()
  1135  			events, coalescedLogs = evs, logs
  1136  
  1137  			if err != nil {
  1138  				return i, events, coalescedLogs, err
  1139  			}
  1140  
  1141  		case err != nil:
  1142  			log.Error("insertChain", "err", err)
  1143  			bc.reportBlock(block, nil, err)
  1144  			return i, events, coalescedLogs, err
  1145  		}
  1146  		// Create a new statedb using the parent block and report an
  1147  		// error if it fails.
  1148  		var parent *types.Block
  1149  		if i == 0 {
  1150  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1151  		} else {
  1152  			parent = chain[i-1]
  1153  		}
  1154  
  1155  		block.DposContext, err = types.NewDposContextFromProto(bc.db, parent.Header().DposContext)
  1156  		if err != nil {
  1157  			log.Error("NewDposContextFromProto", "err", err)
  1158  			return i, events, coalescedLogs, err
  1159  		}
  1160  
  1161  		state, err := state.New(parent.Root(), bc.stateCache)
  1162  		if err != nil {
  1163  			log.Error("State.New", "err", err)
  1164  			return i, events, coalescedLogs, err
  1165  		}
  1166  		// Process block using the parent state as reference point.
  1167  		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1168  		if err != nil {
  1169  			log.Error("bc.processor.Process", "err", err)
  1170  			bc.reportBlock(block, receipts, err)
  1171  			return i, events, coalescedLogs, err
  1172  		}
  1173  		// Validate the state using the default validator
  1174  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1175  		if err != nil {
  1176  			log.Error("bc.Validator().ValidateState", "err", err)
  1177  			bc.reportBlock(block, receipts, err)
  1178  			return i, events, coalescedLogs, err
  1179  		}
  1180  
  1181  		// Validate the dpos state using the default validator
  1182  		err = bc.Validator().ValidateDposState(block)
  1183  		if err != nil {
  1184  			log.Error("ValidateDposState", "err", err)
  1185  			bc.reportBlock(block, receipts, err)
  1186  			return i, events, coalescedLogs, err
  1187  		}
  1188  		// If the consensus engine is DPoS, additionally verify the block seal
  1189  		dposEngine, isDpos := bc.engine.(*dpos.DPOS)
  1190  		if isDpos {
  1191  			err = dposEngine.VerifySeal(bc, block.Header())
  1192  			if err != nil {
  1193  				log.Error("dposEngine.VerifySeal", "err", err)
  1194  				bc.reportBlock(block, receipts, err)
  1195  				return i, events, coalescedLogs, err
  1196  			}
  1197  		}
  1198  
  1199  		proctime := time.Since(bstart)
  1200  
  1201  		// Write the block to the chain and get the status.
  1202  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1203  		if err != nil {
  1204  			log.Error("bc.WriteBlockWithState", "err", err)
  1205  			return i, events, coalescedLogs, err
  1206  		}
  1207  		switch status {
  1208  		case CanonStatTy:
  1209  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1210  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1211  
  1212  			coalescedLogs = append(coalescedLogs, logs...)
  1213  			blockInsertTimer.UpdateSince(bstart)
  1214  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1215  			lastCanon = block
  1216  
  1217  			// Only count canonical blocks for GC processing time
  1218  			bc.gcproc += proctime
  1219  
  1220  		case SideStatTy:
  1221  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1222  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1223  
  1224  			blockInsertTimer.UpdateSince(bstart)
  1225  			events = append(events, ChainSideEvent{block})
  1226  		}
  1227  		stats.processed++
  1228  		stats.usedGas += usedGas
  1229  		stats.report(chain, i, bc.stateCache.TrieDB().Size())
  1230  	}
  1231  	// Append a single chain head event if we've progressed the chain
  1232  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1233  		events = append(events, ChainHeadEvent{lastCanon})
  1234  	}
  1235  	return 0, events, coalescedLogs, nil
  1236  }
  1237  
  1238  // insertStats tracks and reports on block insertion.
  1239  type insertStats struct {
  1240  	queued, processed, ignored int
  1241  	usedGas                    uint64
  1242  	lastIndex                  int
  1243  	startTime                  mclock.AbsTime
  1244  }
  1245  
  1246  // statsReportLimit is the time limit during import after which we always print
  1247  // out progress. This avoids the user wondering what's going on.
  1248  const statsReportLimit = 8 * time.Second
  1249  
  1250  // report prints statistics if some number of blocks have been processed
  1251  // or more than a few seconds have passed since the last message.
  1252  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1253  	// Fetch the timings for the batch
  1254  	var (
  1255  		now     = mclock.Now()
  1256  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1257  	)
  1258  	// If we're at the last block of the batch or report period reached, log
  1259  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1260  		var (
  1261  			end = chain[index]
  1262  			txs = countTransactions(chain[st.lastIndex : index+1])
  1263  		)
  1264  		context := []interface{}{
  1265  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1266  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1267  			"number", end.Number(), "hash", end.Hash(), "cache", cache, "diff", end.Difficulty(),
  1268  		}
  1269  		if st.queued > 0 {
  1270  			context = append(context, []interface{}{"queued", st.queued}...)
  1271  		}
  1272  		if st.ignored > 0 {
  1273  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1274  		}
  1275  		log.Info("Imported new chain segment", context...)
  1276  
  1277  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1278  	}
  1279  }
  1280  
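        // countTransactions returns the total number of transactions contained in the
        // given chain segment.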
  1281  func countTransactions(chain []*types.Block) (c int) {
  1282  	for _, b := range chain {
  1283  		c += len(b.Transactions())
  1284  	}
  1285  	return c
  1286  }
  1287  
  1288  // reorg takes two blocks, an old chain and a new chain, and reconstructs the
  1289  // blocks so that they become part of the new canonical chain. It also accumulates
  1290  // potentially missing transactions and posts an event about them.
  1291  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1292  	var (
  1293  		newChain    types.Blocks
  1294  		oldChain    types.Blocks
  1295  		commonBlock *types.Block
  1296  		deletedTxs  types.Transactions
  1297  		deletedLogs []*types.Log
  1298  		// collectLogs collects the logs that were generated during the
  1299  		// processing of the block that corresponds with the given hash.
  1300  		// These logs are later announced as deleted.
  1301  		collectLogs = func(h common.Hash) {
  1302  			// Coalesce logs and set 'Removed'.
  1303  			receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
  1304  			for _, receipt := range receipts {
  1305  				for _, log := range receipt.Logs {
  1306  					del := *log
  1307  					del.Removed = true
  1308  					deletedLogs = append(deletedLogs, &del)
  1309  				}
  1310  			}
  1311  		}
  1312  	)
  1313  
  1314  	// First reduce whichever chain is longer down to the height of the other
  1315  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1316  		// reduce old chain
  1317  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1318  			oldChain = append(oldChain, oldBlock)
  1319  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1320  
  1321  			collectLogs(oldBlock.Hash())
  1322  		}
  1323  	} else {
  1324  		// Reduce the new chain, collecting its blocks for insertion later on
  1325  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1326  			newChain = append(newChain, newBlock)
  1327  		}
  1328  	}
  1329  	if oldBlock == nil {
  1330  		return fmt.Errorf("Invalid old chain")
  1331  	}
  1332  	if newBlock == nil {
  1333  		return fmt.Errorf("Invalid new chain")
  1334  	}
  1335  
  1336  	for {
  1337  		if oldBlock.Hash() == newBlock.Hash() {
  1338  			commonBlock = oldBlock
  1339  			break
  1340  		}
  1341  
  1342  		oldChain = append(oldChain, oldBlock)
  1343  		newChain = append(newChain, newBlock)
  1344  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1345  		collectLogs(oldBlock.Hash())
  1346  
  1347  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1348  		if oldBlock == nil {
  1349  			return fmt.Errorf("Invalid old chain")
  1350  		}
  1351  		if newBlock == nil {
  1352  			return fmt.Errorf("Invalid new chain")
  1353  		}
  1354  	}
  1355  	// Ensure the user sees large reorgs
  1356  	if len(oldChain) > 0 && len(newChain) > 0 {
  1357  		logFn := log.Debug
  1358  		if len(oldChain) > 63 {
  1359  			logFn = log.Warn
  1360  		}
  1361  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1362  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1363  	} else {
  1364  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1365  	}
  1366  	// Insert the new chain, taking care of the proper incremental order
  1367  	var addedTxs types.Transactions
  1368  	for i := len(newChain) - 1; i >= 0; i-- {
  1369  		// insert the block in the canonical way, re-writing history
  1370  		bc.insert(newChain[i])
  1371  		// write lookup entries for hash based transaction/receipt searches
  1372  		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
  1373  			return err
  1374  		}
  1375  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1376  	}
  1377  	// calculate the difference between deleted and added transactions
  1378  	diff := types.TxDifference(deletedTxs, addedTxs)
  1379  	// When transactions get deleted from the database, the lookup entries that
  1380  	// were created for them in the old fork must also be deleted
  1381  	for _, tx := range diff {
  1382  		DeleteTxLookupEntry(bc.db, tx.Hash())
  1383  	}
  1384  	if len(deletedLogs) > 0 {
  1385  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1386  	}
  1387  	if len(oldChain) > 0 {
  1388  		go func() {
  1389  			for _, block := range oldChain {
  1390  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1391  			}
  1392  		}()
  1393  	}
  1394  
  1395  	return nil
  1396  }
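
// Minimal sketch of the ancestor search performed by reorg above: both cursors
// are first levelled to the same height, then walked back in lockstep until
// their hashes match. getBlock stands in for bc.GetBlock and is assumed to
// return nil for unknown blocks; the helper itself is illustrative only.
func findCommonAncestor(oldBlock, newBlock *types.Block, getBlock func(common.Hash, uint64) *types.Block) *types.Block {
	// Level the higher cursor down to the lower one's height.
	for oldBlock != nil && newBlock != nil && oldBlock.NumberU64() > newBlock.NumberU64() {
		oldBlock = getBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
	}
	for oldBlock != nil && newBlock != nil && newBlock.NumberU64() > oldBlock.NumberU64() {
		newBlock = getBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
	}
	// Walk both chains back in lockstep until they converge.
	for oldBlock != nil && newBlock != nil {
		if oldBlock.Hash() == newBlock.Hash() {
			return oldBlock
		}
		oldBlock = getBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		newBlock = getBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
	}
	return nil // the chains never intersect
}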
  1397  
  1398  // PostChainEvents iterates over the events generated by a chain insertion and
  1399  // posts them into the event feed.
  1400  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1401  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1402  	// post event logs for further processing
  1403  	if logs != nil {
  1404  		bc.logsFeed.Send(logs)
  1405  	}
  1406  	for _, event := range events {
  1407  		switch ev := event.(type) {
  1408  		case ChainEvent:
  1409  			bc.chainFeed.Send(ev)
  1410  
  1411  		case ChainHeadEvent:
  1412  			bc.chainHeadFeed.Send(ev)
  1413  
  1414  		case ChainSideEvent:
  1415  			bc.chainSideFeed.Send(ev)
  1416  		}
  1417  	}
  1418  }
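
// Minimal sketch of how a caller hands the results of a block insertion to
// PostChainEvents. The events mirror the ones accumulated in the insertion
// loop above; the function and variable names are illustrative only.
func examplePostInsertionEvents(bc *BlockChain, block *types.Block, logs []*types.Log) {
	events := []interface{}{
		ChainEvent{block, block.Hash(), logs}, // per-block event
		ChainHeadEvent{block},                 // new canonical head
	}
	bc.PostChainEvents(events, logs)
}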
  1419  
  1420  func (bc *BlockChain) update() {
  1421  	futureTimer := time.NewTicker(5 * time.Second)
  1422  	defer futureTimer.Stop()
  1423  	for {
  1424  		select {
  1425  		case <-futureTimer.C:
  1426  			bc.procFutureBlocks()
  1427  		case <-bc.quit:
  1428  			return
  1429  		}
  1430  	}
  1431  }
  1432  
  1433  // BadBlockArgs represents the entries in the list returned when bad blocks are queried.
  1434  type BadBlockArgs struct {
  1435  	Hash   common.Hash   `json:"hash"`
  1436  	Header *types.Header `json:"header"`
  1437  }
  1438  
  1439  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1440  func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
  1441  	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
  1442  	for _, hash := range bc.badBlocks.Keys() {
  1443  		if hdr, exist := bc.badBlocks.Peek(hash); exist {
  1444  			header := hdr.(*types.Header)
  1445  			headers = append(headers, BadBlockArgs{header.Hash(), header})
  1446  		}
  1447  	}
  1448  	return headers, nil
  1449  }
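
// Minimal sketch of consuming the list, e.g. from a debugging helper; the
// helper name is illustrative only.
func exampleDumpBadBlocks(bc *BlockChain) {
	blocks, _ := bc.BadBlocks() // the error is currently always nil
	for _, b := range blocks {
		log.Warn("Bad block seen", "number", b.Header.Number, "hash", b.Hash)
	}
}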
  1450  
  1451  // HasBadBlock returns whether the block with the hash is a bad block
  1452  func (bc *BlockChain) HasBadBlock(hash common.Hash) bool {
  1453  	return bc.badBlocks.Contains(hash)
  1454  }
  1455  
  1456  // addBadBlock adds a bad block to the bad-block LRU cache
  1457  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1458  	bc.badBlocks.Add(block.Header().Hash(), block.Header())
  1459  }
  1460  
  1461  // reportBlock logs a bad block error.
  1462  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1463  	bc.addBadBlock(block)
  1464  
  1465  	var receiptString string
  1466  	for _, receipt := range receipts {
  1467  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1468  	}
  1469  	log.Error(fmt.Sprintf(`
  1470  ########## BAD BLOCK #########
  1471  Chain config: %v
  1472  
  1473  Number: %v
  1474  Hash: 0x%x
  1475  %v
  1476  
  1477  Error: %v
  1478  ##############################
  1479  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1480  }
  1481  
  1482  // InsertHeaderChain attempts to insert the given header chain into the local
  1483  // chain, possibly creating a reorg. If an error is returned, it will return the
  1484  // index number of the failing header as well as an error describing what went wrong.
  1485  //
  1486  // The checkFreq parameter can be used to fine tune whether nonce verification
  1487  // should be done or not. The reason behind the optional check is that some of
  1488  // the header retrieval mechanisms already need to verify nonces, and nonces can
  1489  // be verified sparsely, without needing to check each one.
  1490  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1491  	start := time.Now()
  1492  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1493  		return i, err
  1494  	}
  1495  
  1496  	// Make sure only one thread manipulates the chain at once
  1497  	bc.chainmu.Lock()
  1498  	defer bc.chainmu.Unlock()
  1499  
  1500  	bc.wg.Add(1)
  1501  	defer bc.wg.Done()
  1502  
  1503  	whFunc := func(header *types.Header) error {
  1504  		bc.mu.Lock()
  1505  		defer bc.mu.Unlock()
  1506  
  1507  		_, err := bc.hc.WriteHeader(header)
  1508  		return err
  1509  	}
  1510  
  1511  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1512  }
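
// Minimal sketch of a header-only import as a light-sync style caller might
// perform it. A checkFreq of 100 asks for sparse seal verification, roughly
// one header in every hundred; the helper name is illustrative only.
func exampleImportHeaders(bc *BlockChain, headers []*types.Header) error {
	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
		log.Warn("Header import aborted", "failed", n, "err", err)
		return err
	}
	return nil
}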
  1513  
  1514  // writeHeader writes a header into the local chain, given that its parent is
  1515  // already known. If the total difficulty of the newly inserted header becomes
  1516  // greater than the current known TD, the canonical chain is re-routed.
  1517  //
  1518  // Note: This method is not concurrent-safe with inserting blocks simultaneously
  1519  // into the chain, as side effects caused by reorganisations cannot be emulated
  1520  // without the real blocks. Hence, writing headers directly should only be done
  1521  // in two scenarios: pure-header mode of operation (light clients), or properly
  1522  // separated header/block phases (non-archive clients).
  1523  func (bc *BlockChain) writeHeader(header *types.Header) error {
  1524  	bc.wg.Add(1)
  1525  	defer bc.wg.Done()
  1526  
  1527  	bc.mu.Lock()
  1528  	defer bc.mu.Unlock()
  1529  
  1530  	_, err := bc.hc.WriteHeader(header)
  1531  	return err
  1532  }
  1533  
  1534  // CurrentHeader retrieves the current head header of the canonical chain. The
  1535  // header is retrieved from the HeaderChain's internal cache.
  1536  func (bc *BlockChain) CurrentHeader() *types.Header {
  1537  	return bc.hc.CurrentHeader()
  1538  }
  1539  
  1540  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1541  // database by hash and number, caching it if found.
  1542  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1543  	return bc.hc.GetTd(hash, number)
  1544  }
  1545  
  1546  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1547  // database by hash, caching it if found.
  1548  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1549  	return bc.hc.GetTdByHash(hash)
  1550  }
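
// Total difficulty can be compared across competing blocks; a minimal
// illustrative helper, assuming both blocks are already known to the chain:
func exampleHeavier(bc *BlockChain, a, b *types.Block) bool {
	tdA, tdB := bc.GetTd(a.Hash(), a.NumberU64()), bc.GetTd(b.Hash(), b.NumberU64())
	if tdA == nil || tdB == nil {
		return false // unknown block, no comparison possible
	}
	return tdA.Cmp(tdB) > 0
}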
  1551  
  1552  // GetHeader retrieves a block header from the database by hash and number,
  1553  // caching it if found.
  1554  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1555  	return bc.hc.GetHeader(hash, number)
  1556  }
  1557  
  1558  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1559  // found.
  1560  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1561  	return bc.hc.GetHeaderByHash(hash)
  1562  }
  1563  
  1564  // HasHeader checks if a block header is present in the database or not, caching
  1565  // it if present.
  1566  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1567  	return bc.hc.HasHeader(hash, number)
  1568  }
  1569  
  1570  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1571  // hash, fetching towards the genesis block.
  1572  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1573  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1574  }
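
// Minimal sketch: collect up to 32 ancestor hashes of the current head,
// walking towards the genesis block. The helper name is illustrative only.
func exampleAncestorHashes(bc *BlockChain) []common.Hash {
	return bc.GetBlockHashesFromHash(bc.CurrentHeader().Hash(), 32)
}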
  1575  
  1576  // GetHeaderByNumber retrieves a block header from the database by number,
  1577  // caching it (associated with its hash) if found.
  1578  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1579  	return bc.hc.GetHeaderByNumber(number)
  1580  }
  1581  
  1582  // Config retrieves the blockchain's chain configuration.
  1583  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1584  
  1585  // Engine retrieves the blockchain's consensus engine.
  1586  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1587  
  1588  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1589  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1590  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1591  }
  1592  
  1593  // SubscribeChainEvent registers a subscription of ChainEvent.
  1594  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1595  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1596  }
  1597  
  1598  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1599  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1600  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1601  }
  1602  
  1603  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1604  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1605  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1606  }
  1607  
  1608  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1609  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1610  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1611  }
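
// Minimal sketch of driving the subscriptions above: a consumer follows the
// canonical head until the subscription ends. The channel buffer size is an
// illustrative choice, and the Block field name on ChainHeadEvent is assumed
// to mirror the one on ChainSideEvent used earlier in this file.
func exampleFollowHead(bc *BlockChain) {
	heads := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(heads)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-heads:
			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case err := <-sub.Err():
			// The error channel yields once the feed scope shuts down.
			if err != nil {
				log.Warn("Head subscription ended", "err", err)
			}
			return
		}
	}
}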