gitlab.com/aquachain/aquachain@v1.17.16-rc3.0.20221018032414-e3ddf1e1c055/core/blockchain.go (about)

     1  // Copyright 2018 The aquachain Authors
     2  // This file is part of the aquachain library.
     3  //
     4  // The aquachain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The aquachain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Aquachain consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"runtime"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	lru "github.com/hashicorp/golang-lru"
    32  	"gitlab.com/aquachain/aquachain/aqua/event"
    33  	"gitlab.com/aquachain/aquachain/aquadb"
    34  	"gitlab.com/aquachain/aquachain/common"
    35  	"gitlab.com/aquachain/aquachain/common/log"
    36  	"gitlab.com/aquachain/aquachain/common/mclock"
    37  	"gitlab.com/aquachain/aquachain/common/metrics"
    38  	"gitlab.com/aquachain/aquachain/common/prque"
    39  	"gitlab.com/aquachain/aquachain/consensus"
    40  	"gitlab.com/aquachain/aquachain/core/state"
    41  	"gitlab.com/aquachain/aquachain/core/types"
    42  	"gitlab.com/aquachain/aquachain/core/vm"
    43  	"gitlab.com/aquachain/aquachain/crypto"
    44  	"gitlab.com/aquachain/aquachain/params"
    45  	"gitlab.com/aquachain/aquachain/rlp"
    46  	"gitlab.com/aquachain/aquachain/trie"
    47  )
    48  
    49  var (
    50  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    51  
    52  	ErrNoGenesis = errors.New("Genesis not found in chain")
    53  )
    54  
    55  const (
    56  	bodyCacheLimit      = 256
    57  	blockCacheLimit     = 256
    58  	maxFutureBlocks     = 256
    59  	maxTimeFutureBlocks = 30
    60  	badBlockLimit       = 10
    61  	triesInMemory       = 128
    62  
    63  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    64  	BlockChainVersion = 1
    65  )
    66  
    67  // CacheConfig contains the configuration values for the trie caching/pruning
    68  // that's resident in a blockchain.
    69  type CacheConfig struct {
    70  	Disabled      bool          // Whether to disable trie write caching (archive node)
    71  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    72  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    73  }
    74  
    75  // BlockChain represents the canonical chain given a database with a genesis
    76  // block. The Blockchain manages chain imports, reverts, chain reorganisations.
    77  //
    78  // Importing blocks in to the block chain happens according to the set of rules
    79  // defined by the two stage Validator. Processing of blocks is done using the
    80  // Processor which processes the included transaction. The validation of the state
    81  // is done in the second part of the Validator. Failing results in aborting of
    82  // the import.
    83  //
    84  // The BlockChain also helps in returning blocks from **any** chain included
    85  // in the database as well as blocks that represents the canonical chain. It's
    86  // important to note that GetBlock can return any block and does not need to be
    87  // included in the canonical one where as GetBlockByNumber always represents the
    88  // canonical chain.
    89  type BlockChain struct {
    90  	chainConfig *params.ChainConfig // Chain & network configuration
    91  	cacheConfig *CacheConfig        // Cache configuration for pruning
    92  
    93  	db     aquadb.Database // Low level persistent database to store final content in
    94  	triegc *prque.Prque    // Priority queue mapping block numbers to tries to gc
    95  	gcproc time.Duration   // Accumulates canonical block processing for trie dumping
    96  
    97  	hc            *HeaderChain
    98  	rmLogsFeed    event.Feed
    99  	chainFeed     event.Feed
   100  	chainSideFeed event.Feed
   101  	chainHeadFeed event.Feed
   102  	logsFeed      event.Feed
   103  	scope         event.SubscriptionScope
   104  	genesisBlock  *types.Block
   105  
   106  	mu      sync.RWMutex // global mutex for locking chain operations
   107  	chainmu sync.RWMutex // blockchain insertion lock
   108  	procmu  sync.RWMutex // block processor lock
   109  
   110  	currentBlock     atomic.Value // Current head of the block chain
   111  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   112  
   113  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   114  	bodyCache    *lru.Cache     // Cache for the most recent block bodies
   115  	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   116  	blockCache   *lru.Cache     // Cache for the most recent entire blocks
   117  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   118  
   119  	quit    chan struct{} // blockchain quit channel
   120  	running int32         // running must be called atomically
   121  	// procInterrupt must be atomically called
   122  	procInterrupt int32          // interrupt signaler for block processing
   123  	wg            sync.WaitGroup // chain processing wait group for shutting down
   124  
   125  	engine    consensus.Engine
   126  	processor Processor // block processor interface
   127  	validator Validator // block and state validator interface
   128  	vmConfig  vm.Config
   129  
   130  	badBlocks *lru.Cache // Bad block cache
   131  }
   132  
   133  // NewBlockChain returns a fully initialised block chain using information
   134  // available in the database. It initialises the default Aquachain Validator and
   135  // Processor.
   136  func NewBlockChain(db aquadb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
   137  	if cacheConfig == nil {
   138  		cacheConfig = &CacheConfig{
   139  			TrieNodeLimit: 256 * 1024 * 1024,
   140  			TrieTimeLimit: 5 * time.Minute,
   141  		}
   142  	}
   143  	if chainConfig == nil {
   144  		return nil, fmt.Errorf("nil config")
   145  	}
   146  	bodyCache, _ := lru.New(bodyCacheLimit)
   147  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   148  	blockCache, _ := lru.New(blockCacheLimit)
   149  	futureBlocks, _ := lru.New(maxFutureBlocks)
   150  	badBlocks, _ := lru.New(badBlockLimit)
   151  
   152  	bc := &BlockChain{
   153  		chainConfig:  chainConfig,
   154  		cacheConfig:  cacheConfig,
   155  		db:           db,
   156  		triegc:       prque.New(nil),
   157  		stateCache:   state.NewDatabase(db),
   158  		quit:         make(chan struct{}),
   159  		bodyCache:    bodyCache,
   160  		bodyRLPCache: bodyRLPCache,
   161  		blockCache:   blockCache,
   162  		futureBlocks: futureBlocks,
   163  		engine:       engine,
   164  		vmConfig:     vmConfig,
   165  		badBlocks:    badBlocks,
   166  	}
   167  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   168  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   169  
   170  	var err error
   171  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   172  	if err != nil {
   173  		return nil, err
   174  	}
   175  	bc.genesisBlock = bc.GetBlockByNumber(0)
   176  	if bc.genesisBlock == nil {
   177  		return nil, ErrNoGenesis
   178  	}
   179  	if err := bc.loadLastState(); err != nil {
   180  		return nil, err
   181  	}
   182  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   183  	for hash := range BadHashes {
   184  		if header := bc.GetHeaderByHash(hash); header != nil {
   185  			// get the canonical block corresponding to the offending header's number
   186  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   187  			// make sure the headerByNumber (if present) is in our current canonical chain
   188  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   189  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   190  				bc.SetHead(header.Number.Uint64() - 1)
   191  				log.Error("Chain rewind was successful, resuming normal operation")
   192  			}
   193  		}
   194  	}
   195  	// Take ownership of this particular state
   196  	go bc.update()
   197  	return bc, nil
   198  }
   199  
   200  func (bc *BlockChain) getProcInterrupt() bool {
   201  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   202  }
   203  
   204  // loadLastState loads the last known chain state from the database. This method
   205  // assumes that the chain manager mutex is held.
   206  func (bc *BlockChain) loadLastState() error {
   207  	// Restore the last known head block
   208  	head := GetHeadBlockHash(bc.db)
   209  	if head == (common.Hash{}) {
   210  		// Corrupt or empty database, init from scratch
   211  		log.Warn("Empty database, resetting chain")
   212  		return bc.Reset()
   213  	}
   214  	// Make sure the entire head block is available
   215  	currentBlock := bc.GetBlockByHash(head)
   216  	if currentBlock == nil {
   217  		// Corrupt or empty database, init from scratch
   218  		log.Warn("Head block missing, resetting chain", "hash", head)
   219  		return bc.Reset()
   220  	}
   221  	// Make sure the state associated with the block is available
   222  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   223  		// Dangling block without a state associated, init from scratch
   224  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   225  		if err := bc.repair(&currentBlock); err != nil {
   226  			return err
   227  		}
   228  	}
   229  	// Everything seems to be fine, set as the head block
   230  	bc.currentBlock.Store(currentBlock)
   231  
   232  	// Restore the last known head header
   233  	currentHeader := currentBlock.Header()
   234  	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
   235  		if header := bc.GetHeaderByHash(head); header != nil {
   236  			currentHeader = header
   237  		}
   238  	}
   239  	bc.hc.SetCurrentHeader(currentHeader)
   240  
   241  	// Restore the last known head fast block
   242  	bc.currentFastBlock.Store(currentBlock)
   243  	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   244  		if block := bc.GetBlockByHash(head); block != nil {
   245  			bc.currentFastBlock.Store(block)
   246  		}
   247  	}
   248  
   249  	// Issue a status log for the user
   250  	currentFastBlock := bc.CurrentFastBlock()
   251  
   252  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   253  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   254  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   255  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   256  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   257  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   258  
   259  	return nil
   260  }
   261  
   262  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   263  // above the new head will be deleted and the new one set. In the case of blocks
   264  // though, the head may be further rewound if block bodies are missing (non-archive
   265  // nodes after a fast sync).
   266  func (bc *BlockChain) SetHead(head uint64) error {
   267  	log.Warn("Rewinding blockchain", "target", head)
   268  
   269  	bc.mu.Lock()
   270  	defer bc.mu.Unlock()
   271  
   272  	// Rewind the header chain, deleting all block bodies until then
   273  	delFn := func(hash common.Hash, num uint64) {
   274  		DeleteBody(bc.db, hash, num)
   275  	}
   276  	bc.hc.SetHead(head, delFn)
   277  	currentHeader := bc.hc.CurrentHeader()
   278  
   279  	// Clear out any stale content from the caches
   280  	bc.bodyCache.Purge()
   281  	bc.bodyRLPCache.Purge()
   282  	bc.blockCache.Purge()
   283  	bc.futureBlocks.Purge()
   284  
   285  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   286  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   287  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   288  	}
   289  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   290  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   291  			// Rewound state missing, rolled back to before pivot, reset to genesis
   292  			bc.currentBlock.Store(bc.genesisBlock)
   293  		}
   294  	}
   295  	// Rewind the fast block in a simpleton way to the target head
   296  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   297  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   298  	}
   299  	// If either blocks reached nil, reset to the genesis state
   300  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   301  		bc.currentBlock.Store(bc.genesisBlock)
   302  	}
   303  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   304  		bc.currentFastBlock.Store(bc.genesisBlock)
   305  	}
   306  	currentBlock := bc.CurrentBlock()
   307  	currentFastBlock := bc.CurrentFastBlock()
   308  	if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
   309  		log.Crit("Failed to reset head full block", "err", err)
   310  	}
   311  	if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
   312  		log.Crit("Failed to reset head fast block", "err", err)
   313  	}
   314  	return bc.loadLastState()
   315  }
   316  
   317  // FastSyncCommitHead sets the current head block to the one defined by the hash
   318  // irrelevant what the chain contents were prior.
   319  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   320  	// Make sure that both the block as well at its state trie exists
   321  	block := bc.GetBlockByHash(hash)
   322  	if block == nil {
   323  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   324  	}
   325  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   326  		return err
   327  	}
   328  	// If all checks out, manually set the head block
   329  	bc.mu.Lock()
   330  	bc.currentBlock.Store(block)
   331  	bc.mu.Unlock()
   332  
   333  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   334  	return nil
   335  }
   336  
   337  // GasLimit returns the gas limit of the current HEAD block.
   338  func (bc *BlockChain) GasLimit() uint64 {
   339  	return bc.CurrentBlock().GasLimit()
   340  }
   341  
   342  // CurrentBlock retrieves the current head block of the canonical chain. The
   343  // block is retrieved from the blockchain's internal cache.
   344  func (bc *BlockChain) CurrentBlock() *types.Block {
   345  	b := bc.currentBlock.Load().(*types.Block)
   346  	//b.SetVersion(bc.Config().GetBlockVersion(b.Number()))
   347  	return b
   348  }
   349  
   350  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   351  // chain. The block is retrieved from the blockchain's internal cache.
   352  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   353  	return bc.currentFastBlock.Load().(*types.Block)
   354  }
   355  
   356  // SetProcessor sets the processor required for making state modifications.
   357  func (bc *BlockChain) SetProcessor(processor Processor) {
   358  	bc.procmu.Lock()
   359  	defer bc.procmu.Unlock()
   360  	bc.processor = processor
   361  }
   362  
   363  // SetValidator sets the validator which is used to validate incoming blocks.
   364  func (bc *BlockChain) SetValidator(validator Validator) {
   365  	bc.procmu.Lock()
   366  	defer bc.procmu.Unlock()
   367  	bc.validator = validator
   368  }
   369  
   370  // Validator returns the current validator.
   371  func (bc *BlockChain) Validator() Validator {
   372  	bc.procmu.RLock()
   373  	defer bc.procmu.RUnlock()
   374  	return bc.validator
   375  }
   376  
   377  // Processor returns the current processor.
   378  func (bc *BlockChain) Processor() Processor {
   379  	bc.procmu.RLock()
   380  	defer bc.procmu.RUnlock()
   381  	return bc.processor
   382  }
   383  
   384  // State returns a new mutable state based on the current HEAD block.
   385  func (bc *BlockChain) State() (*state.StateDB, error) {
   386  	return bc.StateAt(bc.CurrentBlock().Root())
   387  }
   388  
   389  // StateAt returns a new mutable state based on a particular point in time.
   390  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   391  	return state.New(root, bc.stateCache)
   392  }
   393  
   394  // Reset purges the entire blockchain, restoring it to its genesis state.
   395  func (bc *BlockChain) Reset() error {
   396  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   397  }
   398  
   399  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   400  // specified genesis state.
   401  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   402  	// Dump the entire block chain and purge the caches
   403  	if err := bc.SetHead(0); err != nil {
   404  		return err
   405  	}
   406  	bc.mu.Lock()
   407  	defer bc.mu.Unlock()
   408  
   409  	// Prepare the genesis block and reinitialise the chain
   410  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   411  		log.Crit("Failed to write genesis block TD", "err", err)
   412  	}
   413  	if err := WriteBlock(bc.db, genesis); err != nil {
   414  		log.Crit("Failed to write genesis block", "err", err)
   415  	}
   416  	bc.genesisBlock = genesis
   417  	bc.insert(bc.genesisBlock)
   418  	bc.currentBlock.Store(bc.genesisBlock)
   419  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   420  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   421  	bc.currentFastBlock.Store(bc.genesisBlock)
   422  
   423  	return nil
   424  }
   425  
   426  // repair tries to repair the current blockchain by rolling back the current block
   427  // until one with associated state is found. This is needed to fix incomplete db
   428  // writes caused either by crashes/power outages, or simply non-committed tries.
   429  //
   430  // This method only rolls back the current block. The current header and current
   431  // fast block are left intact.
   432  func (bc *BlockChain) repair(head **types.Block) error {
   433  	for {
   434  		// Abort if we've rewound to a head block that does have associated state
   435  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   436  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   437  			return nil
   438  		}
   439  		// Otherwise rewind one block and recheck state availability there
   440  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   441  	}
   442  }
   443  
   444  // Export writes the active chain to the given writer.
   445  func (bc *BlockChain) Export(w io.Writer) error {
   446  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   447  }
   448  
   449  // ExportN writes a subset of the active chain to the given writer.
   450  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   451  	bc.mu.RLock()
   452  	defer bc.mu.RUnlock()
   453  
   454  	if first > last {
   455  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   456  	}
   457  	log.Info("Exporting batch of blocks", "count", last-first+1)
   458  
   459  	for nr := first; nr <= last; nr++ {
   460  		block := bc.GetBlockByNumber(nr)
   461  		if block == nil {
   462  			return fmt.Errorf("export failed on #%d: not found", nr)
   463  		}
   464  
   465  		if err := block.EncodeRLP(w); err != nil {
   466  			return err
   467  		}
   468  	}
   469  
   470  	return nil
   471  }
   472  
   473  // insert injects a new head block into the current block chain. This method
   474  // assumes that the block is indeed a true head. It will also reset the head
   475  // header and the head fast sync block to this very same block if they are older
   476  // or if they are on a different side chain.
   477  //
   478  // Note, this function assumes that the `mu` mutex is held!
   479  func (bc *BlockChain) insert(block *types.Block) {
   480  	// If the block is on a side chain or an unknown one, force other heads onto it too
   481  	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   482  
   483  	// Add the block to the canonical chain number scheme and mark as the head
   484  	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
   485  		log.Crit("Failed to insert block number", "err", err)
   486  	}
   487  	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
   488  		log.Crit("Failed to insert head block hash", "err", err)
   489  	}
   490  	bc.currentBlock.Store(block)
   491  
   492  	// If the block is better than our head or is on a different chain, force update heads
   493  	if updateHeads {
   494  		bc.hc.SetCurrentHeader(block.Header())
   495  
   496  		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
   497  			log.Crit("Failed to insert head fast block hash", "err", err)
   498  		}
   499  		bc.currentFastBlock.Store(block)
   500  	}
   501  }
   502  
   503  // Genesis retrieves the chain's genesis block.
   504  func (bc *BlockChain) Genesis() *types.Block {
   505  	return bc.genesisBlock
   506  }
   507  
   508  // GetBlockVersion returns the version byte for the given height
   509  func (bc *BlockChain) GetBlockVersion(height *big.Int) params.HeaderVersion {
   510  	return bc.Config().GetBlockVersion(height)
   511  }
   512  
   513  // GetBody retrieves a block body (transactions and uncles) from the database by
   514  // hash, caching it if found.
   515  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   516  	// Short circuit if the body's already in the cache, retrieve otherwise
   517  	if cached, ok := bc.bodyCache.Get(hash); ok {
   518  		body := cached.(*types.Body)
   519  		return body
   520  	}
   521  	body := GetBodyNoVersion(bc.db, hash, bc.hc.GetBlockNumber(hash))
   522  	if body == nil {
   523  		return nil
   524  	}
   525  
   526  	for i := range body.Uncles {
   527  		body.Uncles[i].Version = bc.GetBlockVersion(body.Uncles[0].Number) // only one version
   528  	}
   529  	// Cache the found body for next time and return
   530  	bc.bodyCache.Add(hash, body)
   531  	return body
   532  }
   533  
   534  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   535  // caching it if found.
   536  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   537  	// Short circuit if the body's already in the cache, retrieve otherwise
   538  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   539  		return cached.(rlp.RawValue)
   540  	}
   541  	body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
   542  	if len(body) == 0 {
   543  		return nil
   544  	}
   545  	// Cache the found body for next time and return
   546  	bc.bodyRLPCache.Add(hash, body)
   547  	return body
   548  }
   549  
   550  // HasBlock checks if a block is fully present in the database or not.
   551  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   552  	if bc.blockCache.Contains(hash) {
   553  		return true
   554  	}
   555  	ok, _ := bc.db.Has(blockBodyKey(hash, number))
   556  	return ok
   557  }
   558  
   559  // HasState checks if state trie is fully present in the database or not.
   560  func (bc *BlockChain) HasState(hash common.Hash) bool {
   561  	_, err := bc.stateCache.OpenTrie(hash)
   562  	return err == nil
   563  }
   564  
   565  // HasBlockAndState checks if a block and associated state trie is fully present
   566  // in the database or not, caching it if present.
   567  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   568  	// Check first that the block itself is known
   569  	block := bc.GetBlock(hash, number)
   570  	if block == nil {
   571  		return false
   572  	}
   573  	// block.SetVersion(bc.GetBlockVersion(block.Number()))
   574  	return bc.HasState(block.Root())
   575  }
   576  
   577  // GetBlock retrieves a block from the database by hash and number,
   578  // caching it if found, adding the correct version
   579  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   580  	// Short circuit if the block's already in the cache, retrieve otherwise
   581  	if block, ok := bc.blockCache.Get(hash); ok {
   582  		block := block.(*types.Block)
   583  		if block.Version() == 0 {
   584  			block.SetVersion(bc.Config().GetBlockVersion(block.Number()))
   585  		}
   586  		return block
   587  	}
   588  	block := GetBlockNoVersion(bc.db, hash, number)
   589  	if block == nil {
   590  		return nil
   591  	}
   592  	hashv := block.SetVersion(bc.Config().GetBlockVersion(block.Number()))
   593  	// Cache the found block for next time and return
   594  	bc.blockCache.Add(hashv, block)
   595  	return block
   596  }
   597  
   598  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   599  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   600  	return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
   601  }
   602  
   603  // GetBlockByNumber retrieves a block from the database by number, caching it
   604  // (associated with its hash) if found.
   605  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   606  	hash := GetCanonicalHash(bc.db, number)
   607  	if hash == (common.Hash{}) {
   608  		return nil
   609  	}
   610  	return bc.GetBlock(hash, number)
   611  }
   612  
   613  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   614  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   615  	return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash))
   616  }
   617  
   618  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   619  // [deprecated by aqua/62]
   620  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   621  	number := bc.hc.GetBlockNumber(hash)
   622  	for i := 0; i < n; i++ {
   623  		block := bc.GetBlock(hash, number)
   624  		if block == nil {
   625  			break
   626  		}
   627  		blocks = append(blocks, block)
   628  		hash = block.ParentHash()
   629  		number--
   630  	}
   631  	return
   632  }
   633  
   634  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   635  // a specific distance is reached.
   636  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   637  	uncles := []*types.Header{}
   638  	for i := 0; block != nil && i < length; i++ {
   639  		uncles = append(uncles, block.Uncles()...)
   640  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   641  	}
   642  	return uncles
   643  }
   644  
   645  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   646  // either from ephemeral in-memory cache, or from persistent storage.
   647  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   648  	return bc.stateCache.TrieDB().Node(hash)
   649  }
   650  
   651  // Stop stops the blockchain service. If any imports are currently in progress
   652  // it will abort them using the procInterrupt.
   653  func (bc *BlockChain) Stop() {
   654  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   655  		return
   656  	}
   657  	// Unsubscribe all subscriptions registered from blockchain
   658  	bc.scope.Close()
   659  	close(bc.quit)
   660  	atomic.StoreInt32(&bc.procInterrupt, 1)
   661  
   662  	bc.wg.Wait()
   663  
   664  	// Ensure the state of a recent block is also stored to disk before exiting.
   665  	// We're writing three different states to catch different restart scenarios:
   666  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   667  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   668  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   669  	if !bc.cacheConfig.Disabled {
   670  		triedb := bc.stateCache.TrieDB()
   671  
   672  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   673  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   674  				//fmt.Printf("number: %v\n", number-offset)
   675  				recent := bc.GetBlockByNumber(number - offset)
   676  				//fmt.Printf("Recent: %s\n", recent)
   677  				//fmt.Printf("Root: %x\n", recent.Root())
   678  				hash := recent.Hash()
   679  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", hash, "root", recent.Root())
   680  				if err := triedb.Commit(recent.Root(), true); err != nil {
   681  					log.Error("Failed to commit recent state trie", "err", err)
   682  				}
   683  			}
   684  		}
   685  		for !bc.triegc.Empty() {
   686  			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
   687  		}
   688  		if size := triedb.Size(); size != 0 {
   689  			log.Error("Dangling trie nodes after full cleanup")
   690  		}
   691  	}
   692  	log.Info("Blockchain manager stopped")
   693  }
   694  
   695  func (bc *BlockChain) procFutureBlocks() {
   696  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   697  	for _, hash := range bc.futureBlocks.Keys() {
   698  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   699  			blocks = append(blocks, block.(*types.Block))
   700  		}
   701  	}
   702  	if len(blocks) > 0 {
   703  		types.BlockBy(types.Number).Sort(blocks)
   704  
   705  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   706  		for i := range blocks {
   707  			bc.InsertChain(blocks[i : i+1])
   708  		}
   709  	}
   710  }
   711  
   712  // WriteStatus status of write
   713  type WriteStatus byte
   714  
   715  const (
   716  	NonStatTy WriteStatus = iota
   717  	CanonStatTy
   718  	SideStatTy
   719  )
   720  
   721  // Rollback is designed to remove a chain of links from the database that aren't
   722  // certain enough to be valid.
   723  func (bc *BlockChain) Rollback(chain []common.Hash) {
   724  	bc.mu.Lock()
   725  	defer bc.mu.Unlock()
   726  
   727  	for i := len(chain) - 1; i >= 0; i-- {
   728  		hash := chain[i]
   729  
   730  		currentHeader := bc.hc.CurrentHeader()
   731  		if currentHeader.Hash() == hash {
   732  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   733  		}
   734  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   735  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   736  			bc.currentFastBlock.Store(newFastBlock)
   737  			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   738  		}
   739  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   740  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   741  			bc.currentBlock.Store(newBlock)
   742  			WriteHeadBlockHash(bc.db, newBlock.Hash())
   743  		}
   744  	}
   745  }
   746  
   747  // SetReceiptsData computes all the non-consensus fields of the receipts
   748  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) {
   749  	signer := types.MakeSigner(config, block.Number())
   750  
   751  	transactions, logIndex := block.Transactions(), uint(0)
   752  
   753  	for j := 0; j < len(receipts); j++ {
   754  		// The transaction hash can be retrieved from the transaction itself
   755  		receipts[j].TxHash = transactions[j].Hash()
   756  
   757  		// The contract address can be derived from the transaction itself
   758  		if transactions[j].To() == nil {
   759  			// Deriving the signer is expensive, only do if it's actually needed
   760  			from, _ := types.Sender(signer, transactions[j])
   761  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   762  		}
   763  		// The used gas can be calculated based on previous receipts
   764  		if j == 0 {
   765  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   766  		} else {
   767  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   768  		}
   769  		// The derived log fields can simply be set from the block and transaction
   770  		for k := 0; k < len(receipts[j].Logs); k++ {
   771  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   772  			receipts[j].Logs[k].BlockHash = block.Hash()
   773  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   774  			receipts[j].Logs[k].TxIndex = uint(j)
   775  			receipts[j].Logs[k].Index = logIndex
   776  			logIndex++
   777  		}
   778  	}
   779  }
   780  
   781  // InsertReceiptChain attempts to complete an already existing header chain with
   782  // transaction and receipt data.
   783  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   784  	bc.wg.Add(1)
   785  	defer bc.wg.Done()
   786  
   787  	// Do a sanity check that the provided chain is actually ordered and linked
   788  	for i := 1; i < len(blockChain); i++ {
   789  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   790  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   791  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   792  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   793  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   794  		}
   795  	}
   796  
   797  	var (
   798  		stats = struct{ processed, ignored int32 }{}
   799  		start = time.Now()
   800  		bytes = 0
   801  		batch = bc.db.NewBatch()
   802  	)
   803  	for i, block := range blockChain {
   804  		receipts := receiptChain[i]
   805  		// Short circuit insertion if shutting down or processing failed
   806  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   807  			return 0, nil
   808  		}
   809  		// Short circuit if the owner header is unknown
   810  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   811  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   812  		}
   813  		// Skip if the entire data is already known
   814  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   815  			stats.ignored++
   816  			continue
   817  		}
   818  		// Compute all the non-consensus fields of the receipts
   819  		SetReceiptsData(bc.chainConfig, block, receipts)
   820  		// Write all the data out into the database
   821  		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
   822  			return i, fmt.Errorf("failed to write block body: %v", err)
   823  		}
   824  		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
   825  			return i, fmt.Errorf("failed to write block receipts: %v", err)
   826  		}
   827  		if err := WriteTxLookupEntries(batch, block); err != nil {
   828  			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
   829  		}
   830  		stats.processed++
   831  
   832  		if batch.ValueSize() >= aquadb.IdealBatchSize {
   833  			if err := batch.Write(); err != nil {
   834  				return 0, err
   835  			}
   836  			bytes += batch.ValueSize()
   837  			batch.Reset()
   838  		}
   839  	}
   840  	if batch.ValueSize() > 0 {
   841  		bytes += batch.ValueSize()
   842  		if err := batch.Write(); err != nil {
   843  			return 0, err
   844  		}
   845  	}
   846  
   847  	// Update the head fast sync block if better
   848  	bc.mu.Lock()
   849  	head := blockChain[len(blockChain)-1]
   850  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   851  		currentFastBlock := bc.CurrentFastBlock()
   852  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   853  			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
   854  				log.Crit("Failed to update head fast block hash", "err", err)
   855  			}
   856  			bc.currentFastBlock.Store(head)
   857  		}
   858  	}
   859  	bc.mu.Unlock()
   860  
   861  	log.Info("Imported new block receipts",
   862  		"count", stats.processed,
   863  		"elapsed", common.PrettyDuration(time.Since(start)),
   864  		"number", head.Number(),
   865  		"hash", head.Hash(),
   866  		"size", common.StorageSize(bytes),
   867  		"ignored", stats.ignored)
   868  	return 0, nil
   869  }
   870  
   871  var lastWrite uint64
   872  
   873  // WriteBlockWithoutState writes only the block and its metadata to the database,
   874  // but does not write any state. This is used to construct competing side forks
   875  // up to the point where they exceed the canonical total difficulty.
   876  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   877  	bc.wg.Add(1)
   878  	defer bc.wg.Done()
   879  
   880  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   881  		return err
   882  	}
   883  	if err := WriteBlock(bc.db, block); err != nil {
   884  		return err
   885  	}
   886  	return nil
   887  }
   888  
   889  // WriteBlockWithState writes the block and all associated state to the database.
   890  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   891  	bc.wg.Add(1)
   892  	defer bc.wg.Done()
   893  
   894  	// Calculate the total difficulty of the block
   895  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   896  	if ptd == nil {
   897  		return NonStatTy, consensus.ErrUnknownAncestor
   898  	}
   899  	// Make sure no inconsistent state is leaked during insertion
   900  	bc.mu.Lock()
   901  	defer bc.mu.Unlock()
   902  
   903  	currentBlock := bc.CurrentBlock()
   904  	if hf7 := bc.Config().GetHF(7); hf7 != nil && hf7.Cmp(currentBlock.Number()) == 0 {
   905  		log.Info("Activating Hardfork", "HF", 7, "BlockNumber", hf7)
   906  	}
   907  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   908  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   909  
   910  	// Irrelevant of the canonical status, write the block itself to the database
   911  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   912  		return NonStatTy, err
   913  	}
   914  	// Write other block data using a batch.
   915  	batch := bc.db.NewBatch()
   916  	if err := WriteBlock(batch, block); err != nil {
   917  		return NonStatTy, err
   918  	}
   919  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   920  	if err != nil {
   921  		return NonStatTy, err
   922  	}
   923  	triedb := bc.stateCache.TrieDB()
   924  
   925  	// If we're running an archive node, always flush
   926  	if bc.cacheConfig.Disabled {
   927  		if err := triedb.Commit(root, false); err != nil {
   928  			return NonStatTy, err
   929  		}
   930  	} else {
   931  		// Full but not archive node, do proper garbage collection
   932  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   933  		bc.triegc.Push(root, -int64(block.NumberU64()))
   934  
   935  		if current := block.NumberU64(); current > triesInMemory {
   936  			// Find the next state trie we need to commit
   937  			header := bc.GetHeaderByNumber(current - triesInMemory)
   938  			if header == nil {
   939  				return NonStatTy, errors.New("header nil")
   940  			}
   941  			chosen := header.Number.Uint64()
   942  			// Only write to disk if we exceeded our memory allowance *and* also have at
   943  			// least a given number of tries gapped.
   944  			var (
   945  				size  = triedb.Size()
   946  				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   947  			)
   948  			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   949  				// If we're exceeding limits but haven't reached a large enough memory gap,
   950  				// warn the user that the system is becoming unstable.
   951  				if chosen < lastWrite+triesInMemory {
   952  					switch {
   953  					case size >= 2*limit:
   954  						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   955  					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
   956  						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   957  					}
   958  				}
   959  				// If optimum or critical limits reached, write to disk
   960  				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   961  					triedb.Commit(header.Root, true)
   962  					lastWrite = chosen
   963  					bc.gcproc = 0
   964  				}
   965  			}
   966  			// Garbage collect anything below our required write retention
   967  			for !bc.triegc.Empty() {
   968  				root, number := bc.triegc.Pop()
   969  				if uint64(-number) > chosen {
   970  					bc.triegc.Push(root, number)
   971  					break
   972  				}
   973  				triedb.Dereference(root.(common.Hash), common.Hash{})
   974  			}
   975  		}
   976  	}
   977  	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
   978  		return NonStatTy, err
   979  	}
   980  	// If the total difficulty is higher than our known, add it to the canonical chain
   981  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   982  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   983  	reorg := externTd.Cmp(localTd) > 0
   984  	currentBlock = bc.CurrentBlock()
   985  	if !reorg && externTd.Cmp(localTd) == 0 {
   986  		// Split same-difficulty blocks by number, then at random
   987  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
   988  	}
   989  	if reorg {
   990  		// Reorganise the chain if the parent is not the head block
   991  		if block.ParentHash() != currentBlock.Hash() {
   992  			if err := bc.reorg(currentBlock, block); err != nil {
   993  				return NonStatTy, err
   994  			}
   995  		}
   996  		// Write the positional metadata for transaction and receipt lookups
   997  		if err := WriteTxLookupEntries(batch, block); err != nil {
   998  			return NonStatTy, err
   999  		}
  1000  		// Write hash preimages
  1001  		if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
  1002  			return NonStatTy, err
  1003  		}
  1004  		status = CanonStatTy
  1005  	} else {
  1006  		status = SideStatTy
  1007  	}
  1008  	if err := batch.Write(); err != nil {
  1009  		return NonStatTy, err
  1010  	}
  1011  
  1012  	// Set new head.
  1013  	if status == CanonStatTy {
  1014  		bc.insert(block)
  1015  	}
  1016  	bc.futureBlocks.Remove(block.Hash())
  1017  	return status, nil
  1018  }
  1019  
  1020  // InsertChain attempts to insert the given batch of blocks in to the canonical
  1021  // chain or, otherwise, create a fork. If an error is returned it will return
  1022  // the index number of the failing block as well an error describing what went
  1023  // wrong.
  1024  //
  1025  // After insertion is done, all accumulated events will be fired.
  1026  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1027  	n, events, logs, err := bc.insertChain(chain)
  1028  	bc.PostChainEvents(events, logs)
  1029  	return n, err
  1030  }
  1031  
  1032  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1033  	return bc.insertChain2(chain, 1)
  1034  }
  1035  
  1036  // insertChain will execute the actual chain insertion and event aggregation. The
  1037  // only reason this method exists as a separate one is to make locking cleaner
  1038  // with deferred statements.
  1039  func (bc *BlockChain) insertChain2(chain types.Blocks, try int) (int, []interface{}, []*types.Log, error) {
  1040  	if len(chain) == 0 {
  1041  		return 0, nil, nil, fmt.Errorf("no chain to insert")
  1042  	}
  1043  
  1044  	if try > 3 {
  1045  		return 0, nil, nil, fmt.Errorf("after 3 tries, no good chain")
  1046  	}
  1047  	log.Debug("Inserting chain", "length", len(chain), "startversion", chain[0].Version())
  1048  	// Do a sanity check that the provided chain is actually ordered and linked
  1049  	for i := 1; i < len(chain); i++ {
  1050  		if chain[i-1].Version() == 0 {
  1051  			chain[i-1].SetVersion(bc.Config().GetBlockVersion(chain[i-1].Number()))
  1052  		}
  1053  		if chain[i].Version() == 0 {
  1054  			chain[i].SetVersion(bc.Config().GetBlockVersion(chain[i].Number()))
  1055  		}
  1056  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1057  			// Chain broke ancestry, log a messge (programming error) and skip insertion
  1058  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1059  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1060  			chain = chain[:i]
  1061  			return bc.insertChain2(chain, try+1)
  1062  		}
  1063  
  1064  	}
  1065  	// Pre-checks passed, start the full block imports
  1066  	bc.wg.Add(1)
  1067  	defer bc.wg.Done()
  1068  
  1069  	bc.chainmu.Lock()
  1070  	defer bc.chainmu.Unlock()
  1071  
  1072  	// A queued approach to delivering events. This is generally
  1073  	// faster than direct delivery and requires much less mutex
  1074  	// acquiring.
  1075  	var (
  1076  		stats         = insertStats{startTime: mclock.Now()}
  1077  		events        = make([]interface{}, 0, len(chain))
  1078  		lastCanon     *types.Block
  1079  		coalescedLogs []*types.Log
  1080  	)
  1081  	// Start the parallel header verifier
  1082  	headers := make([]*types.Header, len(chain))
  1083  	seals := make([]bool, len(chain))
  1084  
  1085  	for i, block := range chain {
  1086  		headers[i] = block.Header()
  1087  		if headers[i].Version == 0 {
  1088  			panic("header version not set")
  1089  		}
  1090  		seals[i] = true
  1091  	}
  1092  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1093  	defer close(abort)
  1094  
  1095  	// Iterate over the blocks and insert when the verifier permits
  1096  	for i, block := range chain {
  1097  		if block.Version() == 0 {
  1098  			panic("block version not set")
  1099  		}
  1100  		// If the chain is terminating, stop processing blocks
  1101  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1102  			log.Warn("Premature abort during blocks processing")
  1103  			return i, events, coalescedLogs, fmt.Errorf("aborted")
  1104  		}
  1105  		// If the header is a banned one, straight out abort
  1106  		if BadHashes[block.Hash()] {
  1107  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1108  			return i, events, coalescedLogs, ErrBlacklistedHash
  1109  		}
  1110  		// Wait for the block's verification to complete
  1111  		bstart := time.Now()
  1112  
  1113  		err := <-results
  1114  		if err == nil {
  1115  			block.Hash()
  1116  			err = bc.Validator().ValidateBody(block)
  1117  		}
  1118  		switch {
  1119  		case err == ErrKnownBlock:
  1120  			// Block and state both already known. However if the current block is below
  1121  			// this number we did a rollback and we should reimport it nonetheless.
  1122  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1123  				stats.ignored++
  1124  				continue
  1125  			}
  1126  
  1127  		case err == consensus.ErrFutureBlock:
  1128  			// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
  1129  			// the chain is discarded and processed at a later time if given.
  1130  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1131  			if block.Time().Cmp(max) > 0 {
  1132  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1133  			}
  1134  			bc.futureBlocks.Add(block.Hash(), block)
  1135  			stats.queued++
  1136  			continue
  1137  
  1138  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1139  			bc.futureBlocks.Add(block.Hash(), block)
  1140  			stats.queued++
  1141  			continue
  1142  
  1143  		case err == consensus.ErrPrunedAncestor:
  1144  			// Block competing with the canonical chain, store in the db, but don't process
  1145  			// until the competitor TD goes above the canonical TD
  1146  			currentBlock := bc.CurrentBlock()
  1147  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1148  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1149  			if localTd.Cmp(externTd) > 0 {
  1150  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1151  					return i, events, coalescedLogs, err
  1152  				}
  1153  				continue
  1154  			}
  1155  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1156  			var winner []*types.Block
  1157  
  1158  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1159  			for !bc.HasState(parent.Root()) {
  1160  				winner = append(winner, parent)
  1161  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1162  			}
  1163  			for j := 0; j < len(winner)/2; j++ {
  1164  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1165  			}
  1166  			// Import all the pruned blocks to make the state available
  1167  			bc.chainmu.Unlock()
  1168  			_, evs, logs, err := bc.insertChain(winner)
  1169  			bc.chainmu.Lock()
  1170  			events, coalescedLogs = evs, logs
  1171  
  1172  			if err != nil {
  1173  				return i, events, coalescedLogs, err
  1174  			}
  1175  
  1176  		case err != nil:
  1177  			bc.reportBlock(block, nil, err)
  1178  			return i, events, coalescedLogs, err
  1179  		}
  1180  		// Create a new statedb using the parent block and report an
  1181  		// error if it fails.
  1182  		var parent *types.Block
  1183  		if i == 0 {
  1184  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1185  		} else {
  1186  			parent = chain[i-1]
  1187  		}
  1188  		state, err := state.New(parent.Root(), bc.stateCache)
  1189  		if err != nil {
  1190  			return i, events, coalescedLogs, err
  1191  		}
  1192  		// Process block using the parent state as reference point.
  1193  		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1194  		if err != nil {
  1195  			bc.reportBlock(block, receipts, err)
  1196  			return i, events, coalescedLogs, err
  1197  		}
  1198  		// Validate the state using the default validator
  1199  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1200  		if err != nil {
  1201  			bc.reportBlock(block, receipts, err)
  1202  			return i, events, coalescedLogs, err
  1203  		}
  1204  		proctime := time.Since(bstart)
  1205  
  1206  		// Write the block to the chain and get the status.
  1207  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1208  		if err != nil {
  1209  			return i, events, coalescedLogs, err
  1210  		}
  1211  		switch status {
  1212  		case CanonStatTy:
  1213  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1214  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1215  
  1216  			coalescedLogs = append(coalescedLogs, logs...)
  1217  			blockInsertTimer.UpdateSince(bstart)
  1218  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1219  			lastCanon = block
  1220  
  1221  			// Only count canonical blocks for GC processing time
  1222  			bc.gcproc += proctime
  1223  
  1224  		case SideStatTy:
  1225  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1226  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1227  
  1228  			blockInsertTimer.UpdateSince(bstart)
  1229  			events = append(events, ChainSideEvent{block})
  1230  		}
  1231  		stats.processed++
  1232  		stats.usedGas += usedGas
  1233  		stats.report(chain, i, bc.stateCache.TrieDB().Size())
  1234  	}
  1235  	// Append a single chain head event if we've progressed the chain
  1236  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1237  		events = append(events, ChainHeadEvent{lastCanon})
  1238  	}
  1239  	return 0, events, coalescedLogs, nil
  1240  }
  1241  
  1242  // insertStats tracks and reports on block insertion.
  1243  type insertStats struct {
  1244  	queued, processed, ignored int
  1245  	usedGas                    uint64
  1246  	lastIndex                  int
  1247  	startTime                  mclock.AbsTime
  1248  }
  1249  
  1250  // statsReportLimit is the time limit during import after which we always print
  1251  // out progress. This avoids the user wondering what's going on.
  1252  const statsReportLimit = 8 * time.Second
  1253  
  1254  // report prints statistics if some number of blocks have been processed
  1255  // or more than a few seconds have passed since the last message.
  1256  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1257  	// Fetch the timings for the batch
  1258  	var (
  1259  		now     = mclock.Now()
  1260  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1261  	)
  1262  	// If we're at the last block of the batch or report period reached, log
  1263  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1264  		var (
  1265  			end = chain[index]
  1266  			txs = countTransactions(chain[st.lastIndex : index+1])
  1267  		)
  1268  		context := []interface{}{
  1269  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1270  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1271  			"number", end.Number(), "hash", end.Hash(), "cache", cache,
  1272  		}
  1273  		if st.queued > 0 {
  1274  			context = append(context, []interface{}{"queued", st.queued}...)
  1275  		}
  1276  		if st.ignored > 0 {
  1277  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1278  		}
  1279  		if st.processed == 1 {
  1280  			context = append(context, []interface{}{"miner", end.Coinbase()}...)
  1281  		} else {
  1282  			context = append(context, []interface{}{"timestamp", time.Unix(end.Time().Int64(), 0).UTC().Format("Mon Jan 2 15:04:05 MST 2006")}...)
  1283  		}
  1284  
  1285  		log.Info("Imported new chain segment", context...)
  1286  
  1287  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1288  	}
  1289  }
  1290  
  1291  func countTransactions(chain []*types.Block) (c int) {
  1292  	for _, b := range chain {
  1293  		c += len(b.Transactions())
  1294  	}
  1295  	return c
  1296  }
  1297  
  1298  // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
  1299  // to be part of the new canonical chain and accumulates potential missing transactions and post an
  1300  // event about them
  1301  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1302  	var (
  1303  		newChain    types.Blocks
  1304  		oldChain    types.Blocks
  1305  		commonBlock *types.Block
  1306  		deletedTxs  types.Transactions
  1307  		deletedLogs []*types.Log
  1308  		// collectLogs collects the logs that were generated during the
  1309  		// processing of the block that corresponds with the given hash.
  1310  		// These logs are later announced as deleted.
  1311  		collectLogs = func(h common.Hash) {
  1312  			// Coalesce logs and set 'Removed'.
  1313  			receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
  1314  			for _, receipt := range receipts {
  1315  				for _, log := range receipt.Logs {
  1316  					del := *log
  1317  					del.Removed = true
  1318  					deletedLogs = append(deletedLogs, &del)
  1319  				}
  1320  			}
  1321  		}
  1322  	)
  1323  
  1324  	// first reduce whoever is higher bound
  1325  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1326  		// reduce old chain
  1327  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1328  			oldChain = append(oldChain, oldBlock)
  1329  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1330  
  1331  			collectLogs(oldBlock.Hash())
  1332  		}
  1333  	} else {
  1334  		// reduce new chain and append new chain blocks for inserting later on
  1335  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1336  			newChain = append(newChain, newBlock)
  1337  		}
  1338  	}
  1339  	if oldBlock == nil {
  1340  		return fmt.Errorf("Invalid old chain")
  1341  	}
  1342  	if newBlock == nil {
  1343  		return fmt.Errorf("Invalid new chain")
  1344  	}
  1345  
  1346  	for {
  1347  		if oldBlock.Hash() == newBlock.Hash() {
  1348  			commonBlock = oldBlock
  1349  			break
  1350  		}
  1351  
  1352  		oldChain = append(oldChain, oldBlock)
  1353  		newChain = append(newChain, newBlock)
  1354  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1355  		collectLogs(oldBlock.Hash())
  1356  
  1357  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1358  		if oldBlock == nil {
  1359  			return fmt.Errorf("Invalid old chain")
  1360  		}
  1361  		if newBlock == nil {
  1362  			return fmt.Errorf("Invalid new chain")
  1363  		}
  1364  	}
  1365  	// Ensure the user sees large reorgs
  1366  	if len(oldChain) > 0 && len(newChain) > 0 {
  1367  		logFn := log.Debug
  1368  		if len(oldChain) > 5 {
  1369  			logFn = log.Warn
  1370  		}
  1371  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1372  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1373  	} else {
  1374  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1375  	}
  1376  	// Insert the new chain, taking care of the proper incremental order
  1377  	var addedTxs types.Transactions
  1378  	for i := len(newChain) - 1; i >= 0; i-- {
  1379  		// insert the block in the canonical way, re-writing history
  1380  		bc.insert(newChain[i])
  1381  		// write lookup entries for hash based transaction/receipt searches
  1382  		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
  1383  			return err
  1384  		}
  1385  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1386  	}
  1387  	// calculate the difference between deleted and added transactions
  1388  	diff := types.TxDifference(deletedTxs, addedTxs)
  1389  	// When transactions get deleted from the database that means the
  1390  	// receipts that were created in the fork must also be deleted
  1391  	for _, tx := range diff {
  1392  		DeleteTxLookupEntry(bc.db, tx.Hash())
  1393  	}
  1394  	if len(deletedLogs) > 0 {
  1395  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1396  	}
  1397  	if len(oldChain) > 0 {
  1398  		go func() {
  1399  			for _, block := range oldChain {
  1400  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1401  			}
  1402  		}()
  1403  	}
  1404  
  1405  	return nil
  1406  }
  1407  
  1408  // PostChainEvents iterates over the events generated by a chain insertion and
  1409  // posts them into the event feed.
  1410  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1411  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1412  	// post event logs for further processing
  1413  	if logs != nil {
  1414  		bc.logsFeed.Send(logs)
  1415  	}
  1416  	for _, event := range events {
  1417  		switch ev := event.(type) {
  1418  		case ChainEvent:
  1419  			bc.chainFeed.Send(ev)
  1420  
  1421  		case ChainHeadEvent:
  1422  			bc.chainHeadFeed.Send(ev)
  1423  
  1424  		case ChainSideEvent:
  1425  			bc.chainSideFeed.Send(ev)
  1426  		}
  1427  	}
  1428  }
  1429  
  1430  func (bc *BlockChain) update() {
  1431  	futureTimer := time.NewTicker(5 * time.Second)
  1432  	defer futureTimer.Stop()
  1433  	for {
  1434  		select {
  1435  		case <-futureTimer.C:
  1436  			bc.procFutureBlocks()
  1437  		case <-bc.quit:
  1438  			return
  1439  		}
  1440  	}
  1441  }
  1442  
  1443  // BadBlockArgs represents the entries in the list returned when bad blocks are queried.
  1444  type BadBlockArgs struct {
  1445  	Hash   common.Hash   `json:"hash"`
  1446  	Header *types.Header `json:"header"`
  1447  }
  1448  
  1449  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1450  func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
  1451  	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
  1452  	for _, hash := range bc.badBlocks.Keys() {
  1453  		if hdr, exist := bc.badBlocks.Peek(hash); exist {
  1454  			header := hdr.(*types.Header)
  1455  			headers = append(headers, BadBlockArgs{header.Hash(), header})
  1456  		}
  1457  	}
  1458  	return headers, nil
  1459  }
  1460  
  1461  // addBadBlock adds a bad block to the bad-block LRU cache
  1462  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1463  	bc.badBlocks.Add(block.Header().Hash(), block.Header())
  1464  }
  1465  
  1466  // reportBlock logs a bad block error.
  1467  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1468  	bc.addBadBlock(block)
  1469  	var caller, line string
  1470  	var num int
  1471  	_, line, num, _ = runtime.Caller(1)
  1472  	caller += fmt.Sprintf("%s:%v\n", line, num)
  1473  	_, line, num, _ = runtime.Caller(2)
  1474  	caller += fmt.Sprintf("%s:%v\n", line, num)
  1475  	var receiptString string
  1476  	for _, receipt := range receipts {
  1477  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1478  	}
  1479  	log.Error(fmt.Sprintf(`
  1480  ########## BAD BLOCK #########
  1481  Chain config: %v
  1482  
  1483  Number:  %v
  1484  Hash:  0x%x
  1485  Version: %v
  1486  Receipts: %v
  1487  Error: %v
  1488  Caller:
  1489  
  1490  %s
  1491  
  1492  ##############################
  1493  `, bc.chainConfig, block.Number(), block.Hash(), block.Version(), receiptString, err, caller))
  1494  
  1495  	log.Debug("%v", "block", block)
  1496  }
  1497  
  1498  // InsertHeaderChain attempts to insert the given header chain in to the local
  1499  // chain, possibly creating a reorg. If an error is returned, it will return the
  1500  // index number of the failing header as well an error describing what went wrong.
  1501  //
  1502  // The verify parameter can be used to fine tune whether nonce verification
  1503  // should be done or not. The reason behind the optional check is because some
  1504  // of the header retrieval mechanisms already need to verify nonces, as well as
  1505  // because nonces can be verified sparsely, not needing to check each.
  1506  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1507  	start := time.Now()
  1508  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1509  		return i, err
  1510  	}
  1511  
  1512  	// Make sure only one thread manipulates the chain at once
  1513  	bc.chainmu.Lock()
  1514  	defer bc.chainmu.Unlock()
  1515  
  1516  	bc.wg.Add(1)
  1517  	defer bc.wg.Done()
  1518  
  1519  	whFunc := func(header *types.Header) error {
  1520  		bc.mu.Lock()
  1521  		defer bc.mu.Unlock()
  1522  
  1523  		_, err := bc.hc.WriteHeader(header)
  1524  		return err
  1525  	}
  1526  
  1527  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1528  }
  1529  
  1530  // CurrentHeader retrieves the current head header of the canonical chain. The
  1531  // header is retrieved from the HeaderChain's internal cache.
  1532  func (bc *BlockChain) CurrentHeader() *types.Header {
  1533  	return bc.hc.CurrentHeader()
  1534  }
  1535  
  1536  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1537  // database by hash and number, caching it if found.
  1538  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1539  	return bc.hc.GetTd(hash, number)
  1540  }
  1541  
  1542  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1543  // database by hash, caching it if found.
  1544  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1545  	return bc.hc.GetTdByHash(hash)
  1546  }
  1547  
  1548  // GetHeader retrieves a block header from the database by hash and number,
  1549  // caching it if found.
  1550  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1551  	return bc.hc.GetHeader(hash, number)
  1552  }
  1553  
  1554  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1555  // found.
  1556  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1557  	return bc.hc.GetHeaderByHash(hash)
  1558  }
  1559  
  1560  // HasHeader checks if a block header is present in the database or not, caching
  1561  // it if present.
  1562  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1563  	return bc.hc.HasHeader(hash, number)
  1564  }
  1565  
  1566  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1567  // hash, fetching towards the genesis block.
  1568  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1569  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1570  }
  1571  
  1572  // GetHeaderByNumber retrieves a block header from the database by number,
  1573  // caching it (associated with its hash) if found.
  1574  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1575  	return bc.hc.GetHeaderByNumber(number)
  1576  }
  1577  
  1578  // Config retrieves the blockchain's chain configuration.
  1579  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1580  
  1581  // Engine retrieves the blockchain's consensus engine.
  1582  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1583  
  1584  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1585  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1586  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1587  }
  1588  
  1589  // SubscribeChainEvent registers a subscription of ChainEvent.
  1590  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1591  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1592  }
  1593  
  1594  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1595  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1596  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1597  }
  1598  
  1599  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1600  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1601  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1602  }
  1603  
  1604  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1605  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1606  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1607  }