github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/mclock"
    32  	"github.com/ethereum/go-ethereum/common/prque"
    33  	"github.com/ethereum/go-ethereum/consensus"
    34  	"github.com/ethereum/go-ethereum/core/rawdb"
    35  	"github.com/ethereum/go-ethereum/core/state"
    36  	"github.com/ethereum/go-ethereum/core/types"
    37  	"github.com/ethereum/go-ethereum/core/vm"
    38  	"github.com/ethereum/go-ethereum/crypto"
    39  	"github.com/ethereum/go-ethereum/ethdb"
    40  	"github.com/ethereum/go-ethereum/event"
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/metrics"
    43  	"github.com/ethereum/go-ethereum/params"
    44  	"github.com/ethereum/go-ethereum/rlp"
    45  	"github.com/ethereum/go-ethereum/trie"
    46  	lru "github.com/hashicorp/golang-lru"
    47  )
    48  
    49  var (
    50  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    51  
    52  	ErrNoGenesis = errors.New("Genesis not found in chain")
    53  )
    54  
    55  const (
    56  	bodyCacheLimit      = 256 // Maximum number of recent block bodies kept in the cache
    57  	blockCacheLimit     = 256 // Maximum number of recent entire blocks kept in the cache
    58  	receiptsCacheLimit  = 32  // Maximum number of recent receipt sets kept in the cache
    59  	maxFutureBlocks     = 256 // Maximum number of future blocks queued up for later processing
    60  	maxTimeFutureBlocks = 30  // Max time (seconds) a block may lie in the future before being rejected
    61  	badBlockLimit       = 10  // Maximum number of bad blocks kept in memory for reporting
    62  	triesInMemory       = 128 // Number of recent tries kept in memory before flushing to disk
    63  
    64  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    65  	BlockChainVersion = 3
    66  )
    67  
    68  // CacheConfig contains the configuration values for the trie caching/pruning
    69  // that's resident in a blockchain.
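        //
        // A minimal configuration sketch (the default values mirror what NewBlockChain
        // applies when a nil config is passed; the archive variant simply sets the
        // Disabled flag documented below):
        //
        //	fullNode := &CacheConfig{TrieNodeLimit: 256 * 1024 * 1024, TrieTimeLimit: 5 * time.Minute}
        //	archiveNode := &CacheConfig{Disabled: true}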
    70  type CacheConfig struct {
    71  	Disabled      bool          // Whether to disable trie write caching (archive node)
    72  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    73  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    74  }
    75  
    76  // BlockChain represents the canonical chain given a database with a genesis
    77  // block. The BlockChain manages chain imports, reverts and chain
    78  // reorganisations.
    79  //
    80  // Importing blocks into the block chain happens according to the set of rules
    81  // defined by the two-stage Validator. Processing of blocks is done using the
    82  // Processor which processes the included transactions. The validation of the
    83  // state is done in the second part of the Validator. A failure in either stage
    84  // aborts the import.
    85  //
    86  // The BlockChain also returns blocks from **any** chain included in the
    87  // database as well as blocks that represent the canonical chain. Note that
    88  // GetBlock can return any block, whether or not it is part of the canonical
    89  // chain, whereas GetBlockByNumber always refers to the canonical chain.
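        //
        // For example (a sketch; sideHash and num are placeholders), a block on a side
        // chain is reachable by hash but not by canonical number:
        //
        //	side := bc.GetBlock(sideHash, num)   // may return a non-canonical block
        //	canon := bc.GetBlockByNumber(num)    // always the canonical block at that height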
    90  type BlockChain struct {
    91  	chainConfig *params.ChainConfig // Chain & network configuration
    92  	cacheConfig *CacheConfig        // Cache configuration for pruning
    93  
    94  	db     ethdb.Database // Low level persistent database to store final content in
    95  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
    96  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
    97  
    98  	hc            *HeaderChain
    99  	rmLogsFeed    event.Feed
   100  	chainFeed     event.Feed
   101  	chainSideFeed event.Feed
   102  	chainHeadFeed event.Feed
   103  	logsFeed      event.Feed
   104  	scope         event.SubscriptionScope
   105  	genesisBlock  *types.Block
   106  
   107  	mu      sync.RWMutex // global mutex for locking chain operations
   108  	chainmu sync.RWMutex // blockchain insertion lock
   109  	procmu  sync.RWMutex // block processor lock
   110  
   111  	checkpoint       int          // checkpoint counts towards the new checkpoint
   112  	currentBlock     atomic.Value // Current head of the block chain
   113  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   114  
   115  	stateCache    state.Database // State database to reuse between imports (contains state cache)
   116  	bodyCache     *lru.Cache     // Cache for the most recent block bodies
   117  	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   118  	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
   119  	blockCache    *lru.Cache     // Cache for the most recent entire blocks
   120  	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing
   121  
   122  	quit    chan struct{} // blockchain quit channel
   123  	running int32         // running must be accessed atomically
   124  	// procInterrupt must be accessed atomically
   125  	procInterrupt int32          // interrupt signaler for block processing
   126  	wg            sync.WaitGroup // chain processing wait group for shutting down
   127  
   128  	engine    consensus.Engine
   129  	processor Processor // block processor interface
   130  	validator Validator // block and state validator interface
   131  	vmConfig  vm.Config
   132  
   133  	badBlocks      *lru.Cache              // Bad block cache
   134  	shouldPreserve func(*types.Block) bool // Function used to determine whether the given block should be preserved.
   135  }
   136  
   137  // NewBlockChain returns a fully initialised block chain using information
   138  // available in the database. It initialises the default Ethereum Validator and
   139  // Processor.
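        //
        // A minimal usage sketch (the in-memory database, genesis helper and faked
        // ethash engine come from the wider go-ethereum code base and are assumptions
        // here, not defined in this file):
        //
        //	db := ethdb.NewMemDatabase()
        //	genesis := new(Genesis).MustCommit(db)
        //	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, ethash.NewFaker(), vm.Config{}, nil)
        //	if err != nil {
        //		log.Crit("Failed to create chain", "err", err)
        //	}
        //	defer chain.Stop()
        //	_ = genesis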
   140  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
   141  	if cacheConfig == nil {
   142  		cacheConfig = &CacheConfig{
   143  			TrieNodeLimit: 256 * 1024 * 1024,
   144  			TrieTimeLimit: 5 * time.Minute,
   145  		}
   146  	}
   147  	bodyCache, _ := lru.New(bodyCacheLimit)
   148  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   149  	receiptsCache, _ := lru.New(receiptsCacheLimit)
   150  	blockCache, _ := lru.New(blockCacheLimit)
   151  	futureBlocks, _ := lru.New(maxFutureBlocks)
   152  	badBlocks, _ := lru.New(badBlockLimit)
   153  
   154  	bc := &BlockChain{
   155  		chainConfig:    chainConfig,
   156  		cacheConfig:    cacheConfig,
   157  		db:             db,
   158  		triegc:         prque.New(nil),
   159  		stateCache:     state.NewDatabase(db),
   160  		quit:           make(chan struct{}),
   161  		shouldPreserve: shouldPreserve,
   162  		bodyCache:      bodyCache,
   163  		bodyRLPCache:   bodyRLPCache,
   164  		receiptsCache:  receiptsCache,
   165  		blockCache:     blockCache,
   166  		futureBlocks:   futureBlocks,
   167  		engine:         engine,
   168  		vmConfig:       vmConfig,
   169  		badBlocks:      badBlocks,
   170  	}
   171  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   172  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   173  
   174  	var err error
   175  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   176  	if err != nil {
   177  		return nil, err
   178  	}
   179  	bc.genesisBlock = bc.GetBlockByNumber(0)
   180  	if bc.genesisBlock == nil {
   181  		return nil, ErrNoGenesis
   182  	}
   183  	if err := bc.loadLastState(); err != nil {
   184  		return nil, err
   185  	}
   186  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   187  	for hash := range BadHashes {
   188  		if header := bc.GetHeaderByHash(hash); header != nil {
   189  			// get the canonical block corresponding to the offending header's number
   190  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   191  			// make sure the headerByNumber (if present) is in our current canonical chain
   192  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   193  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   194  				bc.SetHead(header.Number.Uint64() - 1)
   195  				log.Error("Chain rewind was successful, resuming normal operation")
   196  			}
   197  		}
   198  	}
   199  	// Take ownership of this particular state
   200  	go bc.update()
   201  	return bc, nil
   202  }
   203  
   204  func (bc *BlockChain) getProcInterrupt() bool {
   205  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   206  }
   207  
   208  // loadLastState loads the last known chain state from the database. This method
   209  // assumes that the chain manager mutex is held.
   210  func (bc *BlockChain) loadLastState() error {
   211  	// Restore the last known head block
   212  	head := rawdb.ReadHeadBlockHash(bc.db)
   213  	if head == (common.Hash{}) {
   214  		// Corrupt or empty database, init from scratch
   215  		log.Warn("Empty database, resetting chain")
   216  		return bc.Reset()
   217  	}
   218  	// Make sure the entire head block is available
   219  	currentBlock := bc.GetBlockByHash(head)
   220  	if currentBlock == nil {
   221  		// Corrupt or empty database, init from scratch
   222  		log.Warn("Head block missing, resetting chain", "hash", head)
   223  		return bc.Reset()
   224  	}
   225  	// Make sure the state associated with the block is available
   226  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   227  		// Dangling block without a state associated, init from scratch
   228  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   229  		if err := bc.repair(&currentBlock); err != nil {
   230  			return err
   231  		}
   232  	}
   233  	// Everything seems to be fine, set as the head block
   234  	bc.currentBlock.Store(currentBlock)
   235  
   236  	// Restore the last known head header
   237  	currentHeader := currentBlock.Header()
   238  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   239  		if header := bc.GetHeaderByHash(head); header != nil {
   240  			currentHeader = header
   241  		}
   242  	}
   243  	bc.hc.SetCurrentHeader(currentHeader)
   244  
   245  	// Restore the last known head fast block
   246  	bc.currentFastBlock.Store(currentBlock)
   247  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   248  		if block := bc.GetBlockByHash(head); block != nil {
   249  			bc.currentFastBlock.Store(block)
   250  		}
   251  	}
   252  
   253  	// Issue a status log for the user
   254  	currentFastBlock := bc.CurrentFastBlock()
   255  
   256  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   257  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   258  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   259  
   260  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
   261  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
   262  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
   263  
   264  	return nil
   265  }
   266  
   267  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   268  // above the new head will be deleted and the new one set. In the case of blocks
   269  // though, the head may be further rewound if block bodies are missing (non-archive
   270  // nodes after a fast sync).
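        //
        // For example (a sketch), an operator recovering from a bad block at height n
        // could rewind to just below it, mirroring the bad-hash handling in NewBlockChain:
        //
        //	if err := bc.SetHead(n - 1); err != nil {
        //		log.Error("Rewind failed", "err", err)
        //	}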
   271  func (bc *BlockChain) SetHead(head uint64) error {
   272  	log.Warn("Rewinding blockchain", "target", head)
   273  
   274  	bc.mu.Lock()
   275  	defer bc.mu.Unlock()
   276  
   277  	// Rewind the header chain, deleting all block bodies until then
   278  	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
   279  		rawdb.DeleteBody(db, hash, num)
   280  	}
   281  	bc.hc.SetHead(head, delFn)
   282  	currentHeader := bc.hc.CurrentHeader()
   283  
   284  	// Clear out any stale content from the caches
   285  	bc.bodyCache.Purge()
   286  	bc.bodyRLPCache.Purge()
   287  	bc.receiptsCache.Purge()
   288  	bc.blockCache.Purge()
   289  	bc.futureBlocks.Purge()
   290  
   291  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   292  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   293  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   294  	}
   295  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   296  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   297  			// Rewound state missing, rolled back to before pivot, reset to genesis
   298  			bc.currentBlock.Store(bc.genesisBlock)
   299  		}
   300  	}
   301  	// Rewind the fast block in a simpleton way to the target head
   302  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   303  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   304  	}
   305  	// If either block reached nil, reset to the genesis state
   306  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   307  		bc.currentBlock.Store(bc.genesisBlock)
   308  	}
   309  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   310  		bc.currentFastBlock.Store(bc.genesisBlock)
   311  	}
   312  	currentBlock := bc.CurrentBlock()
   313  	currentFastBlock := bc.CurrentFastBlock()
   314  
   315  	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
   316  	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())
   317  
   318  	return bc.loadLastState()
   319  }
   320  
   321  // FastSyncCommitHead sets the current head block to the one defined by the hash,
   322  // regardless of what the chain contents were prior.
   323  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   324  	// Make sure that both the block as well as its state trie exist
   325  	block := bc.GetBlockByHash(hash)
   326  	if block == nil {
   327  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   328  	}
   329  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   330  		return err
   331  	}
   332  	// If all checks out, manually set the head block
   333  	bc.mu.Lock()
   334  	bc.currentBlock.Store(block)
   335  	bc.mu.Unlock()
   336  
   337  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   338  	return nil
   339  }
   340  
   341  // GasLimit returns the gas limit of the current HEAD block.
   342  func (bc *BlockChain) GasLimit() uint64 {
   343  	return bc.CurrentBlock().GasLimit()
   344  }
   345  
   346  // CurrentBlock retrieves the current head block of the canonical chain. The
   347  // block is retrieved from the blockchain's internal cache.
   348  func (bc *BlockChain) CurrentBlock() *types.Block {
   349  	return bc.currentBlock.Load().(*types.Block)
   350  }
   351  
   352  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   353  // chain. The block is retrieved from the blockchain's internal cache.
   354  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   355  	return bc.currentFastBlock.Load().(*types.Block)
   356  }
   357  
   358  // SetProcessor sets the processor required for making state modifications.
   359  func (bc *BlockChain) SetProcessor(processor Processor) {
   360  	bc.procmu.Lock()
   361  	defer bc.procmu.Unlock()
   362  	bc.processor = processor
   363  }
   364  
   365  // SetValidator sets the validator which is used to validate incoming blocks.
   366  func (bc *BlockChain) SetValidator(validator Validator) {
   367  	bc.procmu.Lock()
   368  	defer bc.procmu.Unlock()
   369  	bc.validator = validator
   370  }
   371  
   372  // Validator returns the current validator.
   373  func (bc *BlockChain) Validator() Validator {
   374  	bc.procmu.RLock()
   375  	defer bc.procmu.RUnlock()
   376  	return bc.validator
   377  }
   378  
   379  // Processor returns the current processor.
   380  func (bc *BlockChain) Processor() Processor {
   381  	bc.procmu.RLock()
   382  	defer bc.procmu.RUnlock()
   383  	return bc.processor
   384  }
   385  
   386  // State returns a new mutable state based on the current HEAD block.
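        //
        // For example (a sketch; the address is a placeholder), reading an account
        // balance at the current head:
        //
        //	statedb, err := bc.State()
        //	if err != nil {
        //		return err
        //	}
        //	balance := statedb.GetBalance(common.HexToAddress("0x..."))
        //	_ = balance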
   387  func (bc *BlockChain) State() (*state.StateDB, error) {
   388  	return bc.StateAt(bc.CurrentBlock().Root())
   389  }
   390  
   391  // StateAt returns a new mutable state based on a particular point in time.
   392  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   393  	return state.New(root, bc.stateCache)
   394  }
   395  
   396  // Reset purges the entire blockchain, restoring it to its genesis state.
   397  func (bc *BlockChain) Reset() error {
   398  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   399  }
   400  
   401  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   402  // specified genesis state.
   403  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   404  	// Dump the entire block chain and purge the caches
   405  	if err := bc.SetHead(0); err != nil {
   406  		return err
   407  	}
   408  	bc.mu.Lock()
   409  	defer bc.mu.Unlock()
   410  
   411  	// Prepare the genesis block and reinitialise the chain
   412  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   413  		log.Crit("Failed to write genesis block TD", "err", err)
   414  	}
   415  	rawdb.WriteBlock(bc.db, genesis)
   416  
   417  	bc.genesisBlock = genesis
   418  	bc.insert(bc.genesisBlock)
   419  	bc.currentBlock.Store(bc.genesisBlock)
   420  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   421  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   422  	bc.currentFastBlock.Store(bc.genesisBlock)
   423  
   424  	return nil
   425  }
   426  
   427  // repair tries to repair the current blockchain by rolling back the current block
   428  // until one with associated state is found. This is needed to fix incomplete db
   429  // writes caused either by crashes/power outages, or simply non-committed tries.
   430  //
   431  // This method only rolls back the current block. The current header and current
   432  // fast block are left intact.
   433  func (bc *BlockChain) repair(head **types.Block) error {
   434  	for {
   435  		// Abort if we've rewound to a head block that does have associated state
   436  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   437  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   438  			return nil
   439  		}
   440  		// Otherwise rewind one block and recheck state availability there
   441  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   442  	}
   443  }
   444  
   445  // Export writes the active chain to the given writer.
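        //
        // For example (a sketch; the os package is not imported by this file), dumping
        // the whole chain into an RLP file:
        //
        //	f, err := os.Create("chain.rlp")
        //	if err != nil {
        //		return err
        //	}
        //	defer f.Close()
        //	if err := bc.Export(f); err != nil {
        //		return err
        //	}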
   446  func (bc *BlockChain) Export(w io.Writer) error {
   447  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   448  }
   449  
   450  // ExportN writes a subset of the active chain to the given writer.
   451  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   452  	bc.mu.RLock()
   453  	defer bc.mu.RUnlock()
   454  
   455  	if first > last {
   456  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   457  	}
   458  	log.Info("Exporting batch of blocks", "count", last-first+1)
   459  
   460  	start, reported := time.Now(), time.Now()
   461  	for nr := first; nr <= last; nr++ {
   462  		block := bc.GetBlockByNumber(nr)
   463  		if block == nil {
   464  			return fmt.Errorf("export failed on #%d: not found", nr)
   465  		}
   466  		if err := block.EncodeRLP(w); err != nil {
   467  			return err
   468  		}
   469  		if time.Since(reported) >= statsReportLimit {
   470  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   471  			reported = time.Now()
   472  		}
   473  	}
   474  
   475  	return nil
   476  }
   477  
   478  // insert injects a new head block into the current block chain. This method
   479  // assumes that the block is indeed a true head. It will also reset the head
   480  // header and the head fast sync block to this very same block if they are older
   481  // or if they are on a different side chain.
   482  //
   483  // Note, this function assumes that the `mu` mutex is held!
   484  func (bc *BlockChain) insert(block *types.Block) {
   485  	// If the block is on a side chain or an unknown one, force other heads onto it too
   486  	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   487  
   488  	// Add the block to the canonical chain number scheme and mark as the head
   489  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
   490  	rawdb.WriteHeadBlockHash(bc.db, block.Hash())
   491  
   492  	bc.currentBlock.Store(block)
   493  
   494  	// If the block is better than our head or is on a different chain, force update heads
   495  	if updateHeads {
   496  		bc.hc.SetCurrentHeader(block.Header())
   497  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
   498  
   499  		bc.currentFastBlock.Store(block)
   500  	}
   501  }
   502  
   503  // Genesis retrieves the chain's genesis block.
   504  func (bc *BlockChain) Genesis() *types.Block {
   505  	return bc.genesisBlock
   506  }
   507  
   508  // GetBody retrieves a block body (transactions and uncles) from the database by
   509  // hash, caching it if found.
   510  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   511  	// Short circuit if the body's already in the cache, retrieve otherwise
   512  	if cached, ok := bc.bodyCache.Get(hash); ok {
   513  		body := cached.(*types.Body)
   514  		return body
   515  	}
   516  	number := bc.hc.GetBlockNumber(hash)
   517  	if number == nil {
   518  		return nil
   519  	}
   520  	body := rawdb.ReadBody(bc.db, hash, *number)
   521  	if body == nil {
   522  		return nil
   523  	}
   524  	// Cache the found body for next time and return
   525  	bc.bodyCache.Add(hash, body)
   526  	return body
   527  }
   528  
   529  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   530  // caching it if found.
   531  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   532  	// Short circuit if the body's already in the cache, retrieve otherwise
   533  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   534  		return cached.(rlp.RawValue)
   535  	}
   536  	number := bc.hc.GetBlockNumber(hash)
   537  	if number == nil {
   538  		return nil
   539  	}
   540  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   541  	if len(body) == 0 {
   542  		return nil
   543  	}
   544  	// Cache the found body for next time and return
   545  	bc.bodyRLPCache.Add(hash, body)
   546  	return body
   547  }
   548  
   549  // HasBlock checks if a block is fully present in the database or not.
   550  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   551  	if bc.blockCache.Contains(hash) {
   552  		return true
   553  	}
   554  	return rawdb.HasBody(bc.db, hash, number)
   555  }
   556  
   557  // HasState checks if the state trie is fully present in the database or not.
   558  func (bc *BlockChain) HasState(hash common.Hash) bool {
   559  	_, err := bc.stateCache.OpenTrie(hash)
   560  	return err == nil
   561  }
   562  
   563  // HasBlockAndState checks if a block and its associated state trie are fully
   564  // present in the database or not, caching the block if present.
   565  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   566  	// Check first that the block itself is known
   567  	block := bc.GetBlock(hash, number)
   568  	if block == nil {
   569  		return false
   570  	}
   571  	return bc.HasState(block.Root())
   572  }
   573  
   574  // GetBlock retrieves a block from the database by hash and number,
   575  // caching it if found.
   576  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   577  	// Short circuit if the block's already in the cache, retrieve otherwise
   578  	if block, ok := bc.blockCache.Get(hash); ok {
   579  		return block.(*types.Block)
   580  	}
   581  	block := rawdb.ReadBlock(bc.db, hash, number)
   582  	if block == nil {
   583  		return nil
   584  	}
   585  	// Cache the found block for next time and return
   586  	bc.blockCache.Add(block.Hash(), block)
   587  	return block
   588  }
   589  
   590  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   591  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   592  	number := bc.hc.GetBlockNumber(hash)
   593  	if number == nil {
   594  		return nil
   595  	}
   596  	return bc.GetBlock(hash, *number)
   597  }
   598  
   599  // GetBlockByNumber retrieves a block from the database by number, caching it
   600  // (associated with its hash) if found.
   601  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   602  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   603  	if hash == (common.Hash{}) {
   604  		return nil
   605  	}
   606  	return bc.GetBlock(hash, number)
   607  }
   608  
   609  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
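        //
        // For example (a sketch; blockHash is a placeholder):
        //
        //	for _, receipt := range bc.GetReceiptsByHash(blockHash) {
        //		log.Debug("Receipt", "tx", receipt.TxHash, "gas", receipt.GasUsed)
        //	}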
   610  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   611  	if receipts, ok := bc.receiptsCache.Get(hash); ok {
   612  		return receipts.(types.Receipts)
   613  	}
   614  
   615  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   616  	if number == nil {
   617  		return nil
   618  	}
   619  
   620  	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
   621  	bc.receiptsCache.Add(hash, receipts)
   622  	return receipts
   623  }
   624  
   625  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   626  // [deprecated by eth/62]
   627  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   628  	number := bc.hc.GetBlockNumber(hash)
   629  	if number == nil {
   630  		return nil
   631  	}
   632  	for i := 0; i < n; i++ {
   633  		block := bc.GetBlock(hash, *number)
   634  		if block == nil {
   635  			break
   636  		}
   637  		blocks = append(blocks, block)
   638  		hash = block.ParentHash()
   639  		*number--
   640  	}
   641  	return
   642  }
   643  
   644  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   645  // a specific distance is reached.
   646  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   647  	uncles := []*types.Header{}
   648  	for i := 0; block != nil && i < length; i++ {
   649  		uncles = append(uncles, block.Uncles()...)
   650  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   651  	}
   652  	return uncles
   653  }
   654  
   655  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   656  // either from ephemeral in-memory cache, or from persistent storage.
   657  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   658  	return bc.stateCache.TrieDB().Node(hash)
   659  }
   660  
   661  // Stop stops the blockchain service. If any imports are currently in progress
   662  // it will abort them using the procInterrupt.
   663  func (bc *BlockChain) Stop() {
   664  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   665  		return
   666  	}
   667  	// Unsubscribe all subscriptions registered from blockchain
   668  	bc.scope.Close()
   669  	close(bc.quit)
   670  	atomic.StoreInt32(&bc.procInterrupt, 1)
   671  
   672  	bc.wg.Wait()
   673  
   674  	// Ensure the state of a recent block is also stored to disk before exiting.
   675  	// We're writing three different states to catch different restart scenarios:
   676  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   677  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   678  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   679  	if !bc.cacheConfig.Disabled {
   680  		triedb := bc.stateCache.TrieDB()
   681  
   682  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   683  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   684  				recent := bc.GetBlockByNumber(number - offset)
   685  
   686  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   687  				if err := triedb.Commit(recent.Root(), true); err != nil {
   688  					log.Error("Failed to commit recent state trie", "err", err)
   689  				}
   690  			}
   691  		}
   692  		for !bc.triegc.Empty() {
   693  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
   694  		}
   695  		if size, _ := triedb.Size(); size != 0 {
   696  			log.Error("Dangling trie nodes after full cleanup")
   697  		}
   698  	}
   699  	log.Info("Blockchain manager stopped")
   700  }
   701  
   702  func (bc *BlockChain) procFutureBlocks() {
   703  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   704  	for _, hash := range bc.futureBlocks.Keys() {
   705  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   706  			blocks = append(blocks, block.(*types.Block))
   707  		}
   708  	}
   709  	if len(blocks) > 0 {
   710  		types.BlockBy(types.Number).Sort(blocks)
   711  
   712  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   713  		for i := range blocks {
   714  			bc.InsertChain(blocks[i : i+1])
   715  		}
   716  	}
   717  }
   718  
   719  // WriteStatus is the status returned when writing a block to the chain.
   720  type WriteStatus byte
   721  
   722  const (
   723  	NonStatTy WriteStatus = iota // Block was not written (e.g. the import failed)
   724  	CanonStatTy                  // Block was written and became part of the canonical chain
   725  	SideStatTy                   // Block was written to a side chain (not currently canonical)
   726  )
   727  
   728  // Rollback is designed to remove a chain of links from the database that aren't
   729  // certain enough to be valid.
   730  func (bc *BlockChain) Rollback(chain []common.Hash) {
   731  	bc.mu.Lock()
   732  	defer bc.mu.Unlock()
   733  
   734  	for i := len(chain) - 1; i >= 0; i-- {
   735  		hash := chain[i]
   736  
   737  		currentHeader := bc.hc.CurrentHeader()
   738  		if currentHeader.Hash() == hash {
   739  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   740  		}
   741  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   742  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   743  			bc.currentFastBlock.Store(newFastBlock)
   744  			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   745  		}
   746  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   747  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   748  			bc.currentBlock.Store(newBlock)
   749  			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
   750  		}
   751  	}
   752  }
   753  
   754  // SetReceiptsData computes all the non-consensus fields of the receipts
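        //
        // For example, if three receipts report cumulative gas used of 21000, 63000 and
        // 84000, the derived per-transaction GasUsed values are 21000, 42000 and 21000
        // respectively.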
   755  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
   756  	signer := types.MakeSigner(config, block.Number())
   757  
   758  	transactions, logIndex := block.Transactions(), uint(0)
   759  	if len(transactions) != len(receipts) {
   760  		return errors.New("transaction and receipt count mismatch")
   761  	}
   762  
   763  	for j := 0; j < len(receipts); j++ {
   764  		// The transaction hash can be retrieved from the transaction itself
   765  		receipts[j].TxHash = transactions[j].Hash()
   766  
   767  		// The contract address can be derived from the transaction itself
   768  		if transactions[j].To() == nil {
   769  			// Deriving the signer is expensive, only do if it's actually needed
   770  			from, _ := types.Sender(signer, transactions[j])
   771  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   772  		}
   773  		// The used gas can be calculated based on previous receipts
   774  		if j == 0 {
   775  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   776  		} else {
   777  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   778  		}
   779  		// The derived log fields can simply be set from the block and transaction
   780  		for k := 0; k < len(receipts[j].Logs); k++ {
   781  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   782  			receipts[j].Logs[k].BlockHash = block.Hash()
   783  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   784  			receipts[j].Logs[k].TxIndex = uint(j)
   785  			receipts[j].Logs[k].Index = logIndex
   786  			logIndex++
   787  		}
   788  	}
   789  	return nil
   790  }
   791  
   792  // InsertReceiptChain attempts to complete an already existing header chain with
   793  // transaction and receipt data.
   794  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   795  	bc.wg.Add(1)
   796  	defer bc.wg.Done()
   797  
   798  	// Do a sanity check that the provided chain is actually ordered and linked
   799  	for i := 1; i < len(blockChain); i++ {
   800  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   801  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   802  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   803  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   804  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   805  		}
   806  	}
   807  
   808  	var (
   809  		stats = struct{ processed, ignored int32 }{}
   810  		start = time.Now()
   811  		bytes = 0
   812  		batch = bc.db.NewBatch()
   813  	)
   814  	for i, block := range blockChain {
   815  		receipts := receiptChain[i]
   816  		// Short circuit insertion if shutting down or processing failed
   817  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   818  			return 0, nil
   819  		}
   820  		// Short circuit if the owner header is unknown
   821  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   822  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   823  		}
   824  		// Skip if the entire data is already known
   825  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   826  			stats.ignored++
   827  			continue
   828  		}
   829  		// Compute all the non-consensus fields of the receipts
   830  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   831  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   832  		}
   833  		// Write all the data out into the database
   834  		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
   835  		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   836  		rawdb.WriteTxLookupEntries(batch, block)
   837  
   838  		stats.processed++
   839  
   840  		if batch.ValueSize() >= ethdb.IdealBatchSize {
   841  			if err := batch.Write(); err != nil {
   842  				return 0, err
   843  			}
   844  			bytes += batch.ValueSize()
   845  			batch.Reset()
   846  		}
   847  	}
   848  	if batch.ValueSize() > 0 {
   849  		bytes += batch.ValueSize()
   850  		if err := batch.Write(); err != nil {
   851  			return 0, err
   852  		}
   853  	}
   854  
   855  	// Update the head fast sync block if better
   856  	bc.mu.Lock()
   857  	head := blockChain[len(blockChain)-1]
   858  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   859  		currentFastBlock := bc.CurrentFastBlock()
   860  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   861  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   862  			bc.currentFastBlock.Store(head)
   863  		}
   864  	}
   865  	bc.mu.Unlock()
   866  
   867  	context := []interface{}{
   868  		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
   869  		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
   870  		"size", common.StorageSize(bytes),
   871  	}
   872  	if stats.ignored > 0 {
   873  		context = append(context, []interface{}{"ignored", stats.ignored}...)
   874  	}
   875  	log.Info("Imported new block receipts", context...)
   876  
   877  	return 0, nil
   878  }
   879  
   880  var lastWrite uint64
   881  
   882  // WriteBlockWithoutState writes only the block and its metadata to the database,
   883  // but does not write any state. This is used to construct competing side forks
   884  // up to the point where they exceed the canonical total difficulty.
   885  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   886  	bc.wg.Add(1)
   887  	defer bc.wg.Done()
   888  
   889  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   890  		return err
   891  	}
   892  	rawdb.WriteBlock(bc.db, block)
   893  
   894  	return nil
   895  }
   896  
   897  // WriteBlockWithState writes the block and all associated state to the database.
   898  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   899  	bc.wg.Add(1)
   900  	defer bc.wg.Done()
   901  
   902  	// Calculate the total difficulty of the block
   903  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   904  	if ptd == nil {
   905  		return NonStatTy, consensus.ErrUnknownAncestor
   906  	}
   907  	// Make sure no inconsistent state is leaked during insertion
   908  	bc.mu.Lock()
   909  	defer bc.mu.Unlock()
   910  
   911  	currentBlock := bc.CurrentBlock()
   912  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   913  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   914  
   915  	// Irrelevant of the canonical status, write the block itself to the database
   916  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   917  		return NonStatTy, err
   918  	}
   919  	rawdb.WriteBlock(bc.db, block)
   920  
   921  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   922  	if err != nil {
   923  		return NonStatTy, err
   924  	}
   925  	triedb := bc.stateCache.TrieDB()
   926  
   927  	// If we're running an archive node, always flush
   928  	if bc.cacheConfig.Disabled {
   929  		if err := triedb.Commit(root, false); err != nil {
   930  			return NonStatTy, err
   931  		}
   932  	} else {
   933  		// Full but not archive node, do proper garbage collection
   934  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   935  		bc.triegc.Push(root, -int64(block.NumberU64()))
   936  
   937  		if current := block.NumberU64(); current > triesInMemory {
   938  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
   939  			var (
   940  				nodes, imgs = triedb.Size()
   941  				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   942  			)
   943  			if nodes > limit || imgs > 4*1024*1024 {
   944  				triedb.Cap(limit - ethdb.IdealBatchSize)
   945  			}
   946  			// Find the next state trie we need to commit
   947  			header := bc.GetHeaderByNumber(current - triesInMemory)
   948  			chosen := header.Number.Uint64()
   949  
   950  			// If we exceeded our time allowance, flush an entire trie to disk
   951  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   952  				// If we're exceeding limits but haven't reached a large enough memory gap,
   953  				// warn the user that the system is becoming unstable.
   954  				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   955  					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   956  				}
   957  				// Flush an entire trie and restart the counters
   958  				triedb.Commit(header.Root, true)
   959  				lastWrite = chosen
   960  				bc.gcproc = 0
   961  			}
   962  			// Garbage collect anything below our required write retention
   963  			for !bc.triegc.Empty() {
   964  				root, number := bc.triegc.Pop()
   965  				if uint64(-number) > chosen {
   966  					bc.triegc.Push(root, number)
   967  					break
   968  				}
   969  				triedb.Dereference(root.(common.Hash))
   970  			}
   971  		}
   972  	}
   973  
   974  	// Write other block data using a batch.
   975  	batch := bc.db.NewBatch()
   976  	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   977  
   978  	// If the total difficulty is higher than our known total difficulty, add it to the canonical chain
   979  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   980  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   981  	reorg := externTd.Cmp(localTd) > 0
   982  	currentBlock = bc.CurrentBlock()
   983  	if !reorg && externTd.Cmp(localTd) == 0 {
   984  		// Split same-difficulty blocks by number, then preferentially select
   985  		// the block generated by the local miner as the canonical block.
   986  		if block.NumberU64() < currentBlock.NumberU64() {
   987  			reorg = true
   988  		} else if block.NumberU64() == currentBlock.NumberU64() {
   989  			var currentPreserve, blockPreserve bool
   990  			if bc.shouldPreserve != nil {
   991  				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
   992  			}
   993  			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
   994  		}
   995  	}
   996  	if reorg {
   997  		// Reorganise the chain if the parent is not the head block
   998  		if block.ParentHash() != currentBlock.Hash() {
   999  			if err := bc.reorg(currentBlock, block); err != nil {
  1000  				return NonStatTy, err
  1001  			}
  1002  		}
  1003  		// Write the positional metadata for transaction/receipt lookups and preimages
  1004  		rawdb.WriteTxLookupEntries(batch, block)
  1005  		rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
  1006  
  1007  		status = CanonStatTy
  1008  	} else {
  1009  		status = SideStatTy
  1010  	}
  1011  	if err := batch.Write(); err != nil {
  1012  		return NonStatTy, err
  1013  	}
  1014  
  1015  	// Set new head.
  1016  	if status == CanonStatTy {
  1017  		bc.insert(block)
  1018  	}
  1019  	bc.futureBlocks.Remove(block.Hash())
  1020  	return status, nil
  1021  }
  1022  
  1023  // InsertChain attempts to insert the given batch of blocks in to the canonical
  1024  // chain or, otherwise, create a fork. If an error is returned, it will return
  1025  // the index number of the failing block as well as an error describing what
  1026  // went wrong.
  1027  //
  1028  // After insertion is done, all accumulated events will be fired.
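        //
        // A usage sketch (the blocks slice would typically come from the downloader or
        // from a test helper such as GenerateChain; that helper is an assumption, it is
        // not defined in this file):
        //
        //	if n, err := chain.InsertChain(blocks); err != nil {
        //		log.Error("Block import failed", "index", n, "err", err)
        //	}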
  1029  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1030  	n, events, logs, err := bc.insertChain(chain)
  1031  	bc.PostChainEvents(events, logs)
  1032  	return n, err
  1033  }
  1034  
  1035  // insertChain will execute the actual chain insertion and event aggregation. The
  1036  // only reason this method exists as a separate one is to make locking cleaner
  1037  // with deferred statements.
  1038  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1039  	// Sanity check that we have something meaningful to import
  1040  	if len(chain) == 0 {
  1041  		return 0, nil, nil, nil
  1042  	}
  1043  	// Do a sanity check that the provided chain is actually ordered and linked
  1044  	for i := 1; i < len(chain); i++ {
  1045  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1046  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1047  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1048  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1049  
  1050  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1051  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1052  		}
  1053  	}
  1054  	// Pre-checks passed, start the full block imports
  1055  	bc.wg.Add(1)
  1056  	defer bc.wg.Done()
  1057  
  1058  	bc.chainmu.Lock()
  1059  	defer bc.chainmu.Unlock()
  1060  
  1061  	// A queued approach to delivering events. This is generally
  1062  	// faster than direct delivery and requires much less mutex
  1063  	// acquisition.
  1064  	var (
  1065  		stats         = insertStats{startTime: mclock.Now()}
  1066  		events        = make([]interface{}, 0, len(chain))
  1067  		lastCanon     *types.Block
  1068  		coalescedLogs []*types.Log
  1069  	)
  1070  	// Start the parallel header verifier
  1071  	headers := make([]*types.Header, len(chain))
  1072  	seals := make([]bool, len(chain))
  1073  
  1074  	for i, block := range chain {
  1075  		headers[i] = block.Header()
  1076  		seals[i] = true
  1077  	}
  1078  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1079  	defer close(abort)
  1080  
  1081  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1082  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1083  
  1084  	// Iterate over the blocks and insert when the verifier permits
  1085  	for i, block := range chain {
  1086  		// If the chain is terminating, stop processing blocks
  1087  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1088  			log.Debug("Premature abort during blocks processing")
  1089  			break
  1090  		}
  1091  		// If the header is a banned one, straight out abort
  1092  		if BadHashes[block.Hash()] {
  1093  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1094  			return i, events, coalescedLogs, ErrBlacklistedHash
  1095  		}
  1096  		// Wait for the block's verification to complete
  1097  		bstart := time.Now()
  1098  
  1099  		err := <-results
  1100  		if err == nil {
  1101  			err = bc.Validator().ValidateBody(block)
  1102  		}
  1103  		switch {
  1104  		case err == ErrKnownBlock:
  1105  			// Block and state both already known. However if the current block is below
  1106  			// this number we did a rollback and we should reimport it nonetheless.
  1107  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1108  				stats.ignored++
  1109  				continue
  1110  			}
  1111  
  1112  		case err == consensus.ErrFutureBlock:
  1113  			// Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
  1114  			// is exceeded the import is aborted, otherwise the block is queued for later processing.
  1115  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1116  			if block.Time().Cmp(max) > 0 {
  1117  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1118  			}
  1119  			bc.futureBlocks.Add(block.Hash(), block)
  1120  			stats.queued++
  1121  			continue
  1122  
  1123  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1124  			bc.futureBlocks.Add(block.Hash(), block)
  1125  			stats.queued++
  1126  			continue
  1127  
  1128  		case err == consensus.ErrPrunedAncestor:
  1129  			// Block competing with the canonical chain, store in the db, but don't process
  1130  			// until the competitor TD goes above the canonical TD
  1131  			currentBlock := bc.CurrentBlock()
  1132  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1133  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1134  			if localTd.Cmp(externTd) > 0 {
  1135  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1136  					return i, events, coalescedLogs, err
  1137  				}
  1138  				continue
  1139  			}
  1140  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1141  			var winner []*types.Block
  1142  
  1143  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1144  			for !bc.HasState(parent.Root()) {
  1145  				winner = append(winner, parent)
  1146  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1147  			}
  1148  			for j := 0; j < len(winner)/2; j++ {
  1149  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1150  			}
  1151  			// Import all the pruned blocks to make the state available
  1152  			bc.chainmu.Unlock()
  1153  			_, evs, logs, err := bc.insertChain(winner)
  1154  			bc.chainmu.Lock()
  1155  			events, coalescedLogs = evs, logs
  1156  
  1157  			if err != nil {
  1158  				return i, events, coalescedLogs, err
  1159  			}
  1160  
  1161  		case err != nil:
  1162  			bc.reportBlock(block, nil, err)
  1163  			return i, events, coalescedLogs, err
  1164  		}
  1165  		// Create a new statedb using the parent block and report an
  1166  		// error if it fails.
  1167  		var parent *types.Block
  1168  		if i == 0 {
  1169  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1170  		} else {
  1171  			parent = chain[i-1]
  1172  		}
  1173  		state, err := state.New(parent.Root(), bc.stateCache)
  1174  		if err != nil {
  1175  			return i, events, coalescedLogs, err
  1176  		}
  1177  		// Process block using the parent state as reference point.
  1178  		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1179  		if err != nil {
  1180  			bc.reportBlock(block, receipts, err)
  1181  			return i, events, coalescedLogs, err
  1182  		}
  1183  		// Validate the state using the default validator
  1184  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1185  		if err != nil {
  1186  			bc.reportBlock(block, receipts, err)
  1187  			return i, events, coalescedLogs, err
  1188  		}
  1189  		proctime := time.Since(bstart)
  1190  
  1191  		// Write the block to the chain and get the status.
  1192  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1193  		if err != nil {
  1194  			return i, events, coalescedLogs, err
  1195  		}
  1196  		switch status {
  1197  		case CanonStatTy:
  1198  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1199  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1200  
  1201  			coalescedLogs = append(coalescedLogs, logs...)
  1202  			blockInsertTimer.UpdateSince(bstart)
  1203  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1204  			lastCanon = block
  1205  
  1206  			// Only count canonical blocks for GC processing time
  1207  			bc.gcproc += proctime
  1208  
  1209  		case SideStatTy:
  1210  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1211  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1212  
  1213  			blockInsertTimer.UpdateSince(bstart)
  1214  			events = append(events, ChainSideEvent{block})
  1215  		}
  1216  		stats.processed++
  1217  		stats.usedGas += usedGas
  1218  
  1219  		cache, _ := bc.stateCache.TrieDB().Size()
  1220  		stats.report(chain, i, cache)
  1221  	}
  1222  	// Append a single chain head event if we've progressed the chain
  1223  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1224  		events = append(events, ChainHeadEvent{lastCanon})
  1225  	}
  1226  	return 0, events, coalescedLogs, nil
  1227  }
  1228  
  1229  // insertStats tracks and reports on block insertion.
  1230  type insertStats struct {
  1231  	queued, processed, ignored int
  1232  	usedGas                    uint64
  1233  	lastIndex                  int
  1234  	startTime                  mclock.AbsTime
  1235  }
  1236  
  1237  // statsReportLimit is the time limit during import and export after which we
  1238  // always print out progress. This avoids the user wondering what's going on.
  1239  const statsReportLimit = 8 * time.Second
  1240  
  1241  // report prints statistics if some number of blocks have been processed
  1242  // or more than a few seconds have passed since the last message.
  1243  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1244  	// Fetch the timings for the batch
  1245  	var (
  1246  		now     = mclock.Now()
  1247  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1248  	)
  1249  	// If we're at the last block of the batch or report period reached, log
  1250  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1251  		var (
  1252  			end = chain[index]
  1253  			txs = countTransactions(chain[st.lastIndex : index+1])
  1254  		)
  1255  		context := []interface{}{
  1256  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1257  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1258  			"number", end.Number(), "hash", end.Hash(),
  1259  		}
  1260  		if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
  1261  			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
  1262  		}
  1263  		context = append(context, []interface{}{"cache", cache}...)
  1264  
  1265  		if st.queued > 0 {
  1266  			context = append(context, []interface{}{"queued", st.queued}...)
  1267  		}
  1268  		if st.ignored > 0 {
  1269  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1270  		}
  1271  		log.Info("Imported new chain segment", context...)
  1272  
  1273  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1274  	}
  1275  }
  1276  
  1277  func countTransactions(chain []*types.Block) (c int) {
  1278  	for _, b := range chain {
  1279  		c += len(b.Transactions())
  1280  	}
  1281  	return c
  1282  }
  1283  
  1284  // reorg takes two blocks, an old chain and a new chain, and reconstructs the
  1285  // blocks so that they become part of the new canonical chain. It also accumulates
  1286  // potential missing transactions and posts an event about them.
  1287  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1288  	var (
  1289  		newChain    types.Blocks
  1290  		oldChain    types.Blocks
  1291  		commonBlock *types.Block
  1292  		deletedTxs  types.Transactions
  1293  		deletedLogs []*types.Log
  1294  		// collectLogs collects the logs that were generated during the
  1295  		// processing of the block that corresponds with the given hash.
  1296  		// These logs are later announced as deleted.
  1297  		collectLogs = func(hash common.Hash) {
  1298  			// Coalesce logs and set 'Removed'.
  1299  			number := bc.hc.GetBlockNumber(hash)
  1300  			if number == nil {
  1301  				return
  1302  			}
  1303  			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
  1304  			for _, receipt := range receipts {
  1305  				for _, log := range receipt.Logs {
  1306  					del := *log
  1307  					del.Removed = true
  1308  					deletedLogs = append(deletedLogs, &del)
  1309  				}
  1310  			}
  1311  		}
  1312  	)
  1313  
  1314  	// First walk back whichever chain is higher until both are at the same height
  1315  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1316  		// reduce old chain
  1317  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1318  			oldChain = append(oldChain, oldBlock)
  1319  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1320  
  1321  			collectLogs(oldBlock.Hash())
  1322  		}
  1323  	} else {
  1324  		// reduce new chain and append new chain blocks for inserting later on
  1325  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1326  			newChain = append(newChain, newBlock)
  1327  		}
  1328  	}
  1329  	if oldBlock == nil {
  1330  		return fmt.Errorf("invalid old chain")
  1331  	}
  1332  	if newBlock == nil {
  1333  		return fmt.Errorf("invalid new chain")
  1334  	}
  1335  
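        	// Both chains are at the same height now; walk them back in lockstep,
        	// accumulating the dropped blocks, transactions and logs, until the
        	// common ancestor is found.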
  1336  	for {
  1337  		if oldBlock.Hash() == newBlock.Hash() {
  1338  			commonBlock = oldBlock
  1339  			break
  1340  		}
  1341  
  1342  		oldChain = append(oldChain, oldBlock)
  1343  		newChain = append(newChain, newBlock)
  1344  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1345  		collectLogs(oldBlock.Hash())
  1346  
  1347  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1348  		if oldBlock == nil {
  1349  			return fmt.Errorf("invalid old chain")
  1350  		}
  1351  		if newBlock == nil {
  1352  			return fmt.Errorf("invalid new chain")
  1353  		}
  1354  	}
  1355  	// Ensure the user sees large reorgs
  1356  	if len(oldChain) > 0 && len(newChain) > 0 {
  1357  		logFn := log.Debug
  1358  		if len(oldChain) > 63 {
  1359  			logFn = log.Warn
  1360  		}
  1361  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1362  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1363  	} else {
  1364  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1365  	}
  1366  	// Insert the new chain, taking care of the proper incremental order
  1367  	var addedTxs types.Transactions
  1368  	for i := len(newChain) - 1; i >= 0; i-- {
  1369  		// insert the block in the canonical way, re-writing history
  1370  		bc.insert(newChain[i])
  1371  		// write lookup entries for hash based transaction/receipt searches
  1372  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1373  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1374  	}
  1375  	// calculate the difference between deleted and added transactions
  1376  	diff := types.TxDifference(deletedTxs, addedTxs)
  1377  	// When transactions get deleted from the database, the receipts that were
  1378  	// created in the fork must also be deleted
  1379  	batch := bc.db.NewBatch()
  1380  	for _, tx := range diff {
  1381  		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1382  	}
  1383  	batch.Write()
  1384  
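        	// Deliver the removed-log and side-chain events in separate goroutines so
        	// that slow event consumers do not stall the reorg itself.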
  1385  	if len(deletedLogs) > 0 {
  1386  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1387  	}
  1388  	if len(oldChain) > 0 {
  1389  		go func() {
  1390  			for _, block := range oldChain {
  1391  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1392  			}
  1393  		}()
  1394  	}
  1395  
  1396  	return nil
  1397  }
  1398  
  1399  // PostChainEvents iterates over the events generated by a chain insertion and
  1400  // posts them into the event feed.
  1401  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1402  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1403  	// post event logs for further processing
  1404  	if logs != nil {
  1405  		bc.logsFeed.Send(logs)
  1406  	}
  1407  	for _, event := range events {
  1408  		switch ev := event.(type) {
  1409  		case ChainEvent:
  1410  			bc.chainFeed.Send(ev)
  1411  
  1412  		case ChainHeadEvent:
  1413  			bc.chainHeadFeed.Send(ev)
  1414  
  1415  		case ChainSideEvent:
  1416  			bc.chainSideFeed.Send(ev)
  1417  		}
  1418  	}
  1419  }
  1420  
  1421  func (bc *BlockChain) update() {
  1422  	futureTimer := time.NewTicker(5 * time.Second)
  1423  	defer futureTimer.Stop()
  1424  	for {
  1425  		select {
  1426  		case <-futureTimer.C:
  1427  			bc.procFutureBlocks()
  1428  		case <-bc.quit:
  1429  			return
  1430  		}
  1431  	}
  1432  }
  1433  
  1434  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1435  func (bc *BlockChain) BadBlocks() []*types.Block {
  1436  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1437  	for _, hash := range bc.badBlocks.Keys() {
  1438  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1439  			block := blk.(*types.Block)
  1440  			blocks = append(blocks, block)
  1441  		}
  1442  	}
  1443  	return blocks
  1444  }
  1445  
  1446  // addBadBlock adds a bad block to the bad-block LRU cache
  1447  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1448  	bc.badBlocks.Add(block.Hash(), block)
  1449  }
  1450  
  1451  // reportBlock logs a bad block error.
  1452  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1453  	bc.addBadBlock(block)
  1454  
  1455  	var receiptString string
  1456  	for _, receipt := range receipts {
  1457  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1458  	}
  1459  	log.Error(fmt.Sprintf(`
  1460  ########## BAD BLOCK #########
  1461  Chain config: %v
  1462  
  1463  Number: %v
  1464  Hash: 0x%x
  1465  %v
  1466  
  1467  Error: %v
  1468  ##############################
  1469  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1470  }
  1471  
  1472  // InsertHeaderChain attempts to insert the given header chain into the local
  1473  // chain, possibly creating a reorg. If an error is returned, it will return the
  1474  // index number of the failing header as well as an error describing what went wrong.
  1475  //
  1476  // The checkFreq parameter can be used to fine tune whether nonce verification
  1477  // should be done or not. The reason behind the optional check is that some
  1478  // of the header retrieval mechanisms already need to verify nonces, and that
  1479  // nonces can be verified sparsely, without needing to check each one.
  1480  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1481  	start := time.Now()
  1482  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1483  		return i, err
  1484  	}
  1485  
  1486  	// Make sure only one thread manipulates the chain at once
  1487  	bc.chainmu.Lock()
  1488  	defer bc.chainmu.Unlock()
  1489  
  1490  	bc.wg.Add(1)
  1491  	defer bc.wg.Done()
  1492  
  1493  	whFunc := func(header *types.Header) error {
  1494  		bc.mu.Lock()
  1495  		defer bc.mu.Unlock()
  1496  
  1497  		_, err := bc.hc.WriteHeader(header)
  1498  		return err
  1499  	}
  1500  
  1501  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1502  }
  1503  
  1504  // writeHeader writes a header into the local chain, given that its parent is
  1505  // already known. If the total difficulty of the newly inserted header becomes
  1506  // greater than the current known TD, the canonical chain is re-routed.
  1507  //
  1508  // Note: This method is not concurrent-safe with inserting blocks simultaneously
  1509  // into the chain, as side effects caused by reorganisations cannot be emulated
  1510  // without the real blocks. Hence, writing headers directly should only be done
  1511  // in two scenarios: pure-header mode of operation (light clients), or properly
  1512  // separated header/block phases (non-archive clients).
  1513  func (bc *BlockChain) writeHeader(header *types.Header) error {
  1514  	bc.wg.Add(1)
  1515  	defer bc.wg.Done()
  1516  
  1517  	bc.mu.Lock()
  1518  	defer bc.mu.Unlock()
  1519  
  1520  	_, err := bc.hc.WriteHeader(header)
  1521  	return err
  1522  }
  1523  
  1524  // CurrentHeader retrieves the current head header of the canonical chain. The
  1525  // header is retrieved from the HeaderChain's internal cache.
  1526  func (bc *BlockChain) CurrentHeader() *types.Header {
  1527  	return bc.hc.CurrentHeader()
  1528  }
  1529  
  1530  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1531  // database by hash and number, caching it if found.
  1532  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1533  	return bc.hc.GetTd(hash, number)
  1534  }
  1535  
  1536  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1537  // database by hash, caching it if found.
  1538  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1539  	return bc.hc.GetTdByHash(hash)
  1540  }
  1541  
  1542  // GetHeader retrieves a block header from the database by hash and number,
  1543  // caching it if found.
  1544  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1545  	return bc.hc.GetHeader(hash, number)
  1546  }
  1547  
  1548  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1549  // found.
  1550  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1551  	return bc.hc.GetHeaderByHash(hash)
  1552  }
  1553  
  1554  // HasHeader checks if a block header is present in the database or not, caching
  1555  // it if present.
  1556  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1557  	return bc.hc.HasHeader(hash, number)
  1558  }
  1559  
  1560  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1561  // hash, fetching towards the genesis block.
  1562  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1563  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1564  }
  1565  
  1566  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  1567  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  1568  // number of blocks to be individually checked before we reach the canonical chain.
  1569  //
  1570  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
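        // For example (with purely illustrative values), GetAncestor(hash, 1000, 16, max)
        // returns the hash and number (984) of the block sixteen generations back from
        // block 1000, provided it can be reached within *max individually checked blocks.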
  1571  func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  1572  	bc.chainmu.Lock()
  1573  	defer bc.chainmu.Unlock()
  1574  
  1575  	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  1576  }
  1577  
  1578  // GetHeaderByNumber retrieves a block header from the database by number,
  1579  // caching it (associated with its hash) if found.
  1580  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1581  	return bc.hc.GetHeaderByNumber(number)
  1582  }
  1583  
  1584  // Config retrieves the blockchain's chain configuration.
  1585  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1586  
  1587  // Engine retrieves the blockchain's consensus engine.
  1588  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1589  
  1590  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1591  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1592  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1593  }
  1594  
  1595  // SubscribeChainEvent registers a subscription of ChainEvent.
  1596  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1597  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1598  }
  1599  
  1600  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1601  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1602  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1603  }
  1604  
  1605  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1606  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1607  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1608  }
  1609  
  1610  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1611  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1612  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1613  }
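
        // The subscription helpers above are typically consumed from outside this
        // package. A minimal, hypothetical sketch (assuming an already initialized
        // *BlockChain named bc and an arbitrary channel buffer size):
        //
        //	heads := make(chan ChainHeadEvent, 16)
        //	sub := bc.SubscribeChainHeadEvent(heads)
        //	defer sub.Unsubscribe()
        //	for {
        //		select {
        //		case ev := <-heads:
        //			log.Info("New canonical head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
        //		case err := <-sub.Err():
        //			log.Warn("Head subscription terminated", "err", err)
        //			return
        //		}
        //	}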