github.com/daragao/go-ethereum@v1.8.14-0.20180809141559-45eaef243198/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/mclock"
    32  	"github.com/ethereum/go-ethereum/consensus"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/core/vm"
    37  	"github.com/ethereum/go-ethereum/crypto"
    38  	"github.com/ethereum/go-ethereum/ethdb"
    39  	"github.com/ethereum/go-ethereum/event"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/metrics"
    42  	"github.com/ethereum/go-ethereum/params"
    43  	"github.com/ethereum/go-ethereum/rlp"
    44  	"github.com/ethereum/go-ethereum/trie"
    45  	"github.com/hashicorp/golang-lru"
    46  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    47  )
    48  
    49  var (
    50  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    51  
    52  	ErrNoGenesis = errors.New("Genesis not found in chain")
    53  )
    54  
    55  const (
    56  	bodyCacheLimit      = 256
    57  	blockCacheLimit     = 256
    58  	maxFutureBlocks     = 256
    59  	maxTimeFutureBlocks = 30
    60  	badBlockLimit       = 10
    61  	triesInMemory       = 128
    62  
    63  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    64  	BlockChainVersion = 3
    65  )
    66  
    67  // CacheConfig contains the configuration values for the trie caching/pruning
    68  // that's resident in a blockchain.
    69  type CacheConfig struct {
    70  	Disabled      bool          // Whether to disable trie write caching (archive node)
    71  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    72  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    73  }
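
        // A minimal sketch of the two typical configurations implied by the fields above:
        // an archive node that never prunes (Disabled set), and a pruned full node with
        // explicit limits. The concrete values are illustrative only; per the field
        // comments, TrieNodeLimit is interpreted in megabytes.
        //
        //	archive := &CacheConfig{Disabled: true}
        //
        //	pruned := &CacheConfig{
        //		TrieNodeLimit: 256,             // flush the in-memory trie once it grows past ~256 MB
        //		TrieTimeLimit: 5 * time.Minute, // or once this much block processing time has accumulated
        //	}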
    74  
    75  // BlockChain represents the canonical chain given a database with a genesis
    76  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
    77  //
    78  // Importing blocks into the block chain happens according to the set of rules
    79  // defined by the two-stage Validator. Blocks are processed using the Processor,
    80  // which executes the included transactions. Validation of the state is done in
    81  // the second stage of the Validator. Any failure in either stage aborts the
    82  // import.
    83  //
    84  // The BlockChain also helps in returning blocks from **any** chain included
    85  // in the database as well as blocks that represent the canonical chain. It is
    86  // important to note that GetBlock can return any block, which does not need to
    87  // be part of the canonical chain, whereas GetBlockByNumber always refers to the
    88  // canonical chain.
    89  type BlockChain struct {
    90  	chainConfig *params.ChainConfig // Chain & network configuration
    91  	cacheConfig *CacheConfig        // Cache configuration for pruning
    92  
    93  	db     ethdb.Database // Low level persistent database to store final content in
    94  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
    95  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
    96  
    97  	hc            *HeaderChain
    98  	rmLogsFeed    event.Feed
    99  	chainFeed     event.Feed
   100  	chainSideFeed event.Feed
   101  	chainHeadFeed event.Feed
   102  	logsFeed      event.Feed
   103  	scope         event.SubscriptionScope
   104  	genesisBlock  *types.Block
   105  
   106  	mu      sync.RWMutex // global mutex for locking chain operations
   107  	chainmu sync.RWMutex // blockchain insertion lock
   108  	procmu  sync.RWMutex // block processor lock
   109  
   110  	checkpoint       int          // checkpoint counts towards the new checkpoint
   111  	currentBlock     atomic.Value // Current head of the block chain
   112  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   113  
   114  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   115  	bodyCache    *lru.Cache     // Cache for the most recent block bodies
   116  	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   117  	blockCache   *lru.Cache     // Cache for the most recent entire blocks
   118  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   119  
   120  	quit    chan struct{} // blockchain quit channel
   121  	running int32         // running must be accessed atomically
   122  	// procInterrupt must be accessed atomically
   123  	procInterrupt int32          // interrupt signaler for block processing
   124  	wg            sync.WaitGroup // chain processing wait group for shutting down
   125  
   126  	engine    consensus.Engine
   127  	processor Processor // block processor interface
   128  	validator Validator // block and state validator interface
   129  	vmConfig  vm.Config
   130  
   131  	badBlocks *lru.Cache // Bad block cache
   132  }
   133  
   134  // NewBlockChain returns a fully initialised block chain using information
   135  // available in the database. It initialises the default Ethereum Validator and
   136  // Processor.
   137  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
   138  	if cacheConfig == nil {
   139  		cacheConfig = &CacheConfig{
   140  			TrieNodeLimit: 256 * 1024 * 1024,
   141  			TrieTimeLimit: 5 * time.Minute,
   142  		}
   143  	}
   144  	bodyCache, _ := lru.New(bodyCacheLimit)
   145  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   146  	blockCache, _ := lru.New(blockCacheLimit)
   147  	futureBlocks, _ := lru.New(maxFutureBlocks)
   148  	badBlocks, _ := lru.New(badBlockLimit)
   149  
   150  	bc := &BlockChain{
   151  		chainConfig:  chainConfig,
   152  		cacheConfig:  cacheConfig,
   153  		db:           db,
   154  		triegc:       prque.New(),
   155  		stateCache:   state.NewDatabase(db),
   156  		quit:         make(chan struct{}),
   157  		bodyCache:    bodyCache,
   158  		bodyRLPCache: bodyRLPCache,
   159  		blockCache:   blockCache,
   160  		futureBlocks: futureBlocks,
   161  		engine:       engine,
   162  		vmConfig:     vmConfig,
   163  		badBlocks:    badBlocks,
   164  	}
   165  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   166  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   167  
   168  	var err error
   169  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   170  	if err != nil {
   171  		return nil, err
   172  	}
   173  	bc.genesisBlock = bc.GetBlockByNumber(0)
   174  	if bc.genesisBlock == nil {
   175  		return nil, ErrNoGenesis
   176  	}
   177  	if err := bc.loadLastState(); err != nil {
   178  		return nil, err
   179  	}
   180  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   181  	for hash := range BadHashes {
   182  		if header := bc.GetHeaderByHash(hash); header != nil {
   183  			// get the canonical block corresponding to the offending header's number
   184  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   185  			// make sure the headerByNumber (if present) is in our current canonical chain
   186  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   187  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   188  				bc.SetHead(header.Number.Uint64() - 1)
   189  				log.Error("Chain rewind was successful, resuming normal operation")
   190  			}
   191  		}
   192  	}
   193  	// Take ownership of this particular state
   194  	go bc.update()
   195  	return bc, nil
   196  }
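
        // A hedged usage sketch of wiring a BlockChain over a fresh database: the genesis
        // block has to be committed first, or NewBlockChain fails with ErrNoGenesis. The
        // ethdb.NewMemDatabase, Genesis.MustCommit and ethash.NewFaker helpers referenced
        // below are assumptions about the surrounding go-ethereum packages at this version.
        //
        //	db := ethdb.NewMemDatabase()
        //	genesis := new(Genesis).MustCommit(db) // write a genesis block before opening the chain
        //	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, ethash.NewFaker(), vm.Config{})
        //	if err != nil {
        //		// ErrNoGenesis, corrupted head pointers, ...
        //	}
        //	defer chain.Stop()
        //	_ = genesis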
   197  
   198  func (bc *BlockChain) getProcInterrupt() bool {
   199  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   200  }
   201  
   202  // loadLastState loads the last known chain state from the database. This method
   203  // assumes that the chain manager mutex is held.
   204  func (bc *BlockChain) loadLastState() error {
   205  	// Restore the last known head block
   206  	head := rawdb.ReadHeadBlockHash(bc.db)
   207  	if head == (common.Hash{}) {
   208  		// Corrupt or empty database, init from scratch
   209  		log.Warn("Empty database, resetting chain")
   210  		return bc.Reset()
   211  	}
   212  	// Make sure the entire head block is available
   213  	currentBlock := bc.GetBlockByHash(head)
   214  	if currentBlock == nil {
   215  		// Corrupt or empty database, init from scratch
   216  		log.Warn("Head block missing, resetting chain", "hash", head)
   217  		return bc.Reset()
   218  	}
   219  	// Make sure the state associated with the block is available
   220  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   221  		// Dangling block without a state associated, init from scratch
   222  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   223  		if err := bc.repair(&currentBlock); err != nil {
   224  			return err
   225  		}
   226  	}
   227  	// Everything seems to be fine, set as the head block
   228  	bc.currentBlock.Store(currentBlock)
   229  
   230  	// Restore the last known head header
   231  	currentHeader := currentBlock.Header()
   232  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   233  		if header := bc.GetHeaderByHash(head); header != nil {
   234  			currentHeader = header
   235  		}
   236  	}
   237  	bc.hc.SetCurrentHeader(currentHeader)
   238  
   239  	// Restore the last known head fast block
   240  	bc.currentFastBlock.Store(currentBlock)
   241  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   242  		if block := bc.GetBlockByHash(head); block != nil {
   243  			bc.currentFastBlock.Store(block)
   244  		}
   245  	}
   246  
   247  	// Issue a status log for the user
   248  	currentFastBlock := bc.CurrentFastBlock()
   249  
   250  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   251  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   252  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   253  
   254  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   255  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   256  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   257  
   258  	return nil
   259  }
   260  
   261  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   262  // above the new head will be deleted and the new one set. In the case of blocks
   263  // though, the head may be further rewound if block bodies are missing (non-archive
   264  // nodes after a fast sync).
   265  func (bc *BlockChain) SetHead(head uint64) error {
   266  	log.Warn("Rewinding blockchain", "target", head)
   267  
   268  	bc.mu.Lock()
   269  	defer bc.mu.Unlock()
   270  
   271  	// Rewind the header chain, deleting all block bodies until then
   272  	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
   273  		rawdb.DeleteBody(db, hash, num)
   274  	}
   275  	bc.hc.SetHead(head, delFn)
   276  	currentHeader := bc.hc.CurrentHeader()
   277  
   278  	// Clear out any stale content from the caches
   279  	bc.bodyCache.Purge()
   280  	bc.bodyRLPCache.Purge()
   281  	bc.blockCache.Purge()
   282  	bc.futureBlocks.Purge()
   283  
   284  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   285  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   286  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   287  	}
   288  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   289  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   290  			// Rewound state missing, rolled back to before pivot, reset to genesis
   291  			bc.currentBlock.Store(bc.genesisBlock)
   292  		}
   293  	}
   294  	// Rewind the fast block in a simplistic way to the target head
   295  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   296  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   297  	}
   298  	// If either block reached nil, reset to the genesis state
   299  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   300  		bc.currentBlock.Store(bc.genesisBlock)
   301  	}
   302  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   303  		bc.currentFastBlock.Store(bc.genesisBlock)
   304  	}
   305  	currentBlock := bc.CurrentBlock()
   306  	currentFastBlock := bc.CurrentFastBlock()
   307  
   308  	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
   309  	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())
   310  
   311  	return bc.loadLastState()
   312  }
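
        // A short sketch of a manual rewind, e.g. dropping the most recent 64 blocks; the
        // depth is illustrative only, and the head may settle even lower if bodies or
        // state for the target block are missing.
        //
        //	if head := chain.CurrentBlock().NumberU64(); head > 64 {
        //		if err := chain.SetHead(head - 64); err != nil {
        //			// rewind failed while reloading the last chain state
        //		}
        //	}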
   313  
   314  // FastSyncCommitHead sets the current head block to the one defined by the hash,
   315  // regardless of what the chain contents were prior.
   316  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   317  	// Make sure that both the block and its state trie exist
   318  	block := bc.GetBlockByHash(hash)
   319  	if block == nil {
   320  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   321  	}
   322  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   323  		return err
   324  	}
   325  	// If all checks out, manually set the head block
   326  	bc.mu.Lock()
   327  	bc.currentBlock.Store(block)
   328  	bc.mu.Unlock()
   329  
   330  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   331  	return nil
   332  }
   333  
   334  // GasLimit returns the gas limit of the current HEAD block.
   335  func (bc *BlockChain) GasLimit() uint64 {
   336  	return bc.CurrentBlock().GasLimit()
   337  }
   338  
   339  // CurrentBlock retrieves the current head block of the canonical chain. The
   340  // block is retrieved from the blockchain's internal cache.
   341  func (bc *BlockChain) CurrentBlock() *types.Block {
   342  	return bc.currentBlock.Load().(*types.Block)
   343  }
   344  
   345  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   346  // chain. The block is retrieved from the blockchain's internal cache.
   347  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   348  	return bc.currentFastBlock.Load().(*types.Block)
   349  }
   350  
   351  // SetProcessor sets the processor required for making state modifications.
   352  func (bc *BlockChain) SetProcessor(processor Processor) {
   353  	bc.procmu.Lock()
   354  	defer bc.procmu.Unlock()
   355  	bc.processor = processor
   356  }
   357  
   358  // SetValidator sets the validator which is used to validate incoming blocks.
   359  func (bc *BlockChain) SetValidator(validator Validator) {
   360  	bc.procmu.Lock()
   361  	defer bc.procmu.Unlock()
   362  	bc.validator = validator
   363  }
   364  
   365  // Validator returns the current validator.
   366  func (bc *BlockChain) Validator() Validator {
   367  	bc.procmu.RLock()
   368  	defer bc.procmu.RUnlock()
   369  	return bc.validator
   370  }
   371  
   372  // Processor returns the current processor.
   373  func (bc *BlockChain) Processor() Processor {
   374  	bc.procmu.RLock()
   375  	defer bc.procmu.RUnlock()
   376  	return bc.processor
   377  }
   378  
   379  // State returns a new mutable state based on the current HEAD block.
   380  func (bc *BlockChain) State() (*state.StateDB, error) {
   381  	return bc.StateAt(bc.CurrentBlock().Root())
   382  }
   383  
   384  // StateAt returns a new mutable state based on a particular point in time.
   385  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   386  	return state.New(root, bc.stateCache)
   387  }
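
        // An illustrative sketch of inspecting historical state through StateAt; the block
        // number and address are placeholders, and on a pruned node the root of an old
        // block may no longer be available.
        //
        //	if block := chain.GetBlockByNumber(1000000); block != nil {
        //		if statedb, err := chain.StateAt(block.Root()); err == nil {
        //			balance := statedb.GetBalance(common.HexToAddress("0x0000000000000000000000000000000000000000"))
        //			_ = balance
        //		}
        //	}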
   388  
   389  // Reset purges the entire blockchain, restoring it to its genesis state.
   390  func (bc *BlockChain) Reset() error {
   391  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   392  }
   393  
   394  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   395  // specified genesis state.
   396  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   397  	// Dump the entire block chain and purge the caches
   398  	if err := bc.SetHead(0); err != nil {
   399  		return err
   400  	}
   401  	bc.mu.Lock()
   402  	defer bc.mu.Unlock()
   403  
   404  	// Prepare the genesis block and reinitialise the chain
   405  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   406  		log.Crit("Failed to write genesis block TD", "err", err)
   407  	}
   408  	rawdb.WriteBlock(bc.db, genesis)
   409  
   410  	bc.genesisBlock = genesis
   411  	bc.insert(bc.genesisBlock)
   412  	bc.currentBlock.Store(bc.genesisBlock)
   413  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   414  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   415  	bc.currentFastBlock.Store(bc.genesisBlock)
   416  
   417  	return nil
   418  }
   419  
   420  // repair tries to repair the current blockchain by rolling back the current block
   421  // until one with associated state is found. This is needed to fix incomplete db
   422  // writes caused either by crashes/power outages, or simply non-committed tries.
   423  //
   424  // This method only rolls back the current block. The current header and current
   425  // fast block are left intact.
   426  func (bc *BlockChain) repair(head **types.Block) error {
   427  	for {
   428  		// Abort if we've rewound to a head block that does have associated state
   429  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   430  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   431  			return nil
   432  		}
   433  		// Otherwise rewind one block and recheck state availability there
   434  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   435  	}
   436  }
   437  
   438  // Export writes the active chain to the given writer.
   439  func (bc *BlockChain) Export(w io.Writer) error {
   440  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   441  }
   442  
   443  // ExportN writes a subset of the active chain to the given writer.
   444  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   445  	bc.mu.RLock()
   446  	defer bc.mu.RUnlock()
   447  
   448  	if first > last {
   449  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   450  	}
   451  	log.Info("Exporting batch of blocks", "count", last-first+1)
   452  
   453  	start, reported := time.Now(), time.Now()
   454  	for nr := first; nr <= last; nr++ {
   455  		block := bc.GetBlockByNumber(nr)
   456  		if block == nil {
   457  			return fmt.Errorf("export failed on #%d: not found", nr)
   458  		}
   459  		if err := block.EncodeRLP(w); err != nil {
   460  			return err
   461  		}
   462  		if time.Since(reported) >= statsReportLimit {
   463  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   464  			reported = time.Now()
   465  		}
   466  	}
   467  
   468  	return nil
   469  }
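
        // A hedged sketch of dumping blocks to an RLP encoded file; the os import is
        // assumed to exist at the call site.
        //
        //	f, err := os.Create("chain.rlp")
        //	if err == nil {
        //		defer f.Close()
        //		err = chain.Export(f) // or chain.ExportN(f, 0, 1000) for a sub-range
        //	}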
   470  
   471  // insert injects a new head block into the current block chain. This method
   472  // assumes that the block is indeed a true head. It will also reset the head
   473  // header and the head fast sync block to this very same block if they are older
   474  // or if they are on a different side chain.
   475  //
   476  // Note, this function assumes that the `mu` mutex is held!
   477  func (bc *BlockChain) insert(block *types.Block) {
   478  	// If the block is on a side chain or an unknown one, force other heads onto it too
   479  	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   480  
   481  	// Add the block to the canonical chain number scheme and mark as the head
   482  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
   483  	rawdb.WriteHeadBlockHash(bc.db, block.Hash())
   484  
   485  	bc.currentBlock.Store(block)
   486  
   487  	// If the block is better than our head or is on a different chain, force update heads
   488  	if updateHeads {
   489  		bc.hc.SetCurrentHeader(block.Header())
   490  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
   491  
   492  		bc.currentFastBlock.Store(block)
   493  	}
   494  }
   495  
   496  // Genesis retrieves the chain's genesis block.
   497  func (bc *BlockChain) Genesis() *types.Block {
   498  	return bc.genesisBlock
   499  }
   500  
   501  // GetBody retrieves a block body (transactions and uncles) from the database by
   502  // hash, caching it if found.
   503  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   504  	// Short circuit if the body's already in the cache, retrieve otherwise
   505  	if cached, ok := bc.bodyCache.Get(hash); ok {
   506  		body := cached.(*types.Body)
   507  		return body
   508  	}
   509  	number := bc.hc.GetBlockNumber(hash)
   510  	if number == nil {
   511  		return nil
   512  	}
   513  	body := rawdb.ReadBody(bc.db, hash, *number)
   514  	if body == nil {
   515  		return nil
   516  	}
   517  	// Cache the found body for next time and return
   518  	bc.bodyCache.Add(hash, body)
   519  	return body
   520  }
   521  
   522  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   523  // caching it if found.
   524  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   525  	// Short circuit if the body's already in the cache, retrieve otherwise
   526  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   527  		return cached.(rlp.RawValue)
   528  	}
   529  	number := bc.hc.GetBlockNumber(hash)
   530  	if number == nil {
   531  		return nil
   532  	}
   533  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   534  	if len(body) == 0 {
   535  		return nil
   536  	}
   537  	// Cache the found body for next time and return
   538  	bc.bodyRLPCache.Add(hash, body)
   539  	return body
   540  }
   541  
   542  // HasBlock checks if a block is fully present in the database or not.
   543  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   544  	if bc.blockCache.Contains(hash) {
   545  		return true
   546  	}
   547  	return rawdb.HasBody(bc.db, hash, number)
   548  }
   549  
   550  // HasState checks if state trie is fully present in the database or not.
   551  func (bc *BlockChain) HasState(hash common.Hash) bool {
   552  	_, err := bc.stateCache.OpenTrie(hash)
   553  	return err == nil
   554  }
   555  
   556  // HasBlockAndState checks if a block and its associated state trie are fully
   557  // present in the database or not, caching the block if present.
   558  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   559  	// Check first that the block itself is known
   560  	block := bc.GetBlock(hash, number)
   561  	if block == nil {
   562  		return false
   563  	}
   564  	return bc.HasState(block.Root())
   565  }
   566  
   567  // GetBlock retrieves a block from the database by hash and number,
   568  // caching it if found.
   569  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   570  	// Short circuit if the block's already in the cache, retrieve otherwise
   571  	if block, ok := bc.blockCache.Get(hash); ok {
   572  		return block.(*types.Block)
   573  	}
   574  	block := rawdb.ReadBlock(bc.db, hash, number)
   575  	if block == nil {
   576  		return nil
   577  	}
   578  	// Cache the found block for next time and return
   579  	bc.blockCache.Add(block.Hash(), block)
   580  	return block
   581  }
   582  
   583  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   584  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   585  	number := bc.hc.GetBlockNumber(hash)
   586  	if number == nil {
   587  		return nil
   588  	}
   589  	return bc.GetBlock(hash, *number)
   590  }
   591  
   592  // GetBlockByNumber retrieves a block from the database by number, caching it
   593  // (associated with its hash) if found.
   594  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   595  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   596  	if hash == (common.Hash{}) {
   597  		return nil
   598  	}
   599  	return bc.GetBlock(hash, number)
   600  }
   601  
   602  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   603  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   604  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   605  	if number == nil {
   606  		return nil
   607  	}
   608  	return rawdb.ReadReceipts(bc.db, hash, *number)
   609  }
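
        // A small sketch of locating a single transaction's receipt within a block;
        // blockHash and txHash are placeholders supplied by the caller.
        //
        //	for _, receipt := range chain.GetReceiptsByHash(blockHash) {
        //		if receipt.TxHash == txHash {
        //			// receipt.Status, receipt.GasUsed, receipt.Logs, ...
        //		}
        //	}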
   610  
   611  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   612  // [deprecated by eth/62]
   613  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   614  	number := bc.hc.GetBlockNumber(hash)
   615  	if number == nil {
   616  		return nil
   617  	}
   618  	for i := 0; i < n; i++ {
   619  		block := bc.GetBlock(hash, *number)
   620  		if block == nil {
   621  			break
   622  		}
   623  		blocks = append(blocks, block)
   624  		hash = block.ParentHash()
   625  		*number--
   626  	}
   627  	return
   628  }
   629  
   630  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   631  // a specific distance is reached.
   632  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   633  	uncles := []*types.Header{}
   634  	for i := 0; block != nil && i < length; i++ {
   635  		uncles = append(uncles, block.Uncles()...)
   636  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   637  	}
   638  	return uncles
   639  }
   640  
   641  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   642  // either from ephemeral in-memory cache, or from persistent storage.
   643  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   644  	return bc.stateCache.TrieDB().Node(hash)
   645  }
   646  
   647  // Stop stops the blockchain service. If any imports are currently in progress
   648  // it will abort them using the procInterrupt.
   649  func (bc *BlockChain) Stop() {
   650  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   651  		return
   652  	}
   653  	// Unsubscribe all subscriptions registered from blockchain
   654  	bc.scope.Close()
   655  	close(bc.quit)
   656  	atomic.StoreInt32(&bc.procInterrupt, 1)
   657  
   658  	bc.wg.Wait()
   659  
   660  	// Ensure the state of a recent block is also stored to disk before exiting.
   661  	// We're writing three different states to catch different restart scenarios:
   662  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   663  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   664  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   665  	if !bc.cacheConfig.Disabled {
   666  		triedb := bc.stateCache.TrieDB()
   667  
   668  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   669  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   670  				recent := bc.GetBlockByNumber(number - offset)
   671  
   672  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   673  				if err := triedb.Commit(recent.Root(), true); err != nil {
   674  					log.Error("Failed to commit recent state trie", "err", err)
   675  				}
   676  			}
   677  		}
   678  		for !bc.triegc.Empty() {
   679  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
   680  		}
   681  		if size, _ := triedb.Size(); size != 0 {
   682  			log.Error("Dangling trie nodes after full cleanup")
   683  		}
   684  	}
   685  	log.Info("Blockchain manager stopped")
   686  }
   687  
   688  func (bc *BlockChain) procFutureBlocks() {
   689  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   690  	for _, hash := range bc.futureBlocks.Keys() {
   691  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   692  			blocks = append(blocks, block.(*types.Block))
   693  		}
   694  	}
   695  	if len(blocks) > 0 {
   696  		types.BlockBy(types.Number).Sort(blocks)
   697  
   698  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   699  		for i := range blocks {
   700  			bc.InsertChain(blocks[i : i+1])
   701  		}
   702  	}
   703  }
   704  
   705  // WriteStatus is the status of a block write to the chain.
   706  type WriteStatus byte
   707  
   708  const (
   709  	NonStatTy WriteStatus = iota
   710  	CanonStatTy
   711  	SideStatTy
   712  )
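
        // A minimal sketch of how a caller might react to the status returned by
        // WriteBlockWithState.
        //
        //	switch status {
        //	case CanonStatTy:
        //		// the block extended (or reorganised onto) the canonical chain
        //	case SideStatTy:
        //		// the block was stored on a side fork with lower total difficulty
        //	case NonStatTy:
        //		// the block was not written; an error accompanies this status
        //	}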
   713  
   714  // Rollback is designed to remove a chain of links from the database that aren't
   715  // certain enough to be valid.
   716  func (bc *BlockChain) Rollback(chain []common.Hash) {
   717  	bc.mu.Lock()
   718  	defer bc.mu.Unlock()
   719  
   720  	for i := len(chain) - 1; i >= 0; i-- {
   721  		hash := chain[i]
   722  
   723  		currentHeader := bc.hc.CurrentHeader()
   724  		if currentHeader.Hash() == hash {
   725  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   726  		}
   727  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   728  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   729  			bc.currentFastBlock.Store(newFastBlock)
   730  			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   731  		}
   732  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   733  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   734  			bc.currentBlock.Store(newBlock)
   735  			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
   736  		}
   737  	}
   738  }
   739  
   740  // SetReceiptsData computes all the non-consensus fields of the receipts
   741  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
   742  	signer := types.MakeSigner(config, block.Number())
   743  
   744  	transactions, logIndex := block.Transactions(), uint(0)
   745  	if len(transactions) != len(receipts) {
   746  		return errors.New("transaction and receipt count mismatch")
   747  	}
   748  
   749  	for j := 0; j < len(receipts); j++ {
   750  		// The transaction hash can be retrieved from the transaction itself
   751  		receipts[j].TxHash = transactions[j].Hash()
   752  
   753  		// The contract address can be derived from the transaction itself
   754  		if transactions[j].To() == nil {
   755  			// Deriving the signer is expensive, only do if it's actually needed
   756  			from, _ := types.Sender(signer, transactions[j])
   757  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   758  		}
   759  		// The used gas can be calculated based on previous receipts
   760  		if j == 0 {
   761  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   762  		} else {
   763  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   764  		}
   765  		// The derived log fields can simply be set from the block and transaction
   766  		for k := 0; k < len(receipts[j].Logs); k++ {
   767  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   768  			receipts[j].Logs[k].BlockHash = block.Hash()
   769  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   770  			receipts[j].Logs[k].TxIndex = uint(j)
   771  			receipts[j].Logs[k].Index = logIndex
   772  			logIndex++
   773  		}
   774  	}
   775  	return nil
   776  }
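
        // A worked example of the gas derivation above: for three receipts with cumulative
        // gas used of 21000, 74000 and 95000, the per-transaction GasUsed fields become
        // 21000, 74000-21000 = 53000 and 95000-74000 = 21000.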
   777  
   778  // InsertReceiptChain attempts to complete an already existing header chain with
   779  // transaction and receipt data.
   780  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   781  	bc.wg.Add(1)
   782  	defer bc.wg.Done()
   783  
   784  	// Do a sanity check that the provided chain is actually ordered and linked
   785  	for i := 1; i < len(blockChain); i++ {
   786  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   787  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   788  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   789  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   790  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   791  		}
   792  	}
   793  
   794  	var (
   795  		stats = struct{ processed, ignored int32 }{}
   796  		start = time.Now()
   797  		bytes = 0
   798  		batch = bc.db.NewBatch()
   799  	)
   800  	for i, block := range blockChain {
   801  		receipts := receiptChain[i]
   802  		// Short circuit insertion if shutting down or processing failed
   803  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   804  			return 0, nil
   805  		}
   806  		// Short circuit if the owner header is unknown
   807  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   808  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   809  		}
   810  		// Skip if the entire data is already known
   811  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   812  			stats.ignored++
   813  			continue
   814  		}
   815  		// Compute all the non-consensus fields of the receipts
   816  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   817  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   818  		}
   819  		// Write all the data out into the database
   820  		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
   821  		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   822  		rawdb.WriteTxLookupEntries(batch, block)
   823  
   824  		stats.processed++
   825  
   826  		if batch.ValueSize() >= ethdb.IdealBatchSize {
   827  			if err := batch.Write(); err != nil {
   828  				return 0, err
   829  			}
   830  			bytes += batch.ValueSize()
   831  			batch.Reset()
   832  		}
   833  	}
   834  	if batch.ValueSize() > 0 {
   835  		bytes += batch.ValueSize()
   836  		if err := batch.Write(); err != nil {
   837  			return 0, err
   838  		}
   839  	}
   840  
   841  	// Update the head fast sync block if better
   842  	bc.mu.Lock()
   843  	head := blockChain[len(blockChain)-1]
   844  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   845  		currentFastBlock := bc.CurrentFastBlock()
   846  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   847  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   848  			bc.currentFastBlock.Store(head)
   849  		}
   850  	}
   851  	bc.mu.Unlock()
   852  
   853  	log.Info("Imported new block receipts",
   854  		"count", stats.processed,
   855  		"elapsed", common.PrettyDuration(time.Since(start)),
   856  		"number", head.Number(),
   857  		"hash", head.Hash(),
   858  		"size", common.StorageSize(bytes),
   859  		"ignored", stats.ignored)
   860  	return 0, nil
   861  }
   862  
   863  var lastWrite uint64
   864  
   865  // WriteBlockWithoutState writes only the block and its metadata to the database,
   866  // but does not write any state. This is used to construct competing side forks
   867  // up to the point where they exceed the canonical total difficulty.
   868  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   869  	bc.wg.Add(1)
   870  	defer bc.wg.Done()
   871  
   872  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   873  		return err
   874  	}
   875  	rawdb.WriteBlock(bc.db, block)
   876  
   877  	return nil
   878  }
   879  
   880  // WriteBlockWithState writes the block and all associated state to the database.
   881  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   882  	bc.wg.Add(1)
   883  	defer bc.wg.Done()
   884  
   885  	// Calculate the total difficulty of the block
   886  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   887  	if ptd == nil {
   888  		return NonStatTy, consensus.ErrUnknownAncestor
   889  	}
   890  	// Make sure no inconsistent state is leaked during insertion
   891  	bc.mu.Lock()
   892  	defer bc.mu.Unlock()
   893  
   894  	currentBlock := bc.CurrentBlock()
   895  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   896  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   897  
   898  	// Irrelevant of the canonical status, write the block itself to the database
   899  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   900  		return NonStatTy, err
   901  	}
   902  	// Write other block data using a batch.
   903  	batch := bc.db.NewBatch()
   904  	rawdb.WriteBlock(batch, block)
   905  
   906  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   907  	if err != nil {
   908  		return NonStatTy, err
   909  	}
   910  	triedb := bc.stateCache.TrieDB()
   911  
   912  	// If we're running an archive node, always flush
   913  	if bc.cacheConfig.Disabled {
   914  		if err := triedb.Commit(root, false); err != nil {
   915  			return NonStatTy, err
   916  		}
   917  	} else {
   918  		// Full but not archive node, do proper garbage collection
   919  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   920  		bc.triegc.Push(root, -float32(block.NumberU64()))
   921  
   922  		if current := block.NumberU64(); current > triesInMemory {
   923  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
   924  			var (
   925  				nodes, imgs = triedb.Size()
   926  				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   927  			)
   928  			if nodes > limit || imgs > 4*1024*1024 {
   929  				triedb.Cap(limit - ethdb.IdealBatchSize)
   930  			}
   931  			// Find the next state trie we need to commit
   932  			header := bc.GetHeaderByNumber(current - triesInMemory)
   933  			chosen := header.Number.Uint64()
   934  
   935  			// If we exceeded our time allowance, flush an entire trie to disk
   936  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   937  				// If we're exceeding limits but haven't reached a large enough memory gap,
   938  				// warn the user that the system is becoming unstable.
   939  				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   940  					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   941  				}
   942  				// Flush an entire trie and restart the counters
   943  				triedb.Commit(header.Root, true)
   944  				lastWrite = chosen
   945  				bc.gcproc = 0
   946  			}
   947  			// Garbage collect anything below our required write retention
   948  			for !bc.triegc.Empty() {
   949  				root, number := bc.triegc.Pop()
   950  				if uint64(-number) > chosen {
   951  					bc.triegc.Push(root, number)
   952  					break
   953  				}
   954  				triedb.Dereference(root.(common.Hash))
   955  			}
   956  		}
   957  	}
   958  	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   959  
   960  	// If the total difficulty is higher than our currently known TD, add the block to the canonical chain
   961  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   962  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   963  	reorg := externTd.Cmp(localTd) > 0
   964  	currentBlock = bc.CurrentBlock()
   965  	if !reorg && externTd.Cmp(localTd) == 0 {
   966  		// Split same-difficulty blocks by number, then at random
   967  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
   968  	}
   969  	if reorg {
   970  		// Reorganise the chain if the parent is not the head block
   971  		if block.ParentHash() != currentBlock.Hash() {
   972  			if err := bc.reorg(currentBlock, block); err != nil {
   973  				return NonStatTy, err
   974  			}
   975  		}
   976  		// Write the positional metadata for transaction/receipt lookups and preimages
   977  		rawdb.WriteTxLookupEntries(batch, block)
   978  		rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
   979  
   980  		status = CanonStatTy
   981  	} else {
   982  		status = SideStatTy
   983  	}
   984  	if err := batch.Write(); err != nil {
   985  		return NonStatTy, err
   986  	}
   987  
   988  	// Set new head.
   989  	if status == CanonStatTy {
   990  		bc.insert(block)
   991  	}
   992  	bc.futureBlocks.Remove(block.Hash())
   993  	return status, nil
   994  }
   995  
   996  // InsertChain attempts to insert the given batch of blocks into the canonical
   997  // chain or, otherwise, create a fork. If an error is returned, it will return
   998  // the index number of the failing block as well as an error describing what
   999  // went wrong.
  1000  //
  1001  // After insertion is done, all accumulated events will be fired.
  1002  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1003  	n, events, logs, err := bc.insertChain(chain)
  1004  	bc.PostChainEvents(events, logs)
  1005  	return n, err
  1006  }
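
        // A hedged sketch of importing a freshly generated segment on top of the current
        // head; GenerateChain and its exact signature are assumptions about the chain_makers
        // helpers in this package at this version.
        //
        //	blocks, _ := GenerateChain(params.AllEthashProtocolChanges, chain.CurrentBlock(), ethash.NewFaker(), db, 10, nil)
        //	if n, err := chain.InsertChain(blocks); err != nil {
        //		// the block at index n failed header verification, body validation or state processing
        //	}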
  1007  
  1008  // insertChain will execute the actual chain insertion and event aggregation. The
  1009  // only reason this method exists as a separate one is to make locking cleaner
  1010  // with deferred statements.
  1011  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1012  	// Sanity check that we have something meaningful to import
  1013  	if len(chain) == 0 {
  1014  		return 0, nil, nil, nil
  1015  	}
  1016  	// Do a sanity check that the provided chain is actually ordered and linked
  1017  	for i := 1; i < len(chain); i++ {
  1018  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1019  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1020  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1021  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1022  
  1023  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1024  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1025  		}
  1026  	}
  1027  	// Pre-checks passed, start the full block imports
  1028  	bc.wg.Add(1)
  1029  	defer bc.wg.Done()
  1030  
  1031  	bc.chainmu.Lock()
  1032  	defer bc.chainmu.Unlock()
  1033  
  1034  	// A queued approach to delivering events. This is generally
  1035  	// faster than direct delivery and requires much less mutex
  1036  	// acquiring.
  1037  	var (
  1038  		stats         = insertStats{startTime: mclock.Now()}
  1039  		events        = make([]interface{}, 0, len(chain))
  1040  		lastCanon     *types.Block
  1041  		coalescedLogs []*types.Log
  1042  	)
  1043  	// Start the parallel header verifier
  1044  	headers := make([]*types.Header, len(chain))
  1045  	seals := make([]bool, len(chain))
  1046  
  1047  	for i, block := range chain {
  1048  		headers[i] = block.Header()
  1049  		seals[i] = true
  1050  	}
  1051  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1052  	defer close(abort)
  1053  
  1054  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1055  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1056  
  1057  	// Iterate over the blocks and insert when the verifier permits
  1058  	for i, block := range chain {
  1059  		// If the chain is terminating, stop processing blocks
  1060  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1061  			log.Debug("Premature abort during blocks processing")
  1062  			break
  1063  		}
  1064  		// If the header is a banned one, straight out abort
  1065  		if BadHashes[block.Hash()] {
  1066  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1067  			return i, events, coalescedLogs, ErrBlacklistedHash
  1068  		}
  1069  		// Wait for the block's verification to complete
  1070  		bstart := time.Now()
  1071  
  1072  		err := <-results
  1073  		if err == nil {
  1074  			err = bc.Validator().ValidateBody(block)
  1075  		}
  1076  		switch {
  1077  		case err == ErrKnownBlock:
  1078  			// Block and state both already known. However if the current block is below
  1079  			// this number we did a rollback and we should reimport it nonetheless.
  1080  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1081  				stats.ignored++
  1082  				continue
  1083  			}
  1084  
  1085  		case err == consensus.ErrFutureBlock:
  1086  			// Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
  1087  			// is exceeded, the chain is discarded and processed at a later time if given.
  1088  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1089  			if block.Time().Cmp(max) > 0 {
  1090  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1091  			}
  1092  			bc.futureBlocks.Add(block.Hash(), block)
  1093  			stats.queued++
  1094  			continue
  1095  
  1096  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1097  			bc.futureBlocks.Add(block.Hash(), block)
  1098  			stats.queued++
  1099  			continue
  1100  
  1101  		case err == consensus.ErrPrunedAncestor:
  1102  			// Block competing with the canonical chain, store in the db, but don't process
  1103  			// until the competitor TD goes above the canonical TD
  1104  			currentBlock := bc.CurrentBlock()
  1105  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1106  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1107  			if localTd.Cmp(externTd) > 0 {
  1108  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1109  					return i, events, coalescedLogs, err
  1110  				}
  1111  				continue
  1112  			}
  1113  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1114  			var winner []*types.Block
  1115  
  1116  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1117  			for !bc.HasState(parent.Root()) {
  1118  				winner = append(winner, parent)
  1119  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1120  			}
  1121  			for j := 0; j < len(winner)/2; j++ {
  1122  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1123  			}
  1124  			// Import all the pruned blocks to make the state available
  1125  			bc.chainmu.Unlock()
  1126  			_, evs, logs, err := bc.insertChain(winner)
  1127  			bc.chainmu.Lock()
  1128  			events, coalescedLogs = evs, logs
  1129  
  1130  			if err != nil {
  1131  				return i, events, coalescedLogs, err
  1132  			}
  1133  
  1134  		case err != nil:
  1135  			bc.reportBlock(block, nil, err)
  1136  			return i, events, coalescedLogs, err
  1137  		}
  1138  		// Create a new statedb using the parent block and report an
  1139  		// error if it fails.
  1140  		var parent *types.Block
  1141  		if i == 0 {
  1142  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1143  		} else {
  1144  			parent = chain[i-1]
  1145  		}
  1146  		state, err := state.New(parent.Root(), bc.stateCache)
  1147  		if err != nil {
  1148  			return i, events, coalescedLogs, err
  1149  		}
  1150  		// Process block using the parent state as reference point.
  1151  		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1152  		if err != nil {
  1153  			bc.reportBlock(block, receipts, err)
  1154  			return i, events, coalescedLogs, err
  1155  		}
  1156  		// Validate the state using the default validator
  1157  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1158  		if err != nil {
  1159  			bc.reportBlock(block, receipts, err)
  1160  			return i, events, coalescedLogs, err
  1161  		}
  1162  		proctime := time.Since(bstart)
  1163  
  1164  		// Write the block to the chain and get the status.
  1165  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1166  		if err != nil {
  1167  			return i, events, coalescedLogs, err
  1168  		}
  1169  		switch status {
  1170  		case CanonStatTy:
  1171  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1172  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1173  
  1174  			coalescedLogs = append(coalescedLogs, logs...)
  1175  			blockInsertTimer.UpdateSince(bstart)
  1176  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1177  			lastCanon = block
  1178  
  1179  			// Only count canonical blocks for GC processing time
  1180  			bc.gcproc += proctime
  1181  
  1182  		case SideStatTy:
  1183  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1184  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1185  
  1186  			blockInsertTimer.UpdateSince(bstart)
  1187  			events = append(events, ChainSideEvent{block})
  1188  		}
  1189  		stats.processed++
  1190  		stats.usedGas += usedGas
  1191  
  1192  		cache, _ := bc.stateCache.TrieDB().Size()
  1193  		stats.report(chain, i, cache)
  1194  	}
  1195  	// Append a single chain head event if we've progressed the chain
  1196  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1197  		events = append(events, ChainHeadEvent{lastCanon})
  1198  	}
  1199  	return 0, events, coalescedLogs, nil
  1200  }
  1201  
  1202  // insertStats tracks and reports on block insertion.
  1203  type insertStats struct {
  1204  	queued, processed, ignored int
  1205  	usedGas                    uint64
  1206  	lastIndex                  int
  1207  	startTime                  mclock.AbsTime
  1208  }
  1209  
  1210  // statsReportLimit is the time limit during import and export after which we
  1211  // always print out progress. This avoids the user wondering what's going on.
  1212  const statsReportLimit = 8 * time.Second
  1213  
  1214  // report prints statistics if some number of blocks have been processed
  1215  // or more than a few seconds have passed since the last message.
  1216  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1217  	// Fetch the timings for the batch
  1218  	var (
  1219  		now     = mclock.Now()
  1220  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1221  	)
  1222  	// If we're at the last block of the batch or report period reached, log
  1223  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1224  		var (
  1225  			end = chain[index]
  1226  			txs = countTransactions(chain[st.lastIndex : index+1])
  1227  		)
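        		// elapsed is expressed in nanoseconds, so gas/1e6 (mgas) divided by elapsed/1e9
        		// (seconds) reduces to the gas*1000/elapsed term used for "mgasps" below.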
  1228  		context := []interface{}{
  1229  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1230  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1231  			"number", end.Number(), "hash", end.Hash(), "cache", cache,
  1232  		}
  1233  		if st.queued > 0 {
  1234  			context = append(context, []interface{}{"queued", st.queued}...)
  1235  		}
  1236  		if st.ignored > 0 {
  1237  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1238  		}
  1239  		log.Info("Imported new chain segment", context...)
  1240  
  1241  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1242  	}
  1243  }
  1244  
  1245  func countTransactions(chain []*types.Block) (c int) {
  1246  	for _, b := range chain {
  1247  		c += len(b.Transactions())
  1248  	}
  1249  	return c
  1250  }
  1251  
  1252  // reorg takes two blocks, an old chain and a new chain, and will reconstruct the
  1253  // blocks, insert them to be part of the new canonical chain and accumulate potential
  1254  // missing transactions, posting an event about them.
  1255  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1256  	var (
  1257  		newChain    types.Blocks
  1258  		oldChain    types.Blocks
  1259  		commonBlock *types.Block
  1260  		deletedTxs  types.Transactions
  1261  		deletedLogs []*types.Log
  1262  		// collectLogs collects the logs that were generated during the
  1263  		// processing of the block that corresponds with the given hash.
  1264  		// These logs are later announced as deleted.
  1265  		collectLogs = func(hash common.Hash) {
  1266  			// Coalesce logs and set 'Removed'.
  1267  			number := bc.hc.GetBlockNumber(hash)
  1268  			if number == nil {
  1269  				return
  1270  			}
  1271  			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
  1272  			for _, receipt := range receipts {
  1273  				for _, log := range receipt.Logs {
  1274  					del := *log
  1275  					del.Removed = true
  1276  					deletedLogs = append(deletedLogs, &del)
  1277  				}
  1278  			}
  1279  		}
  1280  	)
  1281  
  1282  	// first reduce whichever chain is the longer one down to the other's height
  1283  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1284  		// reduce old chain
  1285  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1286  			oldChain = append(oldChain, oldBlock)
  1287  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1288  
  1289  			collectLogs(oldBlock.Hash())
  1290  		}
  1291  	} else {
  1292  		// New chain is higher, stash its extra blocks for insertion later on
  1293  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1294  			newChain = append(newChain, newBlock)
  1295  		}
  1296  	}
  1297  	if oldBlock == nil {
  1298  		return fmt.Errorf("Invalid old chain")
  1299  	}
  1300  	if newBlock == nil {
  1301  		return fmt.Errorf("Invalid new chain")
  1302  	}
  1303  
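        	// Both chains are now at the same height: walk them back in lockstep until the
        	// common ancestor is found, collecting the old chain's blocks, transactions and
        	// logs as dropped along the way.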
  1304  	for {
  1305  		if oldBlock.Hash() == newBlock.Hash() {
  1306  			commonBlock = oldBlock
  1307  			break
  1308  		}
  1309  
  1310  		oldChain = append(oldChain, oldBlock)
  1311  		newChain = append(newChain, newBlock)
  1312  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1313  		collectLogs(oldBlock.Hash())
  1314  
  1315  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1316  		if oldBlock == nil {
  1317  			return fmt.Errorf("Invalid old chain")
  1318  		}
  1319  		if newBlock == nil {
  1320  			return fmt.Errorf("Invalid new chain")
  1321  		}
  1322  	}
  1323  	// Ensure the user sees large reorgs
  1324  	if len(oldChain) > 0 && len(newChain) > 0 {
  1325  		logFn := log.Debug
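        		// Escalate deep reorgs (more than 63 dropped blocks) from debug to warning level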
  1326  		if len(oldChain) > 63 {
  1327  			logFn = log.Warn
  1328  		}
  1329  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1330  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1331  	} else {
  1332  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1333  	}
  1334  	// Insert the new chain, taking care of the proper incremental order
  1335  	var addedTxs types.Transactions
  1336  	for i := len(newChain) - 1; i >= 0; i-- {
  1337  		// insert the block in the canonical way, re-writing history
  1338  		bc.insert(newChain[i])
  1339  		// write lookup entries for hash based transaction/receipt searches
  1340  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1341  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1342  	}
  1343  	// Calculate the transactions dropped by the reorg (present in the old chain only)
  1344  	diff := types.TxDifference(deletedTxs, addedTxs)
  1345  	// The canonical hash-to-transaction lookup entries created for the dropped
  1346  	// transactions are now stale and must be deleted from the database
  1347  	batch := bc.db.NewBatch()
  1348  	for _, tx := range diff {
  1349  		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1350  	}
  1351  	batch.Write()
  1352  
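        	// Fire the removal and side-chain events asynchronously so that slow subscribers
        	// cannot block the reorg itself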
  1353  	if len(deletedLogs) > 0 {
  1354  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1355  	}
  1356  	if len(oldChain) > 0 {
  1357  		go func() {
  1358  			for _, block := range oldChain {
  1359  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1360  			}
  1361  		}()
  1362  	}
  1363  
  1364  	return nil
  1365  }
  1366  
  1367  // PostChainEvents iterates over the events generated by a chain insertion and
  1368  // posts them into the event feed.
  1369  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1370  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1371  	// post event logs for further processing
  1372  	if logs != nil {
  1373  		bc.logsFeed.Send(logs)
  1374  	}
  1375  	for _, event := range events {
  1376  		switch ev := event.(type) {
  1377  		case ChainEvent:
  1378  			bc.chainFeed.Send(ev)
  1379  
  1380  		case ChainHeadEvent:
  1381  			bc.chainHeadFeed.Send(ev)
  1382  
  1383  		case ChainSideEvent:
  1384  			bc.chainSideFeed.Send(ev)
  1385  		}
  1386  	}
  1387  }
  1388  
  1389  func (bc *BlockChain) update() {
  1390  	futureTimer := time.NewTicker(5 * time.Second)
  1391  	defer futureTimer.Stop()
  1392  	for {
  1393  		select {
  1394  		case <-futureTimer.C:
  1395  			bc.procFutureBlocks()
  1396  		case <-bc.quit:
  1397  			return
  1398  		}
  1399  	}
  1400  }
  1401  
  1402  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1403  func (bc *BlockChain) BadBlocks() []*types.Block {
  1404  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1405  	for _, hash := range bc.badBlocks.Keys() {
  1406  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1407  			block := blk.(*types.Block)
  1408  			blocks = append(blocks, block)
  1409  		}
  1410  	}
  1411  	return blocks
  1412  }
  1413  
  1414  // addBadBlock adds a bad block to the bad-block LRU cache
  1415  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1416  	bc.badBlocks.Add(block.Hash(), block)
  1417  }
  1418  
  1419  // reportBlock logs a bad block error.
  1420  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1421  	bc.addBadBlock(block)
  1422  
  1423  	var receiptString string
  1424  	for _, receipt := range receipts {
  1425  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1426  	}
  1427  	log.Error(fmt.Sprintf(`
  1428  ########## BAD BLOCK #########
  1429  Chain config: %v
  1430  
  1431  Number: %v
  1432  Hash: 0x%x
  1433  %v
  1434  
  1435  Error: %v
  1436  ##############################
  1437  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1438  }
  1439  
  1440  // InsertHeaderChain attempts to insert the given header chain into the local
  1441  // chain, possibly creating a reorg. If an error is returned, it will return the
  1442  // index number of the failing header as well as an error describing what went wrong.
  1443  //
  1444  // The checkFreq parameter can be used to fine tune whether nonce verification
  1445  // should be done or not. The reason behind the optional check is that some of
  1446  // the header retrieval mechanisms already need to verify nonces, and nonces can
  1447  // also be verified sparsely, without needing to check each one.
  1448  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1449  	start := time.Now()
  1450  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1451  		return i, err
  1452  	}
  1453  
  1454  	// Make sure only one thread manipulates the chain at once
  1455  	bc.chainmu.Lock()
  1456  	defer bc.chainmu.Unlock()
  1457  
  1458  	bc.wg.Add(1)
  1459  	defer bc.wg.Done()
  1460  
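        	// whFunc persists a single pre-validated header while holding the blockchain
        	// mutex; the header chain itself handles total-difficulty comparison and any
        	// canonical head updates.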
  1461  	whFunc := func(header *types.Header) error {
  1462  		bc.mu.Lock()
  1463  		defer bc.mu.Unlock()
  1464  
  1465  		_, err := bc.hc.WriteHeader(header)
  1466  		return err
  1467  	}
  1468  
  1469  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1470  }
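
        // Illustrative usage sketch (hypothetical helper, not part of the upstream API):
        // importing a batch of already-downloaded headers. Passing checkFreq = 1 requests
        // verification of every header rather than a sparse sample.
        func insertHeadersSketch(bc *BlockChain, headers []*types.Header) error {
        	if n, err := bc.InsertHeaderChain(headers, 1); err != nil {
        		log.Warn("Header import failed", "index", n, "err", err)
        		return err
        	}
        	return nil
        }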
  1471  
  1472  // writeHeader writes a header into the local chain, given that its parent is
  1473  // already known. If the total difficulty of the newly inserted header becomes
  1474  // greater than the current known TD, the canonical chain is re-routed.
  1475  //
  1476  // Note: This method is not concurrent-safe with inserting blocks simultaneously
  1477  // into the chain, as side effects caused by reorganisations cannot be emulated
  1478  // without the real blocks. Hence, writing headers directly should only be done
  1479  // in two scenarios: pure-header mode of operation (light clients), or properly
  1480  // separated header/block phases (non-archive clients).
  1481  func (bc *BlockChain) writeHeader(header *types.Header) error {
  1482  	bc.wg.Add(1)
  1483  	defer bc.wg.Done()
  1484  
  1485  	bc.mu.Lock()
  1486  	defer bc.mu.Unlock()
  1487  
  1488  	_, err := bc.hc.WriteHeader(header)
  1489  	return err
  1490  }
  1491  
  1492  // CurrentHeader retrieves the current head header of the canonical chain. The
  1493  // header is retrieved from the HeaderChain's internal cache.
  1494  func (bc *BlockChain) CurrentHeader() *types.Header {
  1495  	return bc.hc.CurrentHeader()
  1496  }
  1497  
  1498  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1499  // database by hash and number, caching it if found.
  1500  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1501  	return bc.hc.GetTd(hash, number)
  1502  }
  1503  
  1504  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1505  // database by hash, caching it if found.
  1506  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1507  	return bc.hc.GetTdByHash(hash)
  1508  }
  1509  
  1510  // GetHeader retrieves a block header from the database by hash and number,
  1511  // caching it if found.
  1512  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1513  	return bc.hc.GetHeader(hash, number)
  1514  }
  1515  
  1516  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1517  // found.
  1518  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1519  	return bc.hc.GetHeaderByHash(hash)
  1520  }
  1521  
  1522  // HasHeader checks if a block header is present in the database or not, caching
  1523  // it if present.
  1524  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1525  	return bc.hc.HasHeader(hash, number)
  1526  }
  1527  
  1528  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1529  // hash, fetching towards the genesis block.
  1530  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1531  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1532  }
  1533  
  1534  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  1535  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  1536  // number of blocks to be individually checked before we reach the canonical chain.
  1537  //
  1538  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
  1539  func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  1540  	bc.chainmu.Lock()
  1541  	defer bc.chainmu.Unlock()
  1542  
  1543  	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  1544  }
  1545  
  1546  // GetHeaderByNumber retrieves a block header from the database by number,
  1547  // caching it (associated with its hash) if found.
  1548  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1549  	return bc.hc.GetHeaderByNumber(number)
  1550  }
  1551  
  1552  // Config retrieves the blockchain's chain configuration.
  1553  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1554  
  1555  // Engine retrieves the blockchain's consensus engine.
  1556  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1557  
  1558  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1559  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1560  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1561  }
  1562  
  1563  // SubscribeChainEvent registers a subscription of ChainEvent.
  1564  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1565  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1566  }
  1567  
  1568  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1569  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1570  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1571  }
  1572  
  1573  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1574  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1575  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1576  }
  1577  
  1578  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1579  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1580  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1581  }
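
        // Illustrative usage sketch (hypothetical helper, not part of the upstream API):
        // a consumer of the subscription helpers above that follows the canonical head.
        // The channel capacity and log messages are arbitrary choices.
        func followHeadSketch(bc *BlockChain, quit <-chan struct{}) {
        	heads := make(chan ChainHeadEvent, 16)
        	sub := bc.SubscribeChainHeadEvent(heads)
        	defer sub.Unsubscribe()

        	for {
        		select {
        		case ev := <-heads:
        			log.Info("New canonical head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
        		case err := <-sub.Err():
        			if err != nil {
        				log.Warn("Head subscription failed", "err", err)
        			}
        			return
        		case <-quit:
        			return
        		}
        	}
        }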