github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/core/blockchain.go

     1  // This file is part of the go-sberex library. The go-sberex library is 
     2  // free software: you can redistribute it and/or modify it under the terms 
     3  // of the GNU Lesser General Public License as published by the Free 
     4  // Software Foundation, either version 3 of the License, or (at your option)
     5  // any later version.
     6  //
     7  // The go-sberex library is distributed in the hope that it will be useful, 
     8  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     9  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 
    10  // General Public License <http://www.gnu.org/licenses/> for more details.
    11  
    12  // Package core implements the Sberex consensus protocol.
    13  package core
    14  
    15  import (
    16  	"errors"
    17  	"fmt"
    18  	"io"
    19  	"math/big"
    20  	mrand "math/rand"
    21  	"sync"
    22  	"sync/atomic"
    23  	"time"
    24  
    25  	"github.com/Sberex/go-sberex/common"
    26  	"github.com/Sberex/go-sberex/common/mclock"
    27  	"github.com/Sberex/go-sberex/consensus"
    28  	"github.com/Sberex/go-sberex/core/state"
    29  	"github.com/Sberex/go-sberex/core/types"
    30  	"github.com/Sberex/go-sberex/core/vm"
    31  	"github.com/Sberex/go-sberex/crypto"
    32  	"github.com/Sberex/go-sberex/ethdb"
    33  	"github.com/Sberex/go-sberex/event"
    34  	"github.com/Sberex/go-sberex/log"
    35  	"github.com/Sberex/go-sberex/metrics"
    36  	"github.com/Sberex/go-sberex/params"
    37  	"github.com/Sberex/go-sberex/rlp"
    38  	"github.com/Sberex/go-sberex/trie"
    39  	"github.com/hashicorp/golang-lru"
    40  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    41  )
    42  
    43  var (
    44  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    45  
    46  	ErrNoGenesis = errors.New("Genesis not found in chain")
    47  )
    48  
    49  const (
    50  	bodyCacheLimit      = 256
    51  	blockCacheLimit     = 256
    52  	maxFutureBlocks     = 256
    53  	maxTimeFutureBlocks = 70
    54  	badBlockLimit       = 10
    55  	triesInMemory       = 128
    56  
    57  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    58  	BlockChainVersion = 3
    59  )
    60  
    61  // CacheConfig contains the configuration values for the trie caching/pruning
    62  // that's resident in a blockchain.
    63  type CacheConfig struct {
    64  	Disabled      bool          // Whether to disable trie write caching (archive node)
    65  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    66  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    67  }
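
         // exampleCacheConfig is an illustrative sketch added for documentation; it is
         // not part of the upstream file. The values mirror the defaults NewBlockChain
         // applies when a nil *CacheConfig is passed, while setting Disabled to true
         // would select archive-style operation (every trie written to disk).
         func exampleCacheConfig() *CacheConfig {
         	return &CacheConfig{
         		Disabled:      false,             // keep trie write caching (non-archive node)
         		TrieNodeLimit: 256 * 1024 * 1024, // same default NewBlockChain falls back to
         		TrieTimeLimit: 5 * time.Minute,   // flush after this much accumulated processing
         	}
         }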
    68  
     69  // BlockChain represents the canonical chain given a database with a genesis
     70  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
     71  //
     72  // Importing blocks into the block chain happens according to the set of rules
     73  // defined by the two stage Validator. Processing of blocks is done using the
     74  // Processor which processes the included transactions. The validation of the
     75  // state is done in the second part of the Validator. Any failure aborts the
     76  // import.
     77  //
     78  // The BlockChain also helps in returning blocks from **any** chain included
     79  // in the database as well as blocks that represent the canonical chain. It's
     80  // important to note that GetBlock can return any block and does not need to be
     81  // included in the canonical chain, whereas GetBlockByNumber always represents
     82  // the canonical chain.
    83  type BlockChain struct {
    84  	chainConfig *params.ChainConfig // Chain & network configuration
    85  	cacheConfig *CacheConfig        // Cache configuration for pruning
    86  
    87  	db     ethdb.Database // Low level persistent database to store final content in
    88  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
    89  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
    90  
    91  	hc            *HeaderChain
    92  	rmLogsFeed    event.Feed
    93  	chainFeed     event.Feed
    94  	chainSideFeed event.Feed
    95  	chainHeadFeed event.Feed
    96  	logsFeed      event.Feed
    97  	scope         event.SubscriptionScope
    98  	genesisBlock  *types.Block
    99  
   100  	mu      sync.RWMutex // global mutex for locking chain operations
   101  	chainmu sync.RWMutex // blockchain insertion lock
   102  	procmu  sync.RWMutex // block processor lock
   103  
   104  	checkpoint       int          // checkpoint counts towards the new checkpoint
   105  	currentBlock     atomic.Value // Current head of the block chain
   106  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   107  
   108  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   109  	bodyCache    *lru.Cache     // Cache for the most recent block bodies
   110  	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   111  	blockCache   *lru.Cache     // Cache for the most recent entire blocks
   112  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   113  
   114  	quit    chan struct{} // blockchain quit channel
    115  	running int32         // running must be accessed atomically
    116  	// procInterrupt must be accessed atomically
   117  	procInterrupt int32          // interrupt signaler for block processing
   118  	wg            sync.WaitGroup // chain processing wait group for shutting down
   119  
   120  	engine    consensus.Engine
   121  	processor Processor // block processor interface
   122  	validator Validator // block and state validator interface
   123  	vmConfig  vm.Config
   124  
   125  	badBlocks *lru.Cache // Bad block cache
   126  }
   127  
   128  // NewBlockChain returns a fully initialised block chain using information
   129  // available in the database. It initialises the default Sberex Validator and
   130  // Processor.
   131  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
   132  	if cacheConfig == nil {
   133  		cacheConfig = &CacheConfig{
   134  			TrieNodeLimit: 256 * 1024 * 1024,
   135  			TrieTimeLimit: 5 * time.Minute,
   136  		}
   137  	}
   138  	bodyCache, _ := lru.New(bodyCacheLimit)
   139  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   140  	blockCache, _ := lru.New(blockCacheLimit)
   141  	futureBlocks, _ := lru.New(maxFutureBlocks)
   142  	badBlocks, _ := lru.New(badBlockLimit)
   143  
   144  	bc := &BlockChain{
   145  		chainConfig:  chainConfig,
   146  		cacheConfig:  cacheConfig,
   147  		db:           db,
   148  		triegc:       prque.New(),
   149  		stateCache:   state.NewDatabase(db),
   150  		quit:         make(chan struct{}),
   151  		bodyCache:    bodyCache,
   152  		bodyRLPCache: bodyRLPCache,
   153  		blockCache:   blockCache,
   154  		futureBlocks: futureBlocks,
   155  		engine:       engine,
   156  		vmConfig:     vmConfig,
   157  		badBlocks:    badBlocks,
   158  	}
   159  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   160  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   161  
   162  	var err error
   163  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   164  	if err != nil {
   165  		return nil, err
   166  	}
   167  	bc.genesisBlock = bc.GetBlockByNumber(0)
   168  	if bc.genesisBlock == nil {
   169  		return nil, ErrNoGenesis
   170  	}
   171  	if err := bc.loadLastState(); err != nil {
   172  		return nil, err
   173  	}
   174  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   175  	for hash := range BadHashes {
   176  		if header := bc.GetHeaderByHash(hash); header != nil {
   177  			// get the canonical block corresponding to the offending header's number
   178  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   179  			// make sure the headerByNumber (if present) is in our current canonical chain
   180  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   181  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   182  				bc.SetHead(header.Number.Uint64() - 1)
   183  				log.Error("Chain rewind was successful, resuming normal operation")
   184  			}
   185  		}
   186  	}
   187  	// Take ownership of this particular state
   188  	go bc.update()
   189  	return bc, nil
   190  }
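
         // exampleOpenChain is an illustrative sketch added for documentation; it is
         // not part of the upstream file. It shows the typical NewBlockChain lifecycle:
         // construct with nil cache defaults, inspect the loaded head and Stop to flush
         // recent state on shutdown. The db, chainConfig and engine values are assumed
         // to be supplied by the caller.
         func exampleOpenChain(db ethdb.Database, chainConfig *params.ChainConfig, engine consensus.Engine) error {
         	chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{})
         	if err != nil {
         		return err // e.g. ErrNoGenesis if the database was never initialised
         	}
         	defer chain.Stop()

         	head := chain.CurrentBlock()
         	log.Info("Opened chain", "number", head.Number(), "hash", head.Hash())
         	return nil
         }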
   191  
   192  func (bc *BlockChain) getProcInterrupt() bool {
   193  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   194  }
   195  
   196  // loadLastState loads the last known chain state from the database. This method
   197  // assumes that the chain manager mutex is held.
   198  func (bc *BlockChain) loadLastState() error {
   199  	// Restore the last known head block
   200  	head := GetHeadBlockHash(bc.db)
   201  	if head == (common.Hash{}) {
   202  		// Corrupt or empty database, init from scratch
   203  		log.Warn("Empty database, resetting chain")
   204  		return bc.Reset()
   205  	}
   206  	// Make sure the entire head block is available
   207  	currentBlock := bc.GetBlockByHash(head)
   208  	if currentBlock == nil {
   209  		// Corrupt or empty database, init from scratch
   210  		log.Warn("Head block missing, resetting chain", "hash", head)
   211  		return bc.Reset()
   212  	}
   213  	// Make sure the state associated with the block is available
   214  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   215  		// Dangling block without a state associated, init from scratch
   216  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   217  		if err := bc.repair(&currentBlock); err != nil {
   218  			return err
   219  		}
   220  	}
   221  	// Everything seems to be fine, set as the head block
   222  	bc.currentBlock.Store(currentBlock)
   223  
   224  	// Restore the last known head header
   225  	currentHeader := currentBlock.Header()
   226  	if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
   227  		if header := bc.GetHeaderByHash(head); header != nil {
   228  			currentHeader = header
   229  		}
   230  	}
   231  	bc.hc.SetCurrentHeader(currentHeader)
   232  
   233  	// Restore the last known head fast block
   234  	bc.currentFastBlock.Store(currentBlock)
   235  	if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   236  		if block := bc.GetBlockByHash(head); block != nil {
   237  			bc.currentFastBlock.Store(block)
   238  		}
   239  	}
   240  
   241  	// Issue a status log for the user
   242  	currentFastBlock := bc.CurrentFastBlock()
   243  
   244  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   245  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   246  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   247  
   248  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   249  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   250  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   251  
   252  	return nil
   253  }
   254  
   255  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   256  // above the new head will be deleted and the new one set. In the case of blocks
   257  // though, the head may be further rewound if block bodies are missing (non-archive
   258  // nodes after a fast sync).
   259  func (bc *BlockChain) SetHead(head uint64) error {
   260  	log.Warn("Rewinding blockchain", "target", head)
   261  
   262  	bc.mu.Lock()
   263  	defer bc.mu.Unlock()
   264  
   265  	// Rewind the header chain, deleting all block bodies until then
   266  	delFn := func(hash common.Hash, num uint64) {
   267  		DeleteBody(bc.db, hash, num)
   268  	}
   269  	bc.hc.SetHead(head, delFn)
   270  	currentHeader := bc.hc.CurrentHeader()
   271  
   272  	// Clear out any stale content from the caches
   273  	bc.bodyCache.Purge()
   274  	bc.bodyRLPCache.Purge()
   275  	bc.blockCache.Purge()
   276  	bc.futureBlocks.Purge()
   277  
   278  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   279  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   280  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   281  	}
   282  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   283  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   284  			// Rewound state missing, rolled back to before pivot, reset to genesis
   285  			bc.currentBlock.Store(bc.genesisBlock)
   286  		}
   287  	}
    288  	// Rewind the fast block in a simple manner to the target head
   289  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   290  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   291  	}
    292  	// If either block reached nil, reset to the genesis state
   293  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   294  		bc.currentBlock.Store(bc.genesisBlock)
   295  	}
   296  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   297  		bc.currentFastBlock.Store(bc.genesisBlock)
   298  	}
   299  	currentBlock := bc.CurrentBlock()
   300  	currentFastBlock := bc.CurrentFastBlock()
   301  	if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
   302  		log.Crit("Failed to reset head full block", "err", err)
   303  	}
   304  	if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
   305  		log.Crit("Failed to reset head fast block", "err", err)
   306  	}
   307  	return bc.loadLastState()
   308  }
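
         // exampleRewind is an illustrative sketch added for documentation; it is not
         // part of the upstream file. SetHead deletes everything above the target and
         // may rewind further if bodies or state are missing, so the resulting head is
         // read back afterwards rather than assumed to equal the target.
         func exampleRewind(bc *BlockChain, target uint64) error {
         	if err := bc.SetHead(target); err != nil {
         		return err
         	}
         	log.Info("Rewound chain", "target", target, "head", bc.CurrentBlock().NumberU64())
         	return nil
         }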
   309  
    310  // FastSyncCommitHead sets the current head block to the one defined by the hash,
    311  // regardless of what the chain contents were prior.
   312  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
    313  	// Make sure that both the block as well as its state trie exist
   314  	block := bc.GetBlockByHash(hash)
   315  	if block == nil {
   316  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   317  	}
   318  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   319  		return err
   320  	}
   321  	// If all checks out, manually set the head block
   322  	bc.mu.Lock()
   323  	bc.currentBlock.Store(block)
   324  	bc.mu.Unlock()
   325  
   326  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   327  	return nil
   328  }
   329  
   330  // GasLimit returns the gas limit of the current HEAD block.
   331  func (bc *BlockChain) GasLimit() uint64 {
   332  	return bc.CurrentBlock().GasLimit()
   333  }
   334  
   335  // CurrentBlock retrieves the current head block of the canonical chain. The
   336  // block is retrieved from the blockchain's internal cache.
   337  func (bc *BlockChain) CurrentBlock() *types.Block {
   338  	return bc.currentBlock.Load().(*types.Block)
   339  }
   340  
   341  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   342  // chain. The block is retrieved from the blockchain's internal cache.
   343  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   344  	return bc.currentFastBlock.Load().(*types.Block)
   345  }
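
         // exampleSyncGap is an illustrative sketch added for documentation; it is not
         // part of the upstream file. During fast sync the receipt/body head returned by
         // CurrentFastBlock can run ahead of the fully processed head returned by
         // CurrentBlock, so the difference gives a rough measure of remaining processing.
         func exampleSyncGap(bc *BlockChain) uint64 {
         	full, fast := bc.CurrentBlock().NumberU64(), bc.CurrentFastBlock().NumberU64()
         	if fast <= full {
         		return 0
         	}
         	return fast - full
         }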
   346  
   347  // SetProcessor sets the processor required for making state modifications.
   348  func (bc *BlockChain) SetProcessor(processor Processor) {
   349  	bc.procmu.Lock()
   350  	defer bc.procmu.Unlock()
   351  	bc.processor = processor
   352  }
   353  
   354  // SetValidator sets the validator which is used to validate incoming blocks.
   355  func (bc *BlockChain) SetValidator(validator Validator) {
   356  	bc.procmu.Lock()
   357  	defer bc.procmu.Unlock()
   358  	bc.validator = validator
   359  }
   360  
   361  // Validator returns the current validator.
   362  func (bc *BlockChain) Validator() Validator {
   363  	bc.procmu.RLock()
   364  	defer bc.procmu.RUnlock()
   365  	return bc.validator
   366  }
   367  
   368  // Processor returns the current processor.
   369  func (bc *BlockChain) Processor() Processor {
   370  	bc.procmu.RLock()
   371  	defer bc.procmu.RUnlock()
   372  	return bc.processor
   373  }
   374  
   375  // State returns a new mutable state based on the current HEAD block.
   376  func (bc *BlockChain) State() (*state.StateDB, error) {
   377  	return bc.StateAt(bc.CurrentBlock().Root())
   378  }
   379  
   380  // StateAt returns a new mutable state based on a particular point in time.
   381  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   382  	return state.New(root, bc.stateCache)
   383  }
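
         // exampleReadBalance is an illustrative sketch added for documentation; it is
         // not part of the upstream file. It combines State (a mutable state at the
         // current HEAD root) with an account read; GetBalance is assumed to be
         // available on state.StateDB as in upstream go-ethereum.
         func exampleReadBalance(bc *BlockChain, addr common.Address) (*big.Int, error) {
         	statedb, err := bc.State() // shorthand for bc.StateAt(bc.CurrentBlock().Root())
         	if err != nil {
         		return nil, err
         	}
         	return statedb.GetBalance(addr), nil
         }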
   384  
   385  // Reset purges the entire blockchain, restoring it to its genesis state.
   386  func (bc *BlockChain) Reset() error {
   387  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   388  }
   389  
   390  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   391  // specified genesis state.
   392  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   393  	// Dump the entire block chain and purge the caches
   394  	if err := bc.SetHead(0); err != nil {
   395  		return err
   396  	}
   397  	bc.mu.Lock()
   398  	defer bc.mu.Unlock()
   399  
   400  	// Prepare the genesis block and reinitialise the chain
   401  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   402  		log.Crit("Failed to write genesis block TD", "err", err)
   403  	}
   404  	if err := WriteBlock(bc.db, genesis); err != nil {
   405  		log.Crit("Failed to write genesis block", "err", err)
   406  	}
   407  	bc.genesisBlock = genesis
   408  	bc.insert(bc.genesisBlock)
   409  	bc.currentBlock.Store(bc.genesisBlock)
   410  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   411  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   412  	bc.currentFastBlock.Store(bc.genesisBlock)
   413  
   414  	return nil
   415  }
   416  
   417  // repair tries to repair the current blockchain by rolling back the current block
   418  // until one with associated state is found. This is needed to fix incomplete db
   419  // writes caused either by crashes/power outages, or simply non-committed tries.
   420  //
   421  // This method only rolls back the current block. The current header and current
   422  // fast block are left intact.
   423  func (bc *BlockChain) repair(head **types.Block) error {
   424  	for {
   425  		// Abort if we've rewound to a head block that does have associated state
   426  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   427  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   428  			return nil
   429  		}
   430  		// Otherwise rewind one block and recheck state availability there
   431  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   432  	}
   433  }
   434  
   435  // Export writes the active chain to the given writer.
   436  func (bc *BlockChain) Export(w io.Writer) error {
   437  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   438  }
   439  
   440  // ExportN writes a subset of the active chain to the given writer.
   441  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   442  	bc.mu.RLock()
   443  	defer bc.mu.RUnlock()
   444  
   445  	if first > last {
   446  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   447  	}
   448  	log.Info("Exporting batch of blocks", "count", last-first+1)
   449  
   450  	for nr := first; nr <= last; nr++ {
   451  		block := bc.GetBlockByNumber(nr)
   452  		if block == nil {
   453  			return fmt.Errorf("export failed on #%d: not found", nr)
   454  		}
   455  
   456  		if err := block.EncodeRLP(w); err != nil {
   457  			return err
   458  		}
   459  	}
   460  
   461  	return nil
   462  }
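
         // exampleExport is an illustrative sketch added for documentation; it is not
         // part of the upstream file. Export streams the whole canonical chain as RLP
         // to the writer, while ExportN restricts the stream to an explicit inclusive
         // [first, last] range; the zero-value convention for last is this example's own.
         func exampleExport(bc *BlockChain, w io.Writer, first, last uint64) error {
         	if last == 0 {
         		return bc.Export(w) // genesis through the current head block
         	}
         	return bc.ExportN(w, first, last)
         }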
   463  
   464  // insert injects a new head block into the current block chain. This method
   465  // assumes that the block is indeed a true head. It will also reset the head
   466  // header and the head fast sync block to this very same block if they are older
   467  // or if they are on a different side chain.
   468  //
   469  // Note, this function assumes that the `mu` mutex is held!
   470  func (bc *BlockChain) insert(block *types.Block) {
   471  	// If the block is on a side chain or an unknown one, force other heads onto it too
   472  	updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   473  
   474  	// Add the block to the canonical chain number scheme and mark as the head
   475  	if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil {
   476  		log.Crit("Failed to insert block number", "err", err)
   477  	}
   478  	if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
   479  		log.Crit("Failed to insert head block hash", "err", err)
   480  	}
   481  	bc.currentBlock.Store(block)
   482  
   483  	// If the block is better than our head or is on a different chain, force update heads
   484  	if updateHeads {
   485  		bc.hc.SetCurrentHeader(block.Header())
   486  
   487  		if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
   488  			log.Crit("Failed to insert head fast block hash", "err", err)
   489  		}
   490  		bc.currentFastBlock.Store(block)
   491  	}
   492  }
   493  
   494  // Genesis retrieves the chain's genesis block.
   495  func (bc *BlockChain) Genesis() *types.Block {
   496  	return bc.genesisBlock
   497  }
   498  
   499  // GetBody retrieves a block body (transactions and uncles) from the database by
   500  // hash, caching it if found.
   501  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   502  	// Short circuit if the body's already in the cache, retrieve otherwise
   503  	if cached, ok := bc.bodyCache.Get(hash); ok {
   504  		body := cached.(*types.Body)
   505  		return body
   506  	}
   507  	body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash))
   508  	if body == nil {
   509  		return nil
   510  	}
   511  	// Cache the found body for next time and return
   512  	bc.bodyCache.Add(hash, body)
   513  	return body
   514  }
   515  
   516  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   517  // caching it if found.
   518  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   519  	// Short circuit if the body's already in the cache, retrieve otherwise
   520  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   521  		return cached.(rlp.RawValue)
   522  	}
   523  	body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash))
   524  	if len(body) == 0 {
   525  		return nil
   526  	}
   527  	// Cache the found body for next time and return
   528  	bc.bodyRLPCache.Add(hash, body)
   529  	return body
   530  }
   531  
   532  // HasBlock checks if a block is fully present in the database or not.
   533  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   534  	if bc.blockCache.Contains(hash) {
   535  		return true
   536  	}
   537  	ok, _ := bc.db.Has(blockBodyKey(hash, number))
   538  	return ok
   539  }
   540  
   541  // HasState checks if state trie is fully present in the database or not.
   542  func (bc *BlockChain) HasState(hash common.Hash) bool {
   543  	_, err := bc.stateCache.OpenTrie(hash)
   544  	return err == nil
   545  }
   546  
    547  // HasBlockAndState checks if a block and its associated state trie are fully
    548  // present in the database or not, caching the block if present.
   549  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   550  	// Check first that the block itself is known
   551  	block := bc.GetBlock(hash, number)
   552  	if block == nil {
   553  		return false
   554  	}
   555  	return bc.HasState(block.Root())
   556  }
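
         // exampleAvailability is an illustrative sketch added for documentation; it is
         // not part of the upstream file. It contrasts the two checks above: HasBlock
         // only proves the body is present (sufficient during fast sync), while
         // HasBlockAndState additionally requires the block's state trie to be openable.
         func exampleAvailability(bc *BlockChain, hash common.Hash, number uint64) (hasBody, hasFull bool) {
         	return bc.HasBlock(hash, number), bc.HasBlockAndState(hash, number)
         }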
   557  
   558  // GetBlock retrieves a block from the database by hash and number,
   559  // caching it if found.
   560  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   561  	// Short circuit if the block's already in the cache, retrieve otherwise
   562  	if block, ok := bc.blockCache.Get(hash); ok {
   563  		return block.(*types.Block)
   564  	}
   565  	block := GetBlock(bc.db, hash, number)
   566  	if block == nil {
   567  		return nil
   568  	}
   569  	// Cache the found block for next time and return
   570  	bc.blockCache.Add(block.Hash(), block)
   571  	return block
   572  }
   573  
   574  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   575  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   576  	return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
   577  }
   578  
   579  // GetBlockByNumber retrieves a block from the database by number, caching it
   580  // (associated with its hash) if found.
   581  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   582  	hash := GetCanonicalHash(bc.db, number)
   583  	if hash == (common.Hash{}) {
   584  		return nil
   585  	}
   586  	return bc.GetBlock(hash, number)
   587  }
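
         // exampleWalkCanonical is an illustrative sketch added for documentation; it
         // is not part of the upstream file. It walks the canonical chain by number via
         // GetBlockByNumber, stopping at the first missing block, and reports how many
         // contiguous blocks were found in the requested range.
         func exampleWalkCanonical(bc *BlockChain, from, to uint64) int {
         	count := 0
         	for nr := from; nr <= to; nr++ {
         		if bc.GetBlockByNumber(nr) == nil {
         			break
         		}
         		count++
         	}
         	return count
         }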
   588  
   589  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   590  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   591  	return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash))
   592  }
   593  
   594  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   595  // [deprecated by eth/62]
   596  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   597  	number := bc.hc.GetBlockNumber(hash)
   598  	for i := 0; i < n; i++ {
   599  		block := bc.GetBlock(hash, number)
   600  		if block == nil {
   601  			break
   602  		}
   603  		blocks = append(blocks, block)
   604  		hash = block.ParentHash()
   605  		number--
   606  	}
   607  	return
   608  }
   609  
   610  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   611  // a specific distance is reached.
   612  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   613  	uncles := []*types.Header{}
   614  	for i := 0; block != nil && i < length; i++ {
   615  		uncles = append(uncles, block.Uncles()...)
   616  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   617  	}
   618  	return uncles
   619  }
   620  
   621  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   622  // either from ephemeral in-memory cache, or from persistent storage.
   623  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   624  	return bc.stateCache.TrieDB().Node(hash)
   625  }
   626  
   627  // Stop stops the blockchain service. If any imports are currently in progress
   628  // it will abort them using the procInterrupt.
   629  func (bc *BlockChain) Stop() {
   630  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   631  		return
   632  	}
   633  	// Unsubscribe all subscriptions registered from blockchain
   634  	bc.scope.Close()
   635  	close(bc.quit)
   636  	atomic.StoreInt32(&bc.procInterrupt, 1)
   637  
   638  	bc.wg.Wait()
   639  
   640  	// Ensure the state of a recent block is also stored to disk before exiting.
   641  	// We're writing three different states to catch different restart scenarios:
   642  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   643  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   644  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   645  	if !bc.cacheConfig.Disabled {
   646  		triedb := bc.stateCache.TrieDB()
   647  
   648  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   649  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   650  				recent := bc.GetBlockByNumber(number - offset)
   651  
   652  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   653  				if err := triedb.Commit(recent.Root(), true); err != nil {
   654  					log.Error("Failed to commit recent state trie", "err", err)
   655  				}
   656  			}
   657  		}
   658  		for !bc.triegc.Empty() {
   659  			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
   660  		}
   661  		if size := triedb.Size(); size != 0 {
   662  			log.Error("Dangling trie nodes after full cleanup")
   663  		}
   664  	}
   665  	log.Info("Blockchain manager stopped")
   666  }
   667  
   668  func (bc *BlockChain) procFutureBlocks() {
   669  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   670  	for _, hash := range bc.futureBlocks.Keys() {
   671  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   672  			blocks = append(blocks, block.(*types.Block))
   673  		}
   674  	}
   675  	if len(blocks) > 0 {
   676  		types.BlockBy(types.Number).Sort(blocks)
   677  
   678  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   679  		for i := range blocks {
   680  			bc.InsertChain(blocks[i : i+1])
   681  		}
   682  	}
   683  }
   684  
    685  // WriteStatus is the status of a block write operation.
   686  type WriteStatus byte
   687  
   688  const (
   689  	NonStatTy WriteStatus = iota
   690  	CanonStatTy
   691  	SideStatTy
   692  )
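
         // exampleHandleWriteStatus is an illustrative sketch added for documentation;
         // it is not part of the upstream file. It shows how a caller of
         // WriteBlockWithState typically branches on the returned WriteStatus.
         func exampleHandleWriteStatus(block *types.Block, status WriteStatus) {
         	switch status {
         	case CanonStatTy:
         		log.Debug("Block extended the canonical chain", "number", block.Number(), "hash", block.Hash())
         	case SideStatTy:
         		log.Debug("Block stored on a side chain", "number", block.Number(), "hash", block.Hash())
         	case NonStatTy:
         		log.Debug("Block was not written", "number", block.Number(), "hash", block.Hash())
         	}
         }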
   693  
   694  // Rollback is designed to remove a chain of links from the database that aren't
   695  // certain enough to be valid.
   696  func (bc *BlockChain) Rollback(chain []common.Hash) {
   697  	bc.mu.Lock()
   698  	defer bc.mu.Unlock()
   699  
   700  	for i := len(chain) - 1; i >= 0; i-- {
   701  		hash := chain[i]
   702  
   703  		currentHeader := bc.hc.CurrentHeader()
   704  		if currentHeader.Hash() == hash {
   705  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   706  		}
   707  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   708  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   709  			bc.currentFastBlock.Store(newFastBlock)
   710  			WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   711  		}
   712  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   713  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   714  			bc.currentBlock.Store(newBlock)
   715  			WriteHeadBlockHash(bc.db, newBlock.Hash())
   716  		}
   717  	}
   718  }
   719  
   720  // SetReceiptsData computes all the non-consensus fields of the receipts
   721  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) {
   722  	signer := types.MakeSigner(config, block.Number())
   723  
   724  	transactions, logIndex := block.Transactions(), uint(0)
   725  
   726  	for j := 0; j < len(receipts); j++ {
   727  		// The transaction hash can be retrieved from the transaction itself
   728  		receipts[j].TxHash = transactions[j].Hash()
   729  
   730  		// The contract address can be derived from the transaction itself
   731  		if transactions[j].To() == nil {
   732  			// Deriving the signer is expensive, only do if it's actually needed
   733  			from, _ := types.Sender(signer, transactions[j])
   734  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   735  		}
   736  		// The used gas can be calculated based on previous receipts
   737  		if j == 0 {
   738  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   739  		} else {
   740  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   741  		}
   742  		// The derived log fields can simply be set from the block and transaction
   743  		for k := 0; k < len(receipts[j].Logs); k++ {
   744  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   745  			receipts[j].Logs[k].BlockHash = block.Hash()
   746  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   747  			receipts[j].Logs[k].TxIndex = uint(j)
   748  			receipts[j].Logs[k].Index = logIndex
   749  			logIndex++
   750  		}
   751  	}
   752  }
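
         // exampleFillReceiptFields is an illustrative sketch added for documentation;
         // it is not part of the upstream file. Receipts received over the network carry
         // only the consensus fields, so InsertReceiptChain-style callers recompute
         // TxHash, ContractAddress, GasUsed and the log positions via SetReceiptsData
         // before persisting them.
         func exampleFillReceiptFields(bc *BlockChain, block *types.Block, receipts types.Receipts) types.Receipts {
         	SetReceiptsData(bc.chainConfig, block, receipts)
         	return receipts
         }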
   753  
   754  // InsertReceiptChain attempts to complete an already existing header chain with
   755  // transaction and receipt data.
   756  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   757  	bc.wg.Add(1)
   758  	defer bc.wg.Done()
   759  
   760  	// Do a sanity check that the provided chain is actually ordered and linked
   761  	for i := 1; i < len(blockChain); i++ {
   762  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   763  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   764  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   765  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   766  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   767  		}
   768  	}
   769  
   770  	var (
   771  		stats = struct{ processed, ignored int32 }{}
   772  		start = time.Now()
   773  		bytes = 0
   774  		batch = bc.db.NewBatch()
   775  	)
   776  	for i, block := range blockChain {
   777  		receipts := receiptChain[i]
   778  		// Short circuit insertion if shutting down or processing failed
   779  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   780  			return 0, nil
   781  		}
   782  		// Short circuit if the owner header is unknown
   783  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   784  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   785  		}
   786  		// Skip if the entire data is already known
   787  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   788  			stats.ignored++
   789  			continue
   790  		}
   791  		// Compute all the non-consensus fields of the receipts
   792  		SetReceiptsData(bc.chainConfig, block, receipts)
   793  		// Write all the data out into the database
   794  		if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
   795  			return i, fmt.Errorf("failed to write block body: %v", err)
   796  		}
   797  		if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
   798  			return i, fmt.Errorf("failed to write block receipts: %v", err)
   799  		}
   800  		if err := WriteTxLookupEntries(batch, block); err != nil {
   801  			return i, fmt.Errorf("failed to write lookup metadata: %v", err)
   802  		}
   803  		stats.processed++
   804  
   805  		if batch.ValueSize() >= ethdb.IdealBatchSize {
   806  			if err := batch.Write(); err != nil {
   807  				return 0, err
   808  			}
   809  			bytes += batch.ValueSize()
   810  			batch.Reset()
   811  		}
   812  	}
   813  	if batch.ValueSize() > 0 {
   814  		bytes += batch.ValueSize()
   815  		if err := batch.Write(); err != nil {
   816  			return 0, err
   817  		}
   818  	}
   819  
   820  	// Update the head fast sync block if better
   821  	bc.mu.Lock()
   822  	head := blockChain[len(blockChain)-1]
   823  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   824  		currentFastBlock := bc.CurrentFastBlock()
   825  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   826  			if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
   827  				log.Crit("Failed to update head fast block hash", "err", err)
   828  			}
   829  			bc.currentFastBlock.Store(head)
   830  		}
   831  	}
   832  	bc.mu.Unlock()
   833  
   834  	log.Info("Imported new block receipts",
   835  		"count", stats.processed,
   836  		"elapsed", common.PrettyDuration(time.Since(start)),
   837  		"number", head.Number(),
   838  		"hash", head.Hash(),
   839  		"size", common.StorageSize(bytes),
   840  		"ignored", stats.ignored)
   841  	return 0, nil
   842  }
   843  
   844  var lastWrite uint64
   845  
   846  // WriteBlockWithoutState writes only the block and its metadata to the database,
   847  // but does not write any state. This is used to construct competing side forks
   848  // up to the point where they exceed the canonical total difficulty.
   849  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   850  	bc.wg.Add(1)
   851  	defer bc.wg.Done()
   852  
   853  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   854  		return err
   855  	}
   856  	if err := WriteBlock(bc.db, block); err != nil {
   857  		return err
   858  	}
   859  	return nil
   860  }
   861  
   862  // WriteBlockWithState writes the block and all associated state to the database.
   863  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   864  	bc.wg.Add(1)
   865  	defer bc.wg.Done()
   866  
   867  	// Calculate the total difficulty of the block
   868  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   869  	if ptd == nil {
   870  		return NonStatTy, consensus.ErrUnknownAncestor
   871  	}
   872  	// Make sure no inconsistent state is leaked during insertion
   873  	bc.mu.Lock()
   874  	defer bc.mu.Unlock()
   875  
   876  	currentBlock := bc.CurrentBlock()
   877  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   878  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   879  
    880  	// Irrespective of the canonical status, write the block itself to the database
   881  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   882  		return NonStatTy, err
   883  	}
   884  	// Write other block data using a batch.
   885  	batch := bc.db.NewBatch()
   886  	if err := WriteBlock(batch, block); err != nil {
   887  		return NonStatTy, err
   888  	}
   889  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   890  	if err != nil {
   891  		return NonStatTy, err
   892  	}
   893  	triedb := bc.stateCache.TrieDB()
   894  
   895  	// If we're running an archive node, always flush
   896  	if bc.cacheConfig.Disabled {
   897  		if err := triedb.Commit(root, false); err != nil {
   898  			return NonStatTy, err
   899  		}
   900  	} else {
   901  		// Full but not archive node, do proper garbage collection
   902  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   903  		bc.triegc.Push(root, -float32(block.NumberU64()))
   904  
   905  		if current := block.NumberU64(); current > triesInMemory {
   906  			// Find the next state trie we need to commit
   907  			header := bc.GetHeaderByNumber(current - triesInMemory)
   908  			chosen := header.Number.Uint64()
   909  
   910  			// Only write to disk if we exceeded our memory allowance *and* also have at
   911  			// least a given number of tries gapped.
   912  			var (
   913  				size  = triedb.Size()
   914  				limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   915  			)
   916  			if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   917  				// If we're exceeding limits but haven't reached a large enough memory gap,
   918  				// warn the user that the system is becoming unstable.
   919  				if chosen < lastWrite+triesInMemory {
   920  					switch {
   921  					case size >= 2*limit:
   922  						log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   923  					case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
   924  						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   925  					}
   926  				}
   927  				// If optimum or critical limits reached, write to disk
   928  				if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   929  					triedb.Commit(header.Root, true)
   930  					lastWrite = chosen
   931  					bc.gcproc = 0
   932  				}
   933  			}
   934  			// Garbage collect anything below our required write retention
   935  			for !bc.triegc.Empty() {
   936  				root, number := bc.triegc.Pop()
   937  				if uint64(-number) > chosen {
   938  					bc.triegc.Push(root, number)
   939  					break
   940  				}
   941  				triedb.Dereference(root.(common.Hash), common.Hash{})
   942  			}
   943  		}
   944  	}
   945  	if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
   946  		return NonStatTy, err
   947  	}
   948  	// If the total difficulty is higher than our known, add it to the canonical chain
   949  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   950  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   951  	reorg := externTd.Cmp(localTd) > 0
   952  	currentBlock = bc.CurrentBlock()
   953  	if !reorg && externTd.Cmp(localTd) == 0 {
   954  		// Split same-difficulty blocks by number, then at random
   955  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
   956  	}
   957  	if reorg {
   958  		// Reorganise the chain if the parent is not the head block
   959  		if block.ParentHash() != currentBlock.Hash() {
   960  			if err := bc.reorg(currentBlock, block); err != nil {
   961  				return NonStatTy, err
   962  			}
   963  		}
   964  		// Write the positional metadata for transaction and receipt lookups
   965  		if err := WriteTxLookupEntries(batch, block); err != nil {
   966  			return NonStatTy, err
   967  		}
   968  		// Write hash preimages
   969  		if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil {
   970  			return NonStatTy, err
   971  		}
   972  		status = CanonStatTy
   973  	} else {
   974  		status = SideStatTy
   975  	}
   976  	if err := batch.Write(); err != nil {
   977  		return NonStatTy, err
   978  	}
   979  
   980  	// Set new head.
   981  	if status == CanonStatTy {
   982  		bc.insert(block)
   983  	}
   984  	bc.futureBlocks.Remove(block.Hash())
   985  	return status, nil
   986  }
   987  
    988  // InsertChain attempts to insert the given batch of blocks into the canonical
    989  // chain or, otherwise, create a fork. If an error is returned it will return
    990  // the index number of the failing block as well as an error describing what went
   991  // wrong.
   992  //
   993  // After insertion is done, all accumulated events will be fired.
   994  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
   995  	n, events, logs, err := bc.insertChain(chain)
   996  	bc.PostChainEvents(events, logs)
   997  	return n, err
   998  }
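
         // exampleImportBlocks is an illustrative sketch added for documentation; it is
         // not part of the upstream file. It demonstrates the InsertChain contract: when
         // an error is returned, the accompanying index identifies the offending block
         // within the supplied batch.
         func exampleImportBlocks(bc *BlockChain, blocks types.Blocks) error {
         	n, err := bc.InsertChain(blocks)
         	if err != nil && n < len(blocks) {
         		return fmt.Errorf("import aborted at batch index %d (block #%d): %v", n, blocks[n].NumberU64(), err)
         	}
         	return err
         }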
   999  
  1000  // insertChain will execute the actual chain insertion and event aggregation. The
  1001  // only reason this method exists as a separate one is to make locking cleaner
  1002  // with deferred statements.
  1003  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1004  	// Do a sanity check that the provided chain is actually ordered and linked
  1005  	for i := 1; i < len(chain); i++ {
  1006  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
   1007  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1008  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1009  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1010  
  1011  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1012  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1013  		}
  1014  	}
  1015  	// Pre-checks passed, start the full block imports
  1016  	bc.wg.Add(1)
  1017  	defer bc.wg.Done()
  1018  
  1019  	bc.chainmu.Lock()
  1020  	defer bc.chainmu.Unlock()
  1021  
  1022  	// A queued approach to delivering events. This is generally
  1023  	// faster than direct delivery and requires much less mutex
  1024  	// acquiring.
  1025  	var (
  1026  		stats         = insertStats{startTime: mclock.Now()}
  1027  		events        = make([]interface{}, 0, len(chain))
  1028  		lastCanon     *types.Block
  1029  		coalescedLogs []*types.Log
  1030  	)
  1031  	// Start the parallel header verifier
  1032  	headers := make([]*types.Header, len(chain))
  1033  	seals := make([]bool, len(chain))
  1034  
  1035  	for i, block := range chain {
  1036  		headers[i] = block.Header()
  1037  		seals[i] = true
  1038  	}
  1039  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1040  	defer close(abort)
  1041  
  1042  	// Iterate over the blocks and insert when the verifier permits
  1043  	for i, block := range chain {
  1044  		// If the chain is terminating, stop processing blocks
  1045  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1046  			log.Debug("Premature abort during blocks processing")
  1047  			break
  1048  		}
  1049  		// If the header is a banned one, straight out abort
  1050  		if BadHashes[block.Hash()] {
  1051  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1052  			return i, events, coalescedLogs, ErrBlacklistedHash
  1053  		}
  1054  		// Wait for the block's verification to complete
  1055  		bstart := time.Now()
  1056  
  1057  		err := <-results
  1058  		if err == nil {
  1059  			err = bc.Validator().ValidateBody(block)
  1060  		}
  1061  		switch {
  1062  		case err == ErrKnownBlock:
  1063  			// Block and state both already known. However if the current block is below
  1064  			// this number we did a rollback and we should reimport it nonetheless.
  1065  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1066  				stats.ignored++
  1067  				continue
  1068  			}
  1069  
  1070  		case err == consensus.ErrFutureBlock:
   1071  			// Allow blocks up to maxTimeFutureBlocks seconds in the future. If this limit
   1072  			// is exceeded the chain is discarded and processed at a later time if given.
  1073  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1074  			if block.Time().Cmp(max) > 0 {
  1075  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1076  			}
  1077  			bc.futureBlocks.Add(block.Hash(), block)
  1078  			stats.queued++
  1079  			continue
  1080  
  1081  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1082  			bc.futureBlocks.Add(block.Hash(), block)
  1083  			stats.queued++
  1084  			continue
  1085  
  1086  		case err == consensus.ErrPrunedAncestor:
  1087  			// Block competing with the canonical chain, store in the db, but don't process
  1088  			// until the competitor TD goes above the canonical TD
  1089  			currentBlock := bc.CurrentBlock()
  1090  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1091  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1092  			if localTd.Cmp(externTd) > 0 {
  1093  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1094  					return i, events, coalescedLogs, err
  1095  				}
  1096  				continue
  1097  			}
  1098  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1099  			var winner []*types.Block
  1100  
  1101  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1102  			for !bc.HasState(parent.Root()) {
  1103  				winner = append(winner, parent)
  1104  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1105  			}
  1106  			for j := 0; j < len(winner)/2; j++ {
  1107  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1108  			}
  1109  			// Import all the pruned blocks to make the state available
  1110  			bc.chainmu.Unlock()
  1111  			_, evs, logs, err := bc.insertChain(winner)
  1112  			bc.chainmu.Lock()
  1113  			events, coalescedLogs = evs, logs
  1114  
  1115  			if err != nil {
  1116  				return i, events, coalescedLogs, err
  1117  			}
  1118  
  1119  		case err != nil:
  1120  			bc.reportBlock(block, nil, err)
  1121  			return i, events, coalescedLogs, err
  1122  		}
  1123  		// Create a new statedb using the parent block and report an
  1124  		// error if it fails.
  1125  		var parent *types.Block
  1126  		if i == 0 {
  1127  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1128  		} else {
  1129  			parent = chain[i-1]
  1130  		}
  1131  		state, err := state.New(parent.Root(), bc.stateCache)
  1132  		if err != nil {
  1133  			return i, events, coalescedLogs, err
  1134  		}
  1135  		// Process block using the parent state as reference point.
  1136  		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1137  		if err != nil {
  1138  			bc.reportBlock(block, receipts, err)
  1139  			return i, events, coalescedLogs, err
  1140  		}
  1141  		// Validate the state using the default validator
  1142  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1143  		if err != nil {
  1144  			bc.reportBlock(block, receipts, err)
  1145  			return i, events, coalescedLogs, err
  1146  		}
  1147  		proctime := time.Since(bstart)
  1148  
  1149  		// Write the block to the chain and get the status.
  1150  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1151  		if err != nil {
  1152  			return i, events, coalescedLogs, err
  1153  		}
  1154  		switch status {
  1155  		case CanonStatTy:
  1156  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1157  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1158  
  1159  			coalescedLogs = append(coalescedLogs, logs...)
  1160  			blockInsertTimer.UpdateSince(bstart)
  1161  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1162  			lastCanon = block
  1163  
  1164  			// Only count canonical blocks for GC processing time
  1165  			bc.gcproc += proctime
  1166  
  1167  		case SideStatTy:
  1168  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1169  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1170  
  1171  			blockInsertTimer.UpdateSince(bstart)
  1172  			events = append(events, ChainSideEvent{block})
  1173  		}
  1174  		stats.processed++
  1175  		stats.usedGas += usedGas
  1176  		stats.report(chain, i, bc.stateCache.TrieDB().Size())
  1177  	}
  1178  	// Append a single chain head event if we've progressed the chain
  1179  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1180  		events = append(events, ChainHeadEvent{lastCanon})
  1181  	}
  1182  	return 0, events, coalescedLogs, nil
  1183  }
  1184  
  1185  // insertStats tracks and reports on block insertion.
  1186  type insertStats struct {
  1187  	queued, processed, ignored int
  1188  	usedGas                    uint64
  1189  	lastIndex                  int
  1190  	startTime                  mclock.AbsTime
  1191  }
  1192  
  1193  // statsReportLimit is the time limit during import after which we always print
  1194  // out progress. This avoids the user wondering what's going on.
  1195  const statsReportLimit = 8 * time.Second
  1196  
  1197  // report prints statistics if some number of blocks have been processed
  1198  // or more than a few seconds have passed since the last message.
  1199  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1200  	// Fetch the timings for the batch
  1201  	var (
  1202  		now     = mclock.Now()
  1203  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1204  	)
  1205  	// If we're at the last block of the batch or report period reached, log
  1206  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1207  		var (
  1208  			end = chain[index]
  1209  			txs = countTransactions(chain[st.lastIndex : index+1])
  1210  		)
  1211  		context := []interface{}{
  1212  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1213  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1214  			"number", end.Number(), "hash", end.Hash(), "cache", cache,
  1215  		}
  1216  		if st.queued > 0 {
  1217  			context = append(context, []interface{}{"queued", st.queued}...)
  1218  		}
  1219  		if st.ignored > 0 {
  1220  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1221  		}
  1222  		log.Info("Imported new chain segment", context...)
  1223  
  1224  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1225  	}
  1226  }
  1227  
  1228  func countTransactions(chain []*types.Block) (c int) {
  1229  	for _, b := range chain {
  1230  		c += len(b.Transactions())
  1231  	}
  1232  	return c
  1233  }
  1234  
   1235  // reorg takes two blocks, an old chain and a new chain, and reconstructs the
   1236  // blocks so they become part of the new canonical chain. It also accumulates
   1237  // potential missing transactions and posts an event about them.
  1238  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1239  	var (
  1240  		newChain    types.Blocks
  1241  		oldChain    types.Blocks
  1242  		commonBlock *types.Block
  1243  		deletedTxs  types.Transactions
  1244  		deletedLogs []*types.Log
  1245  		// collectLogs collects the logs that were generated during the
  1246  		// processing of the block that corresponds with the given hash.
  1247  		// These logs are later announced as deleted.
  1248  		collectLogs = func(h common.Hash) {
  1249  			// Coalesce logs and set 'Removed'.
  1250  			receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h))
  1251  			for _, receipt := range receipts {
  1252  				for _, log := range receipt.Logs {
  1253  					del := *log
  1254  					del.Removed = true
  1255  					deletedLogs = append(deletedLogs, &del)
  1256  				}
  1257  			}
  1258  		}
  1259  	)
  1260  
   1261  	// first reduce whichever chain is higher down to the height of the other
  1262  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1263  		// reduce old chain
  1264  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1265  			oldChain = append(oldChain, oldBlock)
  1266  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1267  
  1268  			collectLogs(oldBlock.Hash())
  1269  		}
  1270  	} else {
  1271  		// reduce new chain and append new chain blocks for inserting later on
  1272  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1273  			newChain = append(newChain, newBlock)
  1274  		}
  1275  	}
  1276  	if oldBlock == nil {
  1277  		return errors.New("invalid old chain")
  1278  	}
  1279  	if newBlock == nil {
  1280  		return errors.New("invalid new chain")
  1281  	}
  1282  
  1283  	for {
  1284  		if oldBlock.Hash() == newBlock.Hash() {
  1285  			commonBlock = oldBlock
  1286  			break
  1287  		}
  1288  
  1289  		oldChain = append(oldChain, oldBlock)
  1290  		newChain = append(newChain, newBlock)
  1291  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1292  		collectLogs(oldBlock.Hash())
  1293  
  1294  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1295  		if oldBlock == nil {
  1296  			return errors.New("invalid old chain")
  1297  		}
  1298  		if newBlock == nil {
  1299  			return errors.New("invalid new chain")
  1300  		}
  1301  	}
  1302  	// Ensure the user sees large reorgs
  1303  	if len(oldChain) > 0 && len(newChain) > 0 {
  1304  		logFn := log.Debug
  1305  		if len(oldChain) > 63 {
  1306  			logFn = log.Warn
  1307  		}
  1308  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1309  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1310  	} else {
  1311  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1312  	}
  1313  	// Insert the new chain, taking care of the proper incremental order
  1314  	var addedTxs types.Transactions
  1315  	for i := len(newChain) - 1; i >= 0; i-- {
  1316  		// insert the block in the canonical way, re-writing history
  1317  		bc.insert(newChain[i])
  1318  		// write lookup entries for hash based transaction/receipt searches
  1319  		if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil {
  1320  			return err
  1321  		}
  1322  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1323  	}
  1324  	// calculate the difference between deleted and added transactions
  1325  	diff := types.TxDifference(deletedTxs, addedTxs)
  1326  	// When transactions get deleted from the database, the lookup entries used for
  1327  	// hash-based transaction/receipt retrieval in the dropped fork must also be removed
  1328  	for _, tx := range diff {
  1329  		DeleteTxLookupEntry(bc.db, tx.Hash())
  1330  	}
  1331  	if len(deletedLogs) > 0 {
  1332  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1333  	}
  1334  	if len(oldChain) > 0 {
  1335  		go func() {
  1336  			for _, block := range oldChain {
  1337  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1338  			}
  1339  		}()
  1340  	}
  1341  
  1342  	return nil
  1343  }
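
        // To illustrate the ancestor search in reorg: if the old head is at height 105
        // and the new head at height 103, the first loop walks the old chain back to
        // height 103, collecting the dropped blocks, transactions and logs along the
        // way. The second loop then steps both cursors back in lockstep until their
        // hashes match; that block becomes commonBlock, newChain holds the blocks to
        // re-insert (collected newest-first, hence the reversed insertion loop) and
        // oldChain holds the blocks that drop out of the canonical chain.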
  1344  
  1345  // PostChainEvents iterates over the events generated by a chain insertion and
  1346  // posts them into the event feed.
  1347  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1348  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1349  	// post event logs for further processing
  1350  	if logs != nil {
  1351  		bc.logsFeed.Send(logs)
  1352  	}
  1353  	for _, event := range events {
  1354  		switch ev := event.(type) {
  1355  		case ChainEvent:
  1356  			bc.chainFeed.Send(ev)
  1357  
  1358  		case ChainHeadEvent:
  1359  			bc.chainHeadFeed.Send(ev)
  1360  
  1361  		case ChainSideEvent:
  1362  			bc.chainSideFeed.Send(ev)
  1363  		}
  1364  	}
  1365  }
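
        // A minimal sketch of how a caller might feed PostChainEvents, with block and
        // logs standing in for a freshly imported block and its logs; the ChainEvent and
        // ChainHeadEvent field names shown are assumed from this package's event
        // definitions:
        //
        //	events := []interface{}{
        //		ChainEvent{Block: block, Hash: block.Hash(), Logs: logs},
        //		ChainHeadEvent{Block: block},
        //	}
        //	bc.PostChainEvents(events, logs)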
  1366  
  1367  func (bc *BlockChain) update() {
  1368  	futureTimer := time.NewTicker(5 * time.Second)
  1369  	defer futureTimer.Stop()
  1370  	for {
  1371  		select {
  1372  		case <-futureTimer.C:
  1373  			bc.procFutureBlocks()
  1374  		case <-bc.quit:
  1375  			return
  1376  		}
  1377  	}
  1378  }
  1379  
  1380  // BadBlockArgs represents the entries in the list returned when bad blocks are queried.
  1381  type BadBlockArgs struct {
  1382  	Hash   common.Hash   `json:"hash"`
  1383  	Header *types.Header `json:"header"`
  1384  }
  1385  
  1386  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network.
  1387  func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) {
  1388  	headers := make([]BadBlockArgs, 0, bc.badBlocks.Len())
  1389  	for _, hash := range bc.badBlocks.Keys() {
  1390  		if hdr, exist := bc.badBlocks.Peek(hash); exist {
  1391  			header := hdr.(*types.Header)
  1392  			headers = append(headers, BadBlockArgs{header.Hash(), header})
  1393  		}
  1394  	}
  1395  	return headers, nil
  1396  }
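
        // A minimal usage sketch for BadBlocks, e.g. when surfacing them over a debug
        // API (bc is assumed to be an initialized *BlockChain):
        //
        //	if bad, err := bc.BadBlocks(); err == nil {
        //		for _, b := range bad {
        //			log.Warn("Bad block seen", "number", b.Header.Number, "hash", b.Hash)
        //		}
        //	}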
  1397  
  1398  // addBadBlock adds a bad block to the bad-block LRU cache.
  1399  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1400  	bc.badBlocks.Add(block.Header().Hash(), block.Header())
  1401  }
  1402  
  1403  // reportBlock logs a bad block error.
  1404  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1405  	bc.addBadBlock(block)
  1406  
  1407  	var receiptString string
  1408  	for _, receipt := range receipts {
  1409  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1410  	}
  1411  	log.Error(fmt.Sprintf(`
  1412  ########## BAD BLOCK #########
  1413  Chain config: %v
  1414  
  1415  Number: %v
  1416  Hash: 0x%x
  1417  %v
  1418  
  1419  Error: %v
  1420  ##############################
  1421  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1422  }
  1423  
  1424  // InsertHeaderChain attempts to insert the given header chain into the local
  1425  // chain, possibly creating a reorg. If an error is returned, it will return the
  1426  // index number of the failing header as well as an error describing what went wrong.
  1427  //
  1428  // The checkFreq parameter can be used to fine-tune whether nonce verification
  1429  // should be done or not. The reason behind the optional check is that some of
  1430  // the header retrieval mechanisms already need to verify nonces, and nonces can
  1431  // also be verified sparsely, without needing to check each one.
  1432  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1433  	start := time.Now()
  1434  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1435  		return i, err
  1436  	}
  1437  
  1438  	// Make sure only one thread manipulates the chain at once
  1439  	bc.chainmu.Lock()
  1440  	defer bc.chainmu.Unlock()
  1441  
  1442  	bc.wg.Add(1)
  1443  	defer bc.wg.Done()
  1444  
  1445  	whFunc := func(header *types.Header) error {
  1446  		bc.mu.Lock()
  1447  		defer bc.mu.Unlock()
  1448  
  1449  		_, err := bc.hc.WriteHeader(header)
  1450  		return err
  1451  	}
  1452  
  1453  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1454  }
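
        // A usage sketch for InsertHeaderChain: a caller that has already validated the
        // headers elsewhere can pass a sparse verification frequency, e.g.
        //
        //	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
        //		log.Warn("Header import failed", "index", n, "err", err)
        //	}
        //
        // where a checkFreq of 100 is taken to mean that only roughly one header in a
        // hundred has its nonce fully verified; the exact sampling is left to
        // bc.hc.ValidateHeaderChain.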
  1455  
  1456  // writeHeader writes a header into the local chain, given that its parent is
  1457  // already known. If the total difficulty of the newly inserted header becomes
  1458  // greater than the current known TD, the canonical chain is re-routed.
  1459  //
  1460  // Note: This method is not concurrent-safe with inserting blocks simultaneously
  1461  // into the chain, as side effects caused by reorganisations cannot be emulated
  1462  // without the real blocks. Hence, writing headers directly should only be done
  1463  // in two scenarios: pure-header mode of operation (light clients), or properly
  1464  // separated header/block phases (non-archive clients).
  1465  func (bc *BlockChain) writeHeader(header *types.Header) error {
  1466  	bc.wg.Add(1)
  1467  	defer bc.wg.Done()
  1468  
  1469  	bc.mu.Lock()
  1470  	defer bc.mu.Unlock()
  1471  
  1472  	_, err := bc.hc.WriteHeader(header)
  1473  	return err
  1474  }
  1475  
  1476  // CurrentHeader retrieves the current head header of the canonical chain. The
  1477  // header is retrieved from the HeaderChain's internal cache.
  1478  func (bc *BlockChain) CurrentHeader() *types.Header {
  1479  	return bc.hc.CurrentHeader()
  1480  }
  1481  
  1482  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1483  // database by hash and number, caching it if found.
  1484  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1485  	return bc.hc.GetTd(hash, number)
  1486  }
  1487  
  1488  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1489  // database by hash, caching it if found.
  1490  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1491  	return bc.hc.GetTdByHash(hash)
  1492  }
  1493  
  1494  // GetHeader retrieves a block header from the database by hash and number,
  1495  // caching it if found.
  1496  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1497  	return bc.hc.GetHeader(hash, number)
  1498  }
  1499  
  1500  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1501  // found.
  1502  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1503  	return bc.hc.GetHeaderByHash(hash)
  1504  }
  1505  
  1506  // HasHeader checks if a block header is present in the database or not, caching
  1507  // it if present.
  1508  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1509  	return bc.hc.HasHeader(hash, number)
  1510  }
  1511  
  1512  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1513  // hash, fetching towards the genesis block.
  1514  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1515  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1516  }
  1517  
  1518  // GetHeaderByNumber retrieves a block header from the database by number,
  1519  // caching it (associated with its hash) if found.
  1520  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1521  	return bc.hc.GetHeaderByNumber(number)
  1522  }
  1523  
  1524  // Config retrieves the blockchain's chain configuration.
  1525  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1526  
  1527  // Engine retrieves the blockchain's consensus engine.
  1528  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1529  
  1530  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1531  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1532  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1533  }
  1534  
  1535  // SubscribeChainEvent registers a subscription of ChainEvent.
  1536  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1537  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1538  }
  1539  
  1540  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1541  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1542  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1543  }
  1544  
  1545  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1546  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1547  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1548  }
  1549  
  1550  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1551  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1552  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1553  }
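
        // A minimal sketch of consuming one of the subscriptions above, assuming an
        // initialized *BlockChain named bc and that ChainHeadEvent carries the new head
        // in its Block field:
        //
        //	ch := make(chan ChainHeadEvent, 16)
        //	sub := bc.SubscribeChainHeadEvent(ch)
        //	defer sub.Unsubscribe()
        //	for {
        //		select {
        //		case ev := <-ch:
        //			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
        //		case err := <-sub.Err():
        //			if err != nil {
        //				log.Warn("Head subscription ended", "err", err)
        //			}
        //			return
        //		}
        //	}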