github.com/aaa256/atlantis@v0.0.0-20210707112435-42ee889287a2/core/blockchain.go

     1  // Copyright 2014 The go-athereum Authors
     2  // This file is part of the go-athereum library.
     3  //
     4  // The go-athereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-athereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-athereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Atlantis consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/athereum/go-athereum/common"
    31  	"github.com/athereum/go-athereum/common/mclock"
    32  	"github.com/athereum/go-athereum/consensus"
    33  	"github.com/athereum/go-athereum/core/rawdb"
    34  	"github.com/athereum/go-athereum/core/state"
    35  	"github.com/athereum/go-athereum/core/types"
    36  	"github.com/athereum/go-athereum/core/vm"
    37  	"github.com/athereum/go-athereum/crypto"
    38  	"github.com/athereum/go-athereum/athdb"
    39  	"github.com/athereum/go-athereum/event"
    40  	"github.com/athereum/go-athereum/log"
    41  	"github.com/athereum/go-athereum/metrics"
    42  	"github.com/athereum/go-athereum/params"
    43  	"github.com/athereum/go-athereum/rlp"
    44  	"github.com/athereum/go-athereum/trie"
    45  	"github.com/hashicorp/golang-lru"
    46  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    47  )
    48  
    49  var (
    50  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    51  
    52  	ErrNoGenesis = errors.New("Genesis not found in chain")
    53  )
    54  
    55  const (
    56  	bodyCacheLimit      = 256
    57  	blockCacheLimit     = 256
    58  	maxFutureBlocks     = 256
    59  	maxTimeFutureBlocks = 30
    60  	badBlockLimit       = 10
    61  	triesInMemory       = 128
    62  
    63  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    64  	BlockChainVersion = 3
    65  )
    66  
    67  // CacheConfig contains the configuration values for the trie caching/pruning
    68  // that's resident in a blockchain.
    69  type CacheConfig struct {
    70  	Disabled      bool          // Whether to disable trie write caching (archive node)
    71  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    72  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    73  }
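
// Illustrative sketch (added commentary, not part of the original source): the
// two common ways CacheConfig is used. The concrete values mirror the defaults
// applied by NewBlockChain below and are not tuning recommendations.
//
//	pruning := &CacheConfig{
//		TrieNodeLimit: 256 * 1024 * 1024, // flush in-memory tries above ~256MB
//		TrieTimeLimit: 5 * time.Minute,   // or after 5 minutes of accumulated processing
//	}
//	archive := &CacheConfig{
//		Disabled: true, // archive node: write every trie to disk, never prune
//	}
//	_, _ = pruning, archive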
    74  
    75  // BlockChain represents the canonical chain given a database with a genesis
    76  // block. The Blockchain manages chain imports, reverts, chain reorganisations.
    77  //
    78  // Importing blocks into the block chain happens according to the set of rules
    79  // defined by the two-stage Validator. Blocks are processed using the Processor,
    80  // which executes the included transactions. The validation of the state is
    81  // done in the second part of the Validator. A failure results in the import
    82  // being aborted.
    83  //
    84  // The BlockChain also helps in returning blocks from **any** chain included
    85  // in the database as well as blocks that represent the canonical chain. It's
    86  // important to note that GetBlock can return any block and does not need to be
    87  // included in the canonical one, whereas GetBlockByNumber always represents the
    88  // canonical chain.
    89  type BlockChain struct {
    90  	chainConfig *params.ChainConfig // Chain & network configuration
    91  	cacheConfig *CacheConfig        // Cache configuration for pruning
    92  
    93  	db     athdb.Database // Low level persistent database to store final content in
    94  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
    95  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
    96  
    97  	hc            *HeaderChain
    98  	rmLogsFeed    event.Feed
    99  	chainFeed     event.Feed
   100  	chainSideFeed event.Feed
   101  	chainHeadFeed event.Feed
   102  	logsFeed      event.Feed
   103  	scope         event.SubscriptionScope
   104  	genesisBlock  *types.Block
   105  
   106  	mu      sync.RWMutex // global mutex for locking chain operations
   107  	chainmu sync.RWMutex // blockchain insertion lock
   108  	procmu  sync.RWMutex // block processor lock
   109  
   110  	checkpoint       int          // checkpoint counts towards the new checkpoint
   111  	currentBlock     atomic.Value // Current head of the block chain
   112  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   113  
   114  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   115  	bodyCache    *lru.Cache     // Cache for the most recent block bodies
   116  	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   117  	blockCache   *lru.Cache     // Cache for the most recent entire blocks
   118  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   119  
   120  	quit    chan struct{} // blockchain quit channel
   121  	running int32         // running must be accessed atomically
   122  	// procInterrupt must be accessed atomically
   123  	procInterrupt int32          // interrupt signaler for block processing
   124  	wg            sync.WaitGroup // chain processing wait group for shutting down
   125  
   126  	engine    consensus.Engine
   127  	processor Processor // block processor interface
   128  	validator Validator // block and state validator interface
   129  	vmConfig  vm.Config
   130  
   131  	badBlocks *lru.Cache // Bad block cache
   132  }
   133  
   134  // NewBlockChain returns a fully initialised block chain using information
   135  // available in the database. It initialises the default Atlantis Validator and
   136  // Processor.
   137  func NewBlockChain(db athdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
   138  	if cacheConfig == nil {
   139  		cacheConfig = &CacheConfig{
   140  			TrieNodeLimit: 256 * 1024 * 1024,
   141  			TrieTimeLimit: 5 * time.Minute,
   142  		}
   143  	}
   144  	bodyCache, _ := lru.New(bodyCacheLimit)
   145  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   146  	blockCache, _ := lru.New(blockCacheLimit)
   147  	futureBlocks, _ := lru.New(maxFutureBlocks)
   148  	badBlocks, _ := lru.New(badBlockLimit)
   149  
   150  	bc := &BlockChain{
   151  		chainConfig:  chainConfig,
   152  		cacheConfig:  cacheConfig,
   153  		db:           db,
   154  		triegc:       prque.New(),
   155  		stateCache:   state.NewDatabase(db),
   156  		quit:         make(chan struct{}),
   157  		bodyCache:    bodyCache,
   158  		bodyRLPCache: bodyRLPCache,
   159  		blockCache:   blockCache,
   160  		futureBlocks: futureBlocks,
   161  		engine:       engine,
   162  		vmConfig:     vmConfig,
   163  		badBlocks:    badBlocks,
   164  	}
   165  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   166  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   167  
   168  	var err error
   169  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   170  	if err != nil {
   171  		return nil, err
   172  	}
   173  	bc.genesisBlock = bc.GetBlockByNumber(0)
   174  	if bc.genesisBlock == nil {
   175  		return nil, ErrNoGenesis
   176  	}
   177  	if err := bc.loadLastState(); err != nil {
   178  		return nil, err
   179  	}
   180  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   181  	for hash := range BadHashes {
   182  		if header := bc.GetHeaderByHash(hash); header != nil {
   183  			// get the canonical block corresponding to the offending header's number
   184  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   185  			// make sure the headerByNumber (if present) is in our current canonical chain
   186  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   187  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   188  				bc.SetHead(header.Number.Uint64() - 1)
   189  				log.Error("Chain rewind was successful, resuming normal operation")
   190  			}
   191  		}
   192  	}
   193  	// Take ownership of this particular state
   194  	go bc.update()
   195  	return bc, nil
   196  }
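
// Illustrative sketch (added commentary, not part of the original source): a
// typical construction of a BlockChain. The database and consensus engine are
// assumed to come from the surrounding node setup, and params.MainnetChainConfig
// is assumed to exist as in upstream go-ethereum; a nil CacheConfig selects the
// defaults shown above.
//
//	chain, err := NewBlockChain(db, nil, params.MainnetChainConfig, engine, vm.Config{})
//	if err != nil {
//		log.Crit("Failed to initialise block chain", "err", err)
//	}
//	defer chain.Stop()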
   197  
   198  func (bc *BlockChain) getProcInterrupt() bool {
   199  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   200  }
   201  
   202  // loadLastState loads the last known chain state from the database. This method
   203  // assumes that the chain manager mutex is held.
   204  func (bc *BlockChain) loadLastState() error {
   205  	// Restore the last known head block
   206  	head := rawdb.ReadHeadBlockHash(bc.db)
   207  	if head == (common.Hash{}) {
   208  		// Corrupt or empty database, init from scratch
   209  		log.Warn("Empty database, resetting chain")
   210  		return bc.Reset()
   211  	}
   212  	// Make sure the entire head block is available
   213  	currentBlock := bc.GetBlockByHash(head)
   214  	if currentBlock == nil {
   215  		// Corrupt or empty database, init from scratch
   216  		log.Warn("Head block missing, resetting chain", "hash", head)
   217  		return bc.Reset()
   218  	}
   219  	// Make sure the state associated with the block is available
   220  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   221  		// Dangling block without a state associated, init from scratch
   222  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   223  		if err := bc.repair(&currentBlock); err != nil {
   224  			return err
   225  		}
   226  	}
   227  	// Everything seems to be fine, set as the head block
   228  	bc.currentBlock.Store(currentBlock)
   229  
   230  	// Restore the last known head header
   231  	currentHeader := currentBlock.Header()
   232  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   233  		if header := bc.GetHeaderByHash(head); header != nil {
   234  			currentHeader = header
   235  		}
   236  	}
   237  	bc.hc.SetCurrentHeader(currentHeader)
   238  
   239  	// Restore the last known head fast block
   240  	bc.currentFastBlock.Store(currentBlock)
   241  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   242  		if block := bc.GetBlockByHash(head); block != nil {
   243  			bc.currentFastBlock.Store(block)
   244  		}
   245  	}
   246  
   247  	// Issue a status log for the user
   248  	currentFastBlock := bc.CurrentFastBlock()
   249  
   250  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   251  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   252  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   253  
   254  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   255  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   256  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   257  
   258  	return nil
   259  }
   260  
   261  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   262  // above the new head will be deleted and the new one set. In the case of blocks
   263  // though, the head may be further rewound if block bodies are missing (non-archive
   264  // nodes after a fast sync).
   265  func (bc *BlockChain) SetHead(head uint64) error {
   266  	log.Warn("Rewinding blockchain", "target", head)
   267  
   268  	bc.mu.Lock()
   269  	defer bc.mu.Unlock()
   270  
   271  	// Rewind the header chain, deleting all block bodies until then
   272  	delFn := func(hash common.Hash, num uint64) {
   273  		rawdb.DeleteBody(bc.db, hash, num)
   274  	}
   275  	bc.hc.SetHead(head, delFn)
   276  	currentHeader := bc.hc.CurrentHeader()
   277  
   278  	// Clear out any stale content from the caches
   279  	bc.bodyCache.Purge()
   280  	bc.bodyRLPCache.Purge()
   281  	bc.blockCache.Purge()
   282  	bc.futureBlocks.Purge()
   283  
   284  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   285  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   286  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   287  	}
   288  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   289  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   290  			// Rewound state missing, rolled back to before pivot, reset to genesis
   291  			bc.currentBlock.Store(bc.genesisBlock)
   292  		}
   293  	}
   294  	// Rewind the fast block in a simpleton way to the target head
   295  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   296  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   297  	}
   298  	// If either block reached nil, reset to the genesis state
   299  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   300  		bc.currentBlock.Store(bc.genesisBlock)
   301  	}
   302  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   303  		bc.currentFastBlock.Store(bc.genesisBlock)
   304  	}
   305  	currentBlock := bc.CurrentBlock()
   306  	currentFastBlock := bc.CurrentFastBlock()
   307  
   308  	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
   309  	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())
   310  
   311  	return bc.loadLastState()
   312  }
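
// Illustrative sketch (added commentary, not part of the original source):
// rewinding the chain to a given height, for example to recover after importing
// a locally bad block; the target number is arbitrary.
//
//	if err := bc.SetHead(1200000); err != nil {
//		log.Error("Chain rewind failed", "err", err)
//	}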
   313  
   314  // FastSyncCommitHead sets the current head block to the one defined by the hash
   315  // regardless of what the chain contents were prior.
   316  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   317  	// Make sure that both the block as well as its state trie exist
   318  	block := bc.GetBlockByHash(hash)
   319  	if block == nil {
   320  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   321  	}
   322  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   323  		return err
   324  	}
   325  	// If all checks out, manually set the head block
   326  	bc.mu.Lock()
   327  	bc.currentBlock.Store(block)
   328  	bc.mu.Unlock()
   329  
   330  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   331  	return nil
   332  }
   333  
   334  // GasLimit returns the gas limit of the current HEAD block.
   335  func (bc *BlockChain) GasLimit() uint64 {
   336  	return bc.CurrentBlock().GasLimit()
   337  }
   338  
   339  // CurrentBlock retrieves the current head block of the canonical chain. The
   340  // block is retrieved from the blockchain's internal cache.
   341  func (bc *BlockChain) CurrentBlock() *types.Block {
   342  	return bc.currentBlock.Load().(*types.Block)
   343  }
   344  
   345  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   346  // chain. The block is retrieved from the blockchain's internal cache.
   347  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   348  	return bc.currentFastBlock.Load().(*types.Block)
   349  }
   350  
   351  // SetProcessor sets the processor required for making state modifications.
   352  func (bc *BlockChain) SetProcessor(processor Processor) {
   353  	bc.procmu.Lock()
   354  	defer bc.procmu.Unlock()
   355  	bc.processor = processor
   356  }
   357  
   358  // SetValidator sets the validator which is used to validate incoming blocks.
   359  func (bc *BlockChain) SetValidator(validator Validator) {
   360  	bc.procmu.Lock()
   361  	defer bc.procmu.Unlock()
   362  	bc.validator = validator
   363  }
   364  
   365  // Validator returns the current validator.
   366  func (bc *BlockChain) Validator() Validator {
   367  	bc.procmu.RLock()
   368  	defer bc.procmu.RUnlock()
   369  	return bc.validator
   370  }
   371  
   372  // Processor returns the current processor.
   373  func (bc *BlockChain) Processor() Processor {
   374  	bc.procmu.RLock()
   375  	defer bc.procmu.RUnlock()
   376  	return bc.processor
   377  }
   378  
   379  // State returns a new mutable state based on the current HEAD block.
   380  func (bc *BlockChain) State() (*state.StateDB, error) {
   381  	return bc.StateAt(bc.CurrentBlock().Root())
   382  }
   383  
   384  // StateAt returns a new mutable state based on a particular point in time.
   385  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   386  	return state.New(root, bc.stateCache)
   387  }
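
// Illustrative sketch (added commentary, not part of the original source):
// reading an account balance from the state at the current head; "addr" is a
// hypothetical common.Address provided by the caller.
//
//	statedb, err := bc.State()
//	if err != nil {
//		return err
//	}
//	balance := statedb.GetBalance(addr) // *big.Int, in wei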
   388  
   389  // Reset purges the entire blockchain, restoring it to its genesis state.
   390  func (bc *BlockChain) Reset() error {
   391  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   392  }
   393  
   394  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   395  // specified genesis state.
   396  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   397  	// Dump the entire block chain and purge the caches
   398  	if err := bc.SetHead(0); err != nil {
   399  		return err
   400  	}
   401  	bc.mu.Lock()
   402  	defer bc.mu.Unlock()
   403  
   404  	// Prepare the genesis block and reinitialise the chain
   405  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   406  		log.Crit("Failed to write genesis block TD", "err", err)
   407  	}
   408  	rawdb.WriteBlock(bc.db, genesis)
   409  
   410  	bc.genesisBlock = genesis
   411  	bc.insert(bc.genesisBlock)
   412  	bc.currentBlock.Store(bc.genesisBlock)
   413  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   414  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   415  	bc.currentFastBlock.Store(bc.genesisBlock)
   416  
   417  	return nil
   418  }
   419  
   420  // repair tries to repair the current blockchain by rolling back the current block
   421  // until one with associated state is found. This is needed to fix incomplete db
   422  // writes caused either by crashes/power outages, or simply non-committed tries.
   423  //
   424  // This method only rolls back the current block. The current header and current
   425  // fast block are left intact.
   426  func (bc *BlockChain) repair(head **types.Block) error {
   427  	for {
   428  		// Abort if we've rewound to a head block that does have associated state
   429  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   430  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   431  			return nil
   432  		}
   433  		// Otherwise rewind one block and recheck state availability there
   434  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   435  	}
   436  }
   437  
   438  // Export writes the active chain to the given writer.
   439  func (bc *BlockChain) Export(w io.Writer) error {
   440  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   441  }
   442  
   443  // ExportN writes a subset of the active chain to the given writer.
   444  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   445  	bc.mu.RLock()
   446  	defer bc.mu.RUnlock()
   447  
   448  	if first > last {
   449  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   450  	}
   451  	log.Info("Exporting batch of blocks", "count", last-first+1)
   452  
   453  	for nr := first; nr <= last; nr++ {
   454  		block := bc.GetBlockByNumber(nr)
   455  		if block == nil {
   456  			return fmt.Errorf("export failed on #%d: not found", nr)
   457  		}
   458  
   459  		if err := block.EncodeRLP(w); err != nil {
   460  			return err
   461  		}
   462  	}
   463  
   464  	return nil
   465  }
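
// Illustrative sketch (added commentary, not part of the original source):
// exporting the first thousand blocks to a file; the os.Create call is an
// assumption about the caller's environment.
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := bc.ExportN(f, 0, 999); err != nil {
//		return err
//	}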
   466  
   467  // insert injects a new head block into the current block chain. This method
   468  // assumes that the block is indeed a true head. It will also reset the head
   469  // header and the head fast sync block to this very same block if they are older
   470  // or if they are on a different side chain.
   471  //
   472  // Note, this function assumes that the `mu` mutex is held!
   473  func (bc *BlockChain) insert(block *types.Block) {
   474  	// If the block is on a side chain or an unknown one, force other heads onto it too
   475  	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   476  
   477  	// Add the block to the canonical chain number scheme and mark as the head
   478  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
   479  	rawdb.WriteHeadBlockHash(bc.db, block.Hash())
   480  
   481  	bc.currentBlock.Store(block)
   482  
   483  	// If the block is better than our head or is on a different chain, force update heads
   484  	if updateHeads {
   485  		bc.hc.SetCurrentHeader(block.Header())
   486  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
   487  
   488  		bc.currentFastBlock.Store(block)
   489  	}
   490  }
   491  
   492  // Genesis retrieves the chain's genesis block.
   493  func (bc *BlockChain) Genesis() *types.Block {
   494  	return bc.genesisBlock
   495  }
   496  
   497  // GetBody retrieves a block body (transactions and uncles) from the database by
   498  // hash, caching it if found.
   499  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   500  	// Short circuit if the body's already in the cache, retrieve otherwise
   501  	if cached, ok := bc.bodyCache.Get(hash); ok {
   502  		body := cached.(*types.Body)
   503  		return body
   504  	}
   505  	number := bc.hc.GetBlockNumber(hash)
   506  	if number == nil {
   507  		return nil
   508  	}
   509  	body := rawdb.ReadBody(bc.db, hash, *number)
   510  	if body == nil {
   511  		return nil
   512  	}
   513  	// Cache the found body for next time and return
   514  	bc.bodyCache.Add(hash, body)
   515  	return body
   516  }
   517  
   518  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   519  // caching it if found.
   520  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   521  	// Short circuit if the body's already in the cache, retrieve otherwise
   522  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   523  		return cached.(rlp.RawValue)
   524  	}
   525  	number := bc.hc.GetBlockNumber(hash)
   526  	if number == nil {
   527  		return nil
   528  	}
   529  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   530  	if len(body) == 0 {
   531  		return nil
   532  	}
   533  	// Cache the found body for next time and return
   534  	bc.bodyRLPCache.Add(hash, body)
   535  	return body
   536  }
   537  
   538  // HasBlock checks if a block is fully present in the database or not.
   539  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   540  	if bc.blockCache.Contains(hash) {
   541  		return true
   542  	}
   543  	return rawdb.HasBody(bc.db, hash, number)
   544  }
   545  
   546  // HasState checks if state trie is fully present in the database or not.
   547  func (bc *BlockChain) HasState(hash common.Hash) bool {
   548  	_, err := bc.stateCache.OpenTrie(hash)
   549  	return err == nil
   550  }
   551  
   552  // HasBlockAndState checks if a block and its associated state trie are fully
   553  // present in the database or not, caching it if present.
   554  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   555  	// Check first that the block itself is known
   556  	block := bc.GetBlock(hash, number)
   557  	if block == nil {
   558  		return false
   559  	}
   560  	return bc.HasState(block.Root())
   561  }
   562  
   563  // GetBlock retrieves a block from the database by hash and number,
   564  // caching it if found.
   565  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   566  	// Short circuit if the block's already in the cache, retrieve otherwise
   567  	if block, ok := bc.blockCache.Get(hash); ok {
   568  		return block.(*types.Block)
   569  	}
   570  	block := rawdb.ReadBlock(bc.db, hash, number)
   571  	if block == nil {
   572  		return nil
   573  	}
   574  	// Cache the found block for next time and return
   575  	bc.blockCache.Add(block.Hash(), block)
   576  	return block
   577  }
   578  
   579  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   580  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   581  	number := bc.hc.GetBlockNumber(hash)
   582  	if number == nil {
   583  		return nil
   584  	}
   585  	return bc.GetBlock(hash, *number)
   586  }
   587  
   588  // GetBlockByNumber retrieves a block from the database by number, caching it
   589  // (associated with its hash) if found.
   590  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   591  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   592  	if hash == (common.Hash{}) {
   593  		return nil
   594  	}
   595  	return bc.GetBlock(hash, number)
   596  }
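
// Illustrative sketch (added commentary, not part of the original source): the
// distinction drawn in the BlockChain documentation above. GetBlock can return
// a block from any chain known to the database, while GetBlockByNumber only
// follows the canonical chain; "sideHash" is a hypothetical side-chain hash.
//
//	canonical := bc.GetBlockByNumber(42)  // nil unless block #42 is canonical
//	anyChain := bc.GetBlock(sideHash, 42) // may return a non-canonical block
//	_, _ = canonical, anyChain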
   597  
   598  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   599  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   600  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   601  	if number == nil {
   602  		return nil
   603  	}
   604  	return rawdb.ReadReceipts(bc.db, hash, *number)
   605  }
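
// Illustrative sketch (added commentary, not part of the original source):
// iterating the logs of every receipt in a block; "blockHash" is a hypothetical
// common.Hash supplied by the caller.
//
//	for _, receipt := range bc.GetReceiptsByHash(blockHash) {
//		for _, l := range receipt.Logs {
//			log.Info("Log emitted", "address", l.Address, "topics", len(l.Topics))
//		}
//	}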
   606  
   607  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   608  // [deprecated by ath/62]
   609  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   610  	number := bc.hc.GetBlockNumber(hash)
   611  	if number == nil {
   612  		return nil
   613  	}
   614  	for i := 0; i < n; i++ {
   615  		block := bc.GetBlock(hash, *number)
   616  		if block == nil {
   617  			break
   618  		}
   619  		blocks = append(blocks, block)
   620  		hash = block.ParentHash()
   621  		*number--
   622  	}
   623  	return
   624  }
   625  
   626  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   627  // a specific distance is reached.
   628  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   629  	uncles := []*types.Header{}
   630  	for i := 0; block != nil && i < length; i++ {
   631  		uncles = append(uncles, block.Uncles()...)
   632  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   633  	}
   634  	return uncles
   635  }
   636  
   637  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   638  // either from ephemeral in-memory cache, or from persistent storage.
   639  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   640  	return bc.stateCache.TrieDB().Node(hash)
   641  }
   642  
   643  // Stop stops the blockchain service. If any imports are currently in progress
   644  // it will abort them using the procInterrupt.
   645  func (bc *BlockChain) Stop() {
   646  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   647  		return
   648  	}
   649  	// Unsubscribe all subscriptions registered from blockchain
   650  	bc.scope.Close()
   651  	close(bc.quit)
   652  	atomic.StoreInt32(&bc.procInterrupt, 1)
   653  
   654  	bc.wg.Wait()
   655  
   656  	// Ensure the state of a recent block is also stored to disk before exiting.
   657  	// We're writing three different states to catch different restart scenarios:
   658  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   659  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   660  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   661  	if !bc.cacheConfig.Disabled {
   662  		triedb := bc.stateCache.TrieDB()
   663  
   664  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   665  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   666  				recent := bc.GetBlockByNumber(number - offset)
   667  
   668  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   669  				if err := triedb.Commit(recent.Root(), true); err != nil {
   670  					log.Error("Failed to commit recent state trie", "err", err)
   671  				}
   672  			}
   673  		}
   674  		for !bc.triegc.Empty() {
   675  			triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
   676  		}
   677  		if size, _ := triedb.Size(); size != 0 {
   678  			log.Error("Dangling trie nodes after full cleanup")
   679  		}
   680  	}
   681  	log.Info("Blockchain manager stopped")
   682  }
   683  
   684  func (bc *BlockChain) procFutureBlocks() {
   685  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   686  	for _, hash := range bc.futureBlocks.Keys() {
   687  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   688  			blocks = append(blocks, block.(*types.Block))
   689  		}
   690  	}
   691  	if len(blocks) > 0 {
   692  		types.BlockBy(types.Number).Sort(blocks)
   693  
   694  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   695  		for i := range blocks {
   696  			bc.InsertChain(blocks[i : i+1])
   697  		}
   698  	}
   699  }
   700  
   701  // WriteStatus is the status of a block write operation.
   702  type WriteStatus byte
   703  
   704  const (
   705  	NonStatTy WriteStatus = iota
   706  	CanonStatTy
   707  	SideStatTy
   708  )
   709  
   710  // Rollback is designed to remove a chain of links from the database that aren't
   711  // certain enough to be valid.
   712  func (bc *BlockChain) Rollback(chain []common.Hash) {
   713  	bc.mu.Lock()
   714  	defer bc.mu.Unlock()
   715  
   716  	for i := len(chain) - 1; i >= 0; i-- {
   717  		hash := chain[i]
   718  
   719  		currentHeader := bc.hc.CurrentHeader()
   720  		if currentHeader.Hash() == hash {
   721  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   722  		}
   723  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   724  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   725  			bc.currentFastBlock.Store(newFastBlock)
   726  			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   727  		}
   728  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   729  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   730  			bc.currentBlock.Store(newBlock)
   731  			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
   732  		}
   733  	}
   734  }
   735  
   736  // SetReceiptsData computes all the non-consensus fields of the receipts
   737  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
   738  	signer := types.MakeSigner(config, block.Number())
   739  
   740  	transactions, logIndex := block.Transactions(), uint(0)
   741  	if len(transactions) != len(receipts) {
   742  		return errors.New("transaction and receipt count mismatch")
   743  	}
   744  
   745  	for j := 0; j < len(receipts); j++ {
   746  		// The transaction hash can be retrieved from the transaction itself
   747  		receipts[j].TxHash = transactions[j].Hash()
   748  
   749  		// The contract address can be derived from the transaction itself
   750  		if transactions[j].To() == nil {
   751  			// Deriving the signer is expensive, only do if it's actually needed
   752  			from, _ := types.Sender(signer, transactions[j])
   753  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   754  		}
   755  		// The used gas can be calculated based on previous receipts
   756  		if j == 0 {
   757  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   758  		} else {
   759  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   760  		}
   761  		// The derived log fields can simply be set from the block and transaction
   762  		for k := 0; k < len(receipts[j].Logs); k++ {
   763  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   764  			receipts[j].Logs[k].BlockHash = block.Hash()
   765  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   766  			receipts[j].Logs[k].TxIndex = uint(j)
   767  			receipts[j].Logs[k].Index = logIndex
   768  			logIndex++
   769  		}
   770  	}
   771  	return nil
   772  }
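
// Illustrative worked example (added commentary, not part of the original
// source): deriving per-transaction GasUsed from cumulative gas, as done in
// the loop above.
//
//	cumulative := []uint64{21000, 74000, 95000}
//	gasUsed := make([]uint64, len(cumulative))
//	for j, c := range cumulative {
//		if j == 0 {
//			gasUsed[j] = c
//		} else {
//			gasUsed[j] = c - cumulative[j-1]
//		}
//	}
//	// gasUsed is now [21000 53000 21000]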
   773  
   774  // InsertReceiptChain attempts to complete an already existing header chain with
   775  // transaction and receipt data.
   776  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   777  	bc.wg.Add(1)
   778  	defer bc.wg.Done()
   779  
   780  	// Do a sanity check that the provided chain is actually ordered and linked
   781  	for i := 1; i < len(blockChain); i++ {
   782  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   783  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   784  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   785  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   786  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   787  		}
   788  	}
   789  
   790  	var (
   791  		stats = struct{ processed, ignored int32 }{}
   792  		start = time.Now()
   793  		bytes = 0
   794  		batch = bc.db.NewBatch()
   795  	)
   796  	for i, block := range blockChain {
   797  		receipts := receiptChain[i]
   798  		// Short circuit insertion if shutting down or processing failed
   799  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   800  			return 0, nil
   801  		}
   802  		// Short circuit if the owner header is unknown
   803  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   804  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   805  		}
   806  		// Skip if the entire data is already known
   807  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   808  			stats.ignored++
   809  			continue
   810  		}
   811  		// Compute all the non-consensus fields of the receipts
   812  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   813  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   814  		}
   815  		// Write all the data out into the database
   816  		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
   817  		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   818  		rawdb.WriteTxLookupEntries(batch, block)
   819  
   820  		stats.processed++
   821  
   822  		if batch.ValueSize() >= athdb.IdealBatchSize {
   823  			if err := batch.Write(); err != nil {
   824  				return 0, err
   825  			}
   826  			bytes += batch.ValueSize()
   827  			batch.Reset()
   828  		}
   829  	}
   830  	if batch.ValueSize() > 0 {
   831  		bytes += batch.ValueSize()
   832  		if err := batch.Write(); err != nil {
   833  			return 0, err
   834  		}
   835  	}
   836  
   837  	// Update the head fast sync block if better
   838  	bc.mu.Lock()
   839  	head := blockChain[len(blockChain)-1]
   840  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   841  		currentFastBlock := bc.CurrentFastBlock()
   842  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   843  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   844  			bc.currentFastBlock.Store(head)
   845  		}
   846  	}
   847  	bc.mu.Unlock()
   848  
   849  	log.Info("Imported new block receipts",
   850  		"count", stats.processed,
   851  		"elapsed", common.PrettyDuration(time.Since(start)),
   852  		"number", head.Number(),
   853  		"hash", head.Hash(),
   854  		"size", common.StorageSize(bytes),
   855  		"ignored", stats.ignored)
   856  	return 0, nil
   857  }
   858  
   859  var lastWrite uint64
   860  
   861  // WriteBlockWithoutState writes only the block and its metadata to the database,
   862  // but does not write any state. This is used to construct competing side forks
   863  // up to the point where they exceed the canonical total difficulty.
   864  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   865  	bc.wg.Add(1)
   866  	defer bc.wg.Done()
   867  
   868  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   869  		return err
   870  	}
   871  	rawdb.WriteBlock(bc.db, block)
   872  
   873  	return nil
   874  }
   875  
   876  // WriteBlockWithState writes the block and all associated state to the database.
   877  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   878  	bc.wg.Add(1)
   879  	defer bc.wg.Done()
   880  
   881  	// Calculate the total difficulty of the block
   882  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   883  	if ptd == nil {
   884  		return NonStatTy, consensus.ErrUnknownAncestor
   885  	}
   886  	// Make sure no inconsistent state is leaked during insertion
   887  	bc.mu.Lock()
   888  	defer bc.mu.Unlock()
   889  
   890  	currentBlock := bc.CurrentBlock()
   891  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   892  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   893  
   894  	// Irrespective of the canonical status, write the block itself to the database
   895  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   896  		return NonStatTy, err
   897  	}
   898  	// Write other block data using a batch.
   899  	batch := bc.db.NewBatch()
   900  	rawdb.WriteBlock(batch, block)
   901  
   902  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   903  	if err != nil {
   904  		return NonStatTy, err
   905  	}
   906  	triedb := bc.stateCache.TrieDB()
   907  
   908  	// If we're running an archive node, always flush
   909  	if bc.cacheConfig.Disabled {
   910  		if err := triedb.Commit(root, false); err != nil {
   911  			return NonStatTy, err
   912  		}
   913  	} else {
   914  		// Full but not archive node, do proper garbage collection
   915  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   916  		bc.triegc.Push(root, -float32(block.NumberU64()))
   917  
   918  		if current := block.NumberU64(); current > triesInMemory {
   919  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
   920  			var (
   921  				nodes, imgs = triedb.Size()
   922  				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   923  			)
   924  			if nodes > limit || imgs > 4*1024*1024 {
   925  				triedb.Cap(limit - athdb.IdealBatchSize)
   926  			}
   927  			// Find the next state trie we need to commit
   928  			header := bc.GetHeaderByNumber(current - triesInMemory)
   929  			chosen := header.Number.Uint64()
   930  
   931  			// If we exceeded our time allowance, flush an entire trie to disk
   932  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   933  				// If we're exceeding limits but haven't reached a large enough memory gap,
   934  				// warn the user that the system is becoming unstable.
   935  				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   936  					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   937  				}
   938  				// Flush an entire trie and restart the counters
   939  				triedb.Commit(header.Root, true)
   940  				lastWrite = chosen
   941  				bc.gcproc = 0
   942  			}
   943  			// Garbage collect anything below our required write retention
   944  			for !bc.triegc.Empty() {
   945  				root, number := bc.triegc.Pop()
   946  				if uint64(-number) > chosen {
   947  					bc.triegc.Push(root, number)
   948  					break
   949  				}
   950  				triedb.Dereference(root.(common.Hash), common.Hash{})
   951  			}
   952  		}
   953  	}
   954  	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   955  
   956  	// If the total difficulty is higher than our known, add it to the canonical chain
   957  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   958  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   959  	reorg := externTd.Cmp(localTd) > 0
   960  	currentBlock = bc.CurrentBlock()
   961  	if !reorg && externTd.Cmp(localTd) == 0 {
   962  		// Split same-difficulty blocks by number, then at random
   963  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
   964  	}
   965  	if reorg {
   966  		// Reorganise the chain if the parent is not the head block
   967  		if block.ParentHash() != currentBlock.Hash() {
   968  			if err := bc.reorg(currentBlock, block); err != nil {
   969  				return NonStatTy, err
   970  			}
   971  		}
   972  		// Write the positional metadata for transaction/receipt lookups and preimages
   973  		rawdb.WriteTxLookupEntries(batch, block)
   974  		rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
   975  
   976  		status = CanonStatTy
   977  	} else {
   978  		status = SideStatTy
   979  	}
   980  	if err := batch.Write(); err != nil {
   981  		return NonStatTy, err
   982  	}
   983  
   984  	// Set new head.
   985  	if status == CanonStatTy {
   986  		bc.insert(block)
   987  	}
   988  	bc.futureBlocks.Remove(block.Hash())
   989  	return status, nil
   990  }
   991  
   992  // InsertChain attempts to insert the given batch of blocks into the canonical
   993  // chain or, otherwise, create a fork. If an error is returned it will return
   994  // the index number of the failing block as well as an error describing what
   995  // went wrong.
   996  //
   997  // After insertion is done, all accumulated events will be fired.
   998  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
   999  	n, events, logs, err := bc.insertChain(chain)
  1000  	bc.PostChainEvents(events, logs)
  1001  	return n, err
  1002  }
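
// Illustrative sketch (added commentary, not part of the original source):
// importing a batch of blocks received from a peer; "blocks" is a hypothetical
// types.Blocks slice already sorted by number.
//
//	if n, err := bc.InsertChain(blocks); err != nil {
//		log.Error("Block import failed", "index", n, "err", err)
//	}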
  1003  
  1004  // insertChain will execute the actual chain insertion and event aggregation. The
  1005  // only reason this method exists as a separate one is to make locking cleaner
  1006  // with deferred statements.
  1007  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1008  	// Sanity check that we have something meaningful to import
  1009  	if len(chain) == 0 {
  1010  		return 0, nil, nil, nil
  1011  	}
  1012  	// Do a sanity check that the provided chain is actually ordered and linked
  1013  	for i := 1; i < len(chain); i++ {
  1014  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1015  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1016  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1017  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1018  
  1019  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1020  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1021  		}
  1022  	}
  1023  	// Pre-checks passed, start the full block imports
  1024  	bc.wg.Add(1)
  1025  	defer bc.wg.Done()
  1026  
  1027  	bc.chainmu.Lock()
  1028  	defer bc.chainmu.Unlock()
  1029  
  1030  	// A queued approach to delivering events. This is generally
  1031  	// faster than direct delivery and requires much less mutex
  1032  	// acquiring.
  1033  	var (
  1034  		stats         = insertStats{startTime: mclock.Now()}
  1035  		events        = make([]interface{}, 0, len(chain))
  1036  		lastCanon     *types.Block
  1037  		coalescedLogs []*types.Log
  1038  	)
  1039  	// Start the parallel header verifier
  1040  	headers := make([]*types.Header, len(chain))
  1041  	seals := make([]bool, len(chain))
  1042  
  1043  	for i, block := range chain {
  1044  		headers[i] = block.Header()
  1045  		seals[i] = true
  1046  	}
  1047  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1048  	defer close(abort)
  1049  
  1050  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1051  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1052  
  1053  	// Iterate over the blocks and insert when the verifier permits
  1054  	for i, block := range chain {
  1055  		// If the chain is terminating, stop processing blocks
  1056  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1057  			log.Debug("Premature abort during blocks processing")
  1058  			break
  1059  		}
  1060  		// If the header is a banned one, straight out abort
  1061  		if BadHashes[block.Hash()] {
  1062  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1063  			return i, events, coalescedLogs, ErrBlacklistedHash
  1064  		}
  1065  		// Wait for the block's verification to complete
  1066  		bstart := time.Now()
  1067  
  1068  		err := <-results
  1069  		if err == nil {
  1070  			err = bc.Validator().ValidateBody(block)
  1071  		}
  1072  		switch {
  1073  		case err == ErrKnownBlock:
  1074  			// Block and state both already known. However if the current block is below
  1075  			// this number we did a rollback and we should reimport it nonetheless.
  1076  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1077  				stats.ignored++
  1078  				continue
  1079  			}
  1080  
  1081  		case err == consensus.ErrFutureBlock:
  1082  			// Allow blocks up to maxTimeFutureBlocks seconds in the future. If this limit
  1083  			// is exceeded the chain is discarded and processed at a later time if given.
  1084  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1085  			if block.Time().Cmp(max) > 0 {
  1086  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1087  			}
  1088  			bc.futureBlocks.Add(block.Hash(), block)
  1089  			stats.queued++
  1090  			continue
  1091  
  1092  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1093  			bc.futureBlocks.Add(block.Hash(), block)
  1094  			stats.queued++
  1095  			continue
  1096  
  1097  		case err == consensus.ErrPrunedAncestor:
  1098  			// Block competing with the canonical chain, store in the db, but don't process
  1099  			// until the competitor TD goes above the canonical TD
  1100  			currentBlock := bc.CurrentBlock()
  1101  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1102  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1103  			if localTd.Cmp(externTd) > 0 {
  1104  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1105  					return i, events, coalescedLogs, err
  1106  				}
  1107  				continue
  1108  			}
  1109  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1110  			var winner []*types.Block
  1111  
  1112  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1113  			for !bc.HasState(parent.Root()) {
  1114  				winner = append(winner, parent)
  1115  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1116  			}
  1117  			for j := 0; j < len(winner)/2; j++ {
  1118  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1119  			}
  1120  			// Import all the pruned blocks to make the state available
  1121  			bc.chainmu.Unlock()
  1122  			_, evs, logs, err := bc.insertChain(winner)
  1123  			bc.chainmu.Lock()
  1124  			events, coalescedLogs = evs, logs
  1125  
  1126  			if err != nil {
  1127  				return i, events, coalescedLogs, err
  1128  			}
  1129  
  1130  		case err != nil:
  1131  			bc.reportBlock(block, nil, err)
  1132  			return i, events, coalescedLogs, err
  1133  		}
  1134  		// Create a new statedb using the parent block and report an
  1135  		// error if it fails.
  1136  		var parent *types.Block
  1137  		if i == 0 {
  1138  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1139  		} else {
  1140  			parent = chain[i-1]
  1141  		}
  1142  		state, err := state.New(parent.Root(), bc.stateCache)
  1143  		if err != nil {
  1144  			return i, events, coalescedLogs, err
  1145  		}
  1146  		// Process block using the parent state as reference point.
  1147  		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1148  		if err != nil {
  1149  			bc.reportBlock(block, receipts, err)
  1150  			return i, events, coalescedLogs, err
  1151  		}
  1152  		// Validate the state using the default validator
  1153  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1154  		if err != nil {
  1155  			bc.reportBlock(block, receipts, err)
  1156  			return i, events, coalescedLogs, err
  1157  		}
  1158  		proctime := time.Since(bstart)
  1159  
  1160  		// Write the block to the chain and get the status.
  1161  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1162  		if err != nil {
  1163  			return i, events, coalescedLogs, err
  1164  		}
  1165  		switch status {
  1166  		case CanonStatTy:
  1167  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1168  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1169  
  1170  			coalescedLogs = append(coalescedLogs, logs...)
  1171  			blockInsertTimer.UpdateSince(bstart)
  1172  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1173  			lastCanon = block
  1174  
  1175  			// Only count canonical blocks for GC processing time
  1176  			bc.gcproc += proctime
  1177  
  1178  		case SideStatTy:
  1179  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1180  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1181  
  1182  			blockInsertTimer.UpdateSince(bstart)
  1183  			events = append(events, ChainSideEvent{block})
  1184  		}
  1185  		stats.processed++
  1186  		stats.usedGas += usedGas
  1187  
  1188  		cache, _ := bc.stateCache.TrieDB().Size()
  1189  		stats.report(chain, i, cache)
  1190  	}
  1191  	// Append a single chain head event if we've progressed the chain
  1192  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1193  		events = append(events, ChainHeadEvent{lastCanon})
  1194  	}
  1195  	return 0, events, coalescedLogs, nil
  1196  }
  1197  
  1198  // insertStats tracks and reports on block insertion.
  1199  type insertStats struct {
  1200  	queued, processed, ignored int
  1201  	usedGas                    uint64
  1202  	lastIndex                  int
  1203  	startTime                  mclock.AbsTime
  1204  }
  1205  
  1206  // statsReportLimit is the time limit during import after which we always print
  1207  // out progress. This avoids the user wondering what's going on.
  1208  const statsReportLimit = 8 * time.Second
  1209  
  1210  // report prints statistics if some number of blocks have been processed
  1211  // or more than a few seconds have passed since the last message.
  1212  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1213  	// Fetch the timings for the batch
  1214  	var (
  1215  		now     = mclock.Now()
  1216  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1217  	)
  1218  	// If we're at the last block of the batch or report period reached, log
  1219  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1220  		var (
  1221  			end = chain[index]
  1222  			txs = countTransactions(chain[st.lastIndex : index+1])
  1223  		)
  1224  		context := []interface{}{
  1225  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1226  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1227  			"number", end.Number(), "hash", end.Hash(), "cache", cache,
  1228  		}
  1229  		if st.queued > 0 {
  1230  			context = append(context, []interface{}{"queued", st.queued}...)
  1231  		}
  1232  		if st.ignored > 0 {
  1233  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1234  		}
  1235  		log.Info("Imported new chain segment", context...)
  1236  
  1237  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1238  	}
  1239  }
  1240  
  1241  func countTransactions(chain []*types.Block) (c int) {
  1242  	for _, b := range chain {
  1243  		c += len(b.Transactions())
  1244  	}
  1245  	return c
  1246  }
  1247  
  1248  // reorg takes two blocks, an old chain and a new chain, and reconstructs the
  1249  // blocks so that they become part of the new canonical chain. It also
  1250  // accumulates potential missing transactions and posts events about them.
  1251  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1252  	var (
  1253  		newChain    types.Blocks
  1254  		oldChain    types.Blocks
  1255  		commonBlock *types.Block
  1256  		deletedTxs  types.Transactions
  1257  		deletedLogs []*types.Log
  1258  		// collectLogs collects the logs that were generated during the
  1259  		// processing of the block that corresponds with the given hash.
  1260  		// These logs are later announced as deleted.
  1261  		collectLogs = func(hash common.Hash) {
  1262  			// Coalesce logs and set 'Removed'.
  1263  			number := bc.hc.GetBlockNumber(hash)
  1264  			if number == nil {
  1265  				return
  1266  			}
  1267  			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
  1268  			for _, receipt := range receipts {
  1269  				for _, log := range receipt.Logs {
  1270  					del := *log
  1271  					del.Removed = true
  1272  					deletedLogs = append(deletedLogs, &del)
  1273  				}
  1274  			}
  1275  		}
  1276  	)
  1277  
  1278  	// first reduce whoever is higher bound
  1279  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1280  		// reduce old chain
  1281  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1282  			oldChain = append(oldChain, oldBlock)
  1283  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1284  
  1285  			collectLogs(oldBlock.Hash())
  1286  		}
  1287  	} else {
  1288  		// reduce new chain and append new chain blocks for inserting later on
  1289  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1290  			newChain = append(newChain, newBlock)
  1291  		}
  1292  	}
  1293  	if oldBlock == nil {
  1294  		return fmt.Errorf("Invalid old chain")
  1295  	}
  1296  	if newBlock == nil {
  1297  		return fmt.Errorf("Invalid new chain")
  1298  	}
  1299  
  1300  	for {
  1301  		if oldBlock.Hash() == newBlock.Hash() {
  1302  			commonBlock = oldBlock
  1303  			break
  1304  		}
  1305  
  1306  		oldChain = append(oldChain, oldBlock)
  1307  		newChain = append(newChain, newBlock)
  1308  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1309  		collectLogs(oldBlock.Hash())
  1310  
  1311  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1312  		if oldBlock == nil {
  1313  			return fmt.Errorf("Invalid old chain")
  1314  		}
  1315  		if newBlock == nil {
  1316  			return fmt.Errorf("Invalid new chain")
  1317  		}
  1318  	}
  1319  	// Ensure the user sees large reorgs
  1320  	if len(oldChain) > 0 && len(newChain) > 0 {
  1321  		logFn := log.Debug
  1322  		if len(oldChain) > 63 {
  1323  			logFn = log.Warn
  1324  		}
  1325  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1326  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1327  	} else {
  1328  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1329  	}
  1330  	// Insert the new chain, taking care of the proper incremental order
  1331  	var addedTxs types.Transactions
  1332  	for i := len(newChain) - 1; i >= 0; i-- {
  1333  		// insert the block in the canonical way, re-writing history
  1334  		bc.insert(newChain[i])
  1335  		// write lookup entries for hash based transaction/receipt searches
  1336  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1337  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1338  	}
  1339  	// calculate the difference between deleted and added transactions
  1340  	diff := types.TxDifference(deletedTxs, addedTxs)
  1341  	// When transactions get deleted from the database that means the
  1342  	// When transactions get deleted from the database, the receipts that were
  1343  	// created in the fork must also be deleted.
  1344  		rawdb.DeleteTxLookupEntry(bc.db, tx.Hash())
  1345  	}
  1346  	if len(deletedLogs) > 0 {
  1347  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1348  	}
  1349  	if len(oldChain) > 0 {
  1350  		go func() {
  1351  			for _, block := range oldChain {
  1352  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1353  			}
  1354  		}()
  1355  	}
  1356  
  1357  	return nil
  1358  }
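
// The core of reorg above is the walk back to a common ancestor: first level the
// two branches to the same height, then step both back in lockstep until the
// hashes match. findCommonAncestor is a simplified, self-contained sketch of that
// search; getBlock is a hypothetical stand-in for bc.GetBlock.
func findCommonAncestor(oldBlock, newBlock *types.Block, getBlock func(hash common.Hash, number uint64) *types.Block) *types.Block {
	// Level the longer branch down to the height of the shorter one.
	for oldBlock != nil && newBlock != nil && oldBlock.NumberU64() > newBlock.NumberU64() {
		oldBlock = getBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
	}
	for oldBlock != nil && newBlock != nil && newBlock.NumberU64() > oldBlock.NumberU64() {
		newBlock = getBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
	}
	// Step both branches back together until they converge.
	for oldBlock != nil && newBlock != nil {
		if oldBlock.Hash() == newBlock.Hash() {
			return oldBlock
		}
		oldBlock = getBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		newBlock = getBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
	}
	return nil // no common ancestor found, i.e. one of the chains is invalid
}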
  1359  
  1360  // PostChainEvents iterates over the events generated by a chain insertion and
  1361  // posts them into the event feed.
  1362  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1363  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1364  	// post event logs for further processing
  1365  	if logs != nil {
  1366  		bc.logsFeed.Send(logs)
  1367  	}
  1368  	for _, event := range events {
  1369  		switch ev := event.(type) {
  1370  		case ChainEvent:
  1371  			bc.chainFeed.Send(ev)
  1372  
  1373  		case ChainHeadEvent:
  1374  			bc.chainHeadFeed.Send(ev)
  1375  
  1376  		case ChainSideEvent:
  1377  			bc.chainSideFeed.Send(ev)
  1378  		}
  1379  	}
  1380  }
  1381  
  1382  func (bc *BlockChain) update() {
  1383  	futureTimer := time.NewTicker(5 * time.Second)
  1384  	defer futureTimer.Stop()
  1385  	for {
  1386  		select {
  1387  		case <-futureTimer.C:
  1388  			bc.procFutureBlocks()
  1389  		case <-bc.quit:
  1390  			return
  1391  		}
  1392  	}
  1393  }
  1394  
  1395  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1396  func (bc *BlockChain) BadBlocks() []*types.Block {
  1397  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1398  	for _, hash := range bc.badBlocks.Keys() {
  1399  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1400  			block := blk.(*types.Block)
  1401  			blocks = append(blocks, block)
  1402  		}
  1403  	}
  1404  	return blocks
  1405  }
  1406  
  1407  // addBadBlock adds a bad block to the bad-block LRU cache
  1408  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1409  	bc.badBlocks.Add(block.Hash(), block)
  1410  }
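
// BadBlocks and addBadBlock above are thin wrappers around an LRU cache from
// github.com/hashicorp/golang-lru (imported by this file). A minimal sketch of
// the cache behaviour they rely on, with the capacity mirroring badBlockLimit
// and hypothetical string keys standing in for block hashes:
func exampleBadBlockCache() []interface{} {
	cache, _ := lru.New(badBlockLimit)

	// Adding more entries than the capacity silently evicts the oldest ones.
	for i := 0; i < badBlockLimit*2; i++ {
		cache.Add(fmt.Sprintf("hash-%d", i), i)
	}

	// Keys returns the retained keys from oldest to newest; Peek reads a value
	// without updating its recency, matching how BadBlocks iterates the cache.
	kept := make([]interface{}, 0, cache.Len())
	for _, k := range cache.Keys() {
		if v, ok := cache.Peek(k); ok {
			kept = append(kept, v)
		}
	}
	return kept // the badBlockLimit most recently added values
}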
  1411  
  1412  // reportBlock logs a bad block error.
  1413  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1414  	bc.addBadBlock(block)
  1415  
  1416  	var receiptString string
  1417  	for _, receipt := range receipts {
  1418  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1419  	}
  1420  	log.Error(fmt.Sprintf(`
  1421  ########## BAD BLOCK #########
  1422  Chain config: %v
  1423  
  1424  Number: %v
  1425  Hash: 0x%x
  1426  %v
  1427  
  1428  Error: %v
  1429  ##############################
  1430  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1431  }
  1432  
  1433  // InsertHeaderChain attempts to insert the given header chain into the local
  1434  // chain, possibly creating a reorg. If an error is returned, it will return the
  1435  // index number of the failing header as well as an error describing what went wrong.
  1436  //
  1437  // The checkFreq parameter can be used to fine-tune whether nonce verification
  1438  // should be done or not. The reason behind the optional check is because some
  1439  // of the header retrieval mechanisms already need to verify nonces, as well as
  1440  // because nonces can be verified sparsely, not needing to check each.
  1441  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1442  	start := time.Now()
  1443  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1444  		return i, err
  1445  	}
  1446  
  1447  	// Make sure only one thread manipulates the chain at once
  1448  	bc.chainmu.Lock()
  1449  	defer bc.chainmu.Unlock()
  1450  
  1451  	bc.wg.Add(1)
  1452  	defer bc.wg.Done()
  1453  
  1454  	whFunc := func(header *types.Header) error {
  1455  		bc.mu.Lock()
  1456  		defer bc.mu.Unlock()
  1457  
  1458  		_, err := bc.hc.WriteHeader(header)
  1459  		return err
  1460  	}
  1461  
  1462  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1463  }
  1464  
  1465  // writeHeader writes a header into the local chain, given that its parent is
  1466  // already known. If the total difficulty of the newly inserted header becomes
  1467  // greater than the current known TD, the canonical chain is re-routed.
  1468  //
  1469  // Note: This method is not concurrent-safe with inserting blocks simultaneously
  1470  // into the chain, as side effects caused by reorganisations cannot be emulated
  1471  // without the real blocks. Hence, writing headers directly should only be done
  1472  // in two scenarios: pure-header mode of operation (light clients), or properly
  1473  // separated header/block phases (non-archive clients).
  1474  func (bc *BlockChain) writeHeader(header *types.Header) error {
  1475  	bc.wg.Add(1)
  1476  	defer bc.wg.Done()
  1477  
  1478  	bc.mu.Lock()
  1479  	defer bc.mu.Unlock()
  1480  
  1481  	_, err := bc.hc.WriteHeader(header)
  1482  	return err
  1483  }
  1484  
  1485  // CurrentHeader retrieves the current head header of the canonical chain. The
  1486  // header is retrieved from the HeaderChain's internal cache.
  1487  func (bc *BlockChain) CurrentHeader() *types.Header {
  1488  	return bc.hc.CurrentHeader()
  1489  }
  1490  
  1491  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1492  // database by hash and number, caching it if found.
  1493  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1494  	return bc.hc.GetTd(hash, number)
  1495  }
  1496  
  1497  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1498  // database by hash, caching it if found.
  1499  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1500  	return bc.hc.GetTdByHash(hash)
  1501  }
  1502  
  1503  // GetHeader retrieves a block header from the database by hash and number,
  1504  // caching it if found.
  1505  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1506  	return bc.hc.GetHeader(hash, number)
  1507  }
  1508  
  1509  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1510  // found.
  1511  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1512  	return bc.hc.GetHeaderByHash(hash)
  1513  }
  1514  
  1515  // HasHeader checks if a block header is present in the database or not, caching
  1516  // it if present.
  1517  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1518  	return bc.hc.HasHeader(hash, number)
  1519  }
  1520  
  1521  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1522  // hash, fetching towards the genesis block.
  1523  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1524  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1525  }
  1526  
  1527  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  1528  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  1529  // number of blocks to be individually checked before we reach the canonical chain.
  1530  //
  1531  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
  1532  func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  1533  	bc.chainmu.Lock()
  1534  	defer bc.chainmu.Unlock()
  1535  
  1536  	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  1537  }
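
// exampleGetAncestor is a usage sketch of the semantics documented above:
// ancestor 0 yields the block itself, ancestor 1 its parent. The head block
// and the walk limit used here are hypothetical.
func exampleGetAncestor(bc *BlockChain, head *types.Block) (common.Hash, common.Hash) {
	maxNonCanonical := uint64(100) // bound on individually checked non-canonical blocks

	selfHash, _ := bc.GetAncestor(head.Hash(), head.NumberU64(), 0, &maxNonCanonical)   // head itself
	parentHash, _ := bc.GetAncestor(head.Hash(), head.NumberU64(), 1, &maxNonCanonical) // head's parent
	return selfHash, parentHash
}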
  1538  
  1539  // GetHeaderByNumber retrieves a block header from the database by number,
  1540  // caching it (associated with its hash) if found.
  1541  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1542  	return bc.hc.GetHeaderByNumber(number)
  1543  }
  1544  
  1545  // Config retrieves the blockchain's chain configuration.
  1546  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1547  
  1548  // Engine retrieves the blockchain's consensus engine.
  1549  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1550  
  1551  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1552  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1553  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1554  }
  1555  
  1556  // SubscribeChainEvent registers a subscription of ChainEvent.
  1557  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1558  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1559  }
  1560  
  1561  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1562  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1563  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1564  }
  1565  
  1566  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1567  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1568  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1569  }
  1570  
  1571  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1572  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1573  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1574  }
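
// The Subscribe* accessors above all return an event.Subscription tracked by the
// chain's scope. exampleFollowChainHead is a minimal consumer sketch for
// ChainHeadEvent; the channel size and goroutine structure are illustrative
// choices, not prescribed by this file.
func exampleFollowChainHead(bc *BlockChain) {
	heads := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(heads)

	go func() {
		defer sub.Unsubscribe()
		for {
			select {
			case ev := <-heads:
				log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
			case <-sub.Err():
				// Subscription closed (e.g. the chain was stopped); stop consuming.
				return
			}
		}
	}()
}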