github.com/luckypickle/go-ethereum-vet@v1.14.2/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/hashicorp/golang-lru"
    31  	"github.com/luckypickle/go-ethereum-vet/common"
    32  	"github.com/luckypickle/go-ethereum-vet/common/mclock"
    33  	"github.com/luckypickle/go-ethereum-vet/consensus"
    34  	"github.com/luckypickle/go-ethereum-vet/core/rawdb"
    35  	"github.com/luckypickle/go-ethereum-vet/core/state"
    36  	"github.com/luckypickle/go-ethereum-vet/core/types"
    37  	"github.com/luckypickle/go-ethereum-vet/core/vm"
    38  	"github.com/luckypickle/go-ethereum-vet/crypto"
    39  	"github.com/luckypickle/go-ethereum-vet/ethdb"
    40  	"github.com/luckypickle/go-ethereum-vet/event"
    41  	"github.com/luckypickle/go-ethereum-vet/log"
    42  	"github.com/luckypickle/go-ethereum-vet/metrics"
    43  	"github.com/luckypickle/go-ethereum-vet/params"
    44  	"github.com/luckypickle/go-ethereum-vet/rlp"
    45  	"github.com/luckypickle/go-ethereum-vet/trie"
    46  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    47  )
    48  
    49  var (
    50  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    51  
    52  	ErrNoGenesis = errors.New("Genesis not found in chain")
    53  )
    54  
    55  const (
    56  	bodyCacheLimit      = 256
    57  	blockCacheLimit     = 256
    58  	maxFutureBlocks     = 256
    59  	maxTimeFutureBlocks = 30
    60  	badBlockLimit       = 10
    61  	triesInMemory       = 128
    62  
    63  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    64  	BlockChainVersion = 3
    65  )
    66  
    67  // CacheConfig contains the configuration values for the trie caching/pruning
    68  // that's resident in a blockchain.
    69  type CacheConfig struct {
    70  	Disabled      bool          // Whether to disable trie write caching (archive node)
    71  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    72  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    73  }
    74  
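// Illustrative sketch (editorial addition, not part of the original source):
// choosing a cache configuration when wiring up a BlockChain. Passing a nil
// *CacheConfig to NewBlockChain selects the pruning defaults applied in that
// constructor, while an archive node disables trie write caching so that every
// state is persisted to disk.
//
//	archiveCfg := &CacheConfig{Disabled: true} // archive node: flush every trie
//	var pruningCfg *CacheConfig                // nil selects NewBlockChain's defaults
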
    75  // BlockChain represents the canonical chain given a database with a genesis
    76  // block. The BlockChain manages chain imports, reverts and chain reorganisations.
    77  //
    78  // Importing blocks into the block chain happens according to the set of rules
    79  // defined by the two-stage Validator. Processing of blocks is done using the
    80  // Processor which processes the included transactions. The validation of the state
    81  // is done in the second part of the Validator. Any failure results in aborting
    82  // the import.
    83  //
    84  // The BlockChain also helps in returning blocks from **any** chain included
    85  // in the database as well as blocks that represent the canonical chain. It's
    86  // important to note that GetBlock can return any block and does not need to be
    87  // included in the canonical one, whereas GetBlockByNumber always represents the
    88  // canonical chain.
    89  type BlockChain struct {
    90  	chainConfig *params.ChainConfig // Chain & network configuration
    91  	cacheConfig *CacheConfig        // Cache configuration for pruning
    92  
    93  	db     ethdb.Database // Low level persistent database to store final content in
    94  	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
    95  	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
    96  
    97  	hc            *HeaderChain
    98  	rmLogsFeed    event.Feed
    99  	chainFeed     event.Feed
   100  	chainSideFeed event.Feed
   101  	chainHeadFeed event.Feed
   102  	logsFeed      event.Feed
   103  	scope         event.SubscriptionScope
   104  	genesisBlock  *types.Block
   105  
   106  	mu      sync.RWMutex // global mutex for locking chain operations
   107  	chainmu sync.RWMutex // blockchain insertion lock
   108  	procmu  sync.RWMutex // block processor lock
   109  
   110  	checkpoint       int          // checkpoint counts towards the new checkpoint
   111  	currentBlock     atomic.Value // Current head of the block chain
   112  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   113  
   114  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   115  	bodyCache    *lru.Cache     // Cache for the most recent block bodies
   116  	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   117  	blockCache   *lru.Cache     // Cache for the most recent entire blocks
   118  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   119  
   120  	quit    chan struct{} // blockchain quit channel
   121  	running int32         // running must be accessed atomically
   122  	// procInterrupt must be accessed atomically
   123  	procInterrupt int32          // interrupt signaler for block processing
   124  	wg            sync.WaitGroup // chain processing wait group for shutting down
   125  
   126  	engine    consensus.Engine
   127  	processor Processor // block processor interface
   128  	validator Validator // block and state validator interface
   129  	vmConfig  vm.Config
   130  
   131  	badBlocks *lru.Cache // Bad block cache
   132  }
   133  
   134  // NewBlockChain returns a fully initialised block chain using information
   135  // available in the database. It initialises the default Ethereum Validator and
   136  // Processor.
   137  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
   138  	if cacheConfig == nil {
   139  		cacheConfig = &CacheConfig{
   140  			TrieNodeLimit: 256 * 1024 * 1024,
   141  			TrieTimeLimit: 5 * time.Minute,
   142  		}
   143  	}
   144  	bodyCache, _ := lru.New(bodyCacheLimit)
   145  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   146  	blockCache, _ := lru.New(blockCacheLimit)
   147  	futureBlocks, _ := lru.New(maxFutureBlocks)
   148  	badBlocks, _ := lru.New(badBlockLimit)
   149  
   150  	bc := &BlockChain{
   151  		chainConfig:  chainConfig,
   152  		cacheConfig:  cacheConfig,
   153  		db:           db,
   154  		triegc:       prque.New(),
   155  		stateCache:   state.NewDatabase(db),
   156  		quit:         make(chan struct{}),
   157  		bodyCache:    bodyCache,
   158  		bodyRLPCache: bodyRLPCache,
   159  		blockCache:   blockCache,
   160  		futureBlocks: futureBlocks,
   161  		engine:       engine,
   162  		vmConfig:     vmConfig,
   163  		badBlocks:    badBlocks,
   164  	}
   165  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   166  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   167  
   168  	var err error
   169  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
   170  	if err != nil {
   171  		return nil, err
   172  	}
   173  	bc.genesisBlock = bc.GetBlockByNumber(0)
   174  	if bc.genesisBlock == nil {
   175  		return nil, ErrNoGenesis
   176  	}
   177  	if err := bc.loadLastState(); err != nil {
   178  		return nil, err
   179  	}
   180  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   181  	for hash := range BadHashes {
   182  		if header := bc.GetHeaderByHash(hash); header != nil {
   183  			// get the canonical block corresponding to the offending header's number
   184  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   185  			// make sure the headerByNumber (if present) is in our current canonical chain
   186  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   187  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   188  				bc.SetHead(header.Number.Uint64() - 1)
   189  				log.Error("Chain rewind was successful, resuming normal operation")
   190  			}
   191  		}
   192  	}
   193  	// Take ownership of this particular state
   194  	go bc.update()
   195  	return bc, nil
   196  }
   197  
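// Illustrative usage sketch (editorial addition, not part of the original
// source): constructing a BlockChain over an in-memory database. It assumes
// this fork keeps the upstream go-ethereum helpers ethdb.NewMemDatabase,
// core's Genesis.MustCommit, params.AllEthashProtocolChanges and
// ethash.NewFaker (from consensus/ethash).
//
//	db := ethdb.NewMemDatabase()
//	genesis := new(Genesis).MustCommit(db) // writes block #0 and its chain config
//	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, ethash.NewFaker(), vm.Config{})
//	if err != nil {
//		log.Crit("failed to create chain", "err", err)
//	}
//	defer chain.Stop()
//	_ = genesis
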
   198  func (bc *BlockChain) getProcInterrupt() bool {
   199  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   200  }
   201  
   202  // loadLastState loads the last known chain state from the database. This method
   203  // assumes that the chain manager mutex is held.
   204  func (bc *BlockChain) loadLastState() error {
   205  	// Restore the last known head block
   206  	head := rawdb.ReadHeadBlockHash(bc.db)
   207  	if head == (common.Hash{}) {
   208  		// Corrupt or empty database, init from scratch
   209  		log.Warn("Empty database, resetting chain")
   210  		return bc.Reset()
   211  	}
   212  	// Make sure the entire head block is available
   213  	currentBlock := bc.GetBlockByHash(head)
   214  	if currentBlock == nil {
   215  		// Corrupt or empty database, init from scratch
   216  		log.Warn("Head block missing, resetting chain", "hash", head)
   217  		return bc.Reset()
   218  	}
   219  	// Make sure the state associated with the block is available
   220  	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   221  		// Dangling block without a state associated, init from scratch
   222  		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   223  		if err := bc.repair(&currentBlock); err != nil {
   224  			return err
   225  		}
   226  	}
   227  	// Everything seems to be fine, set as the head block
   228  	bc.currentBlock.Store(currentBlock)
   229  
   230  	// Restore the last known head header
   231  	currentHeader := currentBlock.Header()
   232  	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
   233  		if header := bc.GetHeaderByHash(head); header != nil {
   234  			currentHeader = header
   235  		}
   236  	}
   237  	bc.hc.SetCurrentHeader(currentHeader)
   238  
   239  	// Restore the last known head fast block
   240  	bc.currentFastBlock.Store(currentBlock)
   241  	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
   242  		if block := bc.GetBlockByHash(head); block != nil {
   243  			bc.currentFastBlock.Store(block)
   244  		}
   245  	}
   246  
   247  	// Issue a status log for the user
   248  	currentFastBlock := bc.CurrentFastBlock()
   249  
   250  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   251  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   252  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   253  
   254  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   255  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   256  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   257  
   258  	return nil
   259  }
   260  
   261  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   262  // above the new head will be deleted and the new one set. In the case of blocks
   263  // though, the head may be further rewound if block bodies are missing (non-archive
   264  // nodes after a fast sync).
   265  func (bc *BlockChain) SetHead(head uint64) error {
   266  	log.Warn("Rewinding blockchain", "target", head)
   267  
   268  	bc.mu.Lock()
   269  	defer bc.mu.Unlock()
   270  
   271  	// Rewind the header chain, deleting all block bodies until then
   272  	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
   273  		rawdb.DeleteBody(db, hash, num)
   274  	}
   275  	bc.hc.SetHead(head, delFn)
   276  	currentHeader := bc.hc.CurrentHeader()
   277  
   278  	// Clear out any stale content from the caches
   279  	bc.bodyCache.Purge()
   280  	bc.bodyRLPCache.Purge()
   281  	bc.blockCache.Purge()
   282  	bc.futureBlocks.Purge()
   283  
   284  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   285  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   286  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   287  	}
   288  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   289  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   290  			// Rewound state missing, rolled back to before pivot, reset to genesis
   291  			bc.currentBlock.Store(bc.genesisBlock)
   292  		}
   293  	}
   294  	// Rewind the fast block in a simplistic way to the target head
   295  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   296  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   297  	}
   298  	// If either block reached nil, reset to the genesis state
   299  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   300  		bc.currentBlock.Store(bc.genesisBlock)
   301  	}
   302  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   303  		bc.currentFastBlock.Store(bc.genesisBlock)
   304  	}
   305  	currentBlock := bc.CurrentBlock()
   306  	currentFastBlock := bc.CurrentFastBlock()
   307  
   308  	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
   309  	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())
   310  
   311  	return bc.loadLastState()
   312  }
   313  
   314  // FastSyncCommitHead sets the current head block to the one defined by the hash,
   315  // irrespective of what the chain contents were prior.
   316  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   317  	// Make sure that both the block as well as its state trie exist
   318  	block := bc.GetBlockByHash(hash)
   319  	if block == nil {
   320  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   321  	}
   322  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   323  		return err
   324  	}
   325  	// If everything checks out, manually set the head block
   326  	bc.mu.Lock()
   327  	bc.currentBlock.Store(block)
   328  	bc.mu.Unlock()
   329  
   330  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   331  	return nil
   332  }
   333  
   334  // GasLimit returns the gas limit of the current HEAD block.
   335  func (bc *BlockChain) GasLimit() uint64 {
   336  	return bc.CurrentBlock().GasLimit()
   337  }
   338  
   339  // CurrentBlock retrieves the current head block of the canonical chain. The
   340  // block is retrieved from the blockchain's internal cache.
   341  func (bc *BlockChain) CurrentBlock() *types.Block {
   342  	return bc.currentBlock.Load().(*types.Block)
   343  }
   344  
   345  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   346  // chain. The block is retrieved from the blockchain's internal cache.
   347  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   348  	return bc.currentFastBlock.Load().(*types.Block)
   349  }
   350  
   351  // SetProcessor sets the processor required for making state modifications.
   352  func (bc *BlockChain) SetProcessor(processor Processor) {
   353  	bc.procmu.Lock()
   354  	defer bc.procmu.Unlock()
   355  	bc.processor = processor
   356  }
   357  
   358  // SetValidator sets the validator which is used to validate incoming blocks.
   359  func (bc *BlockChain) SetValidator(validator Validator) {
   360  	bc.procmu.Lock()
   361  	defer bc.procmu.Unlock()
   362  	bc.validator = validator
   363  }
   364  
   365  // Validator returns the current validator.
   366  func (bc *BlockChain) Validator() Validator {
   367  	bc.procmu.RLock()
   368  	defer bc.procmu.RUnlock()
   369  	return bc.validator
   370  }
   371  
   372  // Processor returns the current processor.
   373  func (bc *BlockChain) Processor() Processor {
   374  	bc.procmu.RLock()
   375  	defer bc.procmu.RUnlock()
   376  	return bc.processor
   377  }
   378  
   379  // State returns a new mutable state based on the current HEAD block.
   380  func (bc *BlockChain) State() (*state.StateDB, error) {
   381  	return bc.StateAt(bc.CurrentBlock().Root())
   382  }
   383  
   384  // StateAt returns a new mutable state based on a particular point in time.
   385  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   386  	return state.New(root, bc.stateCache)
   387  }
   388  
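// Illustrative sketch (editorial addition, not part of the original source):
// reading an account balance from the state at the current head block. The
// address below is a placeholder.
//
//	statedb, err := chain.State()
//	if err != nil {
//		return err
//	}
//	balance := statedb.GetBalance(common.HexToAddress("0x0000000000000000000000000000000000000001"))
//	_ = balance
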
   389  // Reset purges the entire blockchain, restoring it to its genesis state.
   390  func (bc *BlockChain) Reset() error {
   391  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   392  }
   393  
   394  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   395  // specified genesis state.
   396  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   397  	// Dump the entire block chain and purge the caches
   398  	if err := bc.SetHead(0); err != nil {
   399  		return err
   400  	}
   401  	bc.mu.Lock()
   402  	defer bc.mu.Unlock()
   403  
   404  	// Prepare the genesis block and reinitialise the chain
   405  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   406  		log.Crit("Failed to write genesis block TD", "err", err)
   407  	}
   408  	rawdb.WriteBlock(bc.db, genesis)
   409  
   410  	bc.genesisBlock = genesis
   411  	bc.insert(bc.genesisBlock)
   412  	bc.currentBlock.Store(bc.genesisBlock)
   413  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   414  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   415  	bc.currentFastBlock.Store(bc.genesisBlock)
   416  
   417  	return nil
   418  }
   419  
   420  // repair tries to repair the current blockchain by rolling back the current block
   421  // until one with associated state is found. This is needed to fix incomplete db
   422  // writes caused either by crashes/power outages, or simply non-committed tries.
   423  //
   424  // This method only rolls back the current block. The current header and current
   425  // fast block are left intact.
   426  func (bc *BlockChain) repair(head **types.Block) error {
   427  	for {
   428  		// Abort if we've rewound to a head block that does have associated state
   429  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   430  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   431  			return nil
   432  		}
   433  		// Otherwise rewind one block and recheck state availability there
   434  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   435  	}
   436  }
   437  
   438  // Export writes the active chain to the given writer.
   439  func (bc *BlockChain) Export(w io.Writer) error {
   440  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   441  }
   442  
   443  // ExportN writes a subset of the active chain to the given writer.
   444  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   445  	bc.mu.RLock()
   446  	defer bc.mu.RUnlock()
   447  
   448  	if first > last {
   449  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   450  	}
   451  	log.Info("Exporting batch of blocks", "count", last-first+1)
   452  
   453  	start, reported := time.Now(), time.Now()
   454  	for nr := first; nr <= last; nr++ {
   455  		block := bc.GetBlockByNumber(nr)
   456  		if block == nil {
   457  			return fmt.Errorf("export failed on #%d: not found", nr)
   458  		}
   459  		if err := block.EncodeRLP(w); err != nil {
   460  			return err
   461  		}
   462  		if time.Since(reported) >= statsReportLimit {
   463  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   464  			reported = time.Now()
   465  		}
   466  	}
   467  
   468  	return nil
   469  }
   470  
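// Illustrative sketch (editorial addition, not part of the original source):
// exporting the first 1000 canonical blocks to an RLP file using the standard
// os package; the resulting stream can later be decoded block by block and
// re-imported with InsertChain.
//
//	f, err := os.Create("chain-segment.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := chain.ExportN(f, 0, 1000); err != nil {
//		return err
//	}
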
   471  // insert injects a new head block into the current block chain. This method
   472  // assumes that the block is indeed a true head. It will also reset the head
   473  // header and the head fast sync block to this very same block if they are older
   474  // or if they are on a different side chain.
   475  //
   476  // Note, this function assumes that the `mu` mutex is held!
   477  func (bc *BlockChain) insert(block *types.Block) {
   478  	// If the block is on a side chain or an unknown one, force other heads onto it too
   479  	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
   480  
   481  	// Add the block to the canonical chain number scheme and mark as the head
   482  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
   483  	rawdb.WriteHeadBlockHash(bc.db, block.Hash())
   484  
   485  	bc.currentBlock.Store(block)
   486  
   487  	// If the block is better than our head or is on a different chain, force update heads
   488  	if updateHeads {
   489  		bc.hc.SetCurrentHeader(block.Header())
   490  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
   491  
   492  		bc.currentFastBlock.Store(block)
   493  	}
   494  }
   495  
   496  // Genesis retrieves the chain's genesis block.
   497  func (bc *BlockChain) Genesis() *types.Block {
   498  	return bc.genesisBlock
   499  }
   500  
   501  // GetBody retrieves a block body (transactions and uncles) from the database by
   502  // hash, caching it if found.
   503  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   504  	// Short circuit if the body's already in the cache, retrieve otherwise
   505  	if cached, ok := bc.bodyCache.Get(hash); ok {
   506  		body := cached.(*types.Body)
   507  		return body
   508  	}
   509  	number := bc.hc.GetBlockNumber(hash)
   510  	if number == nil {
   511  		return nil
   512  	}
   513  	body := rawdb.ReadBody(bc.db, hash, *number)
   514  	if body == nil {
   515  		return nil
   516  	}
   517  	// Cache the found body for next time and return
   518  	bc.bodyCache.Add(hash, body)
   519  	return body
   520  }
   521  
   522  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   523  // caching it if found.
   524  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   525  	// Short circuit if the body's already in the cache, retrieve otherwise
   526  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   527  		return cached.(rlp.RawValue)
   528  	}
   529  	number := bc.hc.GetBlockNumber(hash)
   530  	if number == nil {
   531  		return nil
   532  	}
   533  	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
   534  	if len(body) == 0 {
   535  		return nil
   536  	}
   537  	// Cache the found body for next time and return
   538  	bc.bodyRLPCache.Add(hash, body)
   539  	return body
   540  }
   541  
   542  // HasBlock checks if a block is fully present in the database or not.
   543  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   544  	if bc.blockCache.Contains(hash) {
   545  		return true
   546  	}
   547  	return rawdb.HasBody(bc.db, hash, number)
   548  }
   549  
   550  // HasState checks if the state trie is fully present in the database or not.
   551  func (bc *BlockChain) HasState(hash common.Hash) bool {
   552  	_, err := bc.stateCache.OpenTrie(hash)
   553  	return err == nil
   554  }
   555  
   556  // HasBlockAndState checks if a block and its associated state trie are fully
   557  // present in the database or not, caching it if present.
   558  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   559  	// Check first that the block itself is known
   560  	block := bc.GetBlock(hash, number)
   561  	if block == nil {
   562  		return false
   563  	}
   564  	return bc.HasState(block.Root())
   565  }
   566  
   567  // GetBlock retrieves a block from the database by hash and number,
   568  // caching it if found.
   569  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   570  	// Short circuit if the block's already in the cache, retrieve otherwise
   571  	if block, ok := bc.blockCache.Get(hash); ok {
   572  		return block.(*types.Block)
   573  	}
   574  	block := rawdb.ReadBlock(bc.db, hash, number)
   575  	if block == nil {
   576  		return nil
   577  	}
   578  	// Cache the found block for next time and return
   579  	bc.blockCache.Add(block.Hash(), block)
   580  	return block
   581  }
   582  
   583  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   584  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   585  	number := bc.hc.GetBlockNumber(hash)
   586  	if number == nil {
   587  		return nil
   588  	}
   589  	return bc.GetBlock(hash, *number)
   590  }
   591  
   592  // GetBlockByNumber retrieves a block from the database by number, caching it
   593  // (associated with its hash) if found.
   594  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   595  	hash := rawdb.ReadCanonicalHash(bc.db, number)
   596  	if hash == (common.Hash{}) {
   597  		return nil
   598  	}
   599  	return bc.GetBlock(hash, number)
   600  }
   601  
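// Illustrative sketch (editorial addition, not part of the original source):
// the two lookup paths described above. GetBlockByNumber follows the canonical
// chain only, whereas GetBlock/GetBlockByHash may return blocks from any known
// side chain.
//
//	canonical := chain.GetBlockByNumber(42) // canonical block #42, or nil if unknown
//	if canonical != nil {
//		same := chain.GetBlockByHash(canonical.Hash()) // same block, looked up by hash
//		_ = same
//	}
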
   602  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   603  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
   604  	number := rawdb.ReadHeaderNumber(bc.db, hash)
   605  	if number == nil {
   606  		return nil
   607  	}
   608  	return rawdb.ReadReceipts(bc.db, hash, *number)
   609  }
   610  
   611  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   612  // [deprecated by eth/62]
   613  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   614  	number := bc.hc.GetBlockNumber(hash)
   615  	if number == nil {
   616  		return nil
   617  	}
   618  	for i := 0; i < n; i++ {
   619  		block := bc.GetBlock(hash, *number)
   620  		if block == nil {
   621  			break
   622  		}
   623  		blocks = append(blocks, block)
   624  		hash = block.ParentHash()
   625  		*number--
   626  	}
   627  	return
   628  }
   629  
   630  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   631  // a specific distance is reached.
   632  func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   633  	uncles := []*types.Header{}
   634  	for i := 0; block != nil && i < length; i++ {
   635  		uncles = append(uncles, block.Uncles()...)
   636  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   637  	}
   638  	return uncles
   639  }
   640  
   641  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   642  // either from ephemeral in-memory cache, or from persistent storage.
   643  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   644  	return bc.stateCache.TrieDB().Node(hash)
   645  }
   646  
   647  // Stop stops the blockchain service. If any imports are currently in progress
   648  // it will abort them using the procInterrupt.
   649  func (bc *BlockChain) Stop() {
   650  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   651  		return
   652  	}
   653  	// Unsubscribe all subscriptions registered from blockchain
   654  	bc.scope.Close()
   655  	close(bc.quit)
   656  	atomic.StoreInt32(&bc.procInterrupt, 1)
   657  
   658  	bc.wg.Wait()
   659  
   660  	// Ensure the state of a recent block is also stored to disk before exiting.
   661  	// We're writing three different states to catch different restart scenarios:
   662  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   663  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   664  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   665  	if !bc.cacheConfig.Disabled {
   666  		triedb := bc.stateCache.TrieDB()
   667  
   668  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   669  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   670  				recent := bc.GetBlockByNumber(number - offset)
   671  
   672  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   673  				if err := triedb.Commit(recent.Root(), true); err != nil {
   674  					log.Error("Failed to commit recent state trie", "err", err)
   675  				}
   676  			}
   677  		}
   678  		for !bc.triegc.Empty() {
   679  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
   680  		}
   681  		if size, _ := triedb.Size(); size != 0 {
   682  			log.Error("Dangling trie nodes after full cleanup")
   683  		}
   684  	}
   685  	log.Info("Blockchain manager stopped")
   686  }
   687  
   688  func (bc *BlockChain) procFutureBlocks() {
   689  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   690  	for _, hash := range bc.futureBlocks.Keys() {
   691  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   692  			blocks = append(blocks, block.(*types.Block))
   693  		}
   694  	}
   695  	if len(blocks) > 0 {
   696  		types.BlockBy(types.Number).Sort(blocks)
   697  
   698  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   699  		for i := range blocks {
   700  			bc.InsertChain(blocks[i : i+1])
   701  		}
   702  	}
   703  }
   704  
   705  // WriteStatus is the status of a block write to the chain.
   706  type WriteStatus byte
   707  
   708  const (
   709  	NonStatTy   WriteStatus = iota // block was not written (e.g. insertion failed)
   710  	CanonStatTy                    // block was written and became part of the canonical chain
   711  	SideStatTy                     // block was written to a side (non-canonical) chain
   712  )
   713  
   714  // Rollback is designed to remove a chain of links from the database that aren't
   715  // certain enough to be valid.
   716  func (bc *BlockChain) Rollback(chain []common.Hash) {
   717  	bc.mu.Lock()
   718  	defer bc.mu.Unlock()
   719  
   720  	for i := len(chain) - 1; i >= 0; i-- {
   721  		hash := chain[i]
   722  
   723  		currentHeader := bc.hc.CurrentHeader()
   724  		if currentHeader.Hash() == hash {
   725  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   726  		}
   727  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   728  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   729  			bc.currentFastBlock.Store(newFastBlock)
   730  			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
   731  		}
   732  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   733  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   734  			bc.currentBlock.Store(newBlock)
   735  			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
   736  		}
   737  	}
   738  }
   739  
   740  // SetReceiptsData computes all the non-consensus fields of the receipts
   741  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
   742  	signer := types.MakeSigner(config, block.Number())
   743  
   744  	transactions, logIndex := block.Transactions(), uint(0)
   745  	if len(transactions) != len(receipts) {
   746  		return errors.New("transaction and receipt count mismatch")
   747  	}
   748  
   749  	for j := 0; j < len(receipts); j++ {
   750  		// The transaction hash can be retrieved from the transaction itself
   751  		receipts[j].TxHash = transactions[j].Hash()
   752  
   753  		// The contract address can be derived from the transaction itself
   754  		if transactions[j].To() == nil {
   755  			// Deriving the signer is expensive, only do if it's actually needed
   756  			from, _ := types.Sender(signer, transactions[j])
   757  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   758  		}
   759  		// The used gas can be calculated based on previous receipts
   760  		if j == 0 {
   761  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   762  		} else {
   763  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   764  		}
   765  		// The derived log fields can simply be set from the block and transaction
   766  		for k := 0; k < len(receipts[j].Logs); k++ {
   767  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   768  			receipts[j].Logs[k].BlockHash = block.Hash()
   769  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   770  			receipts[j].Logs[k].TxIndex = uint(j)
   771  			receipts[j].Logs[k].Index = logIndex
   772  			logIndex++
   773  		}
   774  	}
   775  	return nil
   776  }
   777  
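// Worked example (editorial addition, not part of the original source): for a
// block with two transactions whose receipts carry CumulativeGasUsed of 21000
// and 63000, the loop above derives GasUsed = 21000 for the first receipt and
// GasUsed = 63000 - 21000 = 42000 for the second, while the log Index counter
// keeps increasing across every receipt in the block.
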
   778  // InsertReceiptChain attempts to complete an already existing header chain with
   779  // transaction and receipt data.
   780  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
   781  	bc.wg.Add(1)
   782  	defer bc.wg.Done()
   783  
   784  	// Do a sanity check that the provided chain is actually ordered and linked
   785  	for i := 1; i < len(blockChain); i++ {
   786  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   787  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   788  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   789  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   790  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   791  		}
   792  	}
   793  
   794  	var (
   795  		stats = struct{ processed, ignored int32 }{}
   796  		start = time.Now()
   797  		bytes = 0
   798  		batch = bc.db.NewBatch()
   799  	)
   800  	for i, block := range blockChain {
   801  		receipts := receiptChain[i]
   802  		// Short circuit insertion if shutting down or processing failed
   803  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   804  			return 0, nil
   805  		}
   806  		// Short circuit if the owner header is unknown
   807  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   808  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   809  		}
   810  		// Skip if the entire data is already known
   811  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   812  			stats.ignored++
   813  			continue
   814  		}
   815  		// Compute all the non-consensus fields of the receipts
   816  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   817  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   818  		}
   819  		// Write all the data out into the database
   820  		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
   821  		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   822  		rawdb.WriteTxLookupEntries(batch, block)
   823  
   824  		stats.processed++
   825  
   826  		if batch.ValueSize() >= ethdb.IdealBatchSize {
   827  			if err := batch.Write(); err != nil {
   828  				return 0, err
   829  			}
   830  			bytes += batch.ValueSize()
   831  			batch.Reset()
   832  		}
   833  	}
   834  	if batch.ValueSize() > 0 {
   835  		bytes += batch.ValueSize()
   836  		if err := batch.Write(); err != nil {
   837  			return 0, err
   838  		}
   839  	}
   840  
   841  	// Update the head fast sync block if better
   842  	bc.mu.Lock()
   843  	head := blockChain[len(blockChain)-1]
   844  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   845  		currentFastBlock := bc.CurrentFastBlock()
   846  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   847  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
   848  			bc.currentFastBlock.Store(head)
   849  		}
   850  	}
   851  	bc.mu.Unlock()
   852  
   853  	log.Info("Imported new block receipts",
   854  		"count", stats.processed,
   855  		"elapsed", common.PrettyDuration(time.Since(start)),
   856  		"number", head.Number(),
   857  		"hash", head.Hash(),
   858  		"size", common.StorageSize(bytes),
   859  		"ignored", stats.ignored)
   860  	return 0, nil
   861  }
   862  
   863  var lastWrite uint64
   864  
   865  // WriteBlockWithoutState writes only the block and its metadata to the database,
   866  // but does not write any state. This is used to construct competing side forks
   867  // up to the point where they exceed the canonical total difficulty.
   868  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   869  	bc.wg.Add(1)
   870  	defer bc.wg.Done()
   871  
   872  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   873  		return err
   874  	}
   875  	rawdb.WriteBlock(bc.db, block)
   876  
   877  	return nil
   878  }
   879  
   880  // WriteBlockWithState writes the block and all associated state to the database.
   881  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
   882  	bc.wg.Add(1)
   883  	defer bc.wg.Done()
   884  
   885  	// Calculate the total difficulty of the block
   886  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   887  	if ptd == nil {
   888  		return NonStatTy, consensus.ErrUnknownAncestor
   889  	}
   890  	// Make sure no inconsistent state is leaked during insertion
   891  	bc.mu.Lock()
   892  	defer bc.mu.Unlock()
   893  
   894  	currentBlock := bc.CurrentBlock()
   895  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   896  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   897  
   898  	// Irrelevant of the canonical status, write the block itself to the database
   899  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   900  		return NonStatTy, err
   901  	}
   902  	rawdb.WriteBlock(bc.db, block)
   903  
   904  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   905  	if err != nil {
   906  		return NonStatTy, err
   907  	}
   908  	triedb := bc.stateCache.TrieDB()
   909  
   910  	// If we're running an archive node, always flush
   911  	if bc.cacheConfig.Disabled {
   912  		if err := triedb.Commit(root, false); err != nil {
   913  			return NonStatTy, err
   914  		}
   915  	} else {
   916  		// Full but not archive node, do proper garbage collection
   917  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   918  		bc.triegc.Push(root, -float32(block.NumberU64()))
   919  
   920  		if current := block.NumberU64(); current > triesInMemory {
   921  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
   922  			var (
   923  				nodes, imgs = triedb.Size()
   924  				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   925  			)
   926  			if nodes > limit || imgs > 4*1024*1024 {
   927  				triedb.Cap(limit - ethdb.IdealBatchSize)
   928  			}
   929  			// Find the next state trie we need to commit
   930  			header := bc.GetHeaderByNumber(current - triesInMemory)
   931  			chosen := header.Number.Uint64()
   932  
   933  			// If we exceeded our time allowance, flush an entire trie to disk
   934  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   935  				// If we're exceeding limits but haven't reached a large enough memory gap,
   936  				// warn the user that the system is becoming unstable.
   937  				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   938  					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   939  				}
   940  				// Flush an entire trie and restart the counters
   941  				triedb.Commit(header.Root, true)
   942  				lastWrite = chosen
   943  				bc.gcproc = 0
   944  			}
   945  			// Garbage collect anything below our required write retention
   946  			for !bc.triegc.Empty() {
   947  				root, number := bc.triegc.Pop()
   948  				if uint64(-number) > chosen {
   949  					bc.triegc.Push(root, number)
   950  					break
   951  				}
   952  				triedb.Dereference(root.(common.Hash))
   953  			}
   954  		}
   955  	}
   956  
   957  	// Write other block data using a batch.
   958  	batch := bc.db.NewBatch()
   959  	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
   960  
   961  	// If the total difficulty is higher than our currently known TD, add the block to the canonical chain
   962  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   963  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   964  	reorg := externTd.Cmp(localTd) > 0
   965  	currentBlock = bc.CurrentBlock()
   966  	if !reorg && externTd.Cmp(localTd) == 0 {
   967  		// Split same-difficulty blocks by number, then at random
   968  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
   969  	}
   970  	if reorg {
   971  		// Reorganise the chain if the parent is not the head block
   972  		if block.ParentHash() != currentBlock.Hash() {
   973  			if err := bc.reorg(currentBlock, block); err != nil {
   974  				return NonStatTy, err
   975  			}
   976  		}
   977  		// Write the positional metadata for transaction/receipt lookups and preimages
   978  		rawdb.WriteTxLookupEntries(batch, block)
   979  		rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
   980  
   981  		status = CanonStatTy
   982  	} else {
   983  		status = SideStatTy
   984  	}
   985  	if err := batch.Write(); err != nil {
   986  		return NonStatTy, err
   987  	}
   988  
   989  	// Set new head.
   990  	if status == CanonStatTy {
   991  		bc.insert(block)
   992  	}
   993  	bc.futureBlocks.Remove(block.Hash())
   994  	return status, nil
   995  }
   996  
   997  // InsertChain attempts to insert the given batch of blocks into the canonical
   998  // chain or, otherwise, create a fork. If an error is returned it will return
   999  // the index number of the failing block as well as an error describing what went
  1000  // wrong.
  1001  //
  1002  // After insertion is done, all accumulated events will be fired.
  1003  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1004  	n, events, logs, err := bc.insertChain(chain)
  1005  	bc.PostChainEvents(events, logs)
  1006  	return n, err
  1007  }
  1008  
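// Illustrative usage sketch (editorial addition, not part of the original
// source): importing a contiguous batch of blocks, e.g. ones received from a
// peer or decoded from an exported RLP stream. On failure the returned index
// identifies the offending block.
//
//	if idx, err := chain.InsertChain(blocks); err != nil {
//		log.Error("Block import failed", "index", idx, "err", err)
//	}
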
  1009  // insertChain will execute the actual chain insertion and event aggregation. The
  1010  // only reason this method exists as a separate one is to make locking cleaner
  1011  // with deferred statements.
  1012  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1013  	// Sanity check that we have something meaningful to import
  1014  	if len(chain) == 0 {
  1015  		return 0, nil, nil, nil
  1016  	}
  1017  	// Do a sanity check that the provided chain is actually ordered and linked
  1018  	for i := 1; i < len(chain); i++ {
  1019  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1020  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1021  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1022  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1023  
  1024  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1025  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1026  		}
  1027  	}
  1028  	// Pre-checks passed, start the full block imports
  1029  	bc.wg.Add(1)
  1030  	defer bc.wg.Done()
  1031  
  1032  	bc.chainmu.Lock()
  1033  	defer bc.chainmu.Unlock()
  1034  
  1035  	// A queued approach to delivering events. This is generally
  1036  	// faster than direct delivery and requires far fewer mutex
  1037  	// acquisitions.
  1038  	var (
  1039  		stats         = insertStats{startTime: mclock.Now()}
  1040  		events        = make([]interface{}, 0, len(chain))
  1041  		lastCanon     *types.Block
  1042  		coalescedLogs []*types.Log
  1043  	)
  1044  	// Start the parallel header verifier
  1045  	headers := make([]*types.Header, len(chain))
  1046  	seals := make([]bool, len(chain))
  1047  
  1048  	for i, block := range chain {
  1049  		headers[i] = block.Header()
  1050  		seals[i] = true
  1051  	}
  1052  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1053  	defer close(abort)
  1054  
  1055  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1056  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1057  
  1058  	// Iterate over the blocks and insert when the verifier permits
  1059  	for i, block := range chain {
  1060  		// If the chain is terminating, stop processing blocks
  1061  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1062  			log.Debug("Premature abort during blocks processing")
  1063  			break
  1064  		}
  1065  		// If the header is a banned one, straight out abort
  1066  		if BadHashes[block.Hash()] {
  1067  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1068  			return i, events, coalescedLogs, ErrBlacklistedHash
  1069  		}
  1070  		// Wait for the block's verification to complete
  1071  		bstart := time.Now()
  1072  
  1073  		err := <-results
  1074  		if err == nil {
  1075  			err = bc.Validator().ValidateBody(block)
  1076  		}
  1077  		switch {
  1078  		case err == ErrKnownBlock:
  1079  			// Block and state both already known. However if the current block is below
  1080  			// this number we did a rollback and we should reimport it nonetheless.
  1081  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1082  				stats.ignored++
  1083  				continue
  1084  			}
  1085  
  1086  		case err == consensus.ErrFutureBlock:
  1087  			// Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
  1088  			// is exceeded the block is discarded, otherwise it is queued for later processing.
  1089  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1090  			if block.Time().Cmp(max) > 0 {
  1091  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1092  			}
  1093  			bc.futureBlocks.Add(block.Hash(), block)
  1094  			stats.queued++
  1095  			continue
  1096  
  1097  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1098  			bc.futureBlocks.Add(block.Hash(), block)
  1099  			stats.queued++
  1100  			continue
  1101  
  1102  		case err == consensus.ErrPrunedAncestor:
  1103  			// Block competing with the canonical chain, store in the db, but don't process
  1104  			// until the competitor TD goes above the canonical TD
  1105  			currentBlock := bc.CurrentBlock()
  1106  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1107  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1108  			if localTd.Cmp(externTd) > 0 {
  1109  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1110  					return i, events, coalescedLogs, err
  1111  				}
  1112  				continue
  1113  			}
  1114  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1115  			var winner []*types.Block
  1116  
  1117  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1118  			for !bc.HasState(parent.Root()) {
  1119  				winner = append(winner, parent)
  1120  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1121  			}
  1122  			for j := 0; j < len(winner)/2; j++ {
  1123  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1124  			}
  1125  			// Import all the pruned blocks to make the state available
  1126  			bc.chainmu.Unlock()
  1127  			_, evs, logs, err := bc.insertChain(winner)
  1128  			bc.chainmu.Lock()
  1129  			events, coalescedLogs = evs, logs
  1130  
  1131  			if err != nil {
  1132  				return i, events, coalescedLogs, err
  1133  			}
  1134  
  1135  		case err != nil:
  1136  			bc.reportBlock(block, nil, err)
  1137  			return i, events, coalescedLogs, err
  1138  		}
  1139  		// Create a new statedb using the parent block and report an
  1140  		// error if it fails.
  1141  		var parent *types.Block
  1142  		if i == 0 {
  1143  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1144  		} else {
  1145  			parent = chain[i-1]
  1146  		}
  1147  		state, err := state.New(parent.Root(), bc.stateCache)
  1148  		if err != nil {
  1149  			return i, events, coalescedLogs, err
  1150  		}
  1151  		// Process block using the parent state as reference point.
  1152  		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1153  		if err != nil {
  1154  			bc.reportBlock(block, receipts, err)
  1155  			return i, events, coalescedLogs, err
  1156  		}
  1157  		// Validate the state using the default validator
  1158  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1159  		if err != nil {
  1160  			bc.reportBlock(block, receipts, err)
  1161  			return i, events, coalescedLogs, err
  1162  		}
  1163  		proctime := time.Since(bstart)
  1164  
  1165  		// Write the block to the chain and get the status.
  1166  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1167  		if err != nil {
  1168  			return i, events, coalescedLogs, err
  1169  		}
  1170  		switch status {
  1171  		case CanonStatTy:
  1172  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1173  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1174  
  1175  			coalescedLogs = append(coalescedLogs, logs...)
  1176  			blockInsertTimer.UpdateSince(bstart)
  1177  			events = append(events, ChainEvent{block, block.Hash(), logs})
  1178  			lastCanon = block
  1179  
  1180  			// Only count canonical blocks for GC processing time
  1181  			bc.gcproc += proctime
  1182  
  1183  		case SideStatTy:
  1184  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1185  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1186  
  1187  			blockInsertTimer.UpdateSince(bstart)
  1188  			events = append(events, ChainSideEvent{block})
  1189  		}
  1190  		stats.processed++
  1191  		stats.usedGas += usedGas
  1192  
  1193  		cache, _ := bc.stateCache.TrieDB().Size()
  1194  		stats.report(chain, i, cache)
  1195  	}
  1196  	// Append a single chain head event if we've progressed the chain
  1197  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1198  		events = append(events, ChainHeadEvent{lastCanon})
  1199  	}
  1200  	return 0, events, coalescedLogs, nil
  1201  }
  1202  
  1203  // insertStats tracks and reports on block insertion.
  1204  type insertStats struct {
  1205  	queued, processed, ignored int
  1206  	usedGas                    uint64
  1207  	lastIndex                  int
  1208  	startTime                  mclock.AbsTime
  1209  }
  1210  
  1211  // statsReportLimit is the time limit during import and export after which we
  1212  // always print out progress. This avoids the user wondering what's going on.
  1213  const statsReportLimit = 8 * time.Second
  1214  
  1215  // report prints statistics if some number of blocks have been processed
  1216  // or more than a few seconds have passed since the last message.
  1217  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1218  	// Fetch the timings for the batch
  1219  	var (
  1220  		now     = mclock.Now()
  1221  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1222  	)
  1223  	// If we're at the last block of the batch or report period reached, log
  1224  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1225  		var (
  1226  			end = chain[index]
  1227  			txs = countTransactions(chain[st.lastIndex : index+1])
  1228  		)
  1229  		context := []interface{}{
  1230  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1231  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1232  			"number", end.Number(), "hash", end.Hash(), "cache", cache,
  1233  		}
  1234  		if st.queued > 0 {
  1235  			context = append(context, []interface{}{"queued", st.queued}...)
  1236  		}
  1237  		if st.ignored > 0 {
  1238  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1239  		}
  1240  		log.Info("Imported new chain segment", context...)
  1241  
  1242  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1243  	}
  1244  }
  1245  
  1246  func countTransactions(chain []*types.Block) (c int) {
  1247  	for _, b := range chain {
  1248  		c += len(b.Transactions())
  1249  	}
  1250  	return c
  1251  }
  1252  
  1253  // reorg takes two blocks, an old chain and a new chain, and reconstructs the blocks and inserts them
  1254  // to be part of the new canonical chain, accumulating potential missing transactions and posting an
  1255  // event about them.
  1256  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1257  	var (
  1258  		newChain    types.Blocks
  1259  		oldChain    types.Blocks
  1260  		commonBlock *types.Block
  1261  		deletedTxs  types.Transactions
  1262  		deletedLogs []*types.Log
  1263  		// collectLogs collects the logs that were generated during the
  1264  		// processing of the block that corresponds with the given hash.
  1265  		// These logs are later announced as deleted.
  1266  		collectLogs = func(hash common.Hash) {
  1267  			// Coalesce logs and set 'Removed'.
  1268  			number := bc.hc.GetBlockNumber(hash)
  1269  			if number == nil {
  1270  				return
  1271  			}
  1272  			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
  1273  			for _, receipt := range receipts {
  1274  				for _, log := range receipt.Logs {
  1275  					del := *log
  1276  					del.Removed = true
  1277  					deletedLogs = append(deletedLogs, &del)
  1278  				}
  1279  			}
  1280  		}
  1281  	)
  1282  
  1283  	// first reduce whichever chain is longer down to the same height
  1284  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1285  		// reduce old chain
  1286  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1287  			oldChain = append(oldChain, oldBlock)
  1288  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1289  
  1290  			collectLogs(oldBlock.Hash())
  1291  		}
  1292  	} else {
  1293  		// reduce new chain and append new chain blocks for inserting later on
  1294  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1295  			newChain = append(newChain, newBlock)
  1296  		}
  1297  	}
  1298  	if oldBlock == nil {
  1299  		return fmt.Errorf("Invalid old chain")
  1300  	}
  1301  	if newBlock == nil {
  1302  		return fmt.Errorf("Invalid new chain")
  1303  	}
  1304  
  1305  	for {
  1306  		if oldBlock.Hash() == newBlock.Hash() {
  1307  			commonBlock = oldBlock
  1308  			break
  1309  		}
  1310  
  1311  		oldChain = append(oldChain, oldBlock)
  1312  		newChain = append(newChain, newBlock)
  1313  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1314  		collectLogs(oldBlock.Hash())
  1315  
  1316  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1317  		if oldBlock == nil {
  1318  			return fmt.Errorf("Invalid old chain")
  1319  		}
  1320  		if newBlock == nil {
  1321  			return fmt.Errorf("Invalid new chain")
  1322  		}
  1323  	}
  1324  	// Ensure the user sees large reorgs
  1325  	if len(oldChain) > 0 && len(newChain) > 0 {
  1326  		logFn := log.Debug
  1327  		if len(oldChain) > 63 {
  1328  			logFn = log.Warn
  1329  		}
  1330  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1331  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1332  	} else {
  1333  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1334  	}
  1335  	// Insert the new chain, taking care of the proper incremental order
  1336  	var addedTxs types.Transactions
  1337  	for i := len(newChain) - 1; i >= 0; i-- {
  1338  		// insert the block in the canonical way, re-writing history
  1339  		bc.insert(newChain[i])
  1340  		// write lookup entries for hash based transaction/receipt searches
  1341  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1342  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1343  	}
  1344  	// calculate the difference between deleted and added transactions
  1345  	diff := types.TxDifference(deletedTxs, addedTxs)
  1346  	// When transactions get deleted from the database, the receipts that were
  1347  	// created in the fork must also be deleted
  1348  	batch := bc.db.NewBatch()
  1349  	for _, tx := range diff {
  1350  		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1351  	}
  1352  	batch.Write()
  1353  
  1354  	if len(deletedLogs) > 0 {
  1355  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1356  	}
  1357  	if len(oldChain) > 0 {
  1358  		go func() {
  1359  			for _, block := range oldChain {
  1360  				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1361  			}
  1362  		}()
  1363  	}
  1364  
  1365  	return nil
  1366  }
  1367  
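        // findCommonAncestor is a minimal sketch (a hypothetical helper, not called by
        // the code above) of the fork-point search that reorg performs: both chains
        // are first reduced to the same height and then walked back in lockstep until
        // their hashes converge.
        func (bc *BlockChain) findCommonAncestor(oldBlock, newBlock *types.Block) *types.Block {
        	// Bring both pointers down to the same height first.
        	for oldBlock != nil && newBlock != nil && oldBlock.NumberU64() > newBlock.NumberU64() {
        		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
        	}
        	for oldBlock != nil && newBlock != nil && newBlock.NumberU64() > oldBlock.NumberU64() {
        		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
        	}
        	// Step both chains back until the hashes match; that block is the common ancestor.
        	for oldBlock != nil && newBlock != nil {
        		if oldBlock.Hash() == newBlock.Hash() {
        			return oldBlock
        		}
        		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
        		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
        	}
        	return nil
        }
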
  1368  // PostChainEvents iterates over the events generated by a chain insertion and
  1369  // posts them into the event feed.
  1370  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1371  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1372  	// post event logs for further processing
  1373  	if logs != nil {
  1374  		bc.logsFeed.Send(logs)
  1375  	}
  1376  	for _, event := range events {
  1377  		switch ev := event.(type) {
  1378  		case ChainEvent:
  1379  			bc.chainFeed.Send(ev)
  1380  
  1381  		case ChainHeadEvent:
  1382  			bc.chainHeadFeed.Send(ev)
  1383  
  1384  		case ChainSideEvent:
  1385  			bc.chainSideFeed.Send(ev)
  1386  		}
  1387  	}
  1388  }
  1389  
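        // postExampleEvents is a minimal caller sketch (hypothetical, assuming the
        // ChainEvent and ChainHeadEvent types defined elsewhere in this package): the
        // block inserter collects typed events and hands them to PostChainEvents,
        // which fans them out on the matching feeds.
        func postExampleEvents(bc *BlockChain, block *types.Block, logs []*types.Log) {
        	events := []interface{}{
        		ChainEvent{Block: block, Hash: block.Hash(), Logs: logs},
        		ChainHeadEvent{Block: block},
        	}
        	bc.PostChainEvents(events, logs)
        }
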
  1390  func (bc *BlockChain) update() {
  1391  	futureTimer := time.NewTicker(5 * time.Second)
  1392  	defer futureTimer.Stop()
  1393  	for {
  1394  		select {
  1395  		case <-futureTimer.C:
  1396  			bc.procFutureBlocks()
  1397  		case <-bc.quit:
  1398  			return
  1399  		}
  1400  	}
  1401  }
  1402  
  1403  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1404  func (bc *BlockChain) BadBlocks() []*types.Block {
  1405  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1406  	for _, hash := range bc.badBlocks.Keys() {
  1407  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1408  			block := blk.(*types.Block)
  1409  			blocks = append(blocks, block)
  1410  		}
  1411  	}
  1412  	return blocks
  1413  }
  1414  
  1415  // addBadBlock adds a bad block to the bad-block LRU cache
  1416  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1417  	bc.badBlocks.Add(block.Hash(), block)
  1418  }
  1419  
  1420  // reportBlock logs a bad block error.
  1421  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1422  	bc.addBadBlock(block)
  1423  
  1424  	var receiptString string
  1425  	for _, receipt := range receipts {
  1426  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1427  	}
  1428  	log.Error(fmt.Sprintf(`
  1429  ########## BAD BLOCK #########
  1430  Chain config: %v
  1431  
  1432  Number: %v
  1433  Hash: 0x%x
  1434  %v
  1435  
  1436  Error: %v
  1437  ##############################
  1438  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1439  }
  1440  
  1441  // InsertHeaderChain attempts to insert the given header chain into the local
  1442  // chain, possibly creating a reorg. If an error is returned, it will return the
  1443  // index number of the failing header as well as an error describing what went wrong.
  1444  //
  1445  // The checkFreq parameter can be used to fine-tune whether nonce verification
  1446  // should be done or not. The reason behind the optional check is that some
  1447  // of the header retrieval mechanisms already need to verify nonces, and
  1448  // nonces can also be verified sparsely, without needing to check each one.
  1449  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1450  	start := time.Now()
  1451  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1452  		return i, err
  1453  	}
  1454  
  1455  	// Make sure only one thread manipulates the chain at once
  1456  	bc.chainmu.Lock()
  1457  	defer bc.chainmu.Unlock()
  1458  
  1459  	bc.wg.Add(1)
  1460  	defer bc.wg.Done()
  1461  
  1462  	whFunc := func(header *types.Header) error {
  1463  		bc.mu.Lock()
  1464  		defer bc.mu.Unlock()
  1465  
  1466  		_, err := bc.hc.WriteHeader(header)
  1467  		return err
  1468  	}
  1469  
  1470  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1471  }
  1472  
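        // importHeaders is a minimal usage sketch (a hypothetical caller): a batch of
        // fetched headers is validated and written in one call. A checkFreq of 100
        // asks the header chain to fully verify roughly one header seal in every
        // hundred, relying on the chain linkage for the rest.
        func importHeaders(bc *BlockChain, headers []*types.Header) error {
        	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
        		log.Error("Header import failed", "index", n, "err", err)
        		return err
        	}
        	return nil
        }
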
  1473  // writeHeader writes a header into the local chain, given that its parent is
  1474  // already known. If the total difficulty of the newly inserted header becomes
  1475  // greater than the current known TD, the canonical chain is re-routed.
  1476  //
  1477  // Note: This method is not concurrent-safe with inserting blocks simultaneously
  1478  // into the chain, as side effects caused by reorganisations cannot be emulated
  1479  // without the real blocks. Hence, writing headers directly should only be done
  1480  // in two scenarios: pure-header mode of operation (light clients), or properly
  1481  // separated header/block phases (non-archive clients).
  1482  func (bc *BlockChain) writeHeader(header *types.Header) error {
  1483  	bc.wg.Add(1)
  1484  	defer bc.wg.Done()
  1485  
  1486  	bc.mu.Lock()
  1487  	defer bc.mu.Unlock()
  1488  
  1489  	_, err := bc.hc.WriteHeader(header)
  1490  	return err
  1491  }
  1492  
  1493  // CurrentHeader retrieves the current head header of the canonical chain. The
  1494  // header is retrieved from the HeaderChain's internal cache.
  1495  func (bc *BlockChain) CurrentHeader() *types.Header {
  1496  	return bc.hc.CurrentHeader()
  1497  }
  1498  
  1499  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1500  // database by hash and number, caching it if found.
  1501  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1502  	return bc.hc.GetTd(hash, number)
  1503  }
  1504  
  1505  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1506  // database by hash, caching it if found.
  1507  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1508  	return bc.hc.GetTdByHash(hash)
  1509  }
  1510  
  1511  // GetHeader retrieves a block header from the database by hash and number,
  1512  // caching it if found.
  1513  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1514  	return bc.hc.GetHeader(hash, number)
  1515  }
  1516  
  1517  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1518  // found.
  1519  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1520  	return bc.hc.GetHeaderByHash(hash)
  1521  }
  1522  
  1523  // HasHeader checks if a block header is present in the database or not, caching
  1524  // it if present.
  1525  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1526  	return bc.hc.HasHeader(hash, number)
  1527  }
  1528  
  1529  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1530  // hash, fetching towards the genesis block.
  1531  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1532  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1533  }
  1534  
  1535  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  1536  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  1537  // number of blocks to be individually checked before we reach the canonical chain.
  1538  //
  1539  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
  1540  func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  1541  	bc.chainmu.Lock()
  1542  	defer bc.chainmu.Unlock()
  1543  
  1544  	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  1545  }
  1546  
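        // tenthAncestor is a minimal usage sketch (hypothetical): fetching the hash of
        // the block ten generations behind the current head. An ancestor of 0 would
        // return the head itself and 1 its parent, while maxNonCanonical bounds how
        // many non-canonical blocks may be checked individually before the lookup
        // reaches the canonical chain.
        func tenthAncestor(bc *BlockChain) (common.Hash, uint64) {
        	head := bc.CurrentHeader()
        	maxNonCanonical := uint64(100)
        	return bc.GetAncestor(head.Hash(), head.Number.Uint64(), 10, &maxNonCanonical)
        }
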
  1547  // GetHeaderByNumber retrieves a block header from the database by number,
  1548  // caching it (associated with its hash) if found.
  1549  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1550  	return bc.hc.GetHeaderByNumber(number)
  1551  }
  1552  
  1553  // Config retrieves the blockchain's chain configuration.
  1554  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1555  
  1556  // Engine retrieves the blockchain's consensus engine.
  1557  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1558  
  1559  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1560  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1561  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1562  }
  1563  
  1564  // SubscribeChainEvent registers a subscription of ChainEvent.
  1565  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1566  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1567  }
  1568  
  1569  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1570  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1571  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1572  }
  1573  
  1574  // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1575  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1576  	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1577  }
  1578  
  1579  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1580  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1581  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1582  }
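
        // watchHead is a minimal consumer sketch (hypothetical): subscribing to head
        // events and draining the channel until the subscription is torn down. The
        // channel is buffered because a consumer that stops reading could otherwise
        // stall delivery on the feed.
        func watchHead(bc *BlockChain) {
        	heads := make(chan ChainHeadEvent, 16)
        	sub := bc.SubscribeChainHeadEvent(heads)
        	defer sub.Unsubscribe()
        
        	for {
        		select {
        		case ev := <-heads:
        			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
        		case err := <-sub.Err():
        			if err != nil {
        				log.Warn("Head subscription ended", "err", err)
        			}
        			return
        		}
        	}
        }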