github.com/edxfund/validator@v1.8.16-0.20181020093046-c1def72855da/core/blockchain.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package core implements the Ethereum consensus protocol.
    18  package core
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/EDXFund/Validator/common"
    31  	"github.com/EDXFund/Validator/common/mclock"
    32  	"github.com/EDXFund/Validator/common/prque"
    33  	"github.com/EDXFund/Validator/consensus"
    34  	"github.com/EDXFund/Validator/core/rawdb"
    35  	"github.com/EDXFund/Validator/core/state"
    36  	"github.com/EDXFund/Validator/core/types"
    37  	"github.com/EDXFund/Validator/core/vm"
    38  	//"github.com/EDXFund/Validator/crypto"
    39  	"github.com/EDXFund/Validator/ethdb"
    40  	"github.com/EDXFund/Validator/event"
    41  	"github.com/EDXFund/Validator/log"
    42  	"github.com/EDXFund/Validator/metrics"
    43  	"github.com/EDXFund/Validator/params"
    44  	"github.com/EDXFund/Validator/rlp"
    45  	"github.com/EDXFund/Validator/trie"
    46  	lru "github.com/hashicorp/golang-lru"
    47  )
    48  
    49  var (
    50  	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
    51  
    52  	ErrNoGenesis = errors.New("Genesis not found in chain")
    53  )
    54  
    55  const (
    56  	bodyCacheLimit      = 256
    57  	blockCacheLimit     = 256
    58  	maxFutureBlocks     = 256
    59  	maxTimeFutureBlocks = 30
    60  	badBlockLimit       = 10
    61  	triesInMemory       = 128
    62  
    63  	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
    64  	BlockChainVersion = 3
    65  )
    66  
    67  // CacheConfig contains the configuration values for the trie caching/pruning
    68  // that's resident in a blockchain.
    69  type CacheConfig struct {
    70  	Disabled      bool          // Whether to disable trie write caching (archive node)
    71  	TrieNodeLimit int           // Memory limit (MB) at which to flush the current in-memory trie to disk
    72  	TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
    73  }
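
        // A minimal illustrative sketch (not part of the original file) of the two common
        // configurations: an archive node disables trie write caching entirely, while a
        // pruning full node can rely on the defaults that NewBlockChain applies when a
        // nil CacheConfig is passed in.
        //
        //	archiveCfg := &CacheConfig{Disabled: true}
        //	pruningCfg := &CacheConfig{
        //		TrieNodeLimit: 256 * 1024 * 1024, // flush in-memory trie nodes past 256MB
        //		TrieTimeLimit: 5 * time.Minute,   // or after 5 minutes of accumulated processing
        //	}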
    74  
    75  // BlockChain represents the canonical chain given a database with a genesis
    76  // block. The Blockchain manages chain imports, reverts, chain reorganisations.
    77  //
    78  // Importing blocks into the block chain happens according to the set of rules
    79  // defined by the two stage Validator. Processing of blocks is done using the
    80  // Processor which processes the included transactions. The validation of the
    81  // state is done in the second part of the Validator. A failure aborts the
    82  // import.
    83  //
    84  // The BlockChain also helps in returning blocks from **any** chain included
    85  // in the database as well as blocks that represent the canonical chain. It is
    86  // important to note that GetBlock can return any block and need not be
    87  // included in the canonical chain, whereas GetBlockByNumber always refers to
    88  // the canonical chain.
    89  type BlockChain struct {
    90  	chainConfig *params.ChainConfig // Chain & network configuration
    91  	cacheConfig *CacheConfig        // Cache configuration for pruning
    92  
    93  	shardId uint16         //Shard Id of this block chain
    94  	db      ethdb.Database // Low level persistent database to store final content in
    95  	triegc  *prque.Prque   // Priority queue mapping block numbers to tries to gc
    96  	gcproc  time.Duration  // Accumulates canonical block processing for trie dumping
    97  
    98  	hc              *HeaderChain
    99  	rmLogsFeed      event.Feed
   100  	chainFeed       event.Feed
   101  	chainMasterFeed event.Feed
   102  	chainHeadFeed   event.Feed
   103  	logsFeed        event.Feed
   104  	scope           event.SubscriptionScope
   105  	genesisBlock    *types.Block
   106  
   107  	mu      sync.RWMutex // global mutex for locking chain operations
   108  	chainmu sync.RWMutex // blockchain insertion lock
   109  	procmu  sync.RWMutex // block processor lock
   110  
   111  	checkpoint       int          // checkpoint counts towards the new checkpoint
   112  	currentBlock     atomic.Value // Current head of the block chain
   113  	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
   114  
   115  	stateCache   state.Database // State database to reuse between imports (contains state cache)
   116  	bodyCache    *lru.Cache     // Cache for the most recent block bodies
   117  	bodyRLPCache *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
   118  	blockCache   *lru.Cache     // Cache for the most recent entire blocks
   119  	futureBlocks *lru.Cache     // future blocks are blocks added for later processing
   120  
   121  	quit    chan struct{} // blockchain quit channel
   122  	running int32         // running must be accessed atomically
   123  	// procInterrupt must be accessed atomically
   124  	procInterrupt int32          // interrupt signaler for block processing
   125  	wg            sync.WaitGroup // chain processing wait group for shutting down
   126  
   127  	engine    consensus.Engine
   128  	processor Processor // block processor interface
   129  	validator Validator // block and state validator interface
   130  	vmConfig  vm.Config
   131  	//	shardPool ShardPool
   132  	badBlocks *lru.Cache // Bad block cache
   133  }
   134  
   135  // NewBlockChain returns a fully initialised block chain using information
   136  // available in the database. It initialises the default Ethereum Validator and
   137  // Processor.
   138  func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shardId uint16) (*BlockChain, error) {
   139  	if cacheConfig == nil {
   140  		cacheConfig = &CacheConfig{
   141  			TrieNodeLimit: 256 * 1024 * 1024,
   142  			TrieTimeLimit: 5 * time.Minute,
   143  		}
   144  	}
   145  	bodyCache, _ := lru.New(bodyCacheLimit)
   146  	bodyRLPCache, _ := lru.New(bodyCacheLimit)
   147  	blockCache, _ := lru.New(blockCacheLimit)
   148  	futureBlocks, _ := lru.New(maxFutureBlocks)
   149  	badBlocks, _ := lru.New(badBlockLimit)
   150  
   151  	bc := &BlockChain{
   152  		chainConfig:  chainConfig,
   153  		cacheConfig:  cacheConfig,
   154  		db:           db,
   155  		triegc:       prque.New(nil),
   156  		stateCache:   state.NewDatabase(db),
   157  		quit:         make(chan struct{}),
   158  		bodyCache:    bodyCache,
   159  		bodyRLPCache: bodyRLPCache,
   160  		blockCache:   blockCache,
   161  		futureBlocks: futureBlocks,
   162  		engine:       engine,
   163  		vmConfig:     vmConfig,
   164  		badBlocks:    badBlocks,
   165  	}
   166  	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
   167  	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
   168  
   169  	var err error
   170  	bc.hc, err = NewHeaderChain(db, chainConfig, engine, shardId, bc.getProcInterrupt)
   171  	if err != nil {
   172  		return nil, err
   173  	}
   174  	bc.genesisBlock = bc.GetBlockByNumber(0)
   175  	if bc.genesisBlock == nil {
   176  		return nil, ErrNoGenesis
   177  	}
   178  
   179  	if err := bc.loadLastState(); err != nil {
   180  		return nil, err
   181  	}
   182  	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
   183  	for hash := range BadHashes {
   184  		if header := bc.GetHeaderByHash(hash); header != nil {
   185  			// get the canonical block corresponding to the offending header's number
   186  			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
   187  			// make sure the headerByNumber (if present) is in our current canonical chain
   188  			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
   189  				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
   190  				bc.SetHead(header.Number.Uint64() - 1)
   191  				log.Error("Chain rewind was successful, resuming normal operation")
   192  			}
   193  		}
   194  	}
   195  	// Take ownership of this particular state
   196  	go bc.update()
   197  	return bc, nil
   198  }
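
        // A hedged usage sketch, not taken from this package: it assumes the caller has an
        // ethdb.Database with the genesis block already committed (otherwise ErrNoGenesis is
        // returned) and some consensus.Engine implementation; "db" and "engine" below are
        // placeholders.
        //
        //	chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, uint16(types.ShardMaster))
        //	if err != nil {
        //		log.Crit("Failed to open blockchain", "err", err)
        //	}
        //	defer chain.Stop()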
   199  
   200  func (bc *BlockChain) getProcInterrupt() bool {
   201  	return atomic.LoadInt32(&bc.procInterrupt) == 1
   202  }
   203  
   204  // loadLastState loads the last known chain state from the database. This method
   205  // assumes that the chain manager mutex is held.
   206  func (bc *BlockChain) loadLastState() error {
   207  	// Restore the last known head block
   208  	head := rawdb.ReadHeadBlockHash(bc.db, bc.shardId)
   209  	if head == (common.Hash{}) {
   210  		// Corrupt or empty database, init from scratch
   211  		log.Warn("Empty database, resetting chain")
   212  		return bc.Reset()
   213  	}
   214  	// Make sure the entire head block is available
   215  	currentBlock := bc.GetBlockByHash(head)
   216  	if currentBlock == nil {
   217  		// Corrupt or empty database, init from scratch
   218  		log.Warn("Head block missing, resetting chain", "hash", head)
   219  		return bc.Reset()
   220  	}
   221  	// Only the master chain carries state; repair by rewinding if the head state is missing
   222  	if bc.ShardId() == uint16(types.ShardMaster) {
   223  		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
   224  			log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
   225  			if err := bc.repair(&currentBlock); err != nil {
   226  				return err
   227  			}
   228  		}
   229  	}
   230  	// Everything seems to be fine, set as the head block
   231  	bc.currentBlock.Store(currentBlock)
   232  
   233  	// Restore the last known head header
   234  	currentHeader := currentBlock.Header()
   235  	if head := rawdb.ReadHeadHeaderHash(bc.db, bc.ShardId()); head != (common.Hash{}) {
   236  		if header := bc.GetHeaderByHash(head); header != nil {
   237  			currentHeader = header
   238  		}
   239  	}
   240  	bc.hc.SetCurrentHeader(currentHeader)
   241  
   242  	// Restore the last known head fast block
   243  	bc.currentFastBlock.Store(currentBlock)
   244  	if head := rawdb.ReadHeadFastBlockHash(bc.db, bc.ShardId()); head != (common.Hash{}) {
   245  		if block := bc.GetBlockByHash(head); block != nil {
   246  			bc.currentFastBlock.Store(block)
   247  		}
   248  	}
   249  
   250  	// Issue a status log for the user
   251  	currentFastBlock := bc.CurrentFastBlock()
   252  
   253  	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
   254  	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   255  	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
   256  
   257  	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
   258  	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
   259  	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
   260  
   261  	return nil
   262  }
   263  func (bc *BlockChain) ShardId() uint16 {
   264  	return bc.shardId
   265  }
   266  
   267  // SetHead rewinds the local chain to a new head. In the case of headers, everything
   268  // above the new head will be deleted and the new one set. In the case of blocks
   269  // though, the head may be further rewound if block bodies are missing (non-archive
   270  // nodes after a fast sync).
   271  func (bc *BlockChain) SetHead(head uint64) error {
   272  	log.Warn("Rewinding blockchain", "target", head)
   273  
   274  	bc.mu.Lock()
   275  	defer bc.mu.Unlock()
   276  
   277  	// Rewind the header chain, deleting all block bodies until then
   278  	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
   279  		rawdb.DeleteBody(db, hash, bc.shardId, num)
   280  	}
   281  	bc.hc.SetHead(head, delFn)
   282  	currentHeader := bc.hc.CurrentHeader()
   283  
   284  	// Clear out any stale content from the caches
   285  	bc.bodyCache.Purge()
   286  	bc.bodyRLPCache.Purge()
   287  	bc.blockCache.Purge()
   288  	bc.futureBlocks.Purge()
   289  
   290  	// Rewind the block chain, ensuring we don't end up with a stateless head block
   291  	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
   292  		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   293  	}
   294  	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
   295  
   296  		noneBlock := false
   297  		// only the master chain node needs to check that the head state (Root) exists
   298  		_, err := state.New(currentBlock.Root(), bc.stateCache)
   299  		if bc.shardId == uint16(types.ShardMaster) {
   300  			if err != nil {
   301  				// Rewound state missing, rolled back to before pivot, reset to genesis
   302  				noneBlock = true
   303  			}
   304  		}
   305  		if noneBlock {
   306  			bc.currentBlock.Store(bc.genesisBlock)
   307  		}
   308  	}
   309  
   310  	// Rewind the fast block in a simpleton way to the target head
   311  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
   312  		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
   313  	}
   314  	// If either blocks reached nil, reset to the genesis state
   315  	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
   316  		bc.currentBlock.Store(bc.genesisBlock)
   317  	}
   318  	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
   319  		bc.currentFastBlock.Store(bc.genesisBlock)
   320  	}
   321  	currentBlock := bc.CurrentBlock()
   322  	currentFastBlock := bc.CurrentFastBlock()
   323  
   324  	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash(), bc.ShardId())
   325  	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash(), bc.ShardId())
   326  
   327  	return bc.loadLastState()
   328  }
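
        // Illustration only (not in the original source): rewinding the local chain by 64
        // blocks would look roughly like the sketch below; note that SetHead may rewind
        // further if bodies or state for the target block are missing.
        //
        //	if head := bc.CurrentBlock().NumberU64(); head > 64 {
        //		_ = bc.SetHead(head - 64)
        //	}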
   329  
   330  // FastSyncCommitHead sets the current head block to the one defined by the hash
   331  // regardless of what the chain contents were prior.
   332  func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
   333  	// Make sure that both the block as well as its state trie exist
   334  	block := bc.GetBlockByHash(hash)
   335  	if block == nil {
   336  		return fmt.Errorf("non existent block [%x…]", hash[:4])
   337  	}
   338  	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
   339  		return err
   340  	}
   341  	// If all checks out, manually set the head block
   342  	bc.mu.Lock()
   343  	bc.currentBlock.Store(block)
   344  	bc.mu.Unlock()
   345  
   346  	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
   347  	return nil
   348  }
   349  
   350  // GasLimit returns the gas limit of the current HEAD block.
   351  func (bc *BlockChain) GasLimit() uint64 {
   352  	return bc.CurrentBlock().GasLimit()
   353  }
   354  
   355  // CurrentBlock retrieves the current head block of the canonical chain. The
   356  // block is retrieved from the blockchain's internal cache.
   357  func (bc *BlockChain) CurrentBlock() *types.Block {
   358  	return bc.currentBlock.Load().(*types.Block)
   359  }
   360  
   361  // CurrentFastBlock retrieves the current fast-sync head block of the canonical
   362  // chain. The block is retrieved from the blockchain's internal cache.
   363  func (bc *BlockChain) CurrentFastBlock() *types.Block {
   364  	return bc.currentFastBlock.Load().(*types.Block)
   365  }
   366  
   367  // SetProcessor sets the processor required for making state modifications.
   368  func (bc *BlockChain) SetProcessor(processor Processor) {
   369  	bc.procmu.Lock()
   370  	defer bc.procmu.Unlock()
   371  	bc.processor = processor
   372  }
   373  
   374  // SetValidator sets the validator which is used to validate incoming blocks.
   375  func (bc *BlockChain) SetValidator(validator Validator) {
   376  	bc.procmu.Lock()
   377  	defer bc.procmu.Unlock()
   378  	bc.validator = validator
   379  }
   380  
   381  // Validator returns the current validator.
   382  func (bc *BlockChain) Validator() Validator {
   383  	bc.procmu.RLock()
   384  	defer bc.procmu.RUnlock()
   385  	return bc.validator
   386  }
   387  
   388  // Processor returns the current processor.
   389  func (bc *BlockChain) Processor() Processor {
   390  	bc.procmu.RLock()
   391  	defer bc.procmu.RUnlock()
   392  	return bc.processor
   393  }
   394  
   395  // State returns a new mutable state based on the current HEAD block.
   396  func (bc *BlockChain) State() (*state.StateDB, error) {
   397  	return bc.StateAt(bc.CurrentBlock().Root())
   398  }
   399  
   400  // StateAt returns a new mutable state based on a particular point in time.
   401  func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
   402  	return state.New(root, bc.stateCache)
   403  }
   404  
   405  // Reset purges the entire blockchain, restoring it to its genesis state.
   406  func (bc *BlockChain) Reset() error {
   407  	return bc.ResetWithGenesisBlock(bc.genesisBlock)
   408  }
   409  
   410  // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
   411  // specified genesis state.
   412  func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
   413  	// Dump the entire block chain and purge the caches
   414  	if err := bc.SetHead(0); err != nil {
   415  		return err
   416  	}
   417  	bc.mu.Lock()
   418  	defer bc.mu.Unlock()
   419  
   420  	// Prepare the genesis block and reinitialise the chain
   421  	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
   422  		log.Crit("Failed to write genesis block TD", "err", err)
   423  	}
   424  	rawdb.WriteBlock(bc.db, genesis)
   425  
   426  	bc.genesisBlock = genesis
   427  	bc.insert(bc.genesisBlock)
   428  	bc.currentBlock.Store(bc.genesisBlock)
   429  	bc.hc.SetGenesis(bc.genesisBlock.Header())
   430  	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
   431  	bc.currentFastBlock.Store(bc.genesisBlock)
   432  
   433  	return nil
   434  }
   435  
   436  // repair tries to repair the current blockchain by rolling back the current block
   437  // until one with associated state is found. This is needed to fix incomplete db
   438  // writes caused either by crashes/power outages, or simply non-committed tries.
   439  //
   440  // This method only rolls back the current block. The current header and current
   441  // fast block are left intact.
   442  func (bc *BlockChain) repair(head **types.Block) error {
   443  	for {
   444  		// Abort if we've rewound to a head block that does have associated state
   445  		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
   446  			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
   447  			return nil
   448  		}
   449  		// Otherwise rewind one block and recheck state availability there
   450  		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
   451  	}
   452  }
   453  
   454  // Export writes the active chain to the given writer.
   455  func (bc *BlockChain) Export(w io.Writer) error {
   456  	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
   457  }
   458  
   459  // ExportN writes a subset of the active chain to the given writer.
   460  func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
   461  	bc.mu.RLock()
   462  	defer bc.mu.RUnlock()
   463  
   464  	if first > last {
   465  		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
   466  	}
   467  	log.Info("Exporting batch of blocks", "count", last-first+1)
   468  
   469  	start, reported := time.Now(), time.Now()
   470  	for nr := first; nr <= last; nr++ {
   471  		block := bc.GetBlockByNumber(nr)
   472  		if block == nil {
   473  			return fmt.Errorf("export failed on #%d: not found", nr)
   474  		}
   475  		if err := block.EncodeRLP(w); err != nil {
   476  			return err
   477  		}
   478  		if time.Since(reported) >= statsReportLimit {
   479  			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
   480  			reported = time.Now()
   481  		}
   482  	}
   483  
   484  	return nil
   485  }
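
        // A small usage sketch (assumptions: the caller opens the file, and the "os" import
        // plus the "chain.rlp" name are illustrative, not part of this file):
        //
        //	f, err := os.Create("chain.rlp")
        //	if err != nil {
        //		return err
        //	}
        //	defer f.Close()
        //	return bc.Export(f)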
   486  
   487  // insert injects a new head block into the current block chain. This method
   488  // assumes that the block is indeed a true head. It will also reset the head
   489  // header and the head fast sync block to this very same block if they are older
   490  // or if they are on a different side chain.
   491  //
   492  // Note, this function assumes that the `mu` mutex is held!
   493  func (bc *BlockChain) insert(block *types.Block) {
   494  	// If the block is on a side chain or an unknown one, force other heads onto it too
   495  	updateHeads := rawdb.ReadCanonicalHash(bc.db, bc.ShardId(), block.NumberU64()) != block.Hash()
   496  	shardId := block.Header().ShardId
   497  	// Add the block to the canonical chain number scheme and mark as the head
   498  	rawdb.WriteCanonicalHash(bc.db, block.Hash(), shardId, block.NumberU64())
   499  	rawdb.WriteHeadBlockHash(bc.db, block.Hash(), shardId)
   500  
   501  	bc.currentBlock.Store(block)
   502  
   503  	// If the block is better than our head or is on a different chain, force update heads
   504  	if updateHeads {
   505  		bc.hc.SetCurrentHeader(block.Header())
   506  		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash(), bc.ShardId())
   507  
   508  		bc.currentFastBlock.Store(block)
   509  	}
   510  }
   511  
   512  // Genesis retrieves the chain's genesis block.
   513  func (bc *BlockChain) Genesis() *types.Block {
   514  	return bc.genesisBlock
   515  }
   516  
   517  // GetBody retrieves a block body (transactions and uncles) from the database by
   518  // hash, caching it if found.
   519  func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
   520  	// Short circuit if the body's already in the cache, retrieve otherwise
   521  	if cached, ok := bc.bodyCache.Get(hash); ok {
   522  		body := cached.(*types.Body)
   523  		return body
   524  	}
   525  	number := bc.hc.GetBlockNumber(hash)
   526  	if number == nil {
   527  		return nil
   528  	}
   529  	body := rawdb.ReadBody(bc.db, hash, bc.ShardId(), *number)
   530  	if body == nil {
   531  		return nil
   532  	}
   533  	// Cache the found body for next time and return
   534  	bc.bodyCache.Add(hash, body)
   535  	return body
   536  }
   537  
   538  // GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
   539  // caching it if found.
   540  func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
   541  	// Short circuit if the body's already in the cache, retrieve otherwise
   542  	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
   543  		return cached.(rlp.RawValue)
   544  	}
   545  	number := bc.hc.GetBlockNumber(hash)
   546  	if number == nil {
   547  		return nil
   548  	}
   549  	body := rawdb.ReadBodyRLP(bc.db, hash, bc.ShardId(), *number)
   550  	if len(body) == 0 {
   551  		return nil
   552  	}
   553  	// Cache the found body for next time and return
   554  	bc.bodyRLPCache.Add(hash, body)
   555  	return body
   556  }
   557  
   558  // HasBlock checks if a block is fully present in the database or not.
   559  func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
   560  	if bc.blockCache.Contains(hash) {
   561  		return true
   562  	}
   563  	return rawdb.HasBody(bc.db, hash, bc.ShardId(), number)
   564  }
   565  
   566  // HasState checks if state trie is fully present in the database or not.
   567  func (bc *BlockChain) HasState(hash common.Hash) bool {
   568  	_, err := bc.stateCache.OpenTrie(hash)
   569  	return err == nil
   570  }
   571  
   572  // HasBlockAndState checks if a block and associated state trie is fully present
   573  // in the database or not, caching it if present.
   574  func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
   575  	// Check first that the block itself is known
   576  	block := bc.GetBlock(hash, number)
   577  	if block == nil {
   578  		return false
   579  	}
   580  	return bc.HasState(block.Root())
   581  }
   582  
   583  // GetBlock retrieves a block from the database by hash and number,
   584  // caching it if found.
   585  func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   586  	// Short circuit if the block's already in the cache, retrieve otherwise
   587  	if block, ok := bc.blockCache.Get(hash); ok {
   588  		return block.(*types.Block)
   589  	}
   590  	block := rawdb.ReadBlock(bc.db, hash, bc.ShardId(), number)
   591  	if block == nil {
   592  		return nil
   593  	}
   594  	// Cache the found block for next time and return
   595  	bc.blockCache.Add(block.Hash(), block)
   596  	return block
   597  }
   598  
   599  // GetBlockByHash retrieves a block from the database by hash, caching it if found.
   600  func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
   601  	number := bc.hc.GetBlockNumber(hash)
   602  	if number == nil {
   603  		return nil
   604  	}
   605  	return bc.GetBlock(hash, *number)
   606  }
   607  
   608  // GetBlockByNumber retrieves a block from the database by number, caching it
   609  // (associated with its hash) if found.
   610  func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
   611  	hash := rawdb.ReadCanonicalHash(bc.db, bc.ShardId(), number)
   612  	if hash == (common.Hash{}) {
   613  		return nil
   614  	}
   615  	return bc.GetBlock(hash, number)
   616  }
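
        // Illustrative contrast, not from the original source: GetBlockByHash can return a
        // block from any chain stored for this shard, whereas GetBlockByNumber only answers
        // for the canonical chain ("sideBlockHash" below is a hypothetical hash).
        //
        //	canonical := bc.GetBlockByNumber(100)        // nil unless block #100 is canonical
        //	anyChain := bc.GetBlockByHash(sideBlockHash) // may be a side-chain block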
   617  
   618  // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
   619  func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.ContractResults {
   620  	shardId, number := rawdb.ReadHeaderNumber(bc.db, hash)
   621  	if number == nil {
   622  		return nil
   623  	}
   624  	return rawdb.ReadReceipts(bc.db, hash, shardId, *number)
   625  }
   626  
   627  // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
   628  // [deprecated by eth/62]
   629  func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
   630  	number := bc.hc.GetBlockNumber(hash)
   631  	if number == nil {
   632  		return nil
   633  	}
   634  	for i := 0; i < n; i++ {
   635  		block := bc.GetBlock(hash, *number)
   636  		if block == nil {
   637  			break
   638  		}
   639  		blocks = append(blocks, block)
   640  		hash = block.ParentHash()
   641  		*number--
   642  	}
   643  	return
   644  }
   645  
   646  // GetUnclesInChain retrieves all the uncles from a given block backwards until
   647  // a specific distance is reached.
   648  /*func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
   649  	uncles := []*types.Header{}
   650  	for i := 0; block != nil && i < length; i++ {
   651  		uncles = append(uncles, block.Uncles()...)
   652  		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
   653  	}
   654  	return uncles
   655  }*/
   656  
   657  // TrieNode retrieves a blob of data associated with a trie node (or code hash)
   658  // either from ephemeral in-memory cache, or from persistent storage.
   659  func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
   660  	return bc.stateCache.TrieDB().Node(hash)
   661  }
   662  
   663  // Stop stops the blockchain service. If any imports are currently in progress
   664  // it will abort them using the procInterrupt.
   665  func (bc *BlockChain) Stop() {
   666  	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
   667  		return
   668  	}
   669  	// Unsubscribe all subscriptions registered from blockchain
   670  	bc.scope.Close()
   671  	close(bc.quit)
   672  	atomic.StoreInt32(&bc.procInterrupt, 1)
   673  
   674  	bc.wg.Wait()
   675  
   676  	// Ensure the state of a recent block is also stored to disk before exiting.
   677  	// We're writing three different states to catch different restart scenarios:
   678  	//  - HEAD:     So we don't need to reprocess any blocks in the general case
   679  	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
   680  	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
   681  	if !bc.cacheConfig.Disabled {
   682  		triedb := bc.stateCache.TrieDB()
   683  
   684  		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
   685  			if number := bc.CurrentBlock().NumberU64(); number > offset {
   686  				recent := bc.GetBlockByNumber(number - offset)
   687  
   688  				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
   689  				if err := triedb.Commit(recent.Root(), true); err != nil {
   690  					log.Error("Failed to commit recent state trie", "err", err)
   691  				}
   692  			}
   693  		}
   694  		for !bc.triegc.Empty() {
   695  			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
   696  		}
   697  		if size, _ := triedb.Size(); size != 0 {
   698  			log.Error("Dangling trie nodes after full cleanup")
   699  		}
   700  	}
   701  	log.Info("Blockchain manager stopped")
   702  }
   703  
   704  func (bc *BlockChain) procFutureBlocks() {
   705  	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
   706  	for _, hash := range bc.futureBlocks.Keys() {
   707  		if block, exist := bc.futureBlocks.Peek(hash); exist {
   708  			blocks = append(blocks, block.(*types.Block))
   709  		}
   710  	}
   711  	if len(blocks) > 0 {
   712  		types.BlockBy(types.Number).Sort(blocks)
   713  
   714  		// Insert one by one as chain insertion needs contiguous ancestry between blocks
   715  		for i := range blocks {
   716  			bc.InsertChain(blocks[i : i+1])
   717  		}
   718  	}
   719  }
   720  
   721  // WriteStatus status of write
   722  type WriteStatus byte
   723  
   724  const (
   725  	NonStatTy WriteStatus = iota
   726  	CanonStatTy
   727  	SideStatTy
   728  )
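
        // A hedged sketch of how a caller typically branches on the returned WriteStatus;
        // it mirrors the switch in insertChain below and the variable names are illustrative.
        //
        //	status, err := bc.WriteBlockWithState(block, receipts, statedb)
        //	switch {
        //	case err != nil:
        //		// abort the import
        //	case status == CanonStatTy:
        //		// the block extended the canonical chain
        //	case status == SideStatTy:
        //		// the block was stored on a side fork only
        //	}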
   729  
   730  // Rollback is designed to remove a chain of links from the database that aren't
   731  // certain enough to be valid.
   732  func (bc *BlockChain) Rollback(chain []common.Hash) {
   733  	bc.mu.Lock()
   734  	defer bc.mu.Unlock()
   735  
   736  	for i := len(chain) - 1; i >= 0; i-- {
   737  		hash := chain[i]
   738  
   739  		currentHeader := bc.hc.CurrentHeader()
   740  		if currentHeader.Hash() == hash {
   741  			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
   742  		}
   743  		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
   744  			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
   745  			bc.currentFastBlock.Store(newFastBlock)
   746  			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash(), bc.ShardId())
   747  		}
   748  		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
   749  			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
   750  			bc.currentBlock.Store(newBlock)
   751  			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash(), bc.ShardId())
   752  		}
   753  	}
   754  }
   755  
   756  // SetReceiptsData computes all the non-consensus fields of the receipts
   757  func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.ContractResults) error {
   758  	//signer := types.MakeSigner(config, block.Number())
   759  
   760  	transactions := block.Transactions()
   761  	if len(transactions) != len(receipts) {
   762  		return errors.New("transaction and receipt count mismatch")
   763  	}
   764  
   765  	for j := 0; j < len(receipts); j++ {
   766  		// The transaction hash can be retrieved from the transaction itself
   767  		receipts[j].TxHash = transactions[j].Hash()
   768  
   769  
   770  		////MUST TODO refine receipts
   771  		// The contract address can be derived from the transaction itself
   772  		/*if transactions[j].To() == nil {
   773  			// Deriving the signer is expensive, only do if it's actually needed
   774  			from, _ := types.Sender(signer, transactions[j])
   775  			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
   776  		}
   777  		// The used gas can be calculated based on previous receipts
   778  		if j == 0 {
   779  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
   780  		} else {
   781  			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
   782  		}
   783  		// The derived log fields can simply be set from the block and transaction
   784  		for k := 0; k < len(receipts[j].Logs); k++ {
   785  			receipts[j].Logs[k].BlockNumber = block.NumberU64()
   786  			receipts[j].Logs[k].BlockHash = block.Hash()
   787  			receipts[j].Logs[k].TxHash = receipts[j].TxHash
   788  			receipts[j].Logs[k].TxIndex = uint(j)
   789  			receipts[j].Logs[k].Index = logIndex
   790  			logIndex++
   791  		}*/
   792  	}
   793  	return nil
   794  }
   795  
   796  // InsertReceiptChain attempts to complete an already existing header chain with
   797  // transaction and receipt data.
   798  func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.ContractResults) (int, error) {
   799  	bc.wg.Add(1)
   800  	defer bc.wg.Done()
   801  
   802  	// Do a sanity check that the provided chain is actually ordered and linked
   803  	for i := 1; i < len(blockChain); i++ {
   804  		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
   805  			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
   806  				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
   807  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
   808  				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
   809  		}
   810  	}
   811  
   812  	var (
   813  		stats = struct{ processed, ignored int32 }{}
   814  		start = time.Now()
   815  		bytes = 0
   816  		batch = bc.db.NewBatch()
   817  	)
   818  	for i, block := range blockChain {
   819  		receipts := receiptChain[i]
   820  		// Short circuit insertion if shutting down or processing failed
   821  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
   822  			return 0, nil
   823  		}
   824  		// Short circuit if the owner header is unknown
   825  		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
   826  			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
   827  		}
   828  		// Skip if the entire data is already known
   829  		if bc.HasBlock(block.Hash(), block.NumberU64()) {
   830  			stats.ignored++
   831  			continue
   832  		}
   833  		// Compute all the non-consensus fields of the receipts
   834  		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
   835  			return i, fmt.Errorf("failed to set receipts data: %v", err)
   836  		}
   837  		// Write all the data out into the database
   838  		shardId := block.Header().ShardId
   839  		rawdb.WriteBody(batch, block.Hash(), shardId, block.NumberU64(), block.Body())
   840  		rawdb.WriteReceipts(batch, block.Hash(), shardId, block.NumberU64(), receipts)
   841  		rawdb.WriteTxLookupEntries(batch, block)
   842  
   843  		stats.processed++
   844  
   845  		if batch.ValueSize() >= ethdb.IdealBatchSize {
   846  			if err := batch.Write(); err != nil {
   847  				return 0, err
   848  			}
   849  			bytes += batch.ValueSize()
   850  			batch.Reset()
   851  		}
   852  	}
   853  	if batch.ValueSize() > 0 {
   854  		bytes += batch.ValueSize()
   855  		if err := batch.Write(); err != nil {
   856  			return 0, err
   857  		}
   858  	}
   859  
   860  	// Update the head fast sync block if better
   861  	bc.mu.Lock()
   862  	head := blockChain[len(blockChain)-1]
   863  	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
   864  		currentFastBlock := bc.CurrentFastBlock()
   865  		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
   866  			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash(), bc.ShardId())
   867  			bc.currentFastBlock.Store(head)
   868  		}
   869  	}
   870  	bc.mu.Unlock()
   871  
   872  	log.Info("Imported new block receipts",
   873  		"count", stats.processed,
   874  		"elapsed", common.PrettyDuration(time.Since(start)),
   875  		"number", head.Number(),
   876  		"hash", head.Hash(),
   877  		"size", common.StorageSize(bytes),
   878  		"ignored", stats.ignored)
   879  	return 0, nil
   880  }
   881  
   882  var lastWrite uint64
   883  
   884  // WriteBlockWithoutState writes only the block and its metadata to the database,
   885  // but does not write any state. This is used to construct competing side forks
   886  // up to the point where they exceed the canonical total difficulty.
   887  func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
   888  	bc.wg.Add(1)
   889  	defer bc.wg.Done()
   890  
   891  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
   892  		return err
   893  	}
   894  	rawdb.WriteBlock(bc.db, block)
   895  
   896  	return nil
   897  }
   898  
   899  // WriteBlockWithState writes the block and all associated state to the database.
   900  func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.ContractResult, state *state.StateDB) (status WriteStatus, err error) {
   901  	bc.wg.Add(1)
   902  	defer bc.wg.Done()
   903  
   904  	// Calculate the total difficulty of the block
   905  	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
   906  	if ptd == nil {
   907  		return NonStatTy, consensus.ErrUnknownAncestor
   908  	}
   909  	// Make sure no inconsistent state is leaked during insertion
   910  	bc.mu.Lock()
   911  	defer bc.mu.Unlock()
   912  
   913  	currentBlock := bc.CurrentBlock()
   914  	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   915  	externTd := new(big.Int).Add(block.Difficulty(), ptd)
   916  
   917  	// Irrelevant of the canonical status, write the block itself to the database
   918  	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
   919  		return NonStatTy, err
   920  	}
   921  	rawdb.WriteBlock(bc.db, block)
   922  
   923  	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
   924  	if err != nil {
   925  		return NonStatTy, err
   926  	}
   927  	triedb := bc.stateCache.TrieDB()
   928  
   929  	// If we're running an archive node, always flush
   930  	if bc.cacheConfig.Disabled {
   931  		if err := triedb.Commit(root, false); err != nil {
   932  			return NonStatTy, err
   933  		}
   934  	} else {
   935  		// Full but not archive node, do proper garbage collection
   936  		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
   937  		bc.triegc.Push(root, -int64(block.NumberU64()))
   938  
   939  		if current := block.NumberU64(); current > triesInMemory {
   940  			// If we exceeded our memory allowance, flush matured singleton nodes to disk
   941  			var (
   942  				nodes, imgs = triedb.Size()
   943  				limit       = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
   944  			)
   945  			if nodes > limit || imgs > 4*1024*1024 {
   946  				triedb.Cap(limit - ethdb.IdealBatchSize)
   947  			}
   948  			// Find the next state trie we need to commit
   949  			header := bc.GetHeaderByNumber(current - triesInMemory)
   950  			chosen := header.Number.Uint64()
   951  
   952  			// If we exceeded our time allowance, flush an entire trie to disk
   953  			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
   954  				// If we're exceeding limits but haven't reached a large enough memory gap,
   955  				// warn the user that the system is becoming unstable.
   956  				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
   957  					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
   958  				}
   959  				// Flush an entire trie and restart the counters
   960  				triedb.Commit(header.Root, true)
   961  				lastWrite = chosen
   962  				bc.gcproc = 0
   963  			}
   964  			// Garbage collect anything below our required write retention
   965  			for !bc.triegc.Empty() {
   966  				root, number := bc.triegc.Pop()
   967  				if uint64(-number) > chosen {
   968  					bc.triegc.Push(root, number)
   969  					break
   970  				}
   971  				triedb.Dereference(root.(common.Hash))
   972  			}
   973  		}
   974  	}
   975  
   976  	// Write other block data using a batch.
   977  	batch := bc.db.NewBatch()
   978  	rawdb.WriteReceipts(batch, block.Hash(), block.ShardId(), block.NumberU64(), receipts)
   979  
   980  	// If the total difficulty is higher than our known, add it to the canonical chain
   981  	// Second clause in the if statement reduces the vulnerability to selfish mining.
   982  	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
   983  	reorg := externTd.Cmp(localTd) > 0
   984  	currentBlock = bc.CurrentBlock()
   985  	// on equal total difficulty, prefer the lower-numbered block, otherwise reorg with 50% probability
   986  	if !reorg && externTd.Cmp(localTd) == 0 {
   987  		// Split same-difficulty blocks by number, then at random
   988  		reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
   989  	}
   990  	if reorg {
   991  		// Reorganise the chain if the parent is not the head block
   992  		if block.ParentHash() != currentBlock.Hash() {
   993  			if err := bc.reorg(currentBlock, block); err != nil {
   994  				return NonStatTy, err
   995  			}
   996  		}
   997  		// Write the positional metadata for transaction/receipt lookups and preimages
   998  		rawdb.WriteTxLookupEntries(batch, block)
   999  		rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
  1000  
  1001  		status = CanonStatTy
  1002  	} else {
  1003  		//actually uncle blocks can be discarded immediately now
  1004  		status = SideStatTy
  1005  	}
  1006  	if err := batch.Write(); err != nil {
  1007  		return NonStatTy, err
  1008  	}
  1009  
  1010  	// Set new head.
  1011  	if status == CanonStatTy {
  1012  		bc.insert(block)
  1013  	}
  1014  	bc.futureBlocks.Remove(block.Hash())
  1015  	return status, nil
  1016  }
  1017  
  1018  // InsertChain attempts to insert the given batch of blocks into the canonical
  1019  // chain or, otherwise, create a fork. If an error is returned it will return
  1020  // the index number of the failing block as well as an error describing what
  1021  // went wrong.
  1022  //
  1023  // After insertion is done, all accumulated events will be fired.
  1024  func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
  1025  	n, events, logs, err := bc.insertChain(chain)
  1026  	bc.PostChainEvents(events, logs)
  1027  	return n, err
  1028  }
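
        // Usage sketch (not part of the original file; "blocks" is assumed to be an already
        // ordered, linked types.Blocks slice). On failure the returned index identifies the
        // offending block.
        //
        //	if i, err := bc.InsertChain(blocks); err != nil {
        //		log.Error("Chain import failed", "index", i, "hash", blocks[i].Hash(), "err", err)
        //	}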
  1029  
  1030  // insertChain will execute the actual chain insertion and event aggregation. The
  1031  // only reason this method exists as a separate one is to make locking cleaner
  1032  // with deferred statements.
  1033  func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
  1034  	// Sanity check that we have something meaningful to import
  1035  	if len(chain) == 0 {
  1036  		return 0, nil, nil, nil
  1037  	}
  1038  	// Do a sanity check that the provided chain is actually ordered and linked
  1039  	for i := 1; i < len(chain); i++ {
  1040  		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
  1041  			// Chain broke ancestry, log a message (programming error) and skip insertion
  1042  			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
  1043  				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
  1044  
  1045  			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
  1046  				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
  1047  		}
  1048  	}
  1049  	// Pre-checks passed, start the full block imports
  1050  	bc.wg.Add(1)
  1051  	defer bc.wg.Done()
  1052  
  1053  	bc.chainmu.Lock()
  1054  	defer bc.chainmu.Unlock()
  1055  
  1056  	// A queued approach to delivering events. This is generally
  1057  	// faster than direct delivery and requires much less mutex
  1058  	// acquiring.
  1059  	var (
  1060  		stats         = insertStats{startTime: mclock.Now()}
  1061  		events        = make([]interface{}, 0, len(chain))
  1062  		lastCanon     *types.Block
  1063  		coalescedLogs []*types.Log
  1064  	)
  1065  	// Start the parallel header verifier
  1066  	headers := make([]*types.Header, len(chain))
  1067  	seals := make([]bool, len(chain))
  1068  
  1069  	for i, block := range chain {
  1070  		headers[i] = block.Header()
  1071  		seals[i] = true
  1072  	}
  1073  	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1074  	defer close(abort)
  1075  
  1076  	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1077  	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1078  
  1079  	// Iterate over the blocks and insert when the verifier permits
  1080  	for i, block := range chain {
  1081  		// If the chain is terminating, stop processing blocks
  1082  		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1083  			log.Debug("Premature abort during blocks processing")
  1084  			break
  1085  		}
  1086  		// If the header is a banned one, straight out abort
  1087  		if BadHashes[block.Hash()] {
  1088  			bc.reportBlock(block, nil, ErrBlacklistedHash)
  1089  			return i, events, coalescedLogs, ErrBlacklistedHash
  1090  		}
  1091  		// Wait for the block's verification to complete
  1092  		bstart := time.Now()
  1093  
  1094  		err := <-results
  1095  		if err == nil {
  1096  			err = bc.Validator().ValidateBody(block)
  1097  		}
  1098  		switch {
  1099  		case err == ErrKnownBlock:
  1100  			// Block and state both already known. However if the current block is below
  1101  			// this number we did a rollback and we should reimport it nonetheless.
  1102  			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
  1103  				stats.ignored++
  1104  				continue
  1105  			}
  1106  
  1107  		case err == consensus.ErrFutureBlock:
  1108  			// Allow blocks up to maxTimeFutureBlocks seconds in the future; such blocks are
  1109  			// queued for later processing, anything further aborts the import with an error.
  1110  			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  1111  			if block.Time().Cmp(max) > 0 {
  1112  				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  1113  			}
  1114  			bc.futureBlocks.Add(block.Hash(), block)
  1115  			stats.queued++
  1116  			continue
  1117  
  1118  		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1119  			bc.futureBlocks.Add(block.Hash(), block)
  1120  			stats.queued++
  1121  			continue
  1122  
  1123  		case err == consensus.ErrPrunedAncestor:
  1124  			// Block competing with the canonical chain, store in the db, but don't process
  1125  			// until the competitor TD goes above the canonical TD
  1126  			currentBlock := bc.CurrentBlock()
  1127  			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1128  			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1129  			if localTd.Cmp(externTd) > 0 {
  1130  				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1131  					return i, events, coalescedLogs, err
  1132  				}
  1133  				continue
  1134  			}
  1135  			// Competitor chain beat canonical, gather all blocks from the common ancestor
  1136  			var winner []*types.Block
  1137  
  1138  			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1139  			for !bc.HasState(parent.Root()) {
  1140  				winner = append(winner, parent)
  1141  				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1142  			}
  1143  			for j := 0; j < len(winner)/2; j++ {
  1144  				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1145  			}
  1146  			// Import all the pruned blocks to make the state available
  1147  			bc.chainmu.Unlock()
  1148  			_, evs, logs, err := bc.insertChain(winner)
  1149  			bc.chainmu.Lock()
  1150  			events, coalescedLogs = evs, logs
  1151  
  1152  			if err != nil {
  1153  				return i, events, coalescedLogs, err
  1154  			}
  1155  
  1156  		case err != nil:
  1157  			bc.reportBlock(block, nil, err)
  1158  			return i, events, coalescedLogs, err
  1159  		}
  1160  		// Create a new statedb using the parent block and report an
  1161  		// error if it fails.
  1162  		var parent *types.Block
  1163  		if i == 0 {
  1164  			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1165  		} else {
  1166  			parent = chain[i-1]
  1167  		}
  1168  		state, err := state.New(parent.Root(), bc.stateCache)
  1169  		if err != nil {
  1170  			return i, events, coalescedLogs, err
  1171  		}
  1172  		// Process block using the parent state as reference point.
  1173  		receipts, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1174  		if err != nil {
  1175  			bc.reportBlock(block, receipts, err)
  1176  			return i, events, coalescedLogs, err
  1177  		}
  1178  		// Validate the state using the default validator
  1179  		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1180  		if err != nil {
  1181  			bc.reportBlock(block, receipts, err)
  1182  			return i, events, coalescedLogs, err
  1183  		}
  1184  		proctime := time.Since(bstart)
  1185  
  1186  		// Write the block to the chain and get the status.
  1187  		status, err := bc.WriteBlockWithState(block, receipts, state)
  1188  		if err != nil {
  1189  			return i, events, coalescedLogs, err
  1190  		}
  1191  		switch status {
  1192  		case CanonStatTy:
  1193  			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), /*"uncles", len(block.Uncles()),*/
  1194  				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1195  
  1196  		//	coalescedLogs = append(coalescedLogs, logs...)
  1197  			blockInsertTimer.UpdateSince(bstart)
  1198  			events = append(events, ChainEvent{block, block.Hash()})
  1199  			lastCanon = block
  1200  
  1201  			// Only count canonical blocks for GC processing time
  1202  			bc.gcproc += proctime
  1203  
  1204  	/*	case SideStatTy:
  1205  			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1206  				common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed() /*, "uncles", len(block.Uncles()))*/
  1207  
  1208  			/*blockInsertTimer.UpdateSince(bstart)
  1209  			events = append(events, ChainSideEvent{block})*/
  1210  		}
  1211  		stats.processed++
  1212  		stats.usedGas += usedGas
  1213  
  1214  		cache, _ := bc.stateCache.TrieDB().Size()
  1215  		stats.report(chain, i, cache)
  1216  	}
  1217  	// Append a single chain head event if we've progressed the chain
  1218  	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1219  		events = append(events, ChainHeadEvent{lastCanon})
  1220  	}
  1221  	return 0, events, coalescedLogs, nil
  1222  }
  1223  
  1224  // insertStats tracks and reports on block insertion.
  1225  type insertStats struct {
  1226  	queued, processed, ignored int
  1227  	usedGas                    uint64
  1228  	lastIndex                  int
  1229  	startTime                  mclock.AbsTime
  1230  }
  1231  
  1232  // statsReportLimit is the time limit during import and export after which we
  1233  // always print out progress. This avoids the user wondering what's going on.
  1234  const statsReportLimit = 8 * time.Second
  1235  
  1236  // report prints statistics if some number of blocks have been processed
  1237  // or more than a few seconds have passed since the last message.
  1238  func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
  1239  	// Fetch the timings for the batch
  1240  	var (
  1241  		now     = mclock.Now()
  1242  		elapsed = time.Duration(now) - time.Duration(st.startTime)
  1243  	)
  1244  	// If we're at the last block of the batch or report period reached, log
  1245  	if index == len(chain)-1 || elapsed >= statsReportLimit {
  1246  		var (
  1247  			end = chain[index]
  1248  			txs = countTransactions(chain[st.lastIndex : index+1])
  1249  		)
  1250  		context := []interface{}{
  1251  			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
  1252  			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
  1253  			"number", end.Number(), "hash", end.Hash(), "cache", cache,
  1254  		}
  1255  		if st.queued > 0 {
  1256  			context = append(context, []interface{}{"queued", st.queued}...)
  1257  		}
  1258  		if st.ignored > 0 {
  1259  			context = append(context, []interface{}{"ignored", st.ignored}...)
  1260  		}
  1261  		log.Info("Imported new chain segment", context...)
  1262  
  1263  		*st = insertStats{startTime: now, lastIndex: index + 1}
  1264  	}
  1265  }
  1266  
  1267  func countTransactions(chain []*types.Block) (c int) {
  1268  	for _, b := range chain {
  1269  		c += len(b.Transactions())
  1270  	}
  1271  	return c
  1272  }
  1273  
  1274  // reorg takes two blocks, an old chain and a new chain, and reconstructs the blocks and inserts them
  1275  // to be part of the new canonical chain, accumulating potential missing transactions and posting an
  1276  // event about them.
  1277  func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1278  	var (
  1279  		newChain    types.Blocks
  1280  		oldChain    types.Blocks
  1281  		commonBlock *types.Block
  1282  		deletedTxs  types.Transactions
  1283  		deletedLogs []*types.Log
  1284  		// collectLogs collects the logs that were generated during the
  1285  		// processing of the block that corresponds with the given hash.
  1286  		// These logs are later announced as deleted.
  1287  		collectLogs = func(hash common.Hash) {
  1288  			// Coalesce logs and set 'Removed'.
  1289  			number := bc.hc.GetBlockNumber(hash)
  1290  			if number == nil {
  1291  				return
  1292  			}
  1293  
  1294  			// MUST TODO: delete the removed receipts/logs from the db (the receipt lookup below is currently disabled)
  1295  			/*receipts := rawdb.ReadReceipts(bc.db, hash, bc.ShardId(), *number)
  1296  			for _, receipt := range receipts {
  1297  				for _, log := range receipt.Logs {
  1298  					del := *log
  1299  					del.Removed = true
  1300  					deletedLogs = append(deletedLogs, &del)
  1301  				}
  1302  			}*/
  1303  		}
  1304  	)
  1305  
  1306  	// First reduce whichever chain is longer down to the height of the other
  1307  	if oldBlock.NumberU64() > newBlock.NumberU64() {
  1308  		// reduce old chain
  1309  		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1310  			oldChain = append(oldChain, oldBlock)
  1311  			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1312  
  1313  			collectLogs(oldBlock.Hash())
  1314  		}
  1315  	} else {
  1316  		// reduce new chain and append new chain blocks for inserting later on
  1317  		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1318  			newChain = append(newChain, newBlock)
  1319  		}
  1320  	}
  1321  	if oldBlock == nil {
  1322  		return errors.New("invalid old chain")
  1323  	}
  1324  	if newBlock == nil {
  1325  		return errors.New("invalid new chain")
  1326  	}
  1327  
  1328  	for {
  1329  		if oldBlock.Hash() == newBlock.Hash() {
  1330  			commonBlock = oldBlock
  1331  			break
  1332  		}
  1333  
  1334  		oldChain = append(oldChain, oldBlock)
  1335  		newChain = append(newChain, newBlock)
  1336  		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1337  		collectLogs(oldBlock.Hash())
  1338  
  1339  		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1340  		if oldBlock == nil {
  1341  			return errors.New("invalid old chain")
  1342  		}
  1343  		if newBlock == nil {
  1344  			return errors.New("invalid new chain")
  1345  		}
  1346  	}
  1347  	// Ensure the user sees large reorgs
  1348  	if len(oldChain) > 0 && len(newChain) > 0 {
  1349  		logFn := log.Debug
  1350  		if len(oldChain) > 63 {
  1351  			logFn = log.Warn
  1352  		}
  1353  		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1354  			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1355  	} else {
  1356  		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1357  	}
  1358  	// Insert the new chain, taking care of the proper incremental order
  1359  	var addedTxs types.Transactions
  1360  	for i := len(newChain) - 1; i >= 0; i-- {
  1361  		// insert the block in the canonical way, re-writing history
  1362  		bc.insert(newChain[i])
  1363  		// write lookup entries for hash based transaction/receipt searches
  1364  		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1365  		addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1366  	}
  1367  	// calculate the difference between deleted and added transactions
  1368  	diff := types.TxDifference(deletedTxs, addedTxs)
  1369  	// When transactions get deleted from the database, the receipts that
  1370  	// were created in the fork must also be deleted
  1371  	batch := bc.db.NewBatch()
  1372  	for _, tx := range diff {
  1373  		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1374  	}
  1375  	batch.Write()
  1376  
  1377  	if len(deletedLogs) > 0 {
  1378  		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1379  	}
  1380  	if len(oldChain) > 0 {
  1381  		go func() {
  1382  			for _, block := range oldChain {
  1383  				bc.chainMasterFeed.Send(ChainSideEvent{Block: block})
  1384  			}
  1385  		}()
  1386  	}
  1387  
  1388  	return nil
  1389  }
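
// findCommonAncestorSketch is a hypothetical helper, added here purely as an
// illustration (it is not part of the original file): it shows, in simplified
// form, the fork-point search that reorg performs above. Both chains are first
// reduced to the same height and then walked back in lockstep until their
// hashes match. The real reorg also records the dropped blocks, transactions
// and logs while walking.
func findCommonAncestorSketch(bc *BlockChain, oldBlock, newBlock *types.Block) *types.Block {
	for oldBlock != nil && newBlock != nil {
		switch {
		case oldBlock.NumberU64() > newBlock.NumberU64():
			// The old chain is longer, step it back first.
			oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
		case newBlock.NumberU64() > oldBlock.NumberU64():
			// The new chain is longer, step it back first.
			newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		case oldBlock.Hash() == newBlock.Hash():
			// Same height, same hash: this is the common ancestor.
			return oldBlock
		default:
			// Same height, different hashes: step both chains back together.
			oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
			newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		}
	}
	return nil
}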
  1390  
  1391  // PostChainEvents iterates over the events generated by a chain insertion and
  1392  // posts them into the event feed.
  1393  // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1394  func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1395  	// post event logs for further processing
  1396  	if logs != nil {
  1397  		bc.logsFeed.Send(logs)
  1398  	}
  1399  	for _, event := range events {
  1400  		switch ev := event.(type) {
  1401  		case ChainEvent:
  1402  			bc.chainFeed.Send(ev)
  1403  
  1404  		case ChainHeadEvent:
  1405  			bc.chainHeadFeed.Send(ev)
  1406  
  1407  		// case ChainSideEvent:
  1408  		// 	bc.chainSideFeed.Send(ev)
  1409  		}
  1410  	}
  1411  }
  1412  
  1413  func (bc *BlockChain) update() {
  1414  	futureTimer := time.NewTicker(5 * time.Second)
  1415  	defer futureTimer.Stop()
  1416  	for {
  1417  		select {
  1418  		case <-futureTimer.C:
  1419  			bc.procFutureBlocks()
  1420  		case <-bc.quit:
  1421  			return
  1422  		}
  1423  	}
  1424  }
  1425  
  1426  // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1427  func (bc *BlockChain) BadBlocks() []*types.Block {
  1428  	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1429  	for _, hash := range bc.badBlocks.Keys() {
  1430  		if blk, exist := bc.badBlocks.Peek(hash); exist {
  1431  			block := blk.(*types.Block)
  1432  			blocks = append(blocks, block)
  1433  		}
  1434  	}
  1435  	return blocks
  1436  }
  1437  
  1438  // addBadBlock adds a bad block to the bad-block LRU cache
  1439  func (bc *BlockChain) addBadBlock(block *types.Block) {
  1440  	bc.badBlocks.Add(block.Hash(), block)
  1441  }
  1442  
  1443  // reportBlock logs a bad block error.
  1444  func (bc *BlockChain) reportBlock(block *types.Block, receipts types.ContractResults, err error) {
  1445  	bc.addBadBlock(block)
  1446  
  1447  	var receiptString string
  1448  	for _, receipt := range receipts {
  1449  		receiptString += fmt.Sprintf("\t%v\n", receipt)
  1450  	}
  1451  	log.Error(fmt.Sprintf(`
  1452  ########## BAD BLOCK #########
  1453  Chain config: %v
  1454  
  1455  Number: %v
  1456  Hash: 0x%x
  1457  %v
  1458  
  1459  Error: %v
  1460  ##############################
  1461  `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1462  }
  1463  
  1464  // InsertHeaderChain attempts to insert the given header chain into the local
  1465  // chain, possibly creating a reorg. If an error is returned, it will return the
  1466  // index number of the failing header as well as an error describing what went wrong.
  1467  //
  1468  // The checkFreq parameter can be used to fine-tune whether nonce verification
  1469  // should be done or not. The reason behind the optional check is that some
  1470  // of the header retrieval mechanisms already need to verify nonces, and
  1471  // nonces can be verified sparsely, without needing to check each one.
  1472  func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1473  	start := time.Now()
  1474  	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1475  		return i, err
  1476  	}
  1477  
  1478  	// Make sure only one thread manipulates the chain at once
  1479  	bc.chainmu.Lock()
  1480  	defer bc.chainmu.Unlock()
  1481  
  1482  	bc.wg.Add(1)
  1483  	defer bc.wg.Done()
  1484  
  1485  	whFunc := func(header *types.Header) error {
  1486  		bc.mu.Lock()
  1487  		defer bc.mu.Unlock()
  1488  
  1489  		_, err := bc.hc.WriteHeader(header)
  1490  		return err
  1491  	}
  1492  
  1493  	return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1494  }
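
// insertHeadersSketch is a hypothetical usage example (not part of the original
// file) of InsertHeaderChain on a header-only import path. checkFreq = 1, i.e.
// verify every header, is an assumption chosen for clarity; real callers pick a
// sparser frequency when nonces are already verified elsewhere.
func insertHeadersSketch(bc *BlockChain, headers []*types.Header) error {
	if failed, err := bc.InsertHeaderChain(headers, 1); err != nil {
		log.Error("Header chain import failed", "index", failed, "err", err)
		return err
	}
	return nil
}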
  1495  
  1496  // writeHeader writes a header into the local chain, given that its parent is
  1497  // already known. If the total difficulty of the newly inserted header becomes
  1498  // greater than the current known TD, the canonical chain is re-routed.
  1499  //
  1500  // Note: This method is not concurrent-safe with inserting blocks simultaneously
  1501  // into the chain, as side effects caused by reorganisations cannot be emulated
  1502  // without the real blocks. Hence, writing headers directly should only be done
  1503  // in two scenarios: pure-header mode of operation (light clients), or properly
  1504  // separated header/block phases (non-archive clients).
  1505  func (bc *BlockChain) writeHeader(header *types.Header) error {
  1506  	bc.wg.Add(1)
  1507  	defer bc.wg.Done()
  1508  
  1509  	bc.mu.Lock()
  1510  	defer bc.mu.Unlock()
  1511  
  1512  	_, err := bc.hc.WriteHeader(header)
  1513  	return err
  1514  }
  1515  
  1516  // CurrentHeader retrieves the current head header of the canonical chain. The
  1517  // header is retrieved from the HeaderChain's internal cache.
  1518  func (bc *BlockChain) CurrentHeader() *types.Header {
  1519  	return bc.hc.CurrentHeader()
  1520  }
  1521  
  1522  // GetTd retrieves a block's total difficulty in the canonical chain from the
  1523  // database by hash and number, caching it if found.
  1524  func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1525  	return bc.hc.GetTd(hash, number)
  1526  }
  1527  
  1528  // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1529  // database by hash, caching it if found.
  1530  func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1531  	return bc.hc.GetTdByHash(hash)
  1532  }
  1533  
  1534  // GetHeader retrieves a block header from the database by hash and number,
  1535  // caching it if found.
  1536  func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1537  	return bc.hc.GetHeader(hash, number)
  1538  }
  1539  
  1540  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1541  // found.
  1542  func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1543  	return bc.hc.GetHeaderByHash(hash)
  1544  }
  1545  
  1546  // HasHeader checks if a block header is present in the database or not, caching
  1547  // it if present.
  1548  func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1549  	return bc.hc.HasHeader(hash, number)
  1550  }
  1551  
  1552  // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1553  // hash, fetching towards the genesis block.
  1554  func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1555  	return bc.hc.GetBlockHashesFromHash(hash, max)
  1556  }
  1557  
  1558  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  1559  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  1560  // number of blocks to be individually checked before we reach the canonical chain.
  1561  //
  1562  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
  1563  func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  1564  	bc.chainmu.Lock()
  1565  	defer bc.chainmu.Unlock()
  1566  
  1567  	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  1568  }
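
// ancestorLookupSketch is a hypothetical example (not part of the original file)
// of using GetAncestor to resolve the block 'distance' generations behind the
// current head. The maxNonCanonical bound of 100 is an arbitrary value chosen
// for the sketch.
func ancestorLookupSketch(bc *BlockChain, distance uint64) (common.Hash, uint64) {
	head := bc.CurrentHeader()
	maxNonCanonical := uint64(100)
	return bc.GetAncestor(head.Hash(), head.Number.Uint64(), distance, &maxNonCanonical)
}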
  1569  
  1570  // GetHeaderByNumber retrieves a block header from the database by number,
  1571  // caching it (associated with its hash) if found.
  1572  func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1573  	return bc.hc.GetHeaderByNumber(number)
  1574  }
  1575  
  1576  // Config retrieves the blockchain's chain configuration.
  1577  func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1578  
  1579  // Engine retrieves the blockchain's consensus engine.
  1580  func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1581  
  1582  // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1583  func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1584  	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1585  }
  1586  
  1587  // SubscribeChainEvent registers a subscription of ChainEvent.
  1588  func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1589  	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1590  }
  1591  
  1592  // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1593  func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1594  	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1595  }
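
// watchChainHeadSketch is a hypothetical usage example (not part of the original
// file) of SubscribeChainHeadEvent. It assumes, as in upstream go-ethereum, that
// ChainHeadEvent carries the new head in its Block field; the channel buffer of
// 16 is an arbitrary choice.
func watchChainHeadSketch(bc *BlockChain) {
	ch := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(ch)
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-ch:
			log.Info("New canonical head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case err := <-sub.Err():
			log.Debug("Chain head subscription closed", "err", err)
			return
		}
	}
}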
  1596  
  1597  // MUST TODO: wire chainSideFeed up to the master block feed.
  1598  // SubscribeChainSideEvent registers a subscription of ChainSideEvent; the side event feed is currently disabled, so this returns nil.
  1599  func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1600  	//return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1601  	return nil
  1602  }
  1603  
  1604  // SubscribeLogsEvent registers a subscription of []*types.Log.
  1605  func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1606  	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1607  }
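
// watchLogsSketch is a hypothetical usage example (not part of the original
// file) of SubscribeLogsEvent; it simply reports how many logs each imported
// batch produced. The channel buffer of 16 is an arbitrary choice.
func watchLogsSketch(bc *BlockChain) {
	ch := make(chan []*types.Log, 16)
	sub := bc.SubscribeLogsEvent(ch)
	defer sub.Unsubscribe()
	for {
		select {
		case logs := <-ch:
			log.Info("New logs received", "count", len(logs))
		case <-sub.Err():
			return
		}
	}
}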