github.com/dominant-strategies/go-quai@v0.28.2/core/core.go

package core

import (
	"context"
	"errors"
	"io"
	"math/big"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/common/math"
	"github.com/dominant-strategies/go-quai/consensus"
	"github.com/dominant-strategies/go-quai/core/rawdb"
	"github.com/dominant-strategies/go-quai/core/state"
	"github.com/dominant-strategies/go-quai/core/state/snapshot"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/core/vm"
	"github.com/dominant-strategies/go-quai/ethdb"
	"github.com/dominant-strategies/go-quai/event"
	"github.com/dominant-strategies/go-quai/log"
	"github.com/dominant-strategies/go-quai/params"
	"github.com/dominant-strategies/go-quai/rlp"
	"github.com/dominant-strategies/go-quai/trie"
	lru "github.com/hnlq715/golang-lru"
)

const (
	c_maxAppendQueue                           = 1000000 // Maximum number of future headers we can store in cache
	c_maxFutureTime                            = 30      // Max time into the future (in seconds) we will accept a block
	c_appendQueueRetryPeriod                   = 1       // Time (in seconds) before retrying to append from AppendQueue
	c_appendQueueThreshold                     = 200     // Number of blocks to load from the disk to ram on every proc of append queue
	c_processingCache                          = 10      // Number of block hashes held to prevent multiple simultaneous appends on a single block hash
	c_primeRetryThreshold                      = 1800    // Number of times a block append is retried before eviction from the append queue in Prime
	c_regionRetryThreshold                     = 1200    // Number of times a block append is retried before eviction from the append queue in Region
	c_zoneRetryThreshold                       = 600     // Number of times a block append is retried before eviction from the append queue in Zone
	c_maxFutureBlocksPrime              uint64 = 3       // Number of blocks ahead of the current block to be put in the hashNumberList
	c_maxFutureBlocksRegion             uint64 = 3
	c_maxFutureBlocksRegionAtFray       uint64 = 150
	c_maxFutureBlocksZone               uint64 = 200
	c_maxFutureBlocksZoneAtFray         uint64 = 2000
	c_appendQueueRetryPriorityThreshold        = 5  // If the retry counter for a block is below this number, it is put in the priority list, which is tried first
	c_appendQueueRemoveThreshold               = 10 // Number of blocks a queued block must be behind the current header to be eligible for removal from the append queue
	c_normalListProcCounter                    = 1  // Ratio of the number of times the priority list is serviced over the normal list
	c_statsPrintPeriod                         = 60 // Time between stats prints
	c_appendQueuePrintSize                     = 10
	c_badSyncTargetsSize                       = 20 // Size of the bad sync target hashes list
	c_badSyncTargetCheckTime                   = 15 * time.Minute
	c_normalListBackoffThreshold               = 5 // Max multiple on the c_normalListProcCounter
)

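// blockNumberAndRetryCounter pairs a queued block's height with the number of
// append attempts it has consumed so far.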
type blockNumberAndRetryCounter struct {
	number uint64
	retry  uint64
}

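// Core couples a Slice with its consensus engine and manages the append
// queue: blocks which cannot be appended yet are cached and retried on a
// timer until they either append or age out.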
type Core struct {
	sl     *Slice
	engine consensus.Engine

	appendQueue     *lru.Cache
	processingCache *lru.Cache

	badSyncTargets *lru.Cache
	prevSyncTarget common.Hash

	writeBlockLock sync.RWMutex

	procCounter int

	syncTarget *types.Header // sync target header, chosen from the best Prime block, used as the target to sync to

	normalListBackoff uint64 // normalListBackoff is the multiple on c_normalListProcCounter which delays the proc on the normal list

	quit chan struct{} // core quit channel
}

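// NewCore creates a new Core over a freshly constructed Slice and starts the
// background loops that service the append queue, print stats, and monitor
// the sync target.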
func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.Header) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, genesis *Genesis) (*Core, error) {
	slice, err := NewSlice(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, domClientUrl, subClientUrls, engine, cacheConfig, vmConfig, genesis)
	if err != nil {
		return nil, err
	}

	c := &Core{
		sl:                slice,
		engine:            engine,
		quit:              make(chan struct{}),
		procCounter:       0,
		normalListBackoff: 1,
	}

	// Initialize the sync target to the current header
	c.syncTarget = c.CurrentHeader()

	appendQueue, _ := lru.New(c_maxAppendQueue)
	c.appendQueue = appendQueue

	processingCache, _ := lru.NewWithExpire(c_processingCache, time.Second*60)
	c.processingCache = processingCache

	badSyncTargetsCache, _ := lru.New(c_badSyncTargetsSize)
	c.badSyncTargets = badSyncTargetsCache

	go c.updateAppendQueue()
	go c.startStatsTimer()
	go c.checkSyncTarget()
	return c, nil
}

// InsertChain attempts to append a list of blocks to the slice, optionally
// caching any pending blocks which cannot yet be appended. InsertChain returns
// the number of blocks which were successfully consumed (either appended or
// cached), and an error.
func (c *Core) InsertChain(blocks types.Blocks) (int, error) {
	nodeCtx := common.NodeLocation.Context()
	for idx, block := range blocks {
		// Only attempt to append a block if it is not coincident with our dominant
		// chain. If it is dom coincident, then the dom chain node in our slice needs
		// to initiate the append.
		_, order, err := c.CalcOrder(block.Header())
		if err != nil {
			return idx, err
		}

		if order == nodeCtx {
			if !c.processingCache.Contains(block.Hash()) {
				c.processingCache.Add(block.Hash(), 1)
			} else {
				log.Info("Already processing block:", "Number:", block.Header().NumberArray(), "Hash:", block.Hash())
				return idx, errors.New("Already in process of appending this block")
			}
			newPendingEtxs, _, _, err := c.sl.Append(block.Header(), types.EmptyHeader(), common.Hash{}, false, nil)
			c.processingCache.Remove(block.Hash())
			if err == nil {
				// If we have a dom, send the dom any pending ETXs which will become
				// referenceable by this block. When this block is referenced in the dom's
				// subordinate block manifest, then ETXs produced by this block and the rollup
				// of ETXs produced by subordinate chain(s) will become referenceable.
				if nodeCtx > common.PRIME_CTX {
					pendingEtx := types.PendingEtxs{block.Header(), newPendingEtxs}
					// Only send the pending ETXs to the dom if they are valid, because when running a slice,
					// the node cannot have generated the ETXs for the zones it does not run
					if pendingEtx.IsValid(trie.NewStackTrie(nil)) {
						if err := c.SendPendingEtxsToDom(pendingEtx); err != nil {
							log.Error("failed to send ETXs to domclient", "block: ", block.Hash(), "err", err)
						}
					}
				}
				c.removeFromAppendQueue(block)
			} else if err.Error() == consensus.ErrFutureBlock.Error() ||
				err.Error() == ErrBodyNotFound.Error() ||
				err.Error() == ErrPendingEtxNotFound.Error() ||
				err.Error() == consensus.ErrPrunedAncestor.Error() ||
				err.Error() == consensus.ErrUnknownAncestor.Error() ||
				err.Error() == ErrSubNotSyncedToDom.Error() ||
				err.Error() == ErrDomClientNotUp.Error() {
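				// These errors are transient: the block may become appendable once its
				// body, ancestors, or pending ETXs arrive, so it stays in the append
				// queue to be retried.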
				if c.sl.CurrentInfo(block.Header()) {
					log.Info("Cannot append yet.", "loc", common.NodeLocation.Name(), "number", block.Header().NumberArray(), "hash", block.Hash(), "err", err)
				} else {
					log.Debug("Cannot append yet.", "loc", common.NodeLocation.Name(), "number", block.Header().NumberArray(), "hash", block.Hash(), "err", err)
				}
				if err.Error() == ErrSubNotSyncedToDom.Error() ||
					err.Error() == ErrPendingEtxNotFound.Error() {
					if nodeCtx != common.ZONE_CTX && c.sl.subClients[block.Location().SubIndex()] != nil {
						c.sl.subClients[block.Location().SubIndex()].DownloadBlocksInManifest(context.Background(), block.Hash(), block.SubManifest(), block.ParentEntropy())
					}
				}
				return idx, ErrPendingBlock
			} else if err.Error() != ErrKnownBlock.Error() {
				log.Info("Append failed.", "hash", block.Hash(), "err", err)
			}
			if err != nil && strings.Contains(err.Error(), "connection refused") {
				log.Error("Append failed because of connection refused error")
			} else {
				c.removeFromAppendQueue(block)
			}
		}
	}
	return len(blocks), nil
}

// procAppendQueue sorts the append queue and attempts to append the blocks
func (c *Core) procAppendQueue() {
	nodeCtx := common.NodeLocation.Context()

	maxFutureBlocks := c_maxFutureBlocksPrime
	// If the sync point has been reached, increase maxFutureBlocks: near the
	// fray we can widen the scope, and the Region window is increased so the
	// fray syncs quickly
	if c.CurrentHeader() != nil && c.syncTarget != nil && c.CurrentHeader().NumberU64() >= c.syncTarget.NumberU64() {
		if nodeCtx == common.REGION_CTX {
			maxFutureBlocks = c_maxFutureBlocksRegionAtFray
		} else if nodeCtx == common.ZONE_CTX {
			maxFutureBlocks = c_maxFutureBlocksZoneAtFray
		}
	} else {
		if nodeCtx == common.REGION_CTX {
			maxFutureBlocks = c_maxFutureBlocksRegion
		} else if nodeCtx == common.ZONE_CTX {
			maxFutureBlocks = c_maxFutureBlocksZone
		}
	}

	// Sort the blocks by number and retry attempts and try to insert them;
	// blocks are aged out of the append queue after the retry threshold
	var hashNumberList []types.HashAndNumber
	var hashNumberPriorityList []types.HashAndNumber
	for _, hash := range c.appendQueue.Keys() {
		if value, exist := c.appendQueue.Peek(hash); exist {
			hashNumber := types.HashAndNumber{Hash: hash.(common.Hash), Number: value.(blockNumberAndRetryCounter).number}
			if hashNumber.Number < c.CurrentHeader().NumberU64()+maxFutureBlocks {
				if value.(blockNumberAndRetryCounter).retry < c_appendQueueRetryPriorityThreshold {
					hashNumberPriorityList = append(hashNumberPriorityList, hashNumber)
				} else {
					hashNumberList = append(hashNumberList, hashNumber)
				}
			}
		}
	}

	c.serviceBlocks(hashNumberPriorityList)
	if len(hashNumberPriorityList) > 0 {
		log.Info("Size of hashNumberPriorityList", "len", len(hashNumberPriorityList), "first entry", hashNumberPriorityList[0].Number, "last entry", hashNumberPriorityList[len(hashNumberPriorityList)-1].Number)
	}

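	// The normal list is serviced on every proc while the queue is small; once
	// it grows past c_appendQueueThreshold it is only serviced every
	// normalListBackoff*c_normalListProcCounter procs.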
	normalListProcCounter := c.normalListBackoff * c_normalListProcCounter
	if len(c.appendQueue.Keys()) < c_appendQueueThreshold || c.procCounter%int(normalListProcCounter) == 0 {
		c.procCounter = 0
		c.serviceBlocks(hashNumberList)
		if len(hashNumberList) > 0 {
			log.Info("Size of hashNumberList", "len", len(hashNumberList), "first entry", hashNumberList[0].Number, "last entry", hashNumberList[len(hashNumberList)-1].Number)
		}
	}
	c.procCounter++
}

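// serviceBlocks sorts the given hash/number list by ascending block number and
// attempts to append each block, bumping its retry counter, requesting any
// missing parents, and evicting entries that have exceeded the retry threshold.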
func (c *Core) serviceBlocks(hashNumberList []types.HashAndNumber) {
	sort.Slice(hashNumberList, func(i, j int) bool {
		return hashNumberList[i].Number < hashNumberList[j].Number
	})

	var retryThreshold uint64
	switch common.NodeLocation.Context() {
	case common.PRIME_CTX:
		retryThreshold = c_primeRetryThreshold
	case common.REGION_CTX:
		retryThreshold = c_regionRetryThreshold
	case common.ZONE_CTX:
		retryThreshold = c_zoneRetryThreshold
	}

	// Attempt to service the sorted list
	for i, hashAndNumber := range hashNumberList {
		block := c.GetBlockOrCandidateByHash(hashAndNumber.Hash)
		if block != nil {
			var numberAndRetryCounter blockNumberAndRetryCounter
			if value, exist := c.appendQueue.Peek(block.Hash()); exist {
				numberAndRetryCounter = value.(blockNumberAndRetryCounter)
				numberAndRetryCounter.retry += 1
				if numberAndRetryCounter.retry > retryThreshold && numberAndRetryCounter.number+c_appendQueueRemoveThreshold < c.CurrentHeader().NumberU64() {
					c.appendQueue.Remove(block.Hash())
				} else {
					c.appendQueue.Add(block.Hash(), numberAndRetryCounter)
				}
			}
			parentBlock := c.sl.hc.GetBlockOrCandidate(block.ParentHash(), block.NumberU64()-1)
			if parentBlock != nil {
				// If the parent header is dom, send a signal to the dom to request the block if it doesn't have it
				_, parentHeaderOrder, err := c.sl.engine.CalcOrder(parentBlock.Header())
				if err != nil {
					log.Warn("Error calculating the parent block order in serviceBlocks", "Hash", parentBlock.Hash(), "Number", parentBlock.Header().NumberArray())
					continue
				}
				nodeCtx := common.NodeLocation.Context()
				if parentHeaderOrder < nodeCtx && c.GetHeaderByHash(parentBlock.Hash()) == nil {
					log.Info("Requesting the dom to get the block if it doesn't have it and try to append", "Hash", parentBlock.Hash(), "Order", parentHeaderOrder)
					if c.sl.domClient != nil {
						// Send a signal to the required dom to fetch the block if it doesn't have it, or if it's not in its append queue
						go c.sl.domClient.RequestDomToAppendOrFetch(context.Background(), parentBlock.Hash(), parentBlock.ParentEntropy(), parentHeaderOrder)
					}
				}
				c.addToQueueIfNotAppended(parentBlock)
				_, err = c.InsertChain([]*types.Block{block})
				if err != nil && err.Error() == ErrPendingBlock.Error() {
					// The best check here would be to check the first hash in each fork; until we do that,
					// checking the first item in the sorted hashNumberList will do
					if i == 0 && c.normalListBackoff < c_normalListBackoffThreshold {
						c.normalListBackoff++
					}
				} else {
					c.normalListBackoff = 1
				}
			} else {
				c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: block.ParentHash(), Entropy: block.ParentEntropy()})
			}
		} else {
			log.Warn("Entry in the FH cache without being in the db: ", "Hash: ", hashAndNumber.Hash)
		}
	}
}

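// RequestDomToAppendOrFetch is invoked by a sub when it sees a block of
// dominant order: the dom either queues the block for append or asks its
// peers (and, in Region, its own dom) to fetch it.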
func (c *Core) RequestDomToAppendOrFetch(hash common.Hash, entropy *big.Int, order int) {
	// TODO: optimize to check if the block is in the appendqueue or already
	// appended to reduce the network bandwidth utilization
	nodeCtx := common.NodeLocation.Context()
	if nodeCtx == common.PRIME_CTX {
		// If Prime, all you can do is ask for the block
		_, exists := c.appendQueue.Get(hash)
		if !exists {
			log.Debug("Block the sub asked for doesn't exist in the append queue, so request the peers for it", "Hash", hash, "Order", order)
			block := c.GetBlockOrCandidateByHash(hash)
			if block == nil {
				c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: hash, Entropy: entropy}) // Using the missing parent feed to ask for the block
			} else {
				c.addToQueueIfNotAppended(block)
			}
		}
	} else if nodeCtx == common.REGION_CTX {
		if order < nodeCtx { // Prime block
			if c.sl.domClient != nil {
				go c.sl.domClient.RequestDomToAppendOrFetch(context.Background(), hash, entropy, order)
			}
		}
		_, exists := c.appendQueue.Get(hash)
		if !exists {
			log.Debug("Block the sub asked for doesn't exist in the append queue, so request the peers for it", "Hash", hash, "Order", order)
			block := c.GetBlockByHash(hash)
			if block == nil {
				c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: hash, Entropy: entropy}) // Using the missing parent feed to ask for the block
			} else {
				c.addToQueueIfNotAppended(block)
			}
		}
	}
}

// addToQueueIfNotAppended adds the block to the append queue if it has not
// already been appended
func (c *Core) addToQueueIfNotAppended(block *types.Block) {
	// Check if the hash is in the blockchain, otherwise add it to the append queue
	if c.GetHeaderByHash(block.Hash()) == nil {
		c.addToAppendQueue(block)
	}
}

// SetSyncTarget sets the sync target header based on the entropy of the prime blocks
func (c *Core) SetSyncTarget(header *types.Header) {
	if header == nil || c.sl.subClients == nil || header.Hash() == c.sl.config.GenesisHash {
		return
	}

	// Check if the header is in the badSyncTargets cache
	_, ok := c.badSyncTargets.Get(header.Hash())
	if ok {
		return
	}

	nodeCtx := common.NodeLocation.Context()
	// Set the sync target for the subs
	if nodeCtx != common.ZONE_CTX {
		if c.sl.subClients[header.Location().SubIndex()] != nil {
			c.sl.subClients[header.Location().SubIndex()].SetSyncTarget(context.Background(), header)
		}
	}
	if c.syncTarget == nil || c.syncTarget.ParentEntropy().Cmp(header.ParentEntropy()) < 0 {
		c.syncTarget = header
	}
}

// SyncTargetEntropy returns the sync target's parent entropy and zone
// threshold entropy if a sync target is set; otherwise it uses the current
// header
func (c *Core) SyncTargetEntropy() (*big.Int, *big.Int) {
	if c.syncTarget != nil {
		target := new(big.Int).Div(common.Big2e256, c.syncTarget.Difficulty())
		zoneThresholdS := c.sl.engine.IntrinsicLogS(common.BytesToHash(target.Bytes()))
		return c.syncTarget.ParentEntropy(), zoneThresholdS
	} else {
		target := new(big.Int).Div(common.Big2e256, c.CurrentHeader().Difficulty())
		zoneThresholdS := c.sl.engine.IntrinsicLogS(common.BytesToHash(target.Bytes()))
		return c.CurrentHeader().ParentEntropy(), zoneThresholdS
	}
}

// addToAppendQueue adds a block to the append queue
func (c *Core) addToAppendQueue(block *types.Block) error {
	nodeCtx := common.NodeLocation.Context()
	_, order, err := c.engine.CalcOrder(block.Header())
	if err != nil {
		return err
	}
	if order == nodeCtx {
		c.appendQueue.ContainsOrAdd(block.Hash(), blockNumberAndRetryCounter{block.NumberU64(), 0})
	}
	return nil
}

// removeFromAppendQueue removes a block from the append queue
func (c *Core) removeFromAppendQueue(block *types.Block) {
	c.appendQueue.Remove(block.Hash())
}

// updateAppendQueue runs procAppendQueue on a timer
func (c *Core) updateAppendQueue() {
	futureTimer := time.NewTicker(c_appendQueueRetryPeriod * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			c.procAppendQueue()
		case <-c.quit:
			return
		}
	}
}

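// checkSyncTarget periodically verifies that the sync target is making
// progress; a target that has not changed within c_badSyncTargetCheckTime is
// marked bad and the sync target is reset to the current header.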
func (c *Core) checkSyncTarget() {
	badSyncTimer := time.NewTicker(c_badSyncTargetCheckTime)
	defer badSyncTimer.Stop()
	for {
		select {
		case <-badSyncTimer.C:
			// If the prevSyncTarget hasn't changed within c_badSyncTargetCheckTime,
			// add it to the badSyncTargets list
			if c.prevSyncTarget != c.syncTarget.Hash() {
				c.prevSyncTarget = c.syncTarget.Hash()
			} else {
				c.badSyncTargets.Add(c.syncTarget.Hash(), true)
				c.syncTarget = c.CurrentHeader()
			}
		case <-c.quit:
			return
		}
	}
}

func (c *Core) startStatsTimer() {
	futureTimer := time.NewTicker(c_statsPrintPeriod * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			c.printStats()
		case <-c.quit:
			return
		}
	}
}

// printStats displays stats on syncing, latestHeight, etc.
func (c *Core) printStats() {
	log.Info("Blocks waiting to be appended", "loc", common.NodeLocation.Name(), "len(appendQueue)", len(c.appendQueue.Keys()))

	// Print hashes & heights of all queue entries.
	for _, hash := range c.appendQueue.Keys()[:math.Min(len(c.appendQueue.Keys()), c_appendQueuePrintSize)] {
		if value, exist := c.appendQueue.Peek(hash); exist {
			hashNumber := types.HashAndNumber{Hash: hash.(common.Hash), Number: value.(blockNumberAndRetryCounter).number}
			log.Lazy(func() string {
				return "AppendQueue entry. Number: " + strconv.FormatUint(hashNumber.Number, 10) + ". Hash: " + hashNumber.Hash.String()
			}, "debug")
		}
	}
}

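// BadHashExistsInChain reports whether any block from the configured BadHashes
// fork list is present in the database for this node's context.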
func (c *Core) BadHashExistsInChain() bool {
	nodeCtx := common.NodeLocation.Context()
	// Lookup the bad hashes list to see if we have it in the database
	for _, fork := range BadHashes {
		switch nodeCtx {
		case common.PRIME_CTX:
			if c.GetBlockByHash(fork.PrimeContext) != nil {
				return true
			}
		case common.REGION_CTX:
			if c.GetBlockByHash(fork.RegionContext[common.NodeLocation.Region()]) != nil {
				return true
			}
		case common.ZONE_CTX:
			if c.GetBlockByHash(fork.ZoneContext[common.NodeLocation.Region()][common.NodeLocation.Zone()]) != nil {
				return true
			}
		}
	}
	return false
}

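// SubscribeMissingBlockEvent registers a subscription for requests for blocks
// this node is missing.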
func (c *Core) SubscribeMissingBlockEvent(ch chan<- types.BlockRequest) event.Subscription {
	return c.sl.SubscribeMissingBlockEvent(ch)
}

// InsertChainWithoutSealVerification works exactly like InsertChain, except
// that seal verification is omitted. It is currently a no-op.
func (c *Core) InsertChainWithoutSealVerification(block *types.Block) (int, error) {
	return 0, nil
}

func (c *Core) Processor() *StateProcessor {
	return c.sl.hc.bc.processor
}

func (c *Core) Config() *params.ChainConfig {
	return c.sl.hc.bc.chainConfig
}

// Engine retrieves the blake3 consensus engine.
func (c *Core) Engine() consensus.Engine {
	return c.engine
}

// Slice retrieves the slice struct.
func (c *Core) Slice() *Slice {
	return c.sl
}

func (c *Core) TxPool() *TxPool {
	return c.sl.txPool
}

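// Stop purges the append queue, signals the background goroutines to exit,
// and stops the underlying slice.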
func (c *Core) Stop() {
	// Delete the append queue
	c.appendQueue.Purge()
	close(c.quit)
	c.sl.Stop()
}

//---------------//
// Slice methods //
//---------------//

// WriteBlock writes the block to the body database
func (c *Core) WriteBlock(block *types.Block) {
	c.writeBlockLock.Lock()
	defer c.writeBlockLock.Unlock()
	nodeCtx := common.NodeLocation.Context()

	if c.sl.IsBlockHashABadHash(block.Hash()) {
		return
	}
	if c.GetHeaderByHash(block.Hash()) == nil {
		// Only add non-dom blocks to the append queue
		_, order, err := c.CalcOrder(block.Header())
		if err != nil {
			return
		}
		if order == nodeCtx {
			parentHeader := c.GetHeader(block.ParentHash(), block.NumberU64()-1)
			if parentHeader != nil {
				c.sl.WriteBlock(block)
				c.InsertChain([]*types.Block{block})
			}
			c.addToAppendQueue(block)
			// If a dom block comes in and we haven't appended it yet
		} else if order < nodeCtx && c.GetHeaderByHash(block.Hash()) == nil {
			if c.sl.domClient != nil {
				go c.sl.domClient.RequestDomToAppendOrFetch(context.Background(), block.Hash(), block.ParentEntropy(), order)
			}
		}
	}
	if c.GetHeaderOrCandidateByHash(block.Hash()) == nil {
		c.sl.WriteBlock(block)
	}

	if nodeCtx == common.PRIME_CTX {
		if block != nil {
			c.SetSyncTarget(block.Header())
		}
	}
}

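// Append wraps Slice.Append; when the append fails because the body, an
// ancestor, or a sub is missing, it requests the header's block, its parent,
// and every block in the manifest from peers before returning the error.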
func (c *Core) Append(header *types.Header, manifest types.BlockManifest, domPendingHeader *types.Header, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) {
	newPendingEtxs, subReorg, setHead, err := c.sl.Append(header, domPendingHeader, domTerminus, domOrigin, newInboundEtxs)
	if err != nil {
		if err.Error() == ErrBodyNotFound.Error() || err.Error() == consensus.ErrUnknownAncestor.Error() || err.Error() == ErrSubNotSyncedToDom.Error() {
			// Fetch the blocks for each hash in the manifest
			block := c.GetBlockOrCandidateByHash(header.Hash())
			if block == nil {
				c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: header.Hash(), Entropy: header.ParentEntropy()})
			} else {
				c.addToQueueIfNotAppended(block)
			}
			for _, m := range manifest {
				block := c.GetBlockOrCandidateByHash(m)
				if block == nil {
					c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: m, Entropy: header.ParentEntropy()})
				} else {
					c.addToQueueIfNotAppended(block)
				}
			}
			block = c.GetBlockOrCandidateByHash(header.ParentHash())
			if block == nil {
				c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: header.ParentHash(), Entropy: header.ParentEntropy()})
			} else {
				c.addToQueueIfNotAppended(block)
			}
		}
	}
	return newPendingEtxs, subReorg, setHead, err
}

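// DownloadBlocksInManifest requests every block in the given manifest that is
// not already known; in Region it also recurses into the sub's manifest.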
func (c *Core) DownloadBlocksInManifest(blockHash common.Hash, manifest types.BlockManifest, entropy *big.Int) {
	// Fetch the blocks for each hash in the manifest
	for _, m := range manifest {
		block := c.GetBlockOrCandidateByHash(m)
		if block == nil {
			c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: m, Entropy: entropy})
		} else {
			c.addToQueueIfNotAppended(block)
		}
	}
	if common.NodeLocation.Context() == common.REGION_CTX {
		block := c.GetBlockOrCandidateByHash(blockHash)
		if block != nil {
			// If a prime block comes in
			if c.sl.subClients[block.Location().SubIndex()] != nil {
				c.sl.subClients[block.Location().SubIndex()].DownloadBlocksInManifest(context.Background(), block.Hash(), block.SubManifest(), block.ParentEntropy())
			}
		}
	}
}

// ConstructLocalMinedBlock takes a header and constructs the block locally
func (c *Core) ConstructLocalMinedBlock(header *types.Header) (*types.Block, error) {
	return c.sl.ConstructLocalMinedBlock(header)
}

func (c *Core) SubRelayPendingHeader(slPendingHeader types.PendingHeader, newEntropy *big.Int, location common.Location, subReorg bool, order int) {
	c.sl.SubRelayPendingHeader(slPendingHeader, newEntropy, location, subReorg, order)
}

func (c *Core) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingHeader, location common.Location) {
	c.sl.UpdateDom(oldTerminus, pendingHeader, location)
}

func (c *Core) NewGenesisPendigHeader(pendingHeader *types.Header) {
	c.sl.NewGenesisPendingHeader(pendingHeader)
}

func (c *Core) GetPendingHeader() (*types.Header, error) {
	return c.sl.GetPendingHeader()
}

func (c *Core) GetManifest(blockHash common.Hash) (types.BlockManifest, error) {
	return c.sl.GetManifest(blockHash)
}

func (c *Core) GetSubManifest(slice common.Location, blockHash common.Hash) (types.BlockManifest, error) {
	return c.sl.GetSubManifest(slice, blockHash)
}

func (c *Core) GetPendingEtxs(hash common.Hash) *types.PendingEtxs {
	return rawdb.ReadPendingEtxs(c.sl.sliceDb, hash)
}

func (c *Core) GetPendingEtxsRollup(hash common.Hash) *types.PendingEtxsRollup {
	return rawdb.ReadPendingEtxsRollup(c.sl.sliceDb, hash)
}

func (c *Core) GetPendingEtxsRollupFromSub(hash common.Hash, location common.Location) (types.PendingEtxsRollup, error) {
	return c.sl.GetPendingEtxsRollupFromSub(hash, location)
}

func (c *Core) GetPendingEtxsFromSub(hash common.Hash, location common.Location) (types.PendingEtxs, error) {
	return c.sl.GetPendingEtxsFromSub(hash, location)
}

func (c *Core) HasPendingEtxs(hash common.Hash) bool {
	return c.GetPendingEtxs(hash) != nil
}

func (c *Core) SendPendingEtxsToDom(pEtxs types.PendingEtxs) error {
	return c.sl.SendPendingEtxsToDom(pEtxs)
}

func (c *Core) AddPendingEtxs(pEtxs types.PendingEtxs) error {
	return c.sl.AddPendingEtxs(pEtxs)
}

func (c *Core) AddPendingEtxsRollup(pEtxsRollup types.PendingEtxsRollup) error {
	return c.sl.AddPendingEtxsRollup(pEtxsRollup)
}

func (c *Core) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes types.Termini) error {
	return c.sl.GenerateRecoveryPendingHeader(pendingHeader, checkpointHashes)
}

func (c *Core) IsBlockHashABadHash(hash common.Hash) bool {
	return c.sl.IsBlockHashABadHash(hash)
}

func (c *Core) ProcessingState() bool {
	return c.sl.ProcessingState()
}

//---------------------//
// HeaderChain methods //
//---------------------//

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (c *Core) GetBlock(hash common.Hash, number uint64) *types.Block {
	return c.sl.hc.GetBlock(hash, number)
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (c *Core) GetBlockByHash(hash common.Hash) *types.Block {
	return c.sl.hc.GetBlockByHash(hash)
}

// GetBlockOrCandidateByHash retrieves a block from the database by hash, caching it if found.
func (c *Core) GetBlockOrCandidateByHash(hash common.Hash) *types.Block {
	return c.sl.hc.GetBlockOrCandidateByHash(hash)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (c *Core) GetHeaderByNumber(number uint64) *types.Header {
	return c.sl.hc.GetHeaderByNumber(number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (c *Core) GetBlockByNumber(number uint64) *types.Block {
	return c.sl.hc.GetBlockByNumber(number)
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (c *Core) GetBlocksFromHash(hash common.Hash, n int) []*types.Block {
	return c.sl.hc.GetBlocksFromHash(hash, n)
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (c *Core) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	return c.sl.hc.GetUnclesInChain(block, length)
}

// GetGasUsedInChain retrieves all the gas used from a given block backwards until
// a specific distance is reached.
func (c *Core) GetGasUsedInChain(block *types.Block, length int) int64 {
	return c.sl.hc.GetGasUsedInChain(block, length)
}

// CalculateBaseFee calculates the base fee for the given header.
func (c *Core) CalculateBaseFee(header *types.Header) *big.Int {
	return c.sl.hc.CalculateBaseFee(header)
}

// CurrentBlock returns the block for the current header.
func (c *Core) CurrentBlock() *types.Block {
	return c.sl.hc.CurrentBlock()
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (c *Core) CurrentHeader() *types.Header {
	return c.sl.hc.CurrentHeader()
}

// CurrentLogEntropy returns the logarithm of the total entropy reduction since genesis for our current head block
func (c *Core) CurrentLogEntropy() *big.Int {
	return c.engine.TotalLogS(c.sl.hc.CurrentHeader())
}

// TotalLogS returns the total entropy reduction of the chain since genesis up to the given header
func (c *Core) TotalLogS(header *types.Header) *big.Int {
	return c.engine.TotalLogS(header)
}

// CalcOrder returns the order of the block within the hierarchy of chains
func (c *Core) CalcOrder(header *types.Header) (*big.Int, int, error) {
	return c.engine.CalcOrder(header)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (c *Core) GetHeader(hash common.Hash, number uint64) *types.Header {
	return c.sl.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (c *Core) GetHeaderByHash(hash common.Hash) *types.Header {
	return c.sl.hc.GetHeaderByHash(hash)
}

// GetHeaderOrCandidate retrieves a block header from the database by hash and number,
// caching it if found.
func (c *Core) GetHeaderOrCandidate(hash common.Hash, number uint64) *types.Header {
	return c.sl.hc.GetHeaderOrCandidate(hash, number)
}

// GetHeaderOrCandidateByHash retrieves a block header from the database by hash, caching it if
// found.
func (c *Core) GetHeaderOrCandidateByHash(hash common.Hash) *types.Header {
	return c.sl.hc.GetHeaderOrCandidateByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (c *Core) HasHeader(hash common.Hash, number uint64) bool {
	return c.sl.hc.HasHeader(hash, number)
}

// GetCanonicalHash returns the canonical hash for a given block number
func (c *Core) GetCanonicalHash(number uint64) common.Hash {
	return c.sl.hc.GetCanonicalHash(number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (c *Core) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return c.sl.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (c *Core) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	return c.sl.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

// Genesis retrieves the chain's genesis block.
func (c *Core) Genesis() *types.Block {
	return c.GetBlockByHash(c.sl.hc.genesisHeader.Hash())
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (c *Core) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return c.sl.hc.SubscribeChainHeadEvent(ch)
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (c *Core) GetBody(hash common.Hash) *types.Body {
	return c.sl.hc.GetBody(hash)
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (c *Core) GetBodyRLP(hash common.Hash) rlp.RawValue {
	return c.sl.hc.GetBodyRLP(hash)
}

// GetTerminiByHash retrieves the termini stored for a given header hash
func (c *Core) GetTerminiByHash(hash common.Hash) *types.Termini {
	return c.sl.hc.GetTerminiByHash(hash)
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (c *Core) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return c.sl.hc.SubscribeChainSideEvent(ch)
}

//--------------------//
// BlockChain methods //
//--------------------//

// HasBlock checks if a block is fully present in the database or not.
func (c *Core) HasBlock(hash common.Hash, number uint64) bool {
	return c.sl.hc.bc.HasBlock(hash, number)
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (c *Core) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return c.sl.hc.bc.SubscribeChainEvent(ch)
}

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (c *Core) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return c.sl.hc.bc.SubscribeRemovedLogsEvent(ch)
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (c *Core) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return c.sl.hc.bc.SubscribeLogsEvent(ch)
}

// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (c *Core) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
	return c.sl.hc.bc.SubscribeBlockProcessingEvent(ch)
}

// Export writes the active chain to the given writer.
func (c *Core) Export(w io.Writer) error {
	return c.sl.hc.Export(w)
}

// ExportN writes a subset of the active chain to the given writer.
func (c *Core) ExportN(w io.Writer, first uint64, last uint64) error {
	return c.sl.hc.ExportN(w, first, last)
}

// Snapshots returns the blockchain snapshot tree.
func (c *Core) Snapshots() *snapshot.Tree {
	return nil
}

func (c *Core) TxLookupLimit() uint64 {
	return 0
}

func (c *Core) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
	return c.sl.txPool.SubscribeNewTxsEvent(ch)
}

func (c *Core) SetExtra(extra []byte) error {
	return c.sl.miner.SetExtra(extra)
}

//---------------//
// Miner methods //
//---------------//

func (c *Core) Miner() *Miner {
	return c.sl.Miner()
}

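// Hashrate returns the engine's reported hashrate if it is a PoW engine, and
// 0 otherwise.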
func (c *Core) Hashrate() uint64 {
	if pow, ok := c.sl.engine.(consensus.PoW); ok {
		return uint64(pow.Hashrate())
	}
	return 0
}

func (c *Core) SetRecommitInterval(interval time.Duration) {
	c.sl.miner.SetRecommitInterval(interval)
}

// SetGasCeil sets the gaslimit to strive for when mining blocks.
func (c *Core) SetGasCeil(ceil uint64) {
	c.sl.miner.SetGasCeil(ceil)
}

// EnablePreseal turns on the preseal mining feature. It's enabled by default.
// Note this function shouldn't be exposed to the API; it's unnecessary for users
// (miners) to actually know the underlying detail. It's only for outside projects
// which use this library.
func (c *Core) EnablePreseal() {
	c.sl.miner.EnablePreseal()
}

// DisablePreseal turns off the preseal mining feature. It's necessary for some
// fake consensus engines which can seal blocks instantaneously.
// Note this function shouldn't be exposed to the API; it's unnecessary for users
// (miners) to actually know the underlying detail. It's only for outside projects
// which use this library.
func (c *Core) DisablePreseal() {
	c.sl.miner.DisablePreseal()
}

func (c *Core) StopMining() {
	c.sl.miner.StopMining()
}

// Pending returns the currently pending block.
func (c *Core) Pending() *types.Block {
	return c.sl.miner.Pending()
}

// PendingBlock returns the currently pending block.
//
// Note, the pending block and pending state can change between multiple
// method calls.
func (c *Core) PendingBlock() *types.Block {
	return c.sl.miner.PendingBlock()
}

// PendingBlockAndReceipts returns the currently pending block and corresponding receipts.
func (c *Core) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
	return c.sl.miner.PendingBlockAndReceipts()
}

func (c *Core) SetEtherbase(addr common.Address) {
	c.sl.miner.SetEtherbase(addr)
}

// SubscribePendingLogs starts delivering logs from pending transactions
// to the given channel.
func (c *Core) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription {
	return c.sl.miner.worker.pendingLogsFeed.Subscribe(ch)
}

// SubscribePendingHeader starts delivering the pending header to the given channel.
func (c *Core) SubscribePendingHeader(ch chan<- *types.Header) event.Subscription {
	return c.sl.miner.SubscribePendingHeader(ch)
}

func (c *Core) IsMining() bool { return c.sl.miner.Mining() }

//-------------------------//
// State Processor methods //
//-------------------------//

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (c *Core) GetReceiptsByHash(hash common.Hash) types.Receipts {
	return c.sl.hc.bc.processor.GetReceiptsByHash(hash)
}

// GetVMConfig returns the block chain VM config.
func (c *Core) GetVMConfig() *vm.Config {
	return &c.sl.hc.bc.processor.vmConfig
}

// GetTransactionLookup retrieves the lookup associated with the given transaction
// hash from the cache or database.
func (c *Core) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
	return c.sl.hc.bc.processor.GetTransactionLookup(hash)
}

func (c *Core) HasBlockAndState(hash common.Hash, number uint64) bool {
	return c.Processor().HasBlockAndState(hash, number)
}

// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (c *Core) ContractCode(hash common.Hash) ([]byte, error) {
	return c.sl.hc.bc.processor.ContractCode(hash)
}

// State returns a new mutable state based on the current HEAD block.
func (c *Core) State() (*state.StateDB, error) {
	return c.sl.hc.bc.processor.State()
}

// StateAt returns a new mutable state based on a particular point in time.
func (c *Core) StateAt(root common.Hash) (*state.StateDB, error) {
	return c.sl.hc.bc.processor.StateAt(root)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (c *Core) StateCache() state.Database {
	return c.sl.hc.bc.processor.stateCache
}

// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// the new code scheme.
func (c *Core) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
	return c.sl.hc.bc.processor.ContractCodeWithPrefix(hash)
}

func (c *Core) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) {
	return c.sl.hc.bc.processor.StateAtBlock(block, reexec, base, checkLive)
}

func (c *Core) StateAtTransaction(block *types.Block, txIndex int, reexec uint64) (Message, vm.BlockContext, *state.StateDB, error) {
	return c.sl.hc.bc.processor.StateAtTransaction(block, txIndex, reexec)
}

func (c *Core) TrieNode(hash common.Hash) ([]byte, error) {
	return c.sl.hc.bc.processor.TrieNode(hash)
}

//----------------//
// TxPool methods //
//----------------//

func (c *Core) SetGasPrice(price *big.Int) {
	c.sl.txPool.SetGasPrice(price)
}

func (c *Core) AddLocal(tx *types.Transaction) error {
	return c.sl.txPool.AddLocal(tx)
}

func (c *Core) TxPoolPending(enforceTips bool) (map[common.AddressBytes]types.Transactions, error) {
	return c.sl.txPool.TxPoolPending(enforceTips, nil)
}

func (c *Core) Get(hash common.Hash) *types.Transaction {
	return c.sl.txPool.Get(hash)
}

func (c *Core) Nonce(addr common.Address) uint64 {
	internal, err := addr.InternalAddress()
	if err != nil {
		return 0
	}
	return c.sl.txPool.Nonce(internal)
}

func (c *Core) Stats() (int, int) {
	return c.sl.txPool.Stats()
}

func (c *Core) Content() (map[common.InternalAddress]types.Transactions, map[common.InternalAddress]types.Transactions) {
	return c.sl.txPool.Content()
}

func (c *Core) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
	internal, err := addr.InternalAddress()
	if err != nil {
		return nil, nil
	}
	return c.sl.txPool.ContentFrom(internal)
}