github.com/dominant-strategies/go-quai@v0.28.2/core/slice.go (about)

     1  package core
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"math/big"
     9  	"math/rand"
    10  	"sync"
    11  	"time"
    12  
    13  	"github.com/dominant-strategies/go-quai/common"
    14  	"github.com/dominant-strategies/go-quai/consensus"
    15  	"github.com/dominant-strategies/go-quai/core/rawdb"
    16  	"github.com/dominant-strategies/go-quai/core/state/snapshot"
    17  	"github.com/dominant-strategies/go-quai/core/types"
    18  	"github.com/dominant-strategies/go-quai/core/vm"
    19  	"github.com/dominant-strategies/go-quai/ethdb"
    20  	"github.com/dominant-strategies/go-quai/event"
    21  	"github.com/dominant-strategies/go-quai/log"
    22  	"github.com/dominant-strategies/go-quai/params"
    23  	"github.com/dominant-strategies/go-quai/quaiclient"
    24  	"github.com/dominant-strategies/go-quai/trie"
    25  	lru "github.com/hashicorp/golang-lru"
    26  )
    27  
    28  const (
    29  	c_maxPendingEtxBatchesPrime       = 30000
    30  	c_maxPendingEtxBatchesRegion      = 10000
    31  	c_maxPendingEtxsRollup            = 256
    32  	c_maxBloomFilters                 = 1024
    33  	c_pendingHeaderChacheBufferFactor = 2
    34  	pendingHeaderGCTime               = 5
    35  	c_terminusIndex                   = 3
    36  	c_startingPrintLimit              = 10
    37  	c_regionRelayProc                 = 3
    38  	c_primeRelayProc                  = 10
    39  	c_asyncPhUpdateChanSize           = 10
    40  	c_phCacheSize                     = 500
    41  	c_pEtxRetryThreshold              = 100 // Number of pEtxNotFound return on a dom block before asking for pEtx/Rollup from sub
    42  	c_currentStateComputeWindow       = 20  // Number of blocks around the current header the state generation is always done
    43  	c_inboundEtxCacheSize             = 10  // Number of inboundEtxs to keep in cache so that, we don't recompute it every time dom is processed
    44  )
    45  
    46  type pEtxRetry struct {
    47  	hash    common.Hash
    48  	retries uint64
    49  }
    50  
    51  type Slice struct {
    52  	hc *HeaderChain
    53  
    54  	txPool *TxPool
    55  	miner  *Miner
    56  
    57  	sliceDb ethdb.Database
    58  	config  *params.ChainConfig
    59  	engine  consensus.Engine
    60  
    61  	quit chan struct{} // slice quit channel
    62  
    63  	domClient  *quaiclient.Client
    64  	subClients []*quaiclient.Client
    65  
    66  	wg                    sync.WaitGroup
    67  	scope                 event.SubscriptionScope
    68  	pendingEtxsFeed       event.Feed
    69  	pendingEtxsRollupFeed event.Feed
    70  	missingBlockFeed      event.Feed
    71  
    72  	pEtxRetryCache *lru.Cache
    73  	asyncPhCh      chan *types.Header
    74  	asyncPhSub     event.Subscription
    75  
    76  	bestPhKey        common.Hash
    77  	phCache          *lru.Cache
    78  	inboundEtxsCache *lru.Cache
    79  
    80  	validator Validator // Block and state validator interface
    81  	phCacheMu sync.RWMutex
    82  	reorgMu   sync.RWMutex
    83  
    84  	badHashesCache map[common.Hash]bool
    85  }
    86  
    87  func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.Header) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, genesis *Genesis) (*Slice, error) {
    88  	nodeCtx := common.NodeLocation.Context()
    89  	sl := &Slice{
    90  		config:         chainConfig,
    91  		engine:         engine,
    92  		sliceDb:        db,
    93  		quit:           make(chan struct{}),
    94  		badHashesCache: make(map[common.Hash]bool),
    95  	}
    96  
    97  	var err error
    98  	sl.hc, err = NewHeaderChain(db, engine, sl.GetPEtxRollupAfterRetryThreshold, sl.GetPEtxAfterRetryThreshold, chainConfig, cacheConfig, txLookupLimit, vmConfig, slicesRunning)
    99  	if err != nil {
   100  		return nil, err
   101  	}
   102  
   103  	sl.validator = NewBlockValidator(chainConfig, sl.hc, engine)
   104  
   105  	// tx pool is only used in zone
   106  	if nodeCtx == common.ZONE_CTX && sl.ProcessingState() {
   107  		sl.txPool = NewTxPool(*txConfig, chainConfig, sl.hc)
   108  		sl.hc.pool = sl.txPool
   109  	}
   110  	sl.miner = New(sl.hc, sl.txPool, config, db, chainConfig, engine, isLocalBlock, sl.ProcessingState())
   111  
   112  	sl.phCache, _ = lru.New(c_phCacheSize)
   113  
   114  	sl.pEtxRetryCache, _ = lru.New(c_pEtxRetryThreshold)
   115  
   116  	sl.inboundEtxsCache, _ = lru.New(c_inboundEtxCacheSize)
   117  
   118  	// only set the subClients if the chain is not Zone
   119  	sl.subClients = make([]*quaiclient.Client, 3)
   120  	if nodeCtx != common.ZONE_CTX {
   121  		sl.subClients = makeSubClients(subClientUrls)
   122  	}
   123  
   124  	// only set domClient if the chain is not Prime.
   125  	if nodeCtx != common.PRIME_CTX {
   126  		go func() {
   127  			sl.domClient = makeDomClient(domClientUrl)
   128  		}()
   129  	}
   130  
   131  	if err := sl.init(genesis); err != nil {
   132  		return nil, err
   133  	}
   134  
   135  	sl.CheckForBadHashAndRecover()
   136  
   137  	if nodeCtx == common.ZONE_CTX && sl.ProcessingState() {
   138  		go sl.asyncPendingHeaderLoop()
   139  	}
   140  
   141  	return sl, nil
   142  }
   143  
   144  // Append takes a proposed header and constructs a local block and attempts to hierarchically append it to the block graph.
   145  // If this is called from a dominant context a domTerminus must be provided else a common.Hash{} should be used and domOrigin should be set to true.
   146  // Return of this function is the Etxs generated in the Zone Block, subReorg bool that tells dom if should be mined on, setHead bool that determines if we should set the block as the current head and the error
   147  func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) {
   148  	start := time.Now()
   149  
   150  	if header.Hash() == sl.config.GenesisHash {
   151  		return nil, false, false, nil
   152  	}
   153  
   154  	// Only print in Info level if block is c_startingPrintLimit behind or less
   155  	if sl.CurrentInfo(header) {
   156  		log.Info("Starting slice append", "hash", header.Hash(), "number", header.NumberArray(), "location", header.Location(), "parent hash", header.ParentHash())
   157  	} else {
   158  		log.Debug("Starting slice append", "hash", header.Hash(), "number", header.NumberArray(), "location", header.Location(), "parent hash", header.ParentHash())
   159  	}
   160  
   161  	time0_1 := common.PrettyDuration(time.Since(start))
   162  	// Check if the header hash exists in the BadHashes list
   163  	if sl.IsBlockHashABadHash(header.Hash()) {
   164  		return nil, false, false, ErrBadBlockHash
   165  	}
   166  	time0_2 := common.PrettyDuration(time.Since(start))
   167  
   168  	nodeCtx := common.NodeLocation.Context()
   169  	location := header.Location()
   170  	_, order, err := sl.engine.CalcOrder(header)
   171  	if err != nil {
   172  		return nil, false, false, err
   173  	}
   174  	// Don't append the block which already exists in the database.
   175  	if sl.hc.HasHeader(header.Hash(), header.NumberU64()) && (sl.hc.GetTerminiByHash(header.Hash()) != nil) {
   176  		log.Debug("Block has already been appended: ", "Hash: ", header.Hash())
   177  		return nil, false, false, nil
   178  	}
   179  	time1 := common.PrettyDuration(time.Since(start))
   180  	// This is to prevent a crash when we try to insert blocks before domClient is on.
   181  	// Ideally this check should not exist here and should be fixed before we start the slice.
   182  	if sl.domClient == nil && nodeCtx != common.PRIME_CTX {
   183  		return nil, false, false, ErrDomClientNotUp
   184  	}
   185  
   186  	batch := sl.sliceDb.NewBatch()
   187  
   188  	// Run Previous Coincident Reference Check (PCRC)
   189  	domTerminus, newTermini, err := sl.pcrc(batch, header, domTerminus, domOrigin)
   190  	if err != nil {
   191  		return nil, false, false, err
   192  	}
   193  	log.Debug("PCRC done", "hash", header.Hash(), "number", header.NumberArray(), "termini", newTermini)
   194  
   195  	time2 := common.PrettyDuration(time.Since(start))
   196  	// Append the new block
   197  	err = sl.hc.AppendHeader(header)
   198  	if err != nil {
   199  		return nil, false, false, err
   200  	}
   201  
   202  	time3 := common.PrettyDuration(time.Since(start))
   203  	// Construct the block locally
   204  	block, err := sl.ConstructLocalBlock(header)
   205  	if err != nil {
   206  		return nil, false, false, err
   207  	}
   208  	time4 := common.PrettyDuration(time.Since(start))
   209  
   210  	var pendingHeaderWithTermini types.PendingHeader
   211  	if nodeCtx != common.ZONE_CTX {
   212  		// Upate the local pending header
   213  		pendingHeaderWithTermini, err = sl.generateSlicePendingHeader(block, newTermini, domPendingHeader, domOrigin, true, false)
   214  		if err != nil {
   215  			return nil, false, false, err
   216  		}
   217  	}
   218  
   219  	// If this was a coincident block, our dom will be passing us a set of newly
   220  	// confirmed ETXs If this is not a coincident block, we need to build up the
   221  	// list of confirmed ETXs using the subordinate manifest In either case, if
   222  	// we are a dominant node, we need to collect the ETX rollup from our sub.
   223  	if !domOrigin && nodeCtx != common.ZONE_CTX {
   224  		cachedInboundEtxs, exists := sl.inboundEtxsCache.Get(block.Hash())
   225  		if exists && cachedInboundEtxs != nil {
   226  			newInboundEtxs = cachedInboundEtxs.(types.Transactions)
   227  		} else {
   228  			newInboundEtxs, _, err = sl.CollectNewlyConfirmedEtxs(block, block.Location())
   229  			if err != nil {
   230  				log.Trace("Error collecting newly confirmed etxs: ", "err", err)
   231  				// Keeping track of the number of times pending etx fails and if it crossed the retry threshold
   232  				// ask the sub for the pending etx/rollup data
   233  				val, exist := sl.pEtxRetryCache.Get(block.Hash())
   234  				var retry uint64
   235  				if exist {
   236  					pEtxCurrent, ok := val.(pEtxRetry)
   237  					if ok {
   238  						retry = pEtxCurrent.retries + 1
   239  					}
   240  				}
   241  				pEtxNew := pEtxRetry{hash: block.Hash(), retries: retry}
   242  				sl.pEtxRetryCache.Add(block.Hash(), pEtxNew)
   243  				return nil, false, false, ErrSubNotSyncedToDom
   244  			}
   245  			sl.inboundEtxsCache.Add(block.Hash(), newInboundEtxs)
   246  		}
   247  	}
   248  	time5 := common.PrettyDuration(time.Since(start))
   249  
   250  	time6 := common.PrettyDuration(time.Since(start))
   251  	var subPendingEtxs types.Transactions
   252  	var subReorg bool
   253  	var setHead bool
   254  	var time6_1 common.PrettyDuration
   255  	var time6_2 common.PrettyDuration
   256  	var time6_3 common.PrettyDuration
   257  	// Call my sub to append the block, and collect the rolled up ETXs from that sub
   258  	if nodeCtx != common.ZONE_CTX {
   259  		// How to get the sub pending etxs if not running the full node?.
   260  		if sl.subClients[location.SubIndex()] != nil {
   261  			subPendingEtxs, subReorg, setHead, err = sl.subClients[location.SubIndex()].Append(context.Background(), header, block.SubManifest(), pendingHeaderWithTermini.Header(), domTerminus, true, newInboundEtxs)
   262  			if err != nil {
   263  				return nil, false, false, err
   264  			}
   265  			time6_1 = common.PrettyDuration(time.Since(start))
   266  			// Cache the subordinate's pending ETXs
   267  			pEtxs := types.PendingEtxs{header, subPendingEtxs}
   268  			time6_2 = common.PrettyDuration(time.Since(start))
   269  			// Add the pending etx given by the sub in the rollup
   270  			sl.AddPendingEtxs(pEtxs)
   271  			// Only region has the rollup hashes for pendingEtxs
   272  			if nodeCtx == common.REGION_CTX {
   273  				// We also need to store the pendingEtxRollup to the dom
   274  				pEtxRollup := types.PendingEtxsRollup{header, block.SubManifest()}
   275  				sl.AddPendingEtxsRollup(pEtxRollup)
   276  			}
   277  			time6_3 = common.PrettyDuration(time.Since(start))
   278  		}
   279  	}
   280  
   281  	time7 := common.PrettyDuration(time.Since(start))
   282  
   283  	sl.phCacheMu.Lock()
   284  	defer sl.phCacheMu.Unlock()
   285  
   286  	var time8, time9 common.PrettyDuration
   287  	var bestPh types.PendingHeader
   288  	var exist bool
   289  	if nodeCtx == common.ZONE_CTX {
   290  		bestPh, exist = sl.readPhCache(sl.bestPhKey)
   291  		if !exist {
   292  			sl.WriteBestPhKey(sl.config.GenesisHash)
   293  			sl.writePhCache(block.Hash(), pendingHeaderWithTermini)
   294  			bestPh = types.EmptyPendingHeader()
   295  			log.Error("BestPh Key does not exist for", "key", sl.bestPhKey)
   296  		}
   297  
   298  		time8 = common.PrettyDuration(time.Since(start))
   299  
   300  		tempPendingHeader, err := sl.generateSlicePendingHeader(block, newTermini, domPendingHeader, domOrigin, false, false)
   301  		if err != nil {
   302  			return nil, false, false, err
   303  		}
   304  
   305  		subReorg = sl.miningStrategy(bestPh, tempPendingHeader)
   306  
   307  		if order < nodeCtx {
   308  			// Store the inbound etxs for dom blocks that did not get picked and use
   309  			// it in the future if dom switch happens
   310  			rawdb.WriteInboundEtxs(sl.sliceDb, block.Hash(), newInboundEtxs)
   311  		}
   312  
   313  		setHead = sl.poem(sl.engine.TotalLogS(block.Header()), sl.engine.TotalLogS(sl.hc.CurrentHeader()))
   314  
   315  		if subReorg || (sl.hc.CurrentHeader().NumberU64() < block.NumberU64()+c_currentStateComputeWindow) {
   316  			err := sl.hc.SetCurrentState(block.Header())
   317  			if err != nil {
   318  				log.Error("Error setting current state", "err", err, "Hash", block.Hash())
   319  				return nil, false, false, err
   320  			}
   321  		}
   322  		// Upate the local pending header
   323  		pendingHeaderWithTermini, err = sl.generateSlicePendingHeader(block, newTermini, domPendingHeader, domOrigin, subReorg, false)
   324  		if err != nil {
   325  			return nil, false, false, err
   326  		}
   327  
   328  	}
   329  	time9 = common.PrettyDuration(time.Since(start))
   330  	sl.updatePhCache(pendingHeaderWithTermini, true, nil, subReorg, common.NodeLocation)
   331  
   332  	var updateDom bool
   333  	if subReorg {
   334  		if order == common.ZONE_CTX && pendingHeaderWithTermini.Termini().DomTerminus() != bestPh.Termini().DomTerminus() {
   335  			updateDom = true
   336  		}
   337  		log.Info("Choosing phHeader Append:", "NumberArray:", pendingHeaderWithTermini.Header().NumberArray(), "Number:", pendingHeaderWithTermini.Header().Number(), "ParentHash:", pendingHeaderWithTermini.Header().ParentHash(), "Terminus:", pendingHeaderWithTermini.Termini().DomTerminus())
   338  		sl.WriteBestPhKey(pendingHeaderWithTermini.Termini().DomTerminus())
   339  		block.SetAppendTime(time.Duration(time9))
   340  	}
   341  
   342  	// Append has succeeded write the batch
   343  	if err := batch.Write(); err != nil {
   344  		return nil, false, false, err
   345  	}
   346  
   347  	if setHead {
   348  		sl.hc.SetCurrentHeader(block.Header())
   349  	}
   350  
   351  	if subReorg {
   352  		sl.hc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
   353  	}
   354  
   355  	// Relay the new pendingHeader
   356  	sl.relayPh(block, pendingHeaderWithTermini, domOrigin, block.Location(), subReorg)
   357  
   358  	time10 := common.PrettyDuration(time.Since(start))
   359  	log.Info("Times during append:", "t0_1", time0_1, "t0_2", time0_2, "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10)
   360  	log.Debug("Times during sub append:", "t6_1:", time6_1, "t6_2:", time6_2, "t6_3:", time6_3)
   361  	log.Info("Appended new block", "number", block.Header().NumberArray(), "hash", block.Hash(),
   362  		"difficulty", block.Header().Difficulty(),
   363  		"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "etxs", len(block.ExtTransactions()), "gas", block.GasUsed(), "gasLimit", block.GasLimit(),
   364  		"root", block.Root(),
   365  		"order", order,
   366  		"location", block.Header().Location(),
   367  		"elapsed", common.PrettyDuration(time.Since(start)))
   368  
   369  	if nodeCtx == common.ZONE_CTX {
   370  		if updateDom {
   371  			log.Info("Append updateDom", "oldTermini():", bestPh.Termini().DomTerminus(), "newTermini():", pendingHeaderWithTermini.Termini().DomTerminus(), "location:", common.NodeLocation)
   372  			if sl.domClient != nil {
   373  				go sl.domClient.UpdateDom(context.Background(), bestPh.Termini().DomTerminus(), pendingHeaderWithTermini, common.NodeLocation)
   374  			}
   375  		}
   376  		return block.ExtTransactions(), subReorg, setHead, nil
   377  	} else {
   378  		return subPendingEtxs, subReorg, setHead, nil
   379  	}
   380  }
   381  
   382  func (sl *Slice) miningStrategy(bestPh types.PendingHeader, pendingHeader types.PendingHeader) bool {
   383  	if bestPh.Header() == nil { // This is the case where we try to append the block before we have not initialized the bestPh
   384  		return true
   385  	}
   386  	subReorg := sl.poem(sl.engine.TotalLogPhS(pendingHeader.Header()), sl.engine.TotalLogPhS(bestPh.Header()))
   387  	return subReorg
   388  }
   389  
   390  func (sl *Slice) ProcessingState() bool {
   391  	return sl.hc.ProcessingState()
   392  }
   393  
   394  // relayPh sends pendingHeaderWithTermini to subordinates
   395  func (sl *Slice) relayPh(block *types.Block, pendingHeaderWithTermini types.PendingHeader, domOrigin bool, location common.Location, subReorg bool) {
   396  	nodeCtx := common.NodeLocation.Context()
   397  
   398  	if nodeCtx == common.ZONE_CTX && sl.ProcessingState() {
   399  		// Send an empty header to miner
   400  		bestPh, exists := sl.readPhCache(sl.bestPhKey)
   401  		if exists {
   402  			bestPh.Header().SetLocation(common.NodeLocation)
   403  			sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header())
   404  			return
   405  		} else {
   406  			log.Warn("Pending Header for Best ph key does not exist", "best ph key", sl.bestPhKey)
   407  		}
   408  	} else if !domOrigin && subReorg {
   409  		for _, i := range sl.randomRelayArray() {
   410  			if sl.subClients[i] != nil {
   411  				sl.subClients[i].SubRelayPendingHeader(context.Background(), pendingHeaderWithTermini, pendingHeaderWithTermini.Header().ParentEntropy(), location, subReorg, nodeCtx)
   412  			}
   413  		}
   414  	}
   415  }
   416  
   417  // If a zone changes its best ph key on a dom block, it sends a signal to the
   418  // dom and we can relay that information to the coords, to build on the right dom header
   419  func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingHeader, location common.Location) {
   420  	nodeCtx := common.NodeLocation.Context()
   421  	sl.phCacheMu.Lock()
   422  	defer sl.phCacheMu.Unlock()
   423  	newDomTermini := sl.hc.GetTerminiByHash(pendingHeader.Termini().DomTerminiAtIndex(location.SubIndex()))
   424  	if newDomTermini == nil {
   425  		log.Warn("New Dom Termini doesn't exists in the database for", "hash", pendingHeader.Termini().DomTerminiAtIndex(location.SubIndex()))
   426  		return
   427  	}
   428  	newDomTerminus := newDomTermini.DomTerminus()
   429  	oldDomTermini := sl.hc.GetTerminiByHash(oldTerminus)
   430  	if oldDomTermini == nil {
   431  		log.Warn("Old Dom Termini doesn't exists in the database for", "hash", oldTerminus)
   432  		return
   433  	}
   434  	oldDomTerminus := oldDomTermini.DomTerminus()
   435  	// Find the dom TerminusHash with the newTerminus
   436  	newPh, newDomTerminiExists := sl.readPhCache(newDomTerminus)
   437  	if !newDomTerminiExists {
   438  		log.Warn("Update Dom:", "newTerminus does not exist:", newDomTerminus)
   439  		return
   440  	}
   441  	log.Debug("UpdateDom:", "NewDomTerminus:", newDomTerminus, "OldDomTerminus:", oldDomTerminus, "NewDomTermini:", pendingHeader.Termini().DomTermini(), "Location")
   442  	if nodeCtx == common.REGION_CTX && oldDomTerminus == newPh.Termini().DomTerminus() {
   443  		// Can update
   444  		sl.WriteBestPhKey(newDomTerminus)
   445  		newPh, exists := sl.readPhCache(newDomTerminus)
   446  		if exists {
   447  			for _, i := range sl.randomRelayArray() {
   448  				if sl.subClients[i] != nil {
   449  					log.Info("SubRelay in UpdateDom", "parent Hash:", newPh.Header().ParentHash(), "Number", newPh.Header().NumberArray(), "newTermini:", newPh.Termini().SubTerminiAtIndex(i))
   450  					sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx)
   451  				}
   452  			}
   453  		} else {
   454  			log.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus)
   455  		}
   456  		return
   457  	} else {
   458  		// need to update dom
   459  		log.Info("UpdateDom needs to updateDom", "oldDomTermini:", oldDomTerminus, "newDomTermini:", newPh.Termini(), "location:", location)
   460  		if sl.domClient != nil {
   461  			go sl.domClient.UpdateDom(context.Background(), oldDomTerminus, types.NewPendingHeader(pendingHeader.Header(), newPh.Termini()), location)
   462  		} else {
   463  			// Can update
   464  			sl.WriteBestPhKey(newDomTerminus)
   465  			newPh, exists := sl.readPhCache(newDomTerminus)
   466  			if exists {
   467  				for _, i := range sl.randomRelayArray() {
   468  					if sl.subClients[i] != nil {
   469  						log.Info("SubRelay in UpdateDom:", "Parent Hash:", newPh.Header().ParentHash(), "Number", newPh.Header().NumberArray(), "NewTermini:", newPh.Termini().SubTerminiAtIndex(i))
   470  						sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx)
   471  					}
   472  				}
   473  			} else {
   474  				log.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus)
   475  			}
   476  			return
   477  		}
   478  	}
   479  }
   480  
   481  func (sl *Slice) randomRelayArray() [3]int {
   482  	rand.Seed(time.Now().UnixNano())
   483  	nums := [3]int{0, 1, 2}
   484  	for i := len(nums) - 1; i > 0; i-- {
   485  		j := rand.Intn(i + 1)
   486  		nums[i], nums[j] = nums[j], nums[i]
   487  	}
   488  	return nums
   489  }
   490  
   491  // asyncPendingHeaderLoop waits for the pendingheader updates from the worker and updates the phCache
   492  func (sl *Slice) asyncPendingHeaderLoop() {
   493  
   494  	// Subscribe to the AsyncPh updates from the worker
   495  	sl.asyncPhCh = make(chan *types.Header, c_asyncPhUpdateChanSize)
   496  	sl.asyncPhSub = sl.miner.worker.SubscribeAsyncPendingHeader(sl.asyncPhCh)
   497  
   498  	for {
   499  		select {
   500  		case asyncPh := <-sl.asyncPhCh:
   501  			sl.phCacheMu.Lock()
   502  			sl.updatePhCache(types.PendingHeader{}, true, asyncPh, true, common.NodeLocation)
   503  			sl.phCacheMu.Unlock()
   504  			bestPh, exists := sl.readPhCache(sl.bestPhKey)
   505  			if exists {
   506  				bestPh.Header().SetLocation(common.NodeLocation)
   507  				sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header())
   508  			}
   509  		case <-sl.asyncPhSub.Err():
   510  			return
   511  
   512  		case <-sl.quit:
   513  			return
   514  		}
   515  	}
   516  }
   517  
   518  // Read the phCache
   519  func (sl *Slice) readPhCache(hash common.Hash) (types.PendingHeader, bool) {
   520  	if ph, exists := sl.phCache.Get(hash); exists {
   521  		if ph, ok := ph.(types.PendingHeader); ok {
   522  			if ph.Header() != nil {
   523  				return *types.CopyPendingHeader(&ph), exists
   524  			} else {
   525  				return types.PendingHeader{}, false
   526  			}
   527  		}
   528  	} else {
   529  		ph := rawdb.ReadPendingHeader(sl.sliceDb, hash)
   530  		if ph != nil {
   531  			sl.phCache.Add(hash, ph)
   532  			return *types.CopyPendingHeader(ph), true
   533  		} else {
   534  			return types.PendingHeader{}, false
   535  		}
   536  	}
   537  	return types.PendingHeader{}, false
   538  }
   539  
   540  // Write the phCache
   541  func (sl *Slice) writePhCache(hash common.Hash, pendingHeader types.PendingHeader) {
   542  	sl.phCache.Add(hash, pendingHeader)
   543  	rawdb.WritePendingHeader(sl.sliceDb, hash, pendingHeader)
   544  }
   545  
   546  // WriteBestPhKey writes the sl.bestPhKey
   547  func (sl *Slice) WriteBestPhKey(hash common.Hash) {
   548  	sl.bestPhKey = hash
   549  	// write the ph head hash to the db.
   550  	rawdb.WriteBestPhKey(sl.sliceDb, hash)
   551  }
   552  
   553  // Generate a slice pending header
   554  func (sl *Slice) generateSlicePendingHeader(block *types.Block, newTermini types.Termini, domPendingHeader *types.Header, domOrigin bool, subReorg bool, fill bool) (types.PendingHeader, error) {
   555  	nodeCtx := common.NodeLocation.Context()
   556  	var localPendingHeader *types.Header
   557  	var err error
   558  	if subReorg {
   559  		// Upate the local pending header
   560  		localPendingHeader, err = sl.miner.worker.GeneratePendingHeader(block, fill)
   561  		if err != nil {
   562  			return types.PendingHeader{}, err
   563  		}
   564  	} else {
   565  		// Just compute the necessary information for the pending Header
   566  		// i.e ParentHash field, Number and writing manifest to the disk
   567  		localPendingHeader = types.EmptyHeader()
   568  		localPendingHeader.SetParentHash(block.Hash(), nodeCtx)
   569  		localPendingHeader.SetNumber(big.NewInt(int64(block.NumberU64()) + 1))
   570  		localPendingHeader.SetParentEntropy(sl.engine.TotalLogS(block.Header()))
   571  		if nodeCtx != common.PRIME_CTX {
   572  			if domOrigin {
   573  				localPendingHeader.SetParentDeltaS(big.NewInt(0), nodeCtx)
   574  			} else {
   575  				localPendingHeader.SetParentDeltaS(sl.engine.DeltaLogS(block.Header()), nodeCtx)
   576  			}
   577  		}
   578  
   579  		manifestHash := sl.miner.worker.ComputeManifestHash(block.Header())
   580  		localPendingHeader.SetManifestHash(manifestHash)
   581  	}
   582  
   583  	// Combine subordinates pending header with local pending header
   584  	pendingHeaderWithTermini := sl.computePendingHeader(types.NewPendingHeader(localPendingHeader, newTermini), domPendingHeader, domOrigin)
   585  	pendingHeaderWithTermini.Header().SetLocation(block.Header().Location())
   586  
   587  	return pendingHeaderWithTermini, nil
   588  }
   589  
   590  // CollectNewlyConfirmedEtxs collects all newly confirmed ETXs since the last coincident with the given location
   591  func (sl *Slice) CollectNewlyConfirmedEtxs(block *types.Block, location common.Location) (types.Transactions, types.Transactions, error) {
   592  	nodeCtx := common.NodeLocation.Context()
   593  	// Collect rollup of ETXs from the subordinate node's manifest
   594  	subRollup := types.Transactions{}
   595  	var err error
   596  	if nodeCtx < common.ZONE_CTX {
   597  		rollup, exists := sl.hc.subRollupCache.Get(block.Hash())
   598  		if exists && rollup != nil {
   599  			subRollup = rollup.(types.Transactions)
   600  			log.Info("Found the rollup in cache", "Hash", block.Hash(), "len", len(subRollup))
   601  		} else {
   602  			subRollup, err = sl.hc.CollectSubRollup(block)
   603  			if err != nil {
   604  				return nil, nil, err
   605  			}
   606  			sl.hc.subRollupCache.Add(block.Hash(), subRollup)
   607  		}
   608  	}
   609  
   610  	// Filter for ETXs destined to this slice
   611  	newInboundEtxs := subRollup.FilterToSlice(location, nodeCtx)
   612  
   613  	// Filter this list to exclude any ETX for which we are not the crossing
   614  	// context node. Such ETXs cannot be used by our subordinate for one of the
   615  	// following reasons:
   616  	// * if we are prime, but common dom was a region node, than the given ETX has
   617  	//   already been confirmed and passed down from the region node
   618  	// * if we are region, but the common dom is prime, then the destination is
   619  	//   not in one of our sub chains
   620  	//
   621  	// Note: here "common dom" refers to the highes context chain which exists in
   622  	// both the origin & destination. See the definition of the `CommonDom()`
   623  	// method for more explanation.
   624  	newlyConfirmedEtxs := newInboundEtxs.FilterConfirmationCtx(nodeCtx)
   625  
   626  	// Terminate the search if we reached genesis
   627  	if block.NumberU64() == 0 {
   628  		if block.Hash() != sl.config.GenesisHash {
   629  			return nil, nil, fmt.Errorf("terminated search on bad genesis, block0 hash: %s", block.Hash().String())
   630  		} else {
   631  			return newlyConfirmedEtxs, subRollup, nil
   632  		}
   633  	}
   634  	ancHash := block.ParentHash()
   635  	ancNum := block.NumberU64() - 1
   636  	ancestor := sl.hc.GetBlock(ancHash, ancNum)
   637  	if ancestor == nil {
   638  		return nil, nil, fmt.Errorf("unable to find ancestor, hash: %s", ancHash.String())
   639  	}
   640  
   641  	// Terminate the search when we find a block produced by the same sub
   642  	if ancestor.Location().SubIndex() == location.SubIndex() {
   643  		return newlyConfirmedEtxs, subRollup, nil
   644  	}
   645  
   646  	// Otherwise recursively process the ancestor and collect its newly confirmed ETXs too
   647  	ancEtxs, _, err := sl.CollectNewlyConfirmedEtxs(ancestor, location)
   648  	if err != nil {
   649  		return nil, nil, err
   650  	}
   651  	newlyConfirmedEtxs = append(ancEtxs, newlyConfirmedEtxs...)
   652  	return newlyConfirmedEtxs, subRollup, nil
   653  }
   654  
   655  // PCRC previous coincidence reference check makes sure there are not any cyclic references in the graph and calculates new termini and the block terminus
   656  func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus common.Hash, domOrigin bool) (common.Hash, types.Termini, error) {
   657  	nodeCtx := common.NodeLocation.Context()
   658  	location := header.Location()
   659  
   660  	log.Debug("PCRC:", "Parent Hash:", header.ParentHash(), "Number", header.Number, "Location:", header.Location())
   661  	termini := sl.hc.GetTerminiByHash(header.ParentHash())
   662  
   663  	if !termini.IsValid() {
   664  		return common.Hash{}, types.EmptyTermini(), ErrSubNotSyncedToDom
   665  	}
   666  
   667  	newTermini := types.CopyTermini(*termini)
   668  	// Set the subtermini
   669  	if nodeCtx != common.ZONE_CTX {
   670  		newTermini.SetSubTerminiAtIndex(header.Hash(), location.SubIndex())
   671  	}
   672  
   673  	// Set the terminus
   674  	if nodeCtx == common.PRIME_CTX || domOrigin {
   675  		newTermini.SetDomTerminiAtIndex(header.Hash(), location.DomIndex())
   676  	} else {
   677  		newTermini.SetDomTerminiAtIndex(termini.DomTerminus(), location.DomIndex())
   678  	}
   679  
   680  	// Check for a graph cyclic reference
   681  	if domOrigin {
   682  		if termini.DomTerminus() != domTerminus {
   683  			log.Warn("Cyclic Block:", "block number", header.NumberArray(), "hash", header.Hash(), "terminus", domTerminus, "termini", termini.DomTerminus())
   684  			return common.Hash{}, types.EmptyTermini(), errors.New("termini do not match, block rejected due to cyclic reference")
   685  		}
   686  	}
   687  
   688  	//Save the termini
   689  	rawdb.WriteTermini(batch, header.Hash(), newTermini)
   690  
   691  	if nodeCtx == common.ZONE_CTX {
   692  		return common.Hash{}, newTermini, nil
   693  	}
   694  
   695  	return termini.SubTerminiAtIndex(location.SubIndex()), newTermini, nil
   696  }
   697  
   698  // POEM compares externS to the currentHead S and returns true if externS is greater
   699  func (sl *Slice) poem(externS *big.Int, currentS *big.Int) bool {
   700  	log.Debug("POEM:", "currentS:", common.BigBitsToBits(currentS), "externS:", common.BigBitsToBits(externS))
   701  	reorg := currentS.Cmp(externS) <= 0
   702  	return reorg
   703  }
   704  
   705  // GetPendingHeader is used by the miner to request the current pending header
   706  func (sl *Slice) GetPendingHeader() (*types.Header, error) {
   707  	if ph, exists := sl.readPhCache(sl.bestPhKey); exists {
   708  		return ph.Header(), nil
   709  	} else {
   710  		return nil, errors.New("empty pending header")
   711  	}
   712  }
   713  
   714  // GetManifest gathers the manifest of ancestor block hashes since the last
   715  // coincident block.
   716  func (sl *Slice) GetManifest(blockHash common.Hash) (types.BlockManifest, error) {
   717  	manifest := rawdb.ReadManifest(sl.sliceDb, blockHash)
   718  	if manifest != nil {
   719  		return manifest, nil
   720  	}
   721  	return nil, errors.New("manifest not found in the disk")
   722  }
   723  
   724  // GetSubManifest gets the block manifest from the subordinate node which
   725  // produced this block
   726  func (sl *Slice) GetSubManifest(slice common.Location, blockHash common.Hash) (types.BlockManifest, error) {
   727  	subIdx := slice.SubIndex()
   728  	if sl.subClients[subIdx] == nil {
   729  		return nil, errors.New("missing requested subordinate node")
   730  	}
   731  	return sl.subClients[subIdx].GetManifest(context.Background(), blockHash)
   732  }
   733  
   734  // SendPendingEtxsToDom shares a set of pending ETXs with your dom, so he can reference them when a coincident block is found
   735  func (sl *Slice) SendPendingEtxsToDom(pEtxs types.PendingEtxs) error {
   736  	return sl.domClient.SendPendingEtxsToDom(context.Background(), pEtxs)
   737  }
   738  
   739  func (sl *Slice) GetPEtxRollupAfterRetryThreshold(blockHash common.Hash, hash common.Hash, location common.Location) (types.PendingEtxsRollup, error) {
   740  	pEtx, exists := sl.pEtxRetryCache.Get(blockHash)
   741  	if !exists || pEtx.(pEtxRetry).retries < c_pEtxRetryThreshold {
   742  		return types.PendingEtxsRollup{}, ErrPendingEtxNotFound
   743  	}
   744  	return sl.GetPendingEtxsRollupFromSub(hash, location)
   745  }
   746  
   747  // GetPendingEtxsRollupFromSub gets the pending etxs rollup from the appropriate prime
   748  func (sl *Slice) GetPendingEtxsRollupFromSub(hash common.Hash, location common.Location) (types.PendingEtxsRollup, error) {
   749  	nodeCtx := common.NodeLocation.Context()
   750  	if nodeCtx == common.PRIME_CTX {
   751  		if sl.subClients[location.SubIndex()] != nil {
   752  			pEtxRollup, err := sl.subClients[location.SubIndex()].GetPendingEtxsRollupFromSub(context.Background(), hash, location)
   753  			if err != nil {
   754  				return types.PendingEtxsRollup{}, err
   755  			} else {
   756  				sl.AddPendingEtxsRollup(pEtxRollup)
   757  				return pEtxRollup, nil
   758  			}
   759  		}
   760  	} else if nodeCtx == common.REGION_CTX {
   761  		block := sl.hc.GetBlockByHash(hash)
   762  		if block != nil {
   763  			return types.PendingEtxsRollup{Header: block.Header(), Manifest: block.SubManifest()}, nil
   764  		}
   765  	}
   766  	return types.PendingEtxsRollup{}, ErrPendingEtxNotFound
   767  }
   768  
   769  func (sl *Slice) GetPEtxAfterRetryThreshold(blockHash common.Hash, hash common.Hash, location common.Location) (types.PendingEtxs, error) {
   770  	pEtx, exists := sl.pEtxRetryCache.Get(blockHash)
   771  	if !exists || pEtx.(pEtxRetry).retries < c_pEtxRetryThreshold {
   772  		return types.PendingEtxs{}, ErrPendingEtxNotFound
   773  	}
   774  	return sl.GetPendingEtxsFromSub(hash, location)
   775  }
   776  
   777  // GetPendingEtxsFromSub gets the pending etxs from the appropriate prime
   778  func (sl *Slice) GetPendingEtxsFromSub(hash common.Hash, location common.Location) (types.PendingEtxs, error) {
   779  	nodeCtx := common.NodeLocation.Context()
   780  	if nodeCtx != common.ZONE_CTX {
   781  		if sl.subClients[location.SubIndex()] != nil {
   782  			pEtx, err := sl.subClients[location.SubIndex()].GetPendingEtxsFromSub(context.Background(), hash, location)
   783  			if err != nil {
   784  				return types.PendingEtxs{}, err
   785  			} else {
   786  				sl.AddPendingEtxs(pEtx)
   787  				return pEtx, nil
   788  			}
   789  		}
   790  	}
   791  	block := sl.hc.GetBlockByHash(hash)
   792  	if block != nil {
   793  		return types.PendingEtxs{Header: block.Header(), Etxs: block.ExtTransactions()}, nil
   794  	}
   795  	return types.PendingEtxs{}, ErrPendingEtxNotFound
   796  }
   797  
   798  // SubRelayPendingHeader takes a pending header from the sender (ie dominant), updates the phCache with a composited header and relays result to subordinates
   799  func (sl *Slice) SubRelayPendingHeader(pendingHeader types.PendingHeader, newEntropy *big.Int, location common.Location, subReorg bool, order int) {
   800  	nodeCtx := common.NodeLocation.Context()
   801  	var err error
   802  
   803  	if nodeCtx == common.REGION_CTX {
   804  		// Adding a guard on the region that was already updated in the synchronous path.
   805  		if location.Region() != common.NodeLocation.Region() {
   806  			err = sl.updatePhCacheFromDom(pendingHeader, common.NodeLocation.Region(), []int{common.PRIME_CTX}, newEntropy, subReorg, location)
   807  			if err != nil {
   808  				return
   809  			}
   810  		}
   811  
   812  		for _, i := range sl.randomRelayArray() {
   813  			if sl.subClients[i] != nil {
   814  				if ph, exists := sl.readPhCache(pendingHeader.Termini().SubTerminiAtIndex(common.NodeLocation.Region())); exists {
   815  					sl.subClients[i].SubRelayPendingHeader(context.Background(), ph, newEntropy, location, subReorg, order)
   816  				}
   817  			}
   818  		}
   819  	} else {
   820  		// This check prevents a double send to the miner.
   821  		// If the previous block on which the given pendingHeader was built is the same as the NodeLocation
   822  		// the pendingHeader update has already been sent to the miner for the given location in relayPh.
   823  		if !bytes.Equal(location, common.NodeLocation) {
   824  			updateCtx := []int{common.REGION_CTX}
   825  			if order == common.PRIME_CTX {
   826  				updateCtx = append(updateCtx, common.PRIME_CTX)
   827  			}
   828  			err = sl.updatePhCacheFromDom(pendingHeader, common.NodeLocation.Zone(), updateCtx, newEntropy, subReorg, location)
   829  			if err != nil {
   830  				return
   831  			}
   832  		}
   833  
   834  		if !bytes.Equal(location, common.NodeLocation) {
   835  			bestPh, exists := sl.readPhCache(sl.bestPhKey)
   836  			if exists {
   837  				bestPh.Header().SetLocation(common.NodeLocation)
   838  				sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header())
   839  			}
   840  		}
   841  	}
   842  }
   843  
   844  // computePendingHeader takes in an localPendingHeaderWithTermini and updates the pending header on the same terminus if the number is greater
   845  func (sl *Slice) computePendingHeader(localPendingHeaderWithTermini types.PendingHeader, domPendingHeader *types.Header, domOrigin bool) types.PendingHeader {
   846  	nodeCtx := common.NodeLocation.Context()
   847  
   848  	var cachedPendingHeaderWithTermini types.PendingHeader
   849  	hash := localPendingHeaderWithTermini.Termini().DomTerminus()
   850  	cachedPendingHeaderWithTermini, exists := sl.readPhCache(hash)
   851  	var newPh *types.Header
   852  
   853  	if exists {
   854  		log.Debug("computePendingHeader:", "hash:", hash, "pendingHeader:", cachedPendingHeaderWithTermini, "termini:", cachedPendingHeaderWithTermini.Termini())
   855  		if domOrigin {
   856  			newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), domPendingHeader, nodeCtx, true)
   857  			return types.NewPendingHeader(types.CopyHeader(newPh), localPendingHeaderWithTermini.Termini())
   858  		}
   859  		newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), cachedPendingHeaderWithTermini.Header(), nodeCtx, true)
   860  		return types.NewPendingHeader(types.CopyHeader(newPh), localPendingHeaderWithTermini.Termini())
   861  	} else {
   862  		if domOrigin {
   863  			newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), domPendingHeader, nodeCtx, true)
   864  			return types.NewPendingHeader(types.CopyHeader(newPh), localPendingHeaderWithTermini.Termini())
   865  		}
   866  		return localPendingHeaderWithTermini
   867  	}
   868  }
   869  
   870  // updatePhCacheFromDom combines the recieved pending header with the pending header stored locally at a given terminus for specified context
   871  func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, terminiIndex int, indices []int, newEntropy *big.Int, subReorg bool, location common.Location) error {
   872  	sl.phCacheMu.Lock()
   873  	defer sl.phCacheMu.Unlock()
   874  	hash := pendingHeader.Termini().SubTerminiAtIndex(terminiIndex)
   875  	localPendingHeader, exists := sl.readPhCache(hash)
   876  
   877  	if exists {
   878  		combinedPendingHeader := types.CopyHeader(localPendingHeader.Header())
   879  		for _, i := range indices {
   880  			combinedPendingHeader = sl.combinePendingHeader(pendingHeader.Header(), combinedPendingHeader, i, false)
   881  		}
   882  
   883  		localTermini := localPendingHeader.Termini()
   884  		if location.Equal(common.Location{}) {
   885  			for i := 0; i < len(localTermini.DomTermini()); i++ {
   886  				localTermini.SetDomTerminiAtIndex(pendingHeader.Termini().SubTerminiAtIndex(i), i)
   887  			}
   888  		} else {
   889  			domIndex := location.DomIndex()
   890  			localTermini.SetDomTerminiAtIndex(pendingHeader.Termini().SubTerminiAtIndex(domIndex), domIndex)
   891  		}
   892  
   893  		bestPh, exists := sl.readPhCache(sl.bestPhKey)
   894  		nodeCtx := common.NodeLocation.Context()
   895  		if nodeCtx == common.ZONE_CTX && exists && sl.bestPhKey != localPendingHeader.Termini().DomTerminus() && !sl.poem(newEntropy, bestPh.Header().ParentEntropy()) {
   896  			log.Warn("Subrelay Rejected", "local dom terminus", localPendingHeader.Termini().DomTerminus(), "Number", combinedPendingHeader.NumberArray(), "best ph key", sl.bestPhKey, "number", bestPh.Header().NumberArray(), "newentropy", newEntropy)
   897  			sl.updatePhCache(types.NewPendingHeader(combinedPendingHeader, localTermini), false, nil, sl.poem(newEntropy, localPendingHeader.Header().ParentEntropy()), location)
   898  			go sl.domClient.UpdateDom(context.Background(), localPendingHeader.Termini().DomTerminus(), bestPh, common.NodeLocation)
   899  			return nil
   900  		}
   901  		// Pick the head
   902  		if subReorg {
   903  			if (localPendingHeader.Header().Root() != types.EmptyRootHash && nodeCtx == common.ZONE_CTX) || nodeCtx == common.REGION_CTX {
   904  				log.Info("Choosing phHeader pickPhHead:", "NumberArray:", combinedPendingHeader.NumberArray(), "Number:", combinedPendingHeader.Number(), "ParentHash:", combinedPendingHeader.ParentHash(), "Terminus:", localPendingHeader.Termini().DomTerminus())
   905  				sl.WriteBestPhKey(localPendingHeader.Termini().DomTerminus())
   906  			} else {
   907  				block := sl.hc.GetBlockByHash(localPendingHeader.Header().ParentHash())
   908  				if block != nil {
   909  					// setting the current state will help speed the process of append
   910  					// after mining this block since the state will already be computed
   911  					err := sl.hc.SetCurrentState(block.Header())
   912  					if err != nil {
   913  						log.Error("Error setting current state", "err", err, "Hash", block.Hash())
   914  						return nil
   915  					}
   916  					newPendingHeader, err := sl.generateSlicePendingHeader(block, localPendingHeader.Termini(), combinedPendingHeader, true, true, false)
   917  					if err != nil {
   918  						log.Error("Error generating slice pending header", "err", err)
   919  						return err
   920  					}
   921  					combinedPendingHeader = types.CopyHeader(newPendingHeader.Header())
   922  					log.Info("Choosing phHeader pickPhHead:", "NumberArray:", combinedPendingHeader.NumberArray(), "ParentHash:", combinedPendingHeader.ParentHash(), "Terminus:", localPendingHeader.Termini().DomTerminus())
   923  					sl.WriteBestPhKey(localPendingHeader.Termini().DomTerminus())
   924  				} else {
   925  					log.Warn("unable to set the current header after the cord update", "Hash", localPendingHeader.Header().ParentHash())
   926  				}
   927  			}
   928  		}
   929  
   930  		sl.updatePhCache(types.NewPendingHeader(combinedPendingHeader, localTermini), false, nil, subReorg, location)
   931  
   932  		return nil
   933  	}
   934  	log.Warn("no pending header found for", "terminus", hash, "pendingHeaderNumber", pendingHeader.Header().NumberArray(), "Hash", pendingHeader.Header().ParentHash(), "Termini index", terminiIndex, "indices", indices)
   935  	return errors.New("pending header not found in cache")
   936  }
   937  
   938  // updatePhCache updates cache given a pendingHeaderWithTermini with the terminus used as the key.
   939  func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inSlice bool, localHeader *types.Header, subReorg bool, location common.Location) {
   940  
   941  	var exists bool
   942  	if localHeader != nil {
   943  		termini := sl.hc.GetTerminiByHash(localHeader.ParentHash())
   944  		if termini == nil {
   945  			return
   946  		}
   947  
   948  		pendingHeaderWithTermini, exists = sl.readPhCache(termini.DomTerminus())
   949  		if exists {
   950  			pendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localHeader, pendingHeaderWithTermini.Header(), common.ZONE_CTX, true))
   951  		}
   952  
   953  		bestPh, exists := sl.readPhCache(sl.bestPhKey)
   954  		if !exists {
   955  			return
   956  		}
   957  		if !sl.miningStrategy(bestPh, pendingHeaderWithTermini) {
   958  			return
   959  		}
   960  	}
   961  
   962  	termini := pendingHeaderWithTermini.Termini()
   963  
   964  	// This logic implements the Termini Carry
   965  	// * If a terminus exists in the cache, we can just copy the current value and make updates to it
   966  	// * If a terminus doesn't exist in the cache, that means that this is invoked
   967  	// 		on a dom block and we have to carry the parents Termini and make an update
   968  	var cachedTermini types.Termini
   969  	ph, exists := sl.readPhCache(pendingHeaderWithTermini.Termini().DomTerminus())
   970  	if exists {
   971  		cachedTermini = types.CopyTermini(ph.Termini())
   972  	} else {
   973  		parentHeader := sl.hc.GetHeaderOrCandidateByHash(pendingHeaderWithTermini.Header().ParentHash())
   974  		if parentHeader.Hash() == sl.config.GenesisHash {
   975  			ph, _ = sl.readPhCache(sl.config.GenesisHash)
   976  			cachedTermini = types.CopyTermini(ph.Termini())
   977  		} else {
   978  			localTermini := sl.hc.GetTerminiByHash(parentHeader.ParentHash())
   979  			ph, _ = sl.readPhCache(localTermini.DomTerminus())
   980  			cachedTermini = types.CopyTermini(ph.Termini())
   981  		}
   982  	}
   983  
   984  	// During the UpdateDom cycle we have to just copy what the Dom gives us
   985  	// otherwise update only the value that is being changed during inslice
   986  	// update and coord update
   987  	if !location.Equal(common.Location{}) {
   988  		cachedTermini.SetDomTerminiAtIndex(termini.DomTerminiAtIndex(location.DomIndex()), location.DomIndex())
   989  	} else {
   990  		for i := 0; i < len(termini.DomTermini()); i++ {
   991  			cachedTermini.SetDomTerminiAtIndex(termini.DomTerminiAtIndex(i), i)
   992  		}
   993  	}
   994  	cachedTermini.SetSubTermini(termini.SubTermini())
   995  
   996  	// Update the pendingHeader Cache
   997  	deepCopyPendingHeaderWithTermini := types.NewPendingHeader(types.CopyHeader(pendingHeaderWithTermini.Header()), cachedTermini)
   998  	deepCopyPendingHeaderWithTermini.Header().SetLocation(common.NodeLocation)
   999  	deepCopyPendingHeaderWithTermini.Header().SetTime(uint64(time.Now().Unix()))
  1000  
  1001  	if subReorg || !exists {
  1002  		sl.writePhCache(deepCopyPendingHeaderWithTermini.Termini().DomTerminus(), deepCopyPendingHeaderWithTermini)
  1003  		log.Info("PhCache update:", "new terminus?:", !exists, "inSlice:", inSlice, "Ph Number:", deepCopyPendingHeaderWithTermini.Header().NumberArray(), "Termini:", deepCopyPendingHeaderWithTermini.Termini())
  1004  	}
  1005  }
  1006  
  1007  // init checks if the headerchain is empty and if it's empty appends the Knot
  1008  // otherwise loads the last stored state of the chain.
  1009  func (sl *Slice) init(genesis *Genesis) error {
  1010  	// Even though the genesis block cannot have any ETXs, we still need an empty
  1011  	// pending ETX entry for that block hash, so that the state processor can build
  1012  	// on it
  1013  	genesisHash := sl.Config().GenesisHash
  1014  	genesisHeader := sl.hc.GetHeader(genesisHash, 0)
  1015  	if genesisHeader == nil {
  1016  		return errors.New("failed to get genesis header")
  1017  	}
  1018  
  1019  	// Loading the badHashes from the data base and storing it in the cache
  1020  	badHashes := rawdb.ReadBadHashesList(sl.sliceDb)
  1021  	sl.AddToBadHashesList(badHashes)
  1022  
  1023  	// If the headerchain is empty start from genesis
  1024  	if sl.hc.Empty() {
  1025  		// Initialize slice state for genesis knot
  1026  		genesisTermini := types.EmptyTermini()
  1027  		for i := 0; i < len(genesisTermini.SubTermini()); i++ {
  1028  			genesisTermini.SetSubTerminiAtIndex(genesisHash, i)
  1029  		}
  1030  		for i := 0; i < len(genesisTermini.DomTermini()); i++ {
  1031  			genesisTermini.SetDomTerminiAtIndex(genesisHash, i)
  1032  		}
  1033  
  1034  		rawdb.WriteTermini(sl.sliceDb, genesisHash, genesisTermini)
  1035  		rawdb.WriteManifest(sl.sliceDb, genesisHash, types.BlockManifest{genesisHash})
  1036  
  1037  		// Append each of the knot blocks
  1038  		sl.WriteBestPhKey(genesisHash)
  1039  		sl.hc.SetCurrentHeader(genesisHeader)
  1040  
  1041  		// Create empty pending ETX entry for genesis block -- genesis may not emit ETXs
  1042  		emptyPendingEtxs := types.Transactions{}
  1043  		err := sl.hc.AddPendingEtxs(types.PendingEtxs{genesisHeader, emptyPendingEtxs})
  1044  		if err != nil {
  1045  			return err
  1046  		}
  1047  		err = sl.AddPendingEtxsRollup(types.PendingEtxsRollup{genesisHeader, []common.Hash{}})
  1048  		if err != nil {
  1049  			return err
  1050  		}
  1051  		err = sl.hc.AddBloom(types.Bloom{}, genesisHeader.Hash())
  1052  		if err != nil {
  1053  			return err
  1054  		}
  1055  		rawdb.WriteEtxSet(sl.sliceDb, genesisHash, 0, types.NewEtxSet())
  1056  
  1057  		if common.NodeLocation.Context() == common.PRIME_CTX {
  1058  			go sl.NewGenesisPendingHeader(nil)
  1059  		}
  1060  	} else { // load the phCache and slice current pending header hash
  1061  		if err := sl.loadLastState(); err != nil {
  1062  			return err
  1063  		}
  1064  	}
  1065  	return nil
  1066  }
  1067  
  1068  // constructLocalBlock takes a header and construct the Block locally by getting the body
  1069  // from the candidate body db. This method is used when peers give the block as a placeholder
  1070  // for the body.
  1071  func (sl *Slice) ConstructLocalBlock(header *types.Header) (*types.Block, error) {
  1072  	pendingBlockBody := rawdb.ReadBody(sl.sliceDb, header.Hash(), header.NumberU64())
  1073  	if pendingBlockBody == nil {
  1074  		return nil, ErrBodyNotFound
  1075  	}
  1076  	// Load uncles because they are not included in the block response.
  1077  	txs := make([]*types.Transaction, len(pendingBlockBody.Transactions))
  1078  	for i, tx := range pendingBlockBody.Transactions {
  1079  		txs[i] = tx
  1080  	}
  1081  	uncles := make([]*types.Header, len(pendingBlockBody.Uncles))
  1082  	for i, uncle := range pendingBlockBody.Uncles {
  1083  		uncles[i] = uncle
  1084  		log.Debug("Pending Block uncle", "hash: ", uncle.Hash())
  1085  	}
  1086  	etxs := make([]*types.Transaction, len(pendingBlockBody.ExtTransactions))
  1087  	for i, etx := range pendingBlockBody.ExtTransactions {
  1088  		etxs[i] = etx
  1089  	}
  1090  	subManifest := make(types.BlockManifest, len(pendingBlockBody.SubManifest))
  1091  	for i, blockHash := range pendingBlockBody.SubManifest {
  1092  		subManifest[i] = blockHash
  1093  	}
  1094  	block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest)
  1095  	if err := sl.validator.ValidateBody(block); err != nil {
  1096  		return block, err
  1097  	} else {
  1098  		return block, nil
  1099  	}
  1100  }
  1101  
  1102  // constructLocalMinedBlock takes a header and construct the Block locally by getting the block
  1103  // body from the workers pendingBlockBodyCache. This method is used when the miner sends in the
  1104  // header.
  1105  func (sl *Slice) ConstructLocalMinedBlock(header *types.Header) (*types.Block, error) {
  1106  	nodeCtx := common.NodeLocation.Context()
  1107  	var pendingBlockBody *types.Body
  1108  	if nodeCtx == common.ZONE_CTX {
  1109  		pendingBlockBody = sl.GetPendingBlockBody(header)
  1110  		if pendingBlockBody == nil {
  1111  			return nil, ErrBodyNotFound
  1112  		}
  1113  	} else {
  1114  		pendingBlockBody = &types.Body{}
  1115  	}
  1116  	// Load uncles because they are not included in the block response.
  1117  	txs := make([]*types.Transaction, len(pendingBlockBody.Transactions))
  1118  	for i, tx := range pendingBlockBody.Transactions {
  1119  		txs[i] = tx
  1120  	}
  1121  	uncles := make([]*types.Header, len(pendingBlockBody.Uncles))
  1122  	for i, uncle := range pendingBlockBody.Uncles {
  1123  		uncles[i] = uncle
  1124  		log.Debug("Pending Block uncle", "hash: ", uncle.Hash())
  1125  	}
  1126  	etxs := make([]*types.Transaction, len(pendingBlockBody.ExtTransactions))
  1127  	for i, etx := range pendingBlockBody.ExtTransactions {
  1128  		etxs[i] = etx
  1129  	}
  1130  	subManifest := make(types.BlockManifest, len(pendingBlockBody.SubManifest))
  1131  	for i, blockHash := range pendingBlockBody.SubManifest {
  1132  		subManifest[i] = blockHash
  1133  	}
  1134  	block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest)
  1135  	if err := sl.validator.ValidateBody(block); err != nil {
  1136  		return block, err
  1137  	} else {
  1138  		return block, nil
  1139  	}
  1140  }
  1141  
  1142  // combinePendingHeader updates the pending header at the given index with the value from given header.
  1143  func (sl *Slice) combinePendingHeader(header *types.Header, slPendingHeader *types.Header, index int, inSlice bool) *types.Header {
  1144  	// copying the slPendingHeader and updating the copy to remove any shared memory access issues
  1145  	combinedPendingHeader := types.CopyHeader(slPendingHeader)
  1146  
  1147  	combinedPendingHeader.SetParentHash(header.ParentHash(index), index)
  1148  	combinedPendingHeader.SetNumber(header.Number(index), index)
  1149  	combinedPendingHeader.SetManifestHash(header.ManifestHash(index), index)
  1150  	combinedPendingHeader.SetParentEntropy(header.ParentEntropy(index), index)
  1151  	combinedPendingHeader.SetParentDeltaS(header.ParentDeltaS(index), index)
  1152  
  1153  	if inSlice {
  1154  		combinedPendingHeader.SetEtxRollupHash(header.EtxRollupHash())
  1155  		combinedPendingHeader.SetDifficulty(header.Difficulty())
  1156  		combinedPendingHeader.SetUncleHash(header.UncleHash())
  1157  		combinedPendingHeader.SetTxHash(header.TxHash())
  1158  		combinedPendingHeader.SetEtxHash(header.EtxHash())
  1159  		combinedPendingHeader.SetReceiptHash(header.ReceiptHash())
  1160  		combinedPendingHeader.SetRoot(header.Root())
  1161  		combinedPendingHeader.SetCoinbase(header.Coinbase())
  1162  		combinedPendingHeader.SetBaseFee(header.BaseFee())
  1163  		combinedPendingHeader.SetGasLimit(header.GasLimit())
  1164  		combinedPendingHeader.SetGasUsed(header.GasUsed())
  1165  		combinedPendingHeader.SetExtra(header.Extra())
  1166  	}
  1167  
  1168  	return combinedPendingHeader
  1169  }
  1170  
  1171  // NewGenesisPendingHeader creates a pending header on the genesis block
  1172  func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header) {
  1173  	nodeCtx := common.NodeLocation.Context()
  1174  	genesisHash := sl.config.GenesisHash
  1175  	// Upate the local pending header
  1176  	localPendingHeader, err := sl.miner.worker.GeneratePendingHeader(sl.hc.GetBlockByHash(genesisHash), false)
  1177  	if err != nil {
  1178  		return
  1179  	}
  1180  
  1181  	if nodeCtx == common.PRIME_CTX {
  1182  		domPendingHeader = types.CopyHeader(localPendingHeader)
  1183  	} else {
  1184  		domPendingHeader = sl.combinePendingHeader(localPendingHeader, domPendingHeader, nodeCtx, true)
  1185  		domPendingHeader.SetLocation(common.NodeLocation)
  1186  	}
  1187  
  1188  	if nodeCtx != common.ZONE_CTX {
  1189  		for _, client := range sl.subClients {
  1190  			if client != nil {
  1191  				client.NewGenesisPendingHeader(context.Background(), domPendingHeader)
  1192  				if err != nil {
  1193  					return
  1194  				}
  1195  			}
  1196  		}
  1197  	}
  1198  	genesisTermini := types.EmptyTermini()
  1199  	for i := 0; i < len(genesisTermini.SubTermini()); i++ {
  1200  		genesisTermini.SetSubTerminiAtIndex(genesisHash, i)
  1201  	}
  1202  	for i := 0; i < len(genesisTermini.DomTermini()); i++ {
  1203  		genesisTermini.SetDomTerminiAtIndex(genesisHash, i)
  1204  	}
  1205  	if sl.hc.Empty() {
  1206  		domPendingHeader.SetTime(uint64(time.Now().Unix()))
  1207  		sl.phCache.Add(sl.config.GenesisHash, types.NewPendingHeader(domPendingHeader, genesisTermini))
  1208  	}
  1209  }
  1210  
  1211  func (sl *Slice) GetPendingBlockBody(header *types.Header) *types.Body {
  1212  	return sl.miner.worker.GetPendingBlockBody(header)
  1213  }
  1214  
  1215  func (sl *Slice) SubscribeMissingBlockEvent(ch chan<- types.BlockRequest) event.Subscription {
  1216  	return sl.scope.Track(sl.missingBlockFeed.Subscribe(ch))
  1217  }
  1218  
  1219  // MakeDomClient creates the quaiclient for the given domurl
  1220  func makeDomClient(domurl string) *quaiclient.Client {
  1221  	if domurl == "" {
  1222  		log.Fatal("dom client url is empty")
  1223  	}
  1224  	domClient, err := quaiclient.Dial(domurl)
  1225  	if err != nil {
  1226  		log.Fatal("Error connecting to the dominant go-quai client", "err", err)
  1227  	}
  1228  	return domClient
  1229  }
  1230  
  1231  // MakeSubClients creates the quaiclient for the given suburls
  1232  func makeSubClients(suburls []string) []*quaiclient.Client {
  1233  	subClients := make([]*quaiclient.Client, 3)
  1234  	for i, suburl := range suburls {
  1235  		if suburl != "" {
  1236  			subClient, err := quaiclient.Dial(suburl)
  1237  			if err != nil {
  1238  				log.Fatal("Error connecting to the subordinate go-quai client for index", "index", i, " err ", err)
  1239  			}
  1240  			subClients[i] = subClient
  1241  		}
  1242  	}
  1243  	return subClients
  1244  }
  1245  
  1246  // loadLastState loads the phCache and the slice pending header hash from the db.
  1247  func (sl *Slice) loadLastState() error {
  1248  	sl.bestPhKey = rawdb.ReadBestPhKey(sl.sliceDb)
  1249  	bestPh := rawdb.ReadPendingHeader(sl.sliceDb, sl.bestPhKey)
  1250  	if bestPh != nil {
  1251  		sl.writePhCache(sl.bestPhKey, *bestPh)
  1252  	}
  1253  
  1254  	if sl.ProcessingState() {
  1255  		sl.miner.worker.LoadPendingBlockBody()
  1256  	}
  1257  	return nil
  1258  }
  1259  
  1260  // Stop stores the phCache and the sl.pendingHeader hash value to the db.
  1261  func (sl *Slice) Stop() {
  1262  	nodeCtx := common.NodeLocation.Context()
  1263  
  1264  	var badHashes []common.Hash
  1265  	for hash := range sl.badHashesCache {
  1266  		badHashes = append(badHashes, hash)
  1267  	}
  1268  	rawdb.WriteBadHashesList(sl.sliceDb, badHashes)
  1269  	sl.miner.worker.StorePendingBlockBody()
  1270  
  1271  	sl.scope.Close()
  1272  	close(sl.quit)
  1273  
  1274  	sl.hc.Stop()
  1275  	if nodeCtx == common.ZONE_CTX && sl.ProcessingState() {
  1276  		sl.asyncPhSub.Unsubscribe()
  1277  		sl.txPool.Stop()
  1278  	}
  1279  	sl.miner.Stop()
  1280  }
  1281  
  1282  func (sl *Slice) Config() *params.ChainConfig { return sl.config }
  1283  
  1284  func (sl *Slice) Engine() consensus.Engine { return sl.engine }
  1285  
  1286  func (sl *Slice) HeaderChain() *HeaderChain { return sl.hc }
  1287  
  1288  func (sl *Slice) TxPool() *TxPool { return sl.txPool }
  1289  
  1290  func (sl *Slice) Miner() *Miner { return sl.miner }
  1291  
  1292  func (sl *Slice) CurrentInfo(header *types.Header) bool {
  1293  	return sl.miner.worker.CurrentInfo(header)
  1294  }
  1295  
  1296  func (sl *Slice) WriteBlock(block *types.Block) {
  1297  	sl.hc.WriteBlock(block)
  1298  }
  1299  
  1300  func (sl *Slice) AddPendingEtxs(pEtxs types.PendingEtxs) error {
  1301  	nodeCtx := common.NodeLocation.Context()
  1302  	if err := sl.hc.AddPendingEtxs(pEtxs); err == nil || err.Error() != ErrPendingEtxAlreadyKnown.Error() {
  1303  		// Only in the region case we have to send the pendingEtxs to dom from the AddPendingEtxs
  1304  		if nodeCtx == common.REGION_CTX {
  1305  			// Also the first time when adding the pending etx broadcast it to the peers
  1306  			sl.pendingEtxsFeed.Send(pEtxs)
  1307  			if sl.domClient != nil {
  1308  				sl.domClient.SendPendingEtxsToDom(context.Background(), pEtxs)
  1309  			}
  1310  		}
  1311  	} else if err.Error() == ErrPendingEtxAlreadyKnown.Error() {
  1312  		return nil
  1313  	} else {
  1314  		return err
  1315  	}
  1316  	return nil
  1317  }
  1318  
  1319  func (sl *Slice) AddPendingEtxsRollup(pEtxsRollup types.PendingEtxsRollup) error {
  1320  	if !pEtxsRollup.IsValid(trie.NewStackTrie(nil)) {
  1321  		log.Info("PendingEtxRollup is invalid")
  1322  		return ErrPendingEtxRollupNotValid
  1323  	}
  1324  	nodeCtx := common.NodeLocation.Context()
  1325  	log.Debug("Received pending ETXs Rollup", "header: ", pEtxsRollup.Header.Hash(), "Len of Rollup", len(pEtxsRollup.Manifest))
  1326  	// Only write the pending ETXs if we have not seen them before
  1327  	if !sl.hc.pendingEtxsRollup.Contains(pEtxsRollup.Header.Hash()) {
  1328  		// Also write to cache for faster access
  1329  		sl.hc.pendingEtxsRollup.Add(pEtxsRollup.Header.Hash(), pEtxsRollup)
  1330  		// Write to pending ETX rollup database
  1331  		rawdb.WritePendingEtxsRollup(sl.sliceDb, pEtxsRollup)
  1332  
  1333  		// Only Prime broadcasts the pendingEtxRollups
  1334  		if nodeCtx == common.PRIME_CTX {
  1335  			// Also the first time when adding the pending etx rollup broadcast it to the peers
  1336  			sl.pendingEtxsRollupFeed.Send(pEtxsRollup)
  1337  			// Only in the region case, send the pending etx rollup to the dom
  1338  		} else if nodeCtx == common.REGION_CTX {
  1339  			if sl.domClient != nil {
  1340  				sl.domClient.SendPendingEtxsRollupToDom(context.Background(), pEtxsRollup)
  1341  			}
  1342  		}
  1343  	}
  1344  	return nil
  1345  }
  1346  
  1347  func (sl *Slice) CheckForBadHashAndRecover() {
  1348  	nodeCtx := common.NodeLocation.Context()
  1349  	// Lookup the bad hashes list to see if we have it in the database
  1350  	for _, fork := range BadHashes {
  1351  		var badBlock *types.Block
  1352  		var badHash common.Hash
  1353  		switch nodeCtx {
  1354  		case common.PRIME_CTX:
  1355  			badHash = fork.PrimeContext
  1356  		case common.REGION_CTX:
  1357  			badHash = fork.RegionContext[common.NodeLocation.Region()]
  1358  		case common.ZONE_CTX:
  1359  			badHash = fork.ZoneContext[common.NodeLocation.Region()][common.NodeLocation.Zone()]
  1360  		}
  1361  		badBlock = sl.hc.GetBlockByHash(badHash)
  1362  		// Node has a bad block in the database
  1363  		if badBlock != nil {
  1364  			// Start from the current tip and delete every block from the database until this bad hash block
  1365  			sl.cleanCacheAndDatabaseTillBlock(badBlock.ParentHash())
  1366  			if nodeCtx == common.PRIME_CTX {
  1367  				sl.SetHeadBackToRecoveryState(nil, badBlock.ParentHash())
  1368  			}
  1369  		}
  1370  	}
  1371  }
  1372  
  1373  // SetHeadBackToRecoveryState sets the heads of the whole hierarchy to the recovery state
  1374  func (sl *Slice) SetHeadBackToRecoveryState(pendingHeader *types.Header, hash common.Hash) types.PendingHeader {
  1375  	nodeCtx := common.NodeLocation.Context()
  1376  	if nodeCtx == common.PRIME_CTX {
  1377  		localPendingHeaderWithTermini := sl.ComputeRecoveryPendingHeader(hash)
  1378  		sl.phCache.Add(hash, localPendingHeaderWithTermini)
  1379  		sl.GenerateRecoveryPendingHeader(localPendingHeaderWithTermini.Header(), localPendingHeaderWithTermini.Termini())
  1380  	} else {
  1381  		localPendingHeaderWithTermini := sl.ComputeRecoveryPendingHeader(hash)
  1382  		localPendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), pendingHeader, nodeCtx, true))
  1383  		localPendingHeaderWithTermini.Header().SetLocation(common.NodeLocation)
  1384  		sl.phCache.Add(hash, localPendingHeaderWithTermini)
  1385  		return localPendingHeaderWithTermini
  1386  	}
  1387  	return types.PendingHeader{}
  1388  }
  1389  
  1390  // cleanCacheAndDatabaseTillBlock till delete all entries of header and other
  1391  // data structures around slice until the given block hash
  1392  func (sl *Slice) cleanCacheAndDatabaseTillBlock(hash common.Hash) {
  1393  	currentHeader := sl.hc.CurrentHeader()
  1394  	// If the hash is the current header hash, there is nothing to clean from the database
  1395  	if hash == currentHeader.Hash() {
  1396  		return
  1397  	}
  1398  	nodeCtx := common.NodeLocation.Context()
  1399  	// slice caches
  1400  	sl.phCache.Purge()
  1401  	sl.miner.worker.pendingBlockBody.Purge()
  1402  	rawdb.DeleteBestPhKey(sl.sliceDb)
  1403  	// headerchain caches
  1404  	sl.hc.headerCache.Purge()
  1405  	sl.hc.numberCache.Purge()
  1406  	sl.hc.pendingEtxsRollup.Purge()
  1407  	sl.hc.pendingEtxs.Purge()
  1408  	rawdb.DeleteAllHeadsHashes(sl.sliceDb)
  1409  	// bodydb caches
  1410  	sl.hc.bc.blockCache.Purge()
  1411  	sl.hc.bc.bodyCache.Purge()
  1412  	sl.hc.bc.bodyRLPCache.Purge()
  1413  
  1414  	var badHashes []common.Hash
  1415  	header := currentHeader
  1416  	for {
  1417  		rawdb.DeleteBlock(sl.sliceDb, header.Hash(), header.NumberU64())
  1418  		rawdb.DeleteCanonicalHash(sl.sliceDb, header.NumberU64())
  1419  		rawdb.DeleteHeaderNumber(sl.sliceDb, header.Hash())
  1420  		rawdb.DeleteTermini(sl.sliceDb, header.Hash())
  1421  		rawdb.DeleteEtxSet(sl.sliceDb, header.Hash(), header.NumberU64())
  1422  		if nodeCtx != common.ZONE_CTX {
  1423  			pendingEtxsRollup := rawdb.ReadPendingEtxsRollup(sl.sliceDb, header.Hash())
  1424  			// First hash in the manifest is always a dom block and it needs to be
  1425  			// deleted separately because last hash in the final iteration will be
  1426  			// referenced in the next dom block after the restart
  1427  			for _, manifestHash := range pendingEtxsRollup.Manifest[1:] {
  1428  				rawdb.DeletePendingEtxs(sl.sliceDb, manifestHash)
  1429  			}
  1430  			rawdb.DeletePendingEtxs(sl.sliceDb, header.Hash())
  1431  			rawdb.DeletePendingEtxsRollup(sl.sliceDb, header.Hash())
  1432  		}
  1433  		// delete the trie node for a given root of the header
  1434  		rawdb.DeleteTrieNode(sl.sliceDb, header.Root())
  1435  		badHashes = append(badHashes, header.Hash())
  1436  		parent := sl.hc.GetHeader(header.ParentHash(), header.NumberU64()-1)
  1437  		header = parent
  1438  		if header.Hash() == hash || header.Hash() == sl.config.GenesisHash {
  1439  			break
  1440  		}
  1441  	}
  1442  
  1443  	sl.AddToBadHashesList(badHashes)
  1444  	// Set the current header
  1445  	currentHeader = sl.hc.GetHeaderByHash(hash)
  1446  	sl.hc.currentHeader.Store(currentHeader)
  1447  	rawdb.WriteHeadBlockHash(sl.sliceDb, currentHeader.Hash())
  1448  
  1449  	// Recover the snaps
  1450  	if nodeCtx == common.ZONE_CTX && sl.ProcessingState() {
  1451  		sl.hc.bc.processor.snaps, _ = snapshot.New(sl.sliceDb, sl.hc.bc.processor.stateCache.TrieDB(), sl.hc.bc.processor.cacheConfig.SnapshotLimit, currentHeader.Root(), true, true)
  1452  	}
  1453  }
  1454  
  1455  func (sl *Slice) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkPointHashes types.Termini) error {
  1456  	nodeCtx := common.NodeLocation.Context()
  1457  	if nodeCtx == common.PRIME_CTX {
  1458  		for i := 0; i < common.NumRegionsInPrime; i++ {
  1459  			if sl.subClients[i] != nil {
  1460  				sl.subClients[i].GenerateRecoveryPendingHeader(context.Background(), pendingHeader, checkPointHashes)
  1461  			}
  1462  		}
  1463  	} else if nodeCtx == common.REGION_CTX {
  1464  		newPendingHeader := sl.SetHeadBackToRecoveryState(pendingHeader, checkPointHashes.SubTerminiAtIndex(common.NodeLocation.Region()))
  1465  		for i := 0; i < common.NumZonesInRegion; i++ {
  1466  			if sl.subClients[i] != nil {
  1467  				sl.subClients[i].GenerateRecoveryPendingHeader(context.Background(), newPendingHeader.Header(), newPendingHeader.Termini())
  1468  			}
  1469  		}
  1470  	} else {
  1471  		sl.SetHeadBackToRecoveryState(pendingHeader, checkPointHashes.SubTerminiAtIndex(common.NodeLocation.Zone()))
  1472  	}
  1473  	return nil
  1474  }
  1475  
  1476  // ComputeRecoveryPendingHeader generates the pending header at a given hash
  1477  // and gets the termini from the database and returns the pending header with
  1478  // termini
  1479  func (sl *Slice) ComputeRecoveryPendingHeader(hash common.Hash) types.PendingHeader {
  1480  	block := sl.hc.GetBlockByHash(hash)
  1481  	pendingHeader, err := sl.miner.worker.GeneratePendingHeader(block, false)
  1482  	if err != nil {
  1483  		log.Error("Error generating pending header during the checkpoint recovery process")
  1484  		return types.PendingHeader{}
  1485  	}
  1486  	termini := sl.hc.GetTerminiByHash(hash)
  1487  	sl.WriteBestPhKey(hash)
  1488  	return types.NewPendingHeader(pendingHeader, *termini)
  1489  }
  1490  
  1491  // AddToBadHashesList adds a given set of badHashes to the BadHashesList
  1492  func (sl *Slice) AddToBadHashesList(badHashes []common.Hash) {
  1493  	for _, hash := range badHashes {
  1494  		sl.badHashesCache[hash] = true
  1495  	}
  1496  }
  1497  
  1498  // HashExistsInBadHashesList checks if the given hash exists in the badHashesCache
  1499  func (sl *Slice) HashExistsInBadHashesList(hash common.Hash) bool {
  1500  	// Look for pending ETXs first in pending ETX map, then in database
  1501  	_, ok := sl.badHashesCache[hash]
  1502  	return ok
  1503  }
  1504  
  1505  // IsBlockHashABadHash checks if the given hash exists in BadHashes List
  1506  func (sl *Slice) IsBlockHashABadHash(hash common.Hash) bool {
  1507  	nodeCtx := common.NodeLocation.Context()
  1508  	switch nodeCtx {
  1509  	case common.PRIME_CTX:
  1510  		for _, fork := range BadHashes {
  1511  			if fork.PrimeContext == hash {
  1512  				return true
  1513  			}
  1514  		}
  1515  	case common.REGION_CTX:
  1516  		for _, fork := range BadHashes {
  1517  			if fork.RegionContext[common.NodeLocation.Region()] == hash {
  1518  				return true
  1519  			}
  1520  		}
  1521  	case common.ZONE_CTX:
  1522  		for _, fork := range BadHashes {
  1523  			if fork.ZoneContext[common.NodeLocation.Region()][common.NodeLocation.Zone()] == hash {
  1524  				return true
  1525  			}
  1526  		}
  1527  	}
  1528  	return sl.HashExistsInBadHashesList(hash)
  1529  }