github.com/dominant-strategies/go-quai@v0.28.2/core/worker.go (about)

     1  package core
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"math/big"
     7  	"strings"
     8  	"sync"
     9  	"sync/atomic"
    10  	"time"
    11  
    12  	mapset "github.com/deckarep/golang-set"
    13  	"github.com/dominant-strategies/go-quai/common"
    14  	"github.com/dominant-strategies/go-quai/common/hexutil"
    15  	"github.com/dominant-strategies/go-quai/consensus"
    16  	"github.com/dominant-strategies/go-quai/consensus/misc"
    17  	"github.com/dominant-strategies/go-quai/core/rawdb"
    18  	"github.com/dominant-strategies/go-quai/core/state"
    19  	"github.com/dominant-strategies/go-quai/core/types"
    20  	"github.com/dominant-strategies/go-quai/ethdb"
    21  	"github.com/dominant-strategies/go-quai/event"
    22  	"github.com/dominant-strategies/go-quai/log"
    23  	"github.com/dominant-strategies/go-quai/params"
    24  	"github.com/dominant-strategies/go-quai/trie"
    25  	lru "github.com/hashicorp/golang-lru"
    26  	expireLru "github.com/hnlq715/golang-lru"
    27  )
    28  
const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// sealingLogAtDepth is the number of confirmations before logging successful sealing.
	sealingLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the sealing block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7

	// pendingBlockBodyLimit is the maximum number of pending block bodies kept in the
	// worker's LRU cache (worker.pendingBlockBody).
	pendingBlockBodyLimit = 320

	// c_headerPrintsExpiryTime is how long a header hash is kept in the headerPrints
	// cache, so that the current-info log line is not printed on a Proc frequency.
	c_headerPrintsExpiryTime = 2 * time.Minute
)
    53  
// environment is the worker's current environment and holds all
// information of the sealing block generation.
type environment struct {
	signer types.Signer // signer used to recover transaction senders for this block's rules

	state     *state.StateDB // apply state changes here
	ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
	family    mapset.Set     // family set (used for checking uncle invalidity)
	tcount    int            // tx count in cycle
	gasPool   *GasPool       // available gas used to pack transactions
	coinbase  common.Address // fee recipient for transactions applied in this environment
	etxRLimit int // Remaining number of cross-region ETXs that can be included
	etxPLimit int // Remaining number of cross-prime ETXs that can be included

	header      *types.Header        // header under construction for the pending block
	txs         []*types.Transaction // transactions included so far
	etxs        []*types.Transaction // external (cross-chain) transactions emitted by included txs
	subManifest types.BlockManifest  // sub-chain manifest for the pending block
	receipts    []*types.Receipt     // receipts for txs, index-aligned with txs
	uncleMu     sync.RWMutex         // guards uncles
	uncles      map[common.Hash]*types.Header // candidate uncle headers keyed by hash
}
    76  
    77  // copy creates a deep copy of environment.
    78  func (env *environment) copy(processingState bool) *environment {
    79  	nodeCtx := common.NodeLocation.Context()
    80  	if nodeCtx == common.ZONE_CTX && processingState {
    81  		cpy := &environment{
    82  			signer:    env.signer,
    83  			state:     env.state.Copy(),
    84  			ancestors: env.ancestors.Clone(),
    85  			family:    env.family.Clone(),
    86  			tcount:    env.tcount,
    87  			coinbase:  env.coinbase,
    88  			etxRLimit: env.etxRLimit,
    89  			etxPLimit: env.etxPLimit,
    90  			header:    types.CopyHeader(env.header),
    91  			receipts:  copyReceipts(env.receipts),
    92  		}
    93  		if env.gasPool != nil {
    94  			gasPool := *env.gasPool
    95  			cpy.gasPool = &gasPool
    96  		}
    97  		// The content of txs and uncles are immutable, unnecessary
    98  		// to do the expensive deep copy for them.
    99  		cpy.txs = make([]*types.Transaction, len(env.txs))
   100  		copy(cpy.txs, env.txs)
   101  		cpy.etxs = make([]*types.Transaction, len(env.etxs))
   102  		copy(cpy.etxs, env.etxs)
   103  
   104  		env.uncleMu.Lock()
   105  		cpy.uncles = make(map[common.Hash]*types.Header)
   106  		for hash, uncle := range env.uncles {
   107  			cpy.uncles[hash] = uncle
   108  		}
   109  		env.uncleMu.Unlock()
   110  		return cpy
   111  	} else {
   112  		return &environment{header: types.CopyHeader(env.header)}
   113  	}
   114  }
   115  
   116  // unclelist returns the contained uncles as the list format.
   117  func (env *environment) unclelist() []*types.Header {
   118  	env.uncleMu.RLock()
   119  	defer env.uncleMu.RUnlock()
   120  	var uncles []*types.Header
   121  	for _, uncle := range env.uncles {
   122  		uncles = append(uncles, uncle)
   123  	}
   124  	return uncles
   125  }
   126  
   127  // discard terminates the background prefetcher go-routine. It should
   128  // always be called for all created environment instances otherwise
   129  // the go-routine leak can happen.
   130  func (env *environment) discard() {
   131  	if env.state == nil {
   132  		return
   133  	}
   134  	env.state.StopPrefetcher()
   135  }
   136  
   137  // task contains all information for consensus engine sealing and result submitting.
   138  type task struct {
   139  	receipts  []*types.Receipt
   140  	state     *state.StateDB
   141  	block     *types.Block
   142  	createdAt time.Time
   143  }
   144  
// Interrupt reasons observed by commitTransactions via the shared *int32 flag.
const (
	commitInterruptNone int32 = iota // no interruption requested
	commitInterruptNewHead           // a new chain head arrived; discard semi-finished work
	commitInterruptResubmit          // recommit requested; submit semi-finished work
)
   150  
   151  // intervalAdjust represents a resubmitting interval adjustment.
   152  type intervalAdjust struct {
   153  	ratio float64
   154  	inc   bool
   155  }
   156  
   157  // Config is the configuration parameters of mining.
   158  type Config struct {
   159  	Etherbase  common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account)
   160  	Notify     []string       `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash).
   161  	NotifyFull bool           `toml:",omitempty"` // Notify with pending block headers instead of work packages
   162  	ExtraData  hexutil.Bytes  `toml:",omitempty"` // Block extra data set by the miner
   163  	GasFloor   uint64         // Target gas floor for mined blocks.
   164  	GasCeil    uint64         // Target gas ceiling for mined blocks.
   165  	GasPrice   *big.Int       // Minimum gas price for mining a transaction
   166  	Recommit   time.Duration  // The time interval for miner to re-create mining work.
   167  	Noverify   bool           // Disable remote mining solution verification(only useful in ethash).
   168  }
   169  
   170  // worker is the main object which takes care of submitting new work to consensus engine
   171  // and gathering the sealing result.
   172  type worker struct {
   173  	config      *Config
   174  	chainConfig *params.ChainConfig
   175  	engine      consensus.Engine
   176  	hc          *HeaderChain
   177  	txPool      *TxPool
   178  
   179  	// Feeds
   180  	pendingLogsFeed   event.Feed
   181  	pendingHeaderFeed event.Feed
   182  
   183  	// Subscriptions
   184  	chainHeadCh  chan ChainHeadEvent
   185  	chainHeadSub event.Subscription
   186  
   187  	// Channels
   188  	taskCh                         chan *task
   189  	resultCh                       chan *types.Block
   190  	exitCh                         chan struct{}
   191  	resubmitIntervalCh             chan time.Duration
   192  	resubmitAdjustCh               chan *intervalAdjust
   193  	fillTransactionsRollingAverage *RollingAverage
   194  
   195  	interrupt   chan struct{}
   196  	asyncPhFeed event.Feed // asyncPhFeed sends an event after each state root update
   197  	scope       event.SubscriptionScope
   198  
   199  	wg sync.WaitGroup
   200  
   201  	current      *environment                 // An environment for current running cycle.
   202  	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
   203  	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
   204  	uncleMu      sync.RWMutex
   205  
   206  	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
   207  	coinbase common.Address
   208  	extra    []byte
   209  
   210  	workerDb ethdb.Database
   211  
   212  	pendingBlockBody *lru.Cache
   213  
   214  	snapshotMu    sync.RWMutex // The lock used to protect the snapshots below
   215  	snapshotBlock *types.Block
   216  
   217  	headerPrints *expireLru.Cache
   218  
   219  	// atomic status counters
   220  	running int32 // The indicator whether the consensus engine is running or not.
   221  	newTxs  int32 // New arrival transaction count since last sealing work submitting.
   222  
   223  	// noempty is the flag used to control whether the feature of pre-seal empty
   224  	// block is enabled. The default value is false(pre-seal is enabled by default).
   225  	// But in some special scenario the consensus engine will seal blocks instantaneously,
   226  	// in this case this feature will add all empty blocks into canonical chain
   227  	// non-stop and no real transaction will be included.
   228  	noempty uint32
   229  
   230  	// External functions
   231  	isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by local miner.
   232  
   233  	// Test hooks
   234  	newTaskHook  func(*task) // Method to call upon receiving a new sealing task.
   235  	fullTaskHook func()      // Method to call before pushing the full sealing task.
   236  }
   237  
   238  type RollingAverage struct {
   239  	windowSize int
   240  	durations  []time.Duration
   241  	sum        time.Duration
   242  }
   243  
   244  func (ra *RollingAverage) Add(d time.Duration) {
   245  	if len(ra.durations) == ra.windowSize {
   246  		// Remove the oldest duration from the sum
   247  		ra.sum -= ra.durations[0]
   248  		ra.durations = ra.durations[1:]
   249  	}
   250  	ra.durations = append(ra.durations, d)
   251  	ra.sum += d
   252  }
   253  func (ra *RollingAverage) Average() time.Duration {
   254  	if len(ra.durations) == 0 {
   255  		return 0
   256  	}
   257  	return ra.sum / time.Duration(len(ra.durations))
   258  }
   259  
// newWorker constructs a worker wired to the given header chain, tx pool and
// consensus engine. In a state-processing zone context it also subscribes to
// chain head events and starts the async pending-header generation loop.
func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Database, engine consensus.Engine, headerchain *HeaderChain, txPool *TxPool, isLocalBlock func(header *types.Header) bool, init bool, processingState bool) *worker {
	worker := &worker{
		config:                         config,
		chainConfig:                    chainConfig,
		engine:                         engine,
		hc:                             headerchain,
		txPool:                         txPool,
		coinbase:                       config.Etherbase,
		isLocalBlock:                   isLocalBlock,
		workerDb:                       db,
		localUncles:                    make(map[common.Hash]*types.Block),
		remoteUncles:                   make(map[common.Hash]*types.Block),
		chainHeadCh:                    make(chan ChainHeadEvent, chainHeadChanSize),
		taskCh:                         make(chan *task),
		resultCh:                       make(chan *types.Block, resultQueueSize),
		exitCh:                         make(chan struct{}),
		interrupt:                      make(chan struct{}),
		resubmitIntervalCh:             make(chan time.Duration),
		resubmitAdjustCh:               make(chan *intervalAdjust, resubmitAdjustChanSize),
		fillTransactionsRollingAverage: &RollingAverage{windowSize: 100},
	}
	// Set the GasFloor of the worker to the minGasLimit
	worker.config.GasFloor = params.MinGasLimit

	phBodyCache, _ := lru.New(pendingBlockBodyLimit)
	worker.pendingBlockBody = phBodyCache

	// Sanitize recommit interval if the user-specified one is too short.
	// NOTE(review): the sanitized value is only logged here — `recommit` is
	// never written back to worker.config.Recommit or used below. Confirm
	// whether dropping it is intentional.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}

	// Cache of recently printed header hashes, used to rate-limit info logs.
	headerPrints, _ := expireLru.NewWithExpire(1, c_headerPrintsExpiryTime)
	worker.headerPrints = headerPrints

	nodeCtx := common.NodeLocation.Context()
	if headerchain.ProcessingState() && nodeCtx == common.ZONE_CTX {
		worker.chainHeadSub = worker.hc.SubscribeChainHeadEvent(worker.chainHeadCh)
		worker.wg.Add(1)
		go worker.asyncStateLoop()
	}

	return worker
}
   306  
   307  // setEtherbase sets the etherbase used to initialize the block coinbase field.
   308  func (w *worker) setEtherbase(addr common.Address) {
   309  	w.mu.Lock()
   310  	defer w.mu.Unlock()
   311  	w.coinbase = addr
   312  }
   313  
   314  func (w *worker) setGasCeil(ceil uint64) {
   315  	w.mu.Lock()
   316  	defer w.mu.Unlock()
   317  	w.config.GasCeil = ceil
   318  }
   319  
   320  // setExtra sets the content used to initialize the block extra field.
   321  func (w *worker) setExtra(extra []byte) {
   322  	w.mu.Lock()
   323  	defer w.mu.Unlock()
   324  	w.extra = extra
   325  }
   326  
   327  // setRecommitInterval updates the interval for miner sealing work recommitting.
   328  func (w *worker) setRecommitInterval(interval time.Duration) {
   329  	select {
   330  	case w.resubmitIntervalCh <- interval:
   331  	case <-w.exitCh:
   332  	}
   333  }
   334  
   335  // disablePreseal disables pre-sealing feature
   336  func (w *worker) disablePreseal() {
   337  	atomic.StoreUint32(&w.noempty, 1)
   338  }
   339  
   340  // enablePreseal enables pre-sealing feature
   341  func (w *worker) enablePreseal() {
   342  	atomic.StoreUint32(&w.noempty, 0)
   343  }
   344  
   345  // pending returns the pending state and corresponding block.
   346  func (w *worker) pending() *types.Block {
   347  	// return a snapshot to avoid contention on currentMu mutex
   348  	w.snapshotMu.RLock()
   349  	defer w.snapshotMu.RUnlock()
   350  	return w.snapshotBlock
   351  }
   352  
   353  // pendingBlock returns pending block.
   354  func (w *worker) pendingBlock() *types.Block {
   355  	// return a snapshot to avoid contention on currentMu mutex
   356  	w.snapshotMu.RLock()
   357  	defer w.snapshotMu.RUnlock()
   358  	return w.snapshotBlock
   359  }
   360  
   361  // pendingBlockAndReceipts returns pending block and corresponding receipts.
   362  func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
   363  	// return a snapshot to avoid contention on currentMu mutex
   364  	w.snapshotMu.RLock()
   365  	defer w.snapshotMu.RUnlock()
   366  	// snapshot receipts are not stored in the worker anymore, so pending receipts is nil
   367  	return w.snapshotBlock, nil
   368  }
   369  
   370  // start sets the running status as 1 and triggers new work submitting.
   371  func (w *worker) start() {
   372  	atomic.StoreInt32(&w.running, 1)
   373  }
   374  
   375  // stop sets the running status as 0.
   376  func (w *worker) stop() {
   377  	if w.hc.ProcessingState() && common.NodeLocation.Context() == common.ZONE_CTX {
   378  		w.chainHeadSub.Unsubscribe()
   379  	}
   380  	atomic.StoreInt32(&w.running, 0)
   381  }
   382  
   383  // isRunning returns an indicator whether worker is running or not.
   384  func (w *worker) isRunning() bool {
   385  	return atomic.LoadInt32(&w.running) == 1
   386  }
   387  
   388  // close terminates all background threads maintained by the worker.
   389  // Note the worker does not support being closed multiple times.
   390  func (w *worker) close() {
   391  	atomic.StoreInt32(&w.running, 0)
   392  	close(w.exitCh)
   393  	w.scope.Close()
   394  	w.wg.Wait()
   395  }
   396  
   397  func (w *worker) LoadPendingBlockBody() {
   398  	pendingBlockBodykeys := rawdb.ReadPbBodyKeys(w.workerDb)
   399  	for _, key := range pendingBlockBodykeys {
   400  		if key == types.EmptyBodyHash {
   401  			w.pendingBlockBody.Add(key, &types.Body{})
   402  		} else {
   403  			w.pendingBlockBody.Add(key, rawdb.ReadPbCacheBody(w.workerDb, key))
   404  		}
   405  		// Remove the entry from the database so that body is not accumulated over multiple stops
   406  		rawdb.DeletePbCacheBody(w.workerDb, key)
   407  	}
   408  	rawdb.DeleteAllPbBodyKeys(w.workerDb)
   409  }
   410  
   411  // StorePendingBlockBody stores the pending block body cache into the db
   412  func (w *worker) StorePendingBlockBody() {
   413  	// store the pendingBodyCache body
   414  	var pendingBlockBodyKeys []common.Hash
   415  	pendingBlockBody := w.pendingBlockBody
   416  	for _, key := range pendingBlockBody.Keys() {
   417  		if value, exist := pendingBlockBody.Peek(key); exist {
   418  			pendingBlockBodyKeys = append(pendingBlockBodyKeys, key.(common.Hash))
   419  			if key.(common.Hash) != types.EmptyBodyHash {
   420  				rawdb.WritePbCacheBody(w.workerDb, key.(common.Hash), value.(*types.Body))
   421  			}
   422  		}
   423  	}
   424  	rawdb.WritePbBodyKeys(w.workerDb, pendingBlockBodyKeys)
   425  }
   426  
   427  // asyncStateLoop updates the state root for a block and returns the state udpate in a channel
   428  func (w *worker) asyncStateLoop() {
   429  	defer w.wg.Done() // decrement the wait group after the close of the loop
   430  
   431  	for {
   432  		select {
   433  		case head := <-w.chainHeadCh:
   434  
   435  			w.interruptAsyncPhGen()
   436  
   437  			go func() {
   438  				select {
   439  				case <-w.interrupt:
   440  					w.interrupt = make(chan struct{})
   441  					return
   442  				default:
   443  					block := head.Block
   444  					header, err := w.GeneratePendingHeader(block, true)
   445  					if err != nil {
   446  						log.Error("Error generating pending header with state", "err", err)
   447  						return
   448  					}
   449  					// Send the updated pendingHeader in the asyncPhFeed
   450  					w.asyncPhFeed.Send(header)
   451  					return
   452  				}
   453  			}()
   454  		case <-w.exitCh:
   455  			return
   456  		case <-w.chainHeadSub.Err():
   457  			return
   458  		}
   459  	}
   460  }
   461  
   462  // GeneratePendingBlock generates pending block given a commited block.
   463  func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.Header, error) {
   464  	nodeCtx := common.NodeLocation.Context()
   465  
   466  	w.interruptAsyncPhGen()
   467  
   468  	var (
   469  		interrupt *int32
   470  		timestamp int64 // timestamp for each round of sealing.
   471  	)
   472  
   473  	if interrupt != nil {
   474  		atomic.StoreInt32(interrupt, commitInterruptNewHead)
   475  	}
   476  	interrupt = new(int32)
   477  	atomic.StoreInt32(&w.newTxs, 0)
   478  
   479  	start := time.Now()
   480  	// Set the coinbase if the worker is running or it's required
   481  	var coinbase common.Address
   482  	if w.coinbase.Equal(common.ZeroAddr) {
   483  		log.Error("Refusing to mine without etherbase")
   484  		return nil, errors.New("etherbase not found")
   485  	}
   486  	coinbase = w.coinbase // Use the preset address as the fee recipient
   487  
   488  	work, err := w.prepareWork(&generateParams{
   489  		timestamp: uint64(timestamp),
   490  		coinbase:  coinbase,
   491  	}, block)
   492  	if err != nil {
   493  		return nil, err
   494  	}
   495  
   496  	if nodeCtx == common.ZONE_CTX && w.hc.ProcessingState() {
   497  		// Fill pending transactions from the txpool
   498  		w.adjustGasLimit(nil, work, block)
   499  		if fill {
   500  			start := time.Now()
   501  			w.fillTransactions(interrupt, work, block)
   502  			w.fillTransactionsRollingAverage.Add(time.Since(start))
   503  			log.Info("Filled and sorted pending transactions", "count", len(work.txs), "elapsed", common.PrettyDuration(time.Since(start)), "average", common.PrettyDuration(w.fillTransactionsRollingAverage.Average()))
   504  		}
   505  	}
   506  
   507  	// Swap out the old work with the new one, terminating any leftover
   508  	// prefetcher processes in the mean time and starting a new one.
   509  	if w.current != nil {
   510  		w.current.discard()
   511  	}
   512  	w.current = work
   513  
   514  	// Create a local environment copy, avoid the data race with snapshot state.
   515  	newBlock, err := w.FinalizeAssemble(w.hc, work.header, block, work.state, work.txs, work.unclelist(), work.etxs, work.subManifest, work.receipts)
   516  	if err != nil {
   517  		return nil, err
   518  	}
   519  
   520  	work.header = newBlock.Header()
   521  	w.printPendingHeaderInfo(work, newBlock, start)
   522  
   523  	return work.header, nil
   524  }
   525  
   526  // printPendingHeaderInfo logs the pending header information
   527  func (w *worker) printPendingHeaderInfo(work *environment, block *types.Block, start time.Time) {
   528  	work.uncleMu.RLock()
   529  	if w.CurrentInfo(block.Header()) {
   530  		log.Info("Commit new sealing work", "number", block.Number(), "sealhash", block.Header().SealHash(),
   531  			"uncles", len(work.uncles), "txs", work.tcount, "etxs", len(block.ExtTransactions()),
   532  			"gas", block.GasUsed(), "fees", totalFees(block, work.receipts),
   533  			"elapsed", common.PrettyDuration(time.Since(start)))
   534  	} else {
   535  		log.Debug("Commit new sealing work", "number", block.Number(), "sealhash", block.Header().SealHash(),
   536  			"uncles", len(work.uncles), "txs", work.tcount, "etxs", len(block.ExtTransactions()),
   537  			"gas", block.GasUsed(), "fees", totalFees(block, work.receipts),
   538  			"elapsed", common.PrettyDuration(time.Since(start)))
   539  	}
   540  	work.uncleMu.RUnlock()
   541  }
   542  
   543  // interruptAsyncPhGen kills any async ph generation running
   544  func (w *worker) interruptAsyncPhGen() {
   545  	if w.interrupt != nil {
   546  		close(w.interrupt)
   547  		w.interrupt = nil
   548  	}
   549  }
   550  
   551  func (w *worker) eventExitLoop() {
   552  	for {
   553  		select {
   554  		case <-w.exitCh:
   555  			return
   556  		}
   557  	}
   558  }
   559  
   560  // makeEnv creates a new environment for the sealing block.
   561  func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase common.Address) (*environment, error) {
   562  	// Retrieve the parent state to execute on top and start a prefetcher for
   563  	// the miner to speed block sealing up a bit.
   564  	state, err := w.hc.bc.processor.StateAt(parent.Root())
   565  	if err != nil {
   566  		return nil, err
   567  	}
   568  
   569  	etxRLimit := len(parent.Transactions()) / params.ETXRegionMaxFraction
   570  	if etxRLimit < params.ETXRLimitMin {
   571  		etxRLimit = params.ETXRLimitMin
   572  	}
   573  	etxPLimit := len(parent.Transactions()) / params.ETXPrimeMaxFraction
   574  	if etxPLimit < params.ETXPLimitMin {
   575  		etxPLimit = params.ETXPLimitMin
   576  	}
   577  	// Note the passed coinbase may be different with header.Coinbase.
   578  	env := &environment{
   579  		signer:    types.MakeSigner(w.chainConfig, header.Number()),
   580  		state:     state,
   581  		coinbase:  coinbase,
   582  		ancestors: mapset.NewSet(),
   583  		family:    mapset.NewSet(),
   584  		header:    header,
   585  		uncles:    make(map[common.Hash]*types.Header),
   586  		etxRLimit: etxRLimit,
   587  		etxPLimit: etxPLimit,
   588  	}
   589  	// when 08 is processed ancestors contain 07 (quick block)
   590  	for _, ancestor := range w.hc.GetBlocksFromHash(parent.Hash(), 7) {
   591  		for _, uncle := range ancestor.Uncles() {
   592  			env.family.Add(uncle.Hash())
   593  		}
   594  		env.family.Add(ancestor.Hash())
   595  		env.ancestors.Add(ancestor.Hash())
   596  	}
   597  	// Keep track of transactions which return errors so they can be removed
   598  	env.tcount = 0
   599  	return env, nil
   600  }
   601  
   602  // commitUncle adds the given block to uncle block set, returns error if failed to add.
   603  func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
   604  	env.uncleMu.Lock()
   605  	defer env.uncleMu.Unlock()
   606  	hash := uncle.Hash()
   607  	if _, exist := env.uncles[hash]; exist {
   608  		return errors.New("uncle not unique")
   609  	}
   610  	if env.header.ParentHash() == uncle.ParentHash() {
   611  		return errors.New("uncle is sibling")
   612  	}
   613  	if !env.ancestors.Contains(uncle.ParentHash()) {
   614  		return errors.New("uncle's parent unknown")
   615  	}
   616  	if env.family.Contains(hash) {
   617  		return errors.New("uncle already included")
   618  	}
   619  	env.uncles[hash] = uncle
   620  	return nil
   621  }
   622  
   623  func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
   624  	if tx != nil {
   625  		snap := env.state.Snapshot()
   626  		// retrieve the gas used int and pass in the reference to the ApplyTransaction
   627  		gasUsed := env.header.GasUsed()
   628  		receipt, err := ApplyTransaction(w.chainConfig, w.hc, &env.coinbase, env.gasPool, env.state, env.header, tx, &gasUsed, *w.hc.bc.processor.GetVMConfig(), &env.etxRLimit, &env.etxPLimit)
   629  		if err != nil {
   630  			log.Debug("Error playing transaction in worker", "err", err, "tx", tx.Hash().Hex(), "block", env.header.Number, "gasUsed", gasUsed)
   631  			env.state.RevertToSnapshot(snap)
   632  			return nil, err
   633  		}
   634  		// once the gasUsed pointer is updated in the ApplyTransaction it has to be set back to the env.Header.GasUsed
   635  		// This extra step is needed because previously the GasUsed was a public method and direct update of the value
   636  		// was possible.
   637  		env.header.SetGasUsed(gasUsed)
   638  		env.txs = append(env.txs, tx)
   639  		env.receipts = append(env.receipts, receipt)
   640  		if receipt.Status == types.ReceiptStatusSuccessful {
   641  			env.etxs = append(env.etxs, receipt.Etxs...)
   642  		}
   643  		return receipt.Logs, nil
   644  	}
   645  	return nil, errors.New("error finding transaction")
   646  }
   647  
// commitTransactions drains txs into the environment until gas runs out, the
// transaction set is exhausted, or an interrupt is raised. It returns true
// only when interrupted by a new chain head (commitInterruptNewHead), in
// which case the semi-finished work should be discarded.
func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) bool {
	// gasLimit is the method value env.header.GasLimit, re-invoked below to
	// read the current limit.
	gasLimit := env.header.GasLimit
	if env.gasPool == nil {
		env.gasPool = new(GasPool).AddGas(gasLimit())
	}
	var coalescedLogs []*types.Log

	for {
		// In the following three cases, we will interrupt the execution of the transaction.
		// (1) new head block event arrival, the interrupt signal is 1
		// (2) worker start or restart, the interrupt signal is 1
		// (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2.
		// For the first two cases, the semi-finished work will be discarded.
		// For the third case, the semi-finished work will be submitted to the consensus engine.
		if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
			// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
				// ratio is the fraction of the block gas already consumed,
				// floored at 0.1 so the adjustment is never negligible.
				ratio := float64(gasLimit()-env.gasPool.Gas()) / float64(gasLimit())
				if ratio < 0.1 {
					ratio = 0.1
				}
				w.resubmitAdjustCh <- &intervalAdjust{
					ratio: ratio,
					inc:   true,
				}
			}
			return atomic.LoadInt32(interrupt) == commitInterruptNewHead
		}
		// If we don't have enough gas for any further transactions then we're done
		if env.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
			break
		}
		// Retrieve the next transaction and abort if all done
		tx := txs.Peek()
		if tx == nil {
			break
		}
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance is the transaction pool.
		//
		// We use the signer regardless of the current hf.
		from, _ := types.Sender(env.signer, tx)
		// Start executing the transaction
		env.state.Prepare(tx.Hash(), env.tcount)

		logs, err := w.commitTransaction(env, tx)
		switch {
		case errors.Is(err, ErrGasLimitReached):
			// Pop the current out-of-gas transaction without shifting in the next from the account
			log.Trace("Gas limit exceeded for current block", "sender", from)
			txs.PopNoSort()

		case errors.Is(err, ErrEtxLimitReached):
			// Pop the current transaction without shifting in the next from the account
			log.Trace("Etx limit exceeded for current block", "sender", from)
			txs.PopNoSort()

		case errors.Is(err, ErrNonceTooLow):
			// New head notification data race between the transaction pool and miner, shift
			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			txs.Shift(from.Bytes20(), false)

		case errors.Is(err, ErrNonceTooHigh):
			// Reorg notification data race between the transaction pool and miner, skip account =
			log.Debug("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
			txs.PopNoSort()

		case errors.Is(err, nil):
			// Everything ok, collect the logs and shift in the next transaction from the same account
			coalescedLogs = append(coalescedLogs, logs...)
			env.tcount++
			txs.PopNoSort()

		case errors.Is(err, ErrTxTypeNotSupported):
			// Pop the unsupported transaction without shifting in the next from the account
			log.Error("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
			txs.PopNoSort()

		case strings.Contains(err.Error(), "emits too many cross"): // This is ErrEtxLimitReached with more info
			// Pop the unsupported transaction without shifting in the next from the account
			log.Trace("Etx limit exceeded for current block", "sender", from, "err", err)
			txs.PopNoSort()

		default:
			// Strange error, discard the transaction and get the next in line (note, the
			// nonce-too-high clause will prevent us from executing in vain).
			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
			txs.Shift(from.Bytes20(), false)
		}
	}

	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are sealing. The reason is that
		// when we are sealing, the worker will regenerate a sealing block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		w.pendingLogsFeed.Send(cpy)
	}
	return false
}
   757  
// generateParams wraps the various settings used for generating a sealing task.
type generateParams struct {
	timestamp uint64         // The timestamp for the sealing task
	forceTime bool           // Flag whether the given timestamp is immutable or not
	coinbase  common.Address // The fee recipient address for including transaction
}
   764  
// prepareWork constructs the sealing task according to the given parameters,
// either based on the last chain head or specified parent. In this function
// the pending transactions are not filled yet, only the empty task returned.
// In zone context with state processing enabled it returns a fully prepared
// environment (state, uncles); otherwise a bare environment holding only the
// header.
func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*environment, error) {
	w.mu.RLock()
	defer w.mu.RUnlock()
	nodeCtx := common.NodeLocation.Context()

	// Find the parent block for sealing task
	parent := block
	// Sanity check the timestamp correctness, recap the timestamp
	// to parent+1 if the mutation is allowed.
	timestamp := genParams.timestamp
	if parent.Time() >= timestamp {
		if genParams.forceTime {
			return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time(), timestamp)
		}
		timestamp = parent.Time() + 1
	}
	// Construct the sealing block header, set the extra field if it's allowed
	num := parent.Number()
	header := types.EmptyHeader()
	header.SetParentHash(block.Header().Hash())
	header.SetNumber(big.NewInt(int64(num.Uint64()) + 1))
	header.SetTime(timestamp)

	// Only calculate entropy if the parent is not the genesis block
	if parent.Hash() != w.hc.config.GenesisHash {
		_, order, err := w.engine.CalcOrder(parent.Header())
		if err != nil {
			return nil, err
		}
		// Set the parent delta S prior to sending to sub
		if nodeCtx != common.PRIME_CTX {
			if order < nodeCtx {
				// Parent is coincident with a dominant chain: delta restarts at zero
				header.SetParentDeltaS(big.NewInt(0), nodeCtx)
			} else {
				header.SetParentDeltaS(w.engine.DeltaLogS(parent.Header()), nodeCtx)
			}
		}
		header.SetParentEntropy(w.engine.TotalLogS(parent.Header()))
	}

	// Only zone should calculate state
	if nodeCtx == common.ZONE_CTX && w.hc.ProcessingState() {
		header.SetExtra(w.extra)
		header.SetBaseFee(misc.CalcBaseFee(w.chainConfig, parent.Header()))
		if w.isRunning() {
			// A running miner must have a coinbase configured to receive fees
			if w.coinbase.Equal(common.ZeroAddr) {
				log.Error("Refusing to mine without etherbase")
				return nil, errors.New("refusing to mine without etherbase")
			}
			header.SetCoinbase(w.coinbase)
		}

		// Run the consensus preparation with the default or customized consensus engine.
		if err := w.engine.Prepare(w.hc, header, block.Header()); err != nil {
			log.Error("Failed to prepare header for sealing", "err", err)
			return nil, err
		}
		env, err := w.makeEnv(parent, header, w.coinbase)
		if err != nil {
			log.Error("Failed to create sealing context", "err", err)
			return nil, err
		}
		// Accumulate the uncles for the sealing work, capped at two uncles.
		commitUncles := func(blocks map[common.Hash]*types.Block) {
			for hash, uncle := range blocks {
				env.uncleMu.RLock()
				if len(env.uncles) == 2 {
					env.uncleMu.RUnlock()
					break
				}
				env.uncleMu.RUnlock()
				if err := w.commitUncle(env, uncle.Header()); err != nil {
					log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
				} else {
					log.Debug("Committing new uncle to block", "hash", hash)
				}
			}
		}
		w.uncleMu.RLock()
		// Prefer to locally generated uncle
		commitUncles(w.localUncles)
		commitUncles(w.remoteUncles)
		w.uncleMu.RUnlock()
		return env, nil
	} else {
		// Non-zone contexts (or zones not processing state) only need the header
		return &environment{header: header}, nil
	}

}
   857  
   858  // fillTransactions retrieves the pending transactions from the txpool and fills them
   859  // into the given sealing block. The transaction selection and ordering strategy can
   860  // be customized with the plugin in the future.
   861  func (w *worker) fillTransactions(interrupt *int32, env *environment, block *types.Block) {
   862  	// Split the pending transactions into locals and remotes
   863  	// Fill the block with all available pending transactions.
   864  	etxSet := rawdb.ReadEtxSet(w.hc.bc.db, block.Hash(), block.NumberU64())
   865  	if etxSet == nil {
   866  		return
   867  	}
   868  	etxSet.Update(types.Transactions{}, block.NumberU64()+1) // Prune any expired ETXs
   869  	pending, err := w.txPool.TxPoolPending(true, etxSet)
   870  	if err != nil {
   871  		return
   872  	}
   873  	if len(pending) > 0 {
   874  		txs := types.NewTransactionsByPriceAndNonce(env.signer, pending, env.header.BaseFee(), true)
   875  		if w.commitTransactions(env, txs, interrupt) {
   876  			return
   877  		}
   878  	}
   879  }
   880  
// adjustGasLimit sets the gas limit on the sealing header, computed from the
// parent block's header and the configured gas ceiling. The interrupt and env
// parameters beyond the header are currently unused here.
func (w *worker) adjustGasLimit(interrupt *int32, env *environment, parent *types.Block) {
	env.header.SetGasLimit(CalcGasLimit(parent.Header(), w.config.GasCeil))
}
   887  
   888  // ComputeManifestHash given a header computes the manifest hash for the header
   889  // and stores it in the database
   890  func (w *worker) ComputeManifestHash(header *types.Header) common.Hash {
   891  	manifest := rawdb.ReadManifest(w.workerDb, header.Hash())
   892  	if manifest == nil {
   893  		nodeCtx := common.NodeLocation.Context()
   894  		// Compute and set manifest hash
   895  		manifest = types.BlockManifest{}
   896  		if nodeCtx == common.PRIME_CTX {
   897  			// Nothing to do for prime chain
   898  			manifest = types.BlockManifest{}
   899  		} else if w.engine.IsDomCoincident(w.hc, header) {
   900  			manifest = types.BlockManifest{header.Hash()}
   901  		} else {
   902  			parentManifest := rawdb.ReadManifest(w.workerDb, header.ParentHash())
   903  			manifest = append(parentManifest, header.Hash())
   904  		}
   905  		// write the manifest into the disk
   906  		rawdb.WriteManifest(w.workerDb, header.Hash(), manifest)
   907  	}
   908  	manifestHash := types.DeriveSha(manifest, trie.NewStackTrie(nil))
   909  
   910  	return manifestHash
   911  }
   912  
// FinalizeAssemble runs the engine's FinalizeAndAssemble to produce a block,
// then, when this node processes state, fills in the manifest hash (derived
// from the parent header) and — in zone context — the ETX rollup hash, and
// caches the pending block body for later retrieval.
func (w *worker) FinalizeAssemble(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Block, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.Block, error) {
	nodeCtx := common.NodeLocation.Context()
	block, err := w.engine.FinalizeAndAssemble(chain, header, state, txs, uncles, etxs, subManifest, receipts)
	if err != nil {
		return nil, err
	}

	// Manifest hash is computed over the parent, not the new header
	manifestHash := w.ComputeManifestHash(parent.Header())

	if w.hc.ProcessingState() {
		block.Header().SetManifestHash(manifestHash)
		if nodeCtx == common.ZONE_CTX {
			// Compute and set etx rollup hash
			var etxRollup types.Transactions
			if w.engine.IsDomCoincident(w.hc, parent.Header()) {
				// A dom-coincident parent restarts the rollup with its own ETXs
				etxRollup = parent.ExtTransactions()
			} else {
				// Otherwise collect the accumulated rollup and append the parent's ETXs
				etxRollup, err = w.hc.CollectEtxRollup(parent)
				if err != nil {
					return nil, err
				}
				etxRollup = append(etxRollup, parent.ExtTransactions()...)
			}
			etxRollupHash := types.DeriveSha(etxRollup, trie.NewStackTrie(nil))
			block.Header().SetEtxRollupHash(etxRollupHash)
		}

		// Cache the body so it can be recovered when the sealed header comes back
		w.AddPendingBlockBody(block.Header(), block.Body())
	}

	return block, nil
}
   945  
// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
// Note the assumption is held that the mutation is allowed to the passed env, do
// the deep copy first.
// The update parameter is currently unused. Returns an error only when block
// assembly fails; if the worker is not running this is a no-op.
func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		// Create a local environment copy, avoid the data race with snapshot state.
		// (This deliberately shadows the env parameter.)
		env := env.copy(w.hc.ProcessingState())
		parent := w.hc.GetBlock(env.header.ParentHash(), env.header.NumberU64()-1)
		block, err := w.FinalizeAssemble(w.hc, env.header, parent, env.state, env.txs, env.unclelist(), env.etxs, env.subManifest, env.receipts)
		if err != nil {
			return err
		}
		// Adopt the assembled block's header, which FinalizeAssemble may have
		// augmented (manifest / etx rollup hashes).
		env.header = block.Header()
		select {
		case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
			env.uncleMu.RLock()
			log.Info("Commit new sealing work", "number", block.Number(), "sealhash", block.Header().SealHash(),
				"uncles", len(env.uncles), "txs", env.tcount, "etxs", len(block.ExtTransactions()),
				"gas", block.GasUsed(), "fees", totalFees(block, env.receipts),
				"elapsed", common.PrettyDuration(time.Since(start)))
			env.uncleMu.RUnlock()
		case <-w.exitCh:
			// Worker is shutting down; drop the task instead of blocking forever
			log.Info("worker has exited")
		}

	}

	return nil
}
   979  
   980  // GetPendingBlockBodyKey takes a header and hashes all the Roots together
   981  // and returns the key to be used for the pendingBlockBodyCache.
   982  func (w *worker) getPendingBlockBodyKey(header *types.Header) common.Hash {
   983  	return types.RlpHash([]interface{}{
   984  		header.UncleHash(),
   985  		header.TxHash(),
   986  		header.EtxHash(),
   987  	})
   988  }
   989  
   990  // AddPendingBlockBody adds an entry in the lru cache for the given pendingBodyKey
   991  // maps it to body.
   992  func (w *worker) AddPendingBlockBody(header *types.Header, body *types.Body) {
   993  	w.pendingBlockBody.ContainsOrAdd(w.getPendingBlockBodyKey(header), body)
   994  }
   995  
   996  // GetPendingBlockBody gets the block body associated with the given header.
   997  func (w *worker) GetPendingBlockBody(header *types.Header) *types.Body {
   998  	key := w.getPendingBlockBodyKey(header)
   999  	body, ok := w.pendingBlockBody.Get(key)
  1000  	if ok {
  1001  		return body.(*types.Body)
  1002  	}
  1003  	log.Warn("pending block body not found for header: ", key)
  1004  	return nil
  1005  }
  1006  
// SubscribeAsyncPendingHeader registers ch on the async pending header feed.
// The subscription is tracked by the worker's scope, so it is torn down when
// the scope closes.
func (w *worker) SubscribeAsyncPendingHeader(ch chan *types.Header) event.Subscription {
	return w.scope.Track(w.asyncPhFeed.Subscribe(ch))
}
  1010  
  1011  // copyReceipts makes a deep copy of the given receipts.
  1012  func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
  1013  	result := make([]*types.Receipt, len(receipts))
  1014  	for i, l := range receipts {
  1015  		cpy := *l
  1016  		result[i] = &cpy
  1017  	}
  1018  	return result
  1019  }
  1020  
  1021  // totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order.
  1022  func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float {
  1023  	feesWei := new(big.Int)
  1024  	for i, tx := range block.Transactions() {
  1025  		minerFee, _ := tx.EffectiveGasTip(block.BaseFee())
  1026  		feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee))
  1027  	}
  1028  	return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))
  1029  }
  1030  
  1031  func (w *worker) CurrentInfo(header *types.Header) bool {
  1032  	if w.headerPrints.Contains(header.Hash()) {
  1033  		return false
  1034  	}
  1035  
  1036  	w.headerPrints.Add(header.Hash(), nil)
  1037  	return header.NumberU64()+c_startingPrintLimit > w.hc.CurrentHeader().NumberU64()
  1038  }