github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/miner/worker.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package miner
    18  
    19  import (
    20  	"bytes"
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	mapset "github.com/deckarep/golang-set"
    29  
    30  	"github.com/scroll-tech/go-ethereum/common"
    31  	"github.com/scroll-tech/go-ethereum/consensus"
    32  	"github.com/scroll-tech/go-ethereum/consensus/misc"
    33  	"github.com/scroll-tech/go-ethereum/core"
    34  	"github.com/scroll-tech/go-ethereum/core/rawdb"
    35  	"github.com/scroll-tech/go-ethereum/core/state"
    36  	"github.com/scroll-tech/go-ethereum/core/types"
    37  	"github.com/scroll-tech/go-ethereum/event"
    38  	"github.com/scroll-tech/go-ethereum/log"
    39  	"github.com/scroll-tech/go-ethereum/metrics"
    40  	"github.com/scroll-tech/go-ethereum/params"
    41  	"github.com/scroll-tech/go-ethereum/rollup/circuitcapacitychecker"
    42  	"github.com/scroll-tech/go-ethereum/trie"
    43  )
    44  
    45  const (
    46  	// resultQueueSize is the size of channel listening to sealing result.
    47  	resultQueueSize = 10
    48  
    49  	// txChanSize is the size of channel listening to NewTxsEvent.
    50  	// The number is referenced from the size of tx pool.
    51  	txChanSize = 4096
    52  
    53  	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    54  	chainHeadChanSize = 10
    55  
    56  	// chainSideChanSize is the size of channel listening to ChainSideEvent.
    57  	chainSideChanSize = 10
    58  
    59  	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
    60  	resubmitAdjustChanSize = 10
    61  
    62  	// miningLogAtDepth is the number of confirmations before logging successful mining.
    63  	miningLogAtDepth = 7
    64  
    65  	// minRecommitInterval is the minimal time interval to recreate the mining block with
    66  	// any newly arrived transactions.
    67  	minRecommitInterval = 1 * time.Second
    68  
    69  	// maxRecommitInterval is the maximum time interval to recreate the mining block with
    70  	// any newly arrived transactions.
    71  	maxRecommitInterval = 15 * time.Second
    72  
    73  	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
    74  	// resubmitting interval.
    75  	intervalAdjustRatio = 0.1
    76  
    77  	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
    78  	// increasing upper limit or decreasing lower limit so that the limit can be reached.
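        	// The bias equals 2e8 nanoseconds, i.e. 200ms.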
    79  	intervalAdjustBias = 200 * 1000.0 * 1000.0
    80  
    81  	// staleThreshold is the maximum depth of the acceptable stale block.
    82  	staleThreshold = 7
    83  )
    84  
    85  var (
    86  	// Metrics for the skipped txs
    87  	l1TxGasLimitExceededCounter       = metrics.NewRegisteredCounter("miner/skipped_txs/l1/gas_limit_exceeded", nil)
    88  	l1TxRowConsumptionOverflowCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/row_consumption_overflow", nil)
    89  	l2TxRowConsumptionOverflowCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l2/row_consumption_overflow", nil)
    90  	l1TxCccUnknownErrCounter          = metrics.NewRegisteredCounter("miner/skipped_txs/l1/ccc_unknown_err", nil)
    91  	l2TxCccUnknownErrCounter          = metrics.NewRegisteredCounter("miner/skipped_txs/l2/ccc_unknown_err", nil)
    92  	l1TxStrangeErrCounter             = metrics.NewRegisteredCounter("miner/skipped_txs/l1/strange_err", nil)
    93  )
    94  
    95  // environment is the worker's current environment and holds all of the current state information.
    96  type environment struct {
    97  	signer types.Signer
    98  
    99  	state     *state.StateDB     // apply state changes here
   100  	ancestors mapset.Set         // ancestor set (used for checking uncle parent validity)
   101  	family    mapset.Set         // family set (used for checking uncle invalidity)
   102  	uncles    mapset.Set         // uncle set
   103  	tcount    int                // tx count in cycle
   104  	blockSize common.StorageSize // approximate size of tx payload in bytes
   105  	l1TxCount int                // l1 msg count in cycle
   106  	gasPool   *core.GasPool      // available gas used to pack transactions
   107  
   108  	header   *types.Header
   109  	txs      []*types.Transaction
   110  	receipts []*types.Receipt
   111  
   112  	// circuit capacity check related fields
   113  	traceEnv       *core.TraceEnv        // env for tracing
   114  	accRows        *types.RowConsumption // accumulated row consumption for a block
   115  	nextL1MsgIndex uint64                // next L1 queue index to be processed
   116  }
   117  
   118  // task contains all information for consensus engine sealing and result submitting.
   119  type task struct {
   120  	receipts       []*types.Receipt
   121  	state          *state.StateDB
   122  	block          *types.Block
   123  	createdAt      time.Time
   124  	accRows        *types.RowConsumption // accumulated row consumption in the circuit side
   125  	nextL1MsgIndex uint64                // next L1 queue index to be processed
   126  }
   127  
   128  const (
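        	// Interrupt signals read by commitTransactions: a new chain head aborts
        	// and discards the in-flight work, while a resubmit submits the partially
        	// filled block to the consensus engine.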
   129  	commitInterruptNone int32 = iota
   130  	commitInterruptNewHead
   131  	commitInterruptResubmit
   132  )
   133  
   134  // newWorkReq represents a request for new sealing work, submitted along with its interrupt notifier.
   135  type newWorkReq struct {
   136  	interrupt *int32
   137  	noempty   bool
   138  	timestamp int64
   139  }
   140  
   141  // intervalAdjust represents a resubmitting interval adjustment.
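        // The ratio scales the adjustment: when inc is true, newWorkLoop raises the
        // recommit interval towards recommit/ratio; otherwise it lowers the interval
        // towards the user-specified minimum.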
   142  type intervalAdjust struct {
   143  	ratio float64
   144  	inc   bool
   145  }
   146  
   147  // worker is the main object which takes care of submitting new work to the consensus engine
   148  // and gathering the sealing result.
   149  type worker struct {
   150  	config      *Config
   151  	chainConfig *params.ChainConfig
   152  	engine      consensus.Engine
   153  	eth         Backend
   154  	chain       *core.BlockChain
   155  
   156  	// Feeds
   157  	pendingLogsFeed event.Feed
   158  
   159  	// Subscriptions
   160  	mux          *event.TypeMux
   161  	txsCh        chan core.NewTxsEvent
   162  	txsSub       event.Subscription
   163  	chainHeadCh  chan core.ChainHeadEvent
   164  	chainHeadSub event.Subscription
   165  	chainSideCh  chan core.ChainSideEvent
   166  	chainSideSub event.Subscription
   167  	l1MsgsCh     chan core.NewL1MsgsEvent
   168  	l1MsgsSub    event.Subscription
   169  
   170  	// Channels
   171  	newWorkCh          chan *newWorkReq
   172  	taskCh             chan *task
   173  	resultCh           chan *types.Block
   174  	startCh            chan struct{}
   175  	exitCh             chan struct{}
   176  	resubmitIntervalCh chan time.Duration
   177  	resubmitAdjustCh   chan *intervalAdjust
   178  
   179  	wg sync.WaitGroup
   180  
   181  	current      *environment                 // An environment for current running cycle.
   182  	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
   183  	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
   184  	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.
   185  
   186  	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
   187  	coinbase common.Address
   188  	extra    []byte
   189  
   190  	pendingMu    sync.RWMutex
   191  	pendingTasks map[common.Hash]*task
   192  
   193  	snapshotMu       sync.RWMutex // The lock used to protect the snapshots below
   194  	snapshotBlock    *types.Block
   195  	snapshotReceipts types.Receipts
   196  	snapshotState    *state.StateDB
   197  
   198  	// atomic status counters
   199  	running   int32 // The indicator whether the consensus engine is running or not.
   200  	newTxs    int32 // New arrival transaction count since last sealing work submitting.
   201  	newL1Msgs int32 // New arrival L1 message count since last sealing work submitting.
   202  
   203  	// noempty is the flag used to control whether the pre-seal empty block
   204  	// feature is enabled. The default value is 0 (pre-sealing is enabled by default).
   205  	// But in some special scenarios, e.g. when the consensus engine seals blocks
   206  	// instantaneously, this feature would add empty blocks to the canonical chain
   207  	// non-stop, without any real transaction being included.
   208  	noempty uint32
   209  
   210  	// External functions
   211  	isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.
   212  
   213  	circuitCapacityChecker *circuitcapacitychecker.CircuitCapacityChecker
   214  
   215  	// Test hooks
   216  	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
   217  	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
   218  	fullTaskHook func()                             // Method to call before pushing the full sealing task.
   219  	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
   220  }
   221  
   222  func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker {
   223  	worker := &worker{
   224  		config:                 config,
   225  		chainConfig:            chainConfig,
   226  		engine:                 engine,
   227  		eth:                    eth,
   228  		mux:                    mux,
   229  		chain:                  eth.BlockChain(),
   230  		isLocalBlock:           isLocalBlock,
   231  		localUncles:            make(map[common.Hash]*types.Block),
   232  		remoteUncles:           make(map[common.Hash]*types.Block),
   233  		unconfirmed:            newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
   234  		pendingTasks:           make(map[common.Hash]*task),
   235  		txsCh:                  make(chan core.NewTxsEvent, txChanSize),
   236  		l1MsgsCh:               make(chan core.NewL1MsgsEvent, txChanSize),
   237  		chainHeadCh:            make(chan core.ChainHeadEvent, chainHeadChanSize),
   238  		chainSideCh:            make(chan core.ChainSideEvent, chainSideChanSize),
   239  		newWorkCh:              make(chan *newWorkReq),
   240  		taskCh:                 make(chan *task),
   241  		resultCh:               make(chan *types.Block, resultQueueSize),
   242  		exitCh:                 make(chan struct{}),
   243  		startCh:                make(chan struct{}, 1),
   244  		resubmitIntervalCh:     make(chan time.Duration),
   245  		resubmitAdjustCh:       make(chan *intervalAdjust, resubmitAdjustChanSize),
   246  		circuitCapacityChecker: circuitcapacitychecker.NewCircuitCapacityChecker(true),
   247  	}
   248  	log.Info("created new worker", "CircuitCapacityChecker ID", worker.circuitCapacityChecker.ID)
   249  
   250  	// Subscribe NewTxsEvent for tx pool
   251  	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
   252  
   253  	// Subscribe NewL1MsgsEvent for sync service
   254  	if s := eth.SyncService(); s != nil {
   255  		worker.l1MsgsSub = s.SubscribeNewL1MsgsEvent(worker.l1MsgsCh)
   256  	} else {
   257  		// create an empty subscription so that the tests won't fail
   258  		worker.l1MsgsSub = event.NewSubscription(func(quit <-chan struct{}) error {
   259  			<-quit
   260  			return nil
   261  		})
   262  	}
   263  
   264  	// Subscribe events for blockchain
   265  	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
   266  	worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)
   267  
   268  	// Sanitize recommit interval if the user-specified one is too short.
   269  	recommit := worker.config.Recommit
   270  	if recommit < minRecommitInterval {
   271  		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
   272  		recommit = minRecommitInterval
   273  	}
   274  
   275  	worker.wg.Add(4)
   276  	go worker.mainLoop()
   277  	go worker.newWorkLoop(recommit)
   278  	go worker.resultLoop()
   279  	go worker.taskLoop()
   280  
   281  	// Submit first work to initialize pending state.
   282  	if init {
   283  		worker.startCh <- struct{}{}
   284  	}
   285  	return worker
   286  }
   287  
   288  // getCCC returns a pointer to this worker's CCC instance.
   289  // Only used in tests.
   290  func (w *worker) getCCC() *circuitcapacitychecker.CircuitCapacityChecker {
   291  	return w.circuitCapacityChecker
   292  }
   293  
   294  // setEtherbase sets the etherbase used to initialize the block coinbase field.
   295  func (w *worker) setEtherbase(addr common.Address) {
   296  	w.mu.Lock()
   297  	defer w.mu.Unlock()
   298  	w.coinbase = addr
   299  }
   300  
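        // setGasCeil sets the gas ceiling used when committing new mining work.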
   301  func (w *worker) setGasCeil(ceil uint64) {
   302  	w.mu.Lock()
   303  	defer w.mu.Unlock()
   304  	w.config.GasCeil = ceil
   305  }
   306  
   307  // setExtra sets the content used to initialize the block extra field.
   308  func (w *worker) setExtra(extra []byte) {
   309  	w.mu.Lock()
   310  	defer w.mu.Unlock()
   311  	w.extra = extra
   312  }
   313  
   314  // setRecommitInterval updates the interval for miner sealing work recommitting.
   315  func (w *worker) setRecommitInterval(interval time.Duration) {
   316  	w.resubmitIntervalCh <- interval
   317  }
   318  
   319  // disablePreseal disables pre-sealing mining feature
   320  func (w *worker) disablePreseal() {
   321  	atomic.StoreUint32(&w.noempty, 1)
   322  }
   323  
   324  // enablePreseal enables pre-sealing mining feature
   325  func (w *worker) enablePreseal() {
   326  	atomic.StoreUint32(&w.noempty, 0)
   327  }
   328  
   329  // pending returns the pending state and corresponding block.
   330  func (w *worker) pending() (*types.Block, *state.StateDB) {
   331  	// return a snapshot to avoid contention on the current mining state
   332  	w.snapshotMu.RLock()
   333  	defer w.snapshotMu.RUnlock()
   334  	if w.snapshotState == nil {
   335  		return nil, nil
   336  	}
   337  	return w.snapshotBlock, w.snapshotState.Copy()
   338  }
   339  
   340  // pendingBlock returns pending block.
   341  func (w *worker) pendingBlock() *types.Block {
   342  	// return a snapshot to avoid contention on the current mining state
   343  	w.snapshotMu.RLock()
   344  	defer w.snapshotMu.RUnlock()
   345  	return w.snapshotBlock
   346  }
   347  
   348  // pendingBlockAndReceipts returns pending block and corresponding receipts.
   349  func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
   350  	// return a snapshot to avoid contention on the current mining state
   351  	w.snapshotMu.RLock()
   352  	defer w.snapshotMu.RUnlock()
   353  	return w.snapshotBlock, w.snapshotReceipts
   354  }
   355  
   356  // start sets the running status as 1 and triggers new work submitting.
   357  func (w *worker) start() {
   358  	atomic.StoreInt32(&w.running, 1)
   359  	w.startCh <- struct{}{}
   360  }
   361  
   362  // stop sets the running status as 0.
   363  func (w *worker) stop() {
   364  	atomic.StoreInt32(&w.running, 0)
   365  }
   366  
   367  // isRunning returns an indicator whether the worker is running or not.
   368  func (w *worker) isRunning() bool {
   369  	return atomic.LoadInt32(&w.running) == 1
   370  }
   371  
   372  // close terminates all background threads maintained by the worker.
   373  // Note the worker does not support being closed multiple times.
   374  func (w *worker) close() {
   375  	atomic.StoreInt32(&w.running, 0)
   376  	close(w.exitCh)
   377  	w.wg.Wait()
   378  }
   379  
   380  // recalcRecommit recalculates the resubmitting interval upon feedback.
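        // The result is an exponential moving average: it keeps (1 - intervalAdjustRatio)
        // of the previous interval and moves by intervalAdjustRatio towards the target
        // (offset by intervalAdjustBias), capped at maxRecommitInterval when increasing
        // and floored at minRecommit when decreasing. For example, with prev = 2s,
        // target = 10s and inc = true: next = 0.9*2e9 + 0.1*(10e9 + 2e8) = 2.82e9 ns ≈ 2.82s.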
   381  func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
   382  	var (
   383  		prevF = float64(prev.Nanoseconds())
   384  		next  float64
   385  	)
   386  	if inc {
   387  		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
   388  		max := float64(maxRecommitInterval.Nanoseconds())
   389  		if next > max {
   390  			next = max
   391  		}
   392  	} else {
   393  		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
   394  		min := float64(minRecommit.Nanoseconds())
   395  		if next < min {
   396  			next = min
   397  		}
   398  	}
   399  	return time.Duration(int64(next))
   400  }
   401  
   402  // newWorkLoop is a standalone goroutine to submit new mining work upon received events.
   403  func (w *worker) newWorkLoop(recommit time.Duration) {
   404  	defer w.wg.Done()
   405  	var (
   406  		interrupt   *int32
   407  		minRecommit = recommit // minimal resubmit interval specified by user.
   408  		timestamp   int64      // timestamp for each round of mining.
   409  	)
   410  
   411  	timer := time.NewTimer(0)
   412  	defer timer.Stop()
   413  	<-timer.C // discard the initial tick
   414  
   415  	// commit aborts the in-flight transaction execution with the given signal and resubmits a new one.
   416  	commit := func(noempty bool, s int32) {
   417  		if interrupt != nil {
   418  			atomic.StoreInt32(interrupt, s)
   419  		}
   420  		interrupt = new(int32)
   421  		select {
   422  		case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}:
   423  		case <-w.exitCh:
   424  			return
   425  		}
   426  		timer.Reset(recommit)
   427  		atomic.StoreInt32(&w.newTxs, 0)
   428  		atomic.StoreInt32(&w.newL1Msgs, 0)
   429  	}
   430  	// clearPending cleans the stale pending tasks.
   431  	clearPending := func(number uint64) {
   432  		w.pendingMu.Lock()
   433  		for h, t := range w.pendingTasks {
   434  			if t.block.NumberU64()+staleThreshold <= number {
   435  				delete(w.pendingTasks, h)
   436  			}
   437  		}
   438  		w.pendingMu.Unlock()
   439  	}
   440  
   441  	for {
   442  		select {
   443  		case <-w.startCh:
   444  			clearPending(w.chain.CurrentBlock().NumberU64())
   445  			timestamp = time.Now().Unix()
   446  			commit(false, commitInterruptNewHead)
   447  
   448  		case head := <-w.chainHeadCh:
   449  			clearPending(head.Block.NumberU64())
   450  			timestamp = time.Now().Unix()
   451  			commit(true, commitInterruptNewHead)
   452  
   453  		case <-timer.C:
   454  			// If mining is running resubmit a new work cycle periodically to pull in
   455  			// higher priced transactions. Disable this overhead for pending blocks.
   456  			if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
   457  				// Short circuit if no new transaction arrives.
   458  				if atomic.LoadInt32(&w.newTxs) == 0 && atomic.LoadInt32(&w.newL1Msgs) == 0 {
   459  					timer.Reset(recommit)
   460  					continue
   461  				}
   462  				commit(true, commitInterruptResubmit)
   463  			}
   464  
   465  		case interval := <-w.resubmitIntervalCh:
   466  			// Adjust resubmit interval explicitly by user.
   467  			if interval < minRecommitInterval {
   468  				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
   469  				interval = minRecommitInterval
   470  			}
   471  			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
   472  			minRecommit, recommit = interval, interval
   473  
   474  			if w.resubmitHook != nil {
   475  				w.resubmitHook(minRecommit, recommit)
   476  			}
   477  
   478  		case adjust := <-w.resubmitAdjustCh:
   479  			// Adjust resubmit interval by feedback.
   480  			if adjust.inc {
   481  				before := recommit
   482  				target := float64(recommit.Nanoseconds()) / adjust.ratio
   483  				recommit = recalcRecommit(minRecommit, recommit, target, true)
   484  				log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
   485  			} else {
   486  				before := recommit
   487  				recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false)
   488  				log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
   489  			}
   490  
   491  			if w.resubmitHook != nil {
   492  				w.resubmitHook(minRecommit, recommit)
   493  			}
   494  
   495  		case <-w.exitCh:
   496  			return
   497  		}
   498  	}
   499  }
   500  
   501  // mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
   502  func (w *worker) mainLoop() {
   503  	defer w.wg.Done()
   504  	defer w.txsSub.Unsubscribe()
   505  	defer w.l1MsgsSub.Unsubscribe()
   506  	defer w.chainHeadSub.Unsubscribe()
   507  	defer w.chainSideSub.Unsubscribe()
   508  	defer func() {
   509  		if w.current != nil && w.current.state != nil {
   510  			w.current.state.StopPrefetcher()
   511  		}
   512  	}()
   513  
   514  	for {
   515  		select {
   516  		case req := <-w.newWorkCh:
   517  			w.commitNewWork(req.interrupt, req.noempty, req.timestamp)
   518  			// new block created.
   519  
   520  		case ev := <-w.chainSideCh:
   521  			// Short circuit for duplicate side blocks
   522  			if _, exist := w.localUncles[ev.Block.Hash()]; exist {
   523  				continue
   524  			}
   525  			if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
   526  				continue
   527  			}
   528  			// Add side block to possible uncle block set depending on the author.
   529  			if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) {
   530  				w.localUncles[ev.Block.Hash()] = ev.Block
   531  			} else {
   532  				w.remoteUncles[ev.Block.Hash()] = ev.Block
   533  			}
   534  			// If our mining block contains fewer than 2 uncle blocks,
   535  			// add the new uncle block if valid and regenerate a mining block.
   536  			if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
   537  				start := time.Now()
   538  				if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
   539  					var uncles []*types.Header
   540  					w.current.uncles.Each(func(item interface{}) bool {
   541  						hash, ok := item.(common.Hash)
   542  						if !ok {
   543  							return false
   544  						}
   545  						uncle, exist := w.localUncles[hash]
   546  						if !exist {
   547  							uncle, exist = w.remoteUncles[hash]
   548  						}
   549  						if !exist {
   550  							return false
   551  						}
   552  						uncles = append(uncles, uncle.Header())
   553  						return false
   554  					})
   555  					w.commit(uncles, nil, true, start)
   556  				}
   557  			}
   558  
   559  		case ev := <-w.txsCh:
   560  			// Apply transactions to the pending state if we're not mining.
   561  			//
   562  			// Note that the transactions received may not be continuous with the
   563  			// transactions already included in the current mining block; such
   564  			// transactions will be automatically eliminated.
   565  			if !w.isRunning() && w.current != nil {
   566  				// If block is already full, abort
   567  				if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
   568  					continue
   569  				}
   570  				w.mu.RLock()
   571  				coinbase := w.coinbase
   572  				w.mu.RUnlock()
   573  
   574  				txs := make(map[common.Address]types.Transactions)
   575  				for _, tx := range ev.Txs {
   576  					acc, _ := types.Sender(w.current.signer, tx)
   577  					txs[acc] = append(txs[acc], tx)
   578  				}
   579  				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
   580  				tcount := w.current.tcount
   581  				w.commitTransactions(txset, coinbase, nil)
   582  				// Only update the snapshot if any new transactions were added
   583  				// to the pending block
   584  				if tcount != w.current.tcount {
   585  					w.updateSnapshot()
   586  				}
   587  			} else {
   588  				// Special case: if the consensus engine is 0-period clique (dev mode),
   589  				// submit mining work here, since all empty submissions will be rejected
   590  				// by clique. Of course, advance sealing (empty submission) is disabled.
   591  				if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
   592  					w.commitNewWork(nil, true, time.Now().Unix())
   593  				}
   594  			}
   595  			atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))
   596  
   597  		case ev := <-w.l1MsgsCh:
   598  			atomic.AddInt32(&w.newL1Msgs, int32(ev.Count))
   599  
   600  		// System stopped
   601  		case <-w.exitCh:
   602  			return
   603  		case <-w.txsSub.Err():
   604  			return
   605  		case <-w.l1MsgsSub.Err():
   606  			return
   607  		case <-w.chainHeadSub.Err():
   608  			return
   609  		case <-w.chainSideSub.Err():
   610  			return
   611  		}
   612  	}
   613  }
   614  
   615  // taskLoop is a standalone goroutine to fetch sealing tasks from the generator and
   616  // push them to the consensus engine.
   617  func (w *worker) taskLoop() {
   618  	defer w.wg.Done()
   619  	var (
   620  		stopCh chan struct{}
   621  		prev   common.Hash
   622  	)
   623  
   624  	// interrupt aborts the in-flight sealing task.
   625  	interrupt := func() {
   626  		if stopCh != nil {
   627  			close(stopCh)
   628  			stopCh = nil
   629  		}
   630  	}
   631  	for {
   632  		select {
   633  		case task := <-w.taskCh:
   634  			if w.newTaskHook != nil {
   635  				w.newTaskHook(task)
   636  			}
   637  			// Reject duplicate sealing work due to resubmitting.
   638  			sealHash := w.engine.SealHash(task.block.Header())
   639  			if sealHash == prev {
   640  				continue
   641  			}
   642  			// Interrupt previous sealing operation
   643  			interrupt()
   644  			stopCh, prev = make(chan struct{}), sealHash
   645  
   646  			if w.skipSealHook != nil && w.skipSealHook(task) {
   647  				continue
   648  			}
   649  			w.pendingMu.Lock()
   650  			w.pendingTasks[sealHash] = task
   651  			w.pendingMu.Unlock()
   652  
   653  			if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
   654  				log.Warn("Block sealing failed", "err", err)
   655  				w.pendingMu.Lock()
   656  				delete(w.pendingTasks, sealHash)
   657  				w.pendingMu.Unlock()
   658  			}
   659  		case <-w.exitCh:
   660  			interrupt()
   661  			return
   662  		}
   663  	}
   664  }
   665  
   666  // resultLoop is a standalone goroutine to handle sealing result submitting
   667  // and flush the related data to the database.
   668  func (w *worker) resultLoop() {
   669  	defer w.wg.Done()
   670  	for {
   671  		select {
   672  		case block := <-w.resultCh:
   673  			// Short circuit when receiving empty result.
   674  			if block == nil {
   675  				continue
   676  			}
   677  			// Short circuit when receiving duplicate result caused by resubmitting.
   678  			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
   679  				continue
   680  			}
   681  			var (
   682  				sealhash = w.engine.SealHash(block.Header())
   683  				hash     = block.Hash()
   684  			)
   685  
   686  			w.pendingMu.RLock()
   687  			task, exist := w.pendingTasks[sealhash]
   688  			w.pendingMu.RUnlock()
   689  			if !exist {
   690  				log.Error("Block found but no related pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
   691  				continue
   692  			}
   693  			// Different blocks could share the same sealhash, so deep copy here to prevent a write-write conflict.
   694  			var (
   695  				receipts = make([]*types.Receipt, len(task.receipts))
   696  				logs     []*types.Log
   697  			)
   698  			for i, taskReceipt := range task.receipts {
   699  				receipt := new(types.Receipt)
   700  				receipts[i] = receipt
   701  				*receipt = *taskReceipt
   702  
   703  				// add block location fields
   704  				receipt.BlockHash = hash
   705  				receipt.BlockNumber = block.Number()
   706  				receipt.TransactionIndex = uint(i)
   707  
   708  				// Update the block hash in all logs since it is now available, and was not
   709  				// when the receipts/logs of the individual transactions were created.
   710  				receipt.Logs = make([]*types.Log, len(taskReceipt.Logs))
   711  				for i, taskLog := range taskReceipt.Logs {
   712  					log := new(types.Log)
   713  					receipt.Logs[i] = log
   714  					*log = *taskLog
   715  					log.BlockHash = hash
   716  				}
   717  				logs = append(logs, receipt.Logs...)
   718  			}
   719  			// It's possible that we've stored the L1 queue index for this block previously;
   720  			// in this case, do not overwrite it.
   721  			if index := rawdb.ReadFirstQueueIndexNotInL2Block(w.eth.ChainDb(), hash); index == nil {
   722  				// Store first L1 queue index not processed by this block.
   723  				// Note: This accounts for both included and skipped messages. This
   724  				// way, if a block only skips messages, we won't reprocess the same
   725  				// messages from the next block.
   726  				log.Trace(
   727  					"Worker WriteFirstQueueIndexNotInL2Block",
   728  					"number", block.Number(),
   729  					"hash", hash.String(),
   730  					"task.nextL1MsgIndex", task.nextL1MsgIndex,
   731  				)
   732  				rawdb.WriteFirstQueueIndexNotInL2Block(w.eth.ChainDb(), hash, task.nextL1MsgIndex)
   733  			} else {
   734  				log.Trace(
   735  					"Worker WriteFirstQueueIndexNotInL2Block: not overwriting existing index",
   736  					"number", block.Number(),
   737  					"hash", hash.String(),
   738  					"index", *index,
   739  					"task.nextL1MsgIndex", task.nextL1MsgIndex,
   740  				)
   741  			}
   742  			// Store circuit row consumption.
   743  			log.Trace(
   744  				"Worker write block row consumption",
   745  				"id", w.circuitCapacityChecker.ID,
   746  				"number", block.Number(),
   747  				"hash", hash.String(),
   748  				"accRows", task.accRows,
   749  			)
   750  			rawdb.WriteBlockRowConsumption(w.eth.ChainDb(), hash, task.accRows)
   751  			// Commit block and state to database.
   752  			_, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true)
   753  			if err != nil {
   754  				log.Error("Failed writing block to chain", "err", err)
   755  				continue
   756  			}
   757  			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
   758  				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))
   759  
   760  			// Broadcast the block and announce chain insertion event
   761  			w.mux.Post(core.NewMinedBlockEvent{Block: block})
   762  
   763  			// Insert the block into the set of pending ones, waiting for confirmations
   764  			w.unconfirmed.Insert(block.NumberU64(), block.Hash())
   765  
   766  		case <-w.exitCh:
   767  			return
   768  		}
   769  	}
   770  }
   771  
   772  // makeCurrent creates a new environment for the current cycle.
   773  func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
   774  	// Retrieve the parent state to execute on top and start a prefetcher for
   775  	// the miner to speed block sealing up a bit
   776  	state, err := w.chain.StateAt(parent.Root())
   777  	if err != nil {
   778  		return err
   779  	}
   780  
   781  	// Don't commit the state during tracing for the circuit capacity checker, otherwise we cannot revert.
   782  	// Even if we don't commit the state, the `refund` value will still be correct, as explained in `commitTransaction`.
   783  	commitStateAfterApply := false
   784  	traceEnv, err := core.CreateTraceEnv(w.chainConfig, w.chain, w.engine, w.eth.ChainDb(), state, parent,
   785  		// new block with a placeholder tx, for traceEnv's ExecutionResults length & TxStorageTraces length
   786  		types.NewBlockWithHeader(header).WithBody([]*types.Transaction{types.NewTx(&types.LegacyTx{})}, nil),
   787  		commitStateAfterApply)
   788  	if err != nil {
   789  		return err
   790  	}
   791  
   792  	state.StartPrefetcher("miner")
   793  
   794  	env := &environment{
   795  		signer:    types.MakeSigner(w.chainConfig, header.Number),
   796  		state:     state,
   797  		ancestors: mapset.NewSet(),
   798  		family:    mapset.NewSet(),
   799  		uncles:    mapset.NewSet(),
   800  		header:    header,
   801  		traceEnv:  traceEnv,
   802  		accRows:   nil,
   803  	}
   804  	// when 08 is processed ancestors contain 07 (quick block)
   805  	for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
   806  		for _, uncle := range ancestor.Uncles() {
   807  			env.family.Add(uncle.Hash())
   808  		}
   809  		env.family.Add(ancestor.Hash())
   810  		env.ancestors.Add(ancestor.Hash())
   811  	}
   812  	// Keep track of transactions which return errors so they can be removed
   813  	env.tcount = 0
   814  	env.blockSize = 0
   815  	env.l1TxCount = 0
   816  
   817  	// find next L1 message queue index
   818  	nextQueueIndex := rawdb.ReadFirstQueueIndexNotInL2Block(w.eth.ChainDb(), parent.Hash())
   819  	if nextQueueIndex == nil {
   820  		// the parent must have been processed before we start a new mining job.
   821  		log.Crit("Failed to read last L1 message in L2 block", "parent.Hash()", parent.Hash().String())
   822  	}
   823  	env.nextL1MsgIndex = *nextQueueIndex
   824  
   825  	// Swap out the old work with the new one, terminating any leftover prefetcher
   826  	// processes in the meantime and starting a new one.
   827  	if w.current != nil && w.current.state != nil {
   828  		w.current.state.StopPrefetcher()
   829  	}
   830  	w.current = env
   831  	return nil
   832  }
   833  
   834  // commitUncle adds the given block to the uncle block set; it returns an error if the addition fails.
   835  func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
   836  	hash := uncle.Hash()
   837  	if env.uncles.Contains(hash) {
   838  		return errors.New("uncle not unique")
   839  	}
   840  	if env.header.ParentHash == uncle.ParentHash {
   841  		return errors.New("uncle is sibling")
   842  	}
   843  	if !env.ancestors.Contains(uncle.ParentHash) {
   844  		return errors.New("uncle's parent unknown")
   845  	}
   846  	if env.family.Contains(hash) {
   847  		return errors.New("uncle already included")
   848  	}
   849  	env.uncles.Add(uncle.Hash())
   850  	return nil
   851  }
   852  
   853  // updateSnapshot updates pending snapshot block and state.
   854  // Note this function assumes the current variable is thread safe.
   855  func (w *worker) updateSnapshot() {
   856  	w.snapshotMu.Lock()
   857  	defer w.snapshotMu.Unlock()
   858  
   859  	var uncles []*types.Header
   860  	w.current.uncles.Each(func(item interface{}) bool {
   861  		hash, ok := item.(common.Hash)
   862  		if !ok {
   863  			return false
   864  		}
   865  		uncle, exist := w.localUncles[hash]
   866  		if !exist {
   867  			uncle, exist = w.remoteUncles[hash]
   868  		}
   869  		if !exist {
   870  			return false
   871  		}
   872  		uncles = append(uncles, uncle.Header())
   873  		return false
   874  	})
   875  
   876  	w.snapshotBlock = types.NewBlock(
   877  		w.current.header,
   878  		w.current.txs,
   879  		uncles,
   880  		w.current.receipts,
   881  		trie.NewStackTrie(nil),
   882  	)
   883  	w.snapshotReceipts = copyReceipts(w.current.receipts)
   884  	w.snapshotState = w.current.state.Copy()
   885  }
   886  
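        // commitTransaction applies a single transaction on top of the current state.
        // When the worker is actively mining, the transaction is first traced and checked
        // against the circuit capacity checker before being applied. It returns the
        // receipt logs, the block trace generated for the CCC check (if any), and an
        // error if the transaction could not be applied.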
   887  func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, *types.BlockTrace, error) {
   888  	var accRows *types.RowConsumption
   889  	var traces *types.BlockTrace
   890  	var err error
   891  
   892  	// do not do CCC checks on follower nodes
   893  	if w.isRunning() {
   894  		// do gas limit check up-front and do not run CCC if it fails
   895  		if w.current.gasPool.Gas() < tx.Gas() {
   896  			return nil, nil, core.ErrGasLimitReached
   897  		}
   898  
   899  		snap := w.current.state.Snapshot()
   900  
   901  		log.Trace(
   902  			"Worker apply ccc for tx",
   903  			"id", w.circuitCapacityChecker.ID,
   904  			"txHash", tx.Hash().Hex(),
   905  		)
   906  
   907  		// 1. We have to check circuit capacity before `core.ApplyTransaction`,
   908  		// because if the tx executes successfully but overflows the circuit capacity, it is inconvenient to revert.
   909  		// 2. Even if we don't commit to the state during the tracing (which means `clearJournalAndRefund` is not called during the tracing),
   910  		// the `refund` value will still be correct, because:
   911  		// 2.1 when we start handling the first tx, `state.refund` is 0 by default,
   912  		// 2.2 after tracing, the state is either committed in `core.ApplyTransaction` or reverted, so `state.refund` is cleared,
   913  		// 2.3 when we start handling the following txs, `state.refund` starts from 0 again
   914  		traces, err = w.current.traceEnv.GetBlockTrace(
   915  			types.NewBlockWithHeader(w.current.header).WithBody([]*types.Transaction{tx}, nil),
   916  		)
   917  		// `w.current.traceEnv.State` & `w.current.state` share the same pointer to the state, so we only need to revert `w.current.state`.
   918  		// Revert to the snapshot so that `core.ApplyMessage` can be called again (both `traceEnv.GetBlockTrace` & `core.ApplyTransaction` call `core.ApplyMessage`).
   919  		w.current.state.RevertToSnapshot(snap)
   920  		if err != nil {
   921  			return nil, nil, err
   922  		}
   923  		accRows, err = w.circuitCapacityChecker.ApplyTransaction(traces)
   924  		if err != nil {
   925  			return nil, traces, err
   926  		}
   927  		log.Trace(
   928  			"Worker apply ccc for tx result",
   929  			"id", w.circuitCapacityChecker.ID,
   930  			"txHash", tx.Hash().Hex(),
   931  			"accRows", accRows,
   932  		)
   933  	}
   934  
   935  	// create new snapshot for `core.ApplyTransaction`
   936  	snap := w.current.state.Snapshot()
   937  
   938  	receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
   939  	if err != nil {
   940  		w.current.state.RevertToSnapshot(snap)
   941  
   942  		if accRows != nil {
   943  			// At this point, we have called CCC but the transaction failed in `ApplyTransaction`.
   944  			// If we skip this tx and continue to pack more, the next tx will likely fail with
   945  			// `circuitcapacitychecker.ErrUnknown`. However, at this point we cannot decide whether
   946  			// we should seal the block or skip the tx and continue, so we simply return the error.
   947  			log.Error(
   948  				"GetBlockTrace passed but ApplyTransaction failed, ccc is left in inconsistent state",
   949  				"blockNumber", w.current.header.Number,
   950  				"txHash", tx.Hash().Hex(),
   951  				"err", err,
   952  			)
   953  		}
   954  
   955  		return nil, traces, err
   956  	}
   957  
   958  	w.current.txs = append(w.current.txs, tx)
   959  	w.current.receipts = append(w.current.receipts, receipt)
   960  	w.current.accRows = accRows
   961  
   962  	return receipt.Logs, traces, nil
   963  }
   964  
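        // commitTransactions applies the given transactions on top of the current state.
        // It returns two flags: the first is true if the work was interrupted by a new
        // chain head (or if no environment is set) and the caller should abort; the
        // second reports whether the circuit capacity limit was reached.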
   965  func (w *worker) commitTransactions(txs types.OrderedTransactionSet, coinbase common.Address, interrupt *int32) (bool, bool) {
   966  	var circuitCapacityReached bool
   967  
   968  	// Short circuit if current is nil
   969  	if w.current == nil {
   970  		return true, circuitCapacityReached
   971  	}
   972  
   973  	gasLimit := w.current.header.GasLimit
   974  	if w.current.gasPool == nil {
   975  		w.current.gasPool = new(core.GasPool).AddGas(gasLimit)
   976  	}
   977  
   978  	var coalescedLogs []*types.Log
   979  
   980  loop:
   981  	for {
   982  		// In the following three cases, we will interrupt the execution of the transaction.
   983  		// (1) new head block event arrival, the interrupt signal is 1
   984  		// (2) worker start or restart, the interrupt signal is 1
   985  		// (3) the worker recreates the mining block with any newly arrived transactions, the interrupt signal is 2.
   986  		// For the first two cases, the semi-finished work will be discarded.
   987  		// For the third case, the semi-finished work will be submitted to the consensus engine.
   988  		if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
   989  			// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
   990  			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
   991  				ratio := float64(gasLimit-w.current.gasPool.Gas()) / float64(gasLimit)
   992  				if ratio < 0.1 {
   993  					ratio = 0.1
   994  				}
   995  				w.resubmitAdjustCh <- &intervalAdjust{
   996  					ratio: ratio,
   997  					inc:   true,
   998  				}
   999  			}
  1000  			return atomic.LoadInt32(interrupt) == commitInterruptNewHead, circuitCapacityReached
  1001  		}
  1002  		// If we don't have enough gas for any further transactions then we're done
  1003  		if w.current.gasPool.Gas() < params.TxGas {
  1004  			log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
  1005  			break
  1006  		}
  1007  		// Retrieve the next transaction and abort if all done
  1008  		tx := txs.Peek()
  1009  		if tx == nil {
  1010  			break
  1011  		}
  1012  		// If we have collected enough transactions then we're done
  1013  		// Originally we only limited the L2 tx count, but now we strictly limit the total number of txs.
  1014  		if !w.chainConfig.Scroll.IsValidTxCount(w.current.tcount + 1) {
  1015  			log.Trace("Transaction count limit reached", "have", w.current.tcount, "want", w.chainConfig.Scroll.MaxTxPerBlock)
  1016  			break
  1017  		}
  1018  		if tx.IsL1MessageTx() && tx.AsL1MessageTx().QueueIndex != w.current.nextL1MsgIndex {
  1019  			log.Error(
  1020  				"Unexpected L1 message queue index in worker",
  1021  				"expected", w.current.nextL1MsgIndex,
  1022  				"got", tx.AsL1MessageTx().QueueIndex,
  1023  			)
  1024  			break
  1025  		}
  1026  		if !tx.IsL1MessageTx() && !w.chainConfig.Scroll.IsValidBlockSize(w.current.blockSize+tx.Size()) {
  1027  			log.Trace("Block size limit reached", "have", w.current.blockSize, "want", w.chainConfig.Scroll.MaxTxPayloadBytesPerBlock, "tx", tx.Size())
  1028  			txs.Pop() // skip transactions from this account
  1029  			continue
  1030  		}
  1031  		// Error may be ignored here. The error has already been checked
  1032  		// during transaction acceptance in the transaction pool.
  1033  		//
  1034  		// We use the eip155 signer regardless of the current hf.
  1035  		from, _ := types.Sender(w.current.signer, tx)
  1036  		// Check whether the tx is replay protected. If we're not in the EIP155 hf
  1037  		// phase, start ignoring the sender until we do.
  1038  		if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) {
  1039  			log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
  1040  
  1041  			txs.Pop()
  1042  			continue
  1043  		}
  1044  		// Start executing the transaction
  1045  		w.current.state.Prepare(tx.Hash(), w.current.tcount)
  1046  
  1047  		logs, traces, err := w.commitTransaction(tx, coinbase)
  1048  		switch {
  1049  		case errors.Is(err, core.ErrGasLimitReached) && tx.IsL1MessageTx():
  1050  			// If this block already contains some L1 messages,
  1051  			// terminate here and try again in the next block.
  1052  			if w.current.l1TxCount > 0 {
  1053  				break loop
  1054  			}
  1055  			// A single L1 message leads to out-of-gas. Skip it.
  1056  			queueIndex := tx.AsL1MessageTx().QueueIndex
  1057  			log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", w.current.header.Number, "reason", "gas limit exceeded")
  1058  			w.current.nextL1MsgIndex = queueIndex + 1
  1059  			txs.Shift()
  1060  			if w.config.StoreSkippedTxTraces {
  1061  				rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, traces, "gas limit exceeded", w.current.header.Number.Uint64(), nil)
  1062  			} else {
  1063  				rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, "gas limit exceeded", w.current.header.Number.Uint64(), nil)
  1064  			}
  1065  			l1TxGasLimitExceededCounter.Inc(1)
  1066  
  1067  		case errors.Is(err, core.ErrGasLimitReached):
  1068  			// Pop the current out-of-gas transaction without shifting in the next from the account
  1069  			log.Trace("Gas limit exceeded for current block", "sender", from)
  1070  			txs.Pop()
  1071  
  1072  		case errors.Is(err, core.ErrNonceTooLow):
  1073  			// New head notification data race between the transaction pool and miner, shift
  1074  			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
  1075  			txs.Shift()
  1076  
  1077  		case errors.Is(err, core.ErrNonceTooHigh):
  1078  			// Reorg notification data race between the transaction pool and miner, skip account
  1079  			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
  1080  			txs.Pop()
  1081  
  1082  		case errors.Is(err, nil):
  1083  			// Everything ok, collect the logs and shift in the next transaction from the same account
  1084  			coalescedLogs = append(coalescedLogs, logs...)
  1085  			w.current.tcount++
  1086  			txs.Shift()
  1087  
  1088  			if tx.IsL1MessageTx() {
  1089  				queueIndex := tx.AsL1MessageTx().QueueIndex
  1090  				log.Debug("Including L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String())
  1091  				w.current.l1TxCount++
  1092  				w.current.nextL1MsgIndex = queueIndex + 1
  1093  			} else {
  1094  				// only consider block size limit for L2 transactions
  1095  				w.current.blockSize += tx.Size()
  1096  			}
  1097  
  1098  		case errors.Is(err, core.ErrTxTypeNotSupported):
  1099  			// Pop the unsupported transaction without shifting in the next from the account
  1100  			log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
  1101  			txs.Pop()
  1102  
  1103  		// Circuit capacity check
  1104  		case errors.Is(err, circuitcapacitychecker.ErrBlockRowConsumptionOverflow):
  1105  			if w.current.tcount >= 1 {
  1106  				// 1. Circuit capacity limit reached in a block, and it's not the first tx:
  1107  				// don't pop or shift, just quit the loop immediately;
  1108  				// it might still be possible to add some "smaller" txs,
  1109  				// but it's a trade-off between tracing overhead & block usage rate
  1110  				log.Trace("Circuit capacity limit reached in a block", "acc_rows", w.current.accRows, "tx", tx.Hash().String())
  1111  				log.Info("Skipping message", "tx", tx.Hash().String(), "block", w.current.header.Number, "reason", "accumulated row consumption overflow")
  1112  				circuitCapacityReached = true
  1113  				break loop
  1114  			} else {
  1115  				// 2. Circuit capacity limit reached in a block, and it's the first tx: skip the tx
  1116  				log.Trace("Circuit capacity limit reached for a single tx", "tx", tx.Hash().String())
  1117  
  1118  				if tx.IsL1MessageTx() {
  1119  					// Skip the L1 message transaction, but
  1120  					// shift to the next tx from the account, because we shouldn't skip all remaining txs from the same account
  1121  					txs.Shift()
  1122  
  1123  					queueIndex := tx.AsL1MessageTx().QueueIndex
  1124  					log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", w.current.header.Number, "reason", "first tx row consumption overflow")
  1125  					w.current.nextL1MsgIndex = queueIndex + 1
  1126  					l1TxRowConsumptionOverflowCounter.Inc(1)
  1127  				} else {
  1128  					// Skip L2 transaction and all other transactions from the same sender account
  1129  					log.Info("Skipping L2 message", "tx", tx.Hash().String(), "block", w.current.header.Number, "reason", "first tx row consumption overflow")
  1130  					txs.Pop()
  1131  					w.eth.TxPool().RemoveTx(tx.Hash(), true)
  1132  					l2TxRowConsumptionOverflowCounter.Inc(1)
  1133  				}
  1134  
  1135  				// Reset ccc so that we can process other transactions for this block
  1136  				w.circuitCapacityChecker.Reset()
  1137  				log.Trace("Worker reset ccc", "id", w.circuitCapacityChecker.ID)
  1138  				circuitCapacityReached = false
  1139  
  1140  				// Store skipped transaction in local db
  1141  				if w.config.StoreSkippedTxTraces {
  1142  					rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, traces, "row consumption overflow", w.current.header.Number.Uint64(), nil)
  1143  				} else {
  1144  					rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, "row consumption overflow", w.current.header.Number.Uint64(), nil)
  1145  				}
  1146  			}
  1147  
  1148  		case (errors.Is(err, circuitcapacitychecker.ErrUnknown) && tx.IsL1MessageTx()):
  1149  			// Circuit capacity check: unknown circuit capacity checker error for L1MessageTx,
  1150  			// shift to the next tx from the account, because we shouldn't skip all remaining txs from the same account
  1151  			queueIndex := tx.AsL1MessageTx().QueueIndex
  1152  			log.Trace("Unknown circuit capacity checker error for L1MessageTx", "tx", tx.Hash().String(), "queueIndex", queueIndex)
  1153  			log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", w.current.header.Number, "reason", "unknown row consumption error")
  1154  			w.current.nextL1MsgIndex = queueIndex + 1
  1155  			// TODO: propagate more info about the error from CCC
  1156  			if w.config.StoreSkippedTxTraces {
  1157  				rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, traces, "unknown circuit capacity checker error", w.current.header.Number.Uint64(), nil)
  1158  			} else {
  1159  				rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, "unknown circuit capacity checker error", w.current.header.Number.Uint64(), nil)
  1160  			}
  1161  			l1TxCccUnknownErrCounter.Inc(1)
  1162  
  1163  			// Normally we would do `txs.Shift()` here.
  1164  			// However, after `ErrUnknown`, ccc might remain in an
  1165  			// inconsistent state, so we cannot pack more transactions.
  1166  			circuitCapacityReached = true
  1167  			w.checkCurrentTxNumWithCCC(w.current.tcount)
  1168  			break loop
  1169  
  1170  		case (errors.Is(err, circuitcapacitychecker.ErrUnknown) && !tx.IsL1MessageTx()):
  1171  			// Circuit capacity check: unknown circuit capacity checker error for L2MessageTx, skip the account
  1172  			log.Trace("Unknown circuit capacity checker error for L2MessageTx", "tx", tx.Hash().String())
  1173  			log.Info("Skipping L2 message", "tx", tx.Hash().String(), "block", w.current.header.Number, "reason", "unknown row consumption error")
  1174  			// TODO: propagate more info about the error from CCC
  1175  			if w.config.StoreSkippedTxTraces {
  1176  				rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, traces, "unknown circuit capacity checker error", w.current.header.Number.Uint64(), nil)
  1177  			} else {
  1178  				rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, "unknown circuit capacity checker error", w.current.header.Number.Uint64(), nil)
  1179  			}
  1180  			l2TxCccUnknownErrCounter.Inc(1)
  1181  
  1182  			// Normally we would do `txs.Pop()` here.
  1183  			// However, after `ErrUnknown`, ccc might remain in an
  1184  			// inconsistent state, so we cannot pack more transactions.
  1185  			w.eth.TxPool().RemoveTx(tx.Hash(), true)
  1186  			circuitCapacityReached = true
  1187  			w.checkCurrentTxNumWithCCC(w.current.tcount)
  1188  			break loop
  1189  
  1190  		default:
  1191  			// Strange error, discard the transaction and get the next in line (note, the
  1192  			// nonce-too-high clause will prevent us from executing in vain).
  1193  			log.Debug("Transaction failed, account skipped", "hash", tx.Hash().String(), "err", err)
  1194  			if tx.IsL1MessageTx() {
  1195  				queueIndex := tx.AsL1MessageTx().QueueIndex
  1196  				log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", w.current.header.Number, "reason", "strange error", "err", err)
  1197  				w.current.nextL1MsgIndex = queueIndex + 1
  1198  				if w.config.StoreSkippedTxTraces {
  1199  					rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, traces, fmt.Sprintf("strange error: %v", err), w.current.header.Number.Uint64(), nil)
  1200  				} else {
  1201  					rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, fmt.Sprintf("strange error: %v", err), w.current.header.Number.Uint64(), nil)
  1202  				}
  1203  				l1TxStrangeErrCounter.Inc(1)
  1204  			}
  1205  			txs.Shift()
  1206  		}
  1207  	}
  1208  
  1209  	if !w.isRunning() && len(coalescedLogs) > 0 {
  1210  		// We don't push the pendingLogsEvent while we are mining. The reason is that
  1211  		// when we are mining, the worker will regenerate a mining block every 3 seconds.
  1212  		// To avoid pushing the same pending logs repeatedly, we disable pending log pushing while mining.
  1213  
  1214  		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
  1215  		// logs by filling in the block hash when the block was mined by the local miner. This can
  1216  		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
  1217  		cpy := make([]*types.Log, len(coalescedLogs))
  1218  		for i, l := range coalescedLogs {
  1219  			cpy[i] = new(types.Log)
  1220  			*cpy[i] = *l
  1221  		}
  1222  		w.pendingLogsFeed.Send(cpy)
  1223  	}
  1224  	// Notify resubmit loop to decrease resubmitting interval if current interval is larger
  1225  	// than the user-specified one.
  1226  	if interrupt != nil {
  1227  		w.resubmitAdjustCh <- &intervalAdjust{inc: false}
  1228  	}
  1229  	return false, circuitCapacityReached
  1230  }
  1231  
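        // checkCurrentTxNumWithCCC cross-checks the number of transactions packed by the
        // miner against the transaction count tracked inside the circuit capacity
        // checker, logging an error if the two diverge.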
  1232  func (w *worker) checkCurrentTxNumWithCCC(expected int) {
  1233  	match, got, err := w.circuitCapacityChecker.CheckTxNum(expected)
  1234  	if err != nil {
  1235  		log.Error("failed to CheckTxNum in ccc", "err", err)
  1236  		return
  1237  	}
  1238  	if !match {
  1239  		log.Error("tx count in miner is different from CCC", "w.current.tcount", w.current.tcount, "got", got)
  1240  	}
  1241  }
  1242  
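        // collectPendingL1Messages reads up to NumL1MessagesPerBlock pending L1 messages
        // from the database, starting at queue index startIndex.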
  1243  func (w *worker) collectPendingL1Messages(startIndex uint64) []types.L1MessageTx {
  1244  	maxCount := w.chainConfig.Scroll.L1Config.NumL1MessagesPerBlock
  1245  	return rawdb.ReadL1MessagesFrom(w.eth.ChainDb(), startIndex, maxCount)
  1246  }
  1247  
  1248  // commitNewWork generates several new sealing tasks based on the parent block.
  1249  func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
  1250  	w.mu.RLock()
  1251  	defer w.mu.RUnlock()
  1252  
  1253  	tstart := time.Now()
  1254  	parent := w.chain.CurrentBlock()
  1255  	w.circuitCapacityChecker.Reset()
  1256  	log.Trace("Worker reset ccc", "id", w.circuitCapacityChecker.ID)
  1257  
  1258  	if parent.Time() >= uint64(timestamp) {
  1259  		timestamp = int64(parent.Time() + 1)
  1260  	}
  1261  	num := parent.Number()
  1262  	header := &types.Header{
  1263  		ParentHash: parent.Hash(),
  1264  		Number:     num.Add(num, common.Big1),
  1265  		GasLimit:   core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil),
  1266  		Extra:      w.extra,
  1267  		Time:       uint64(timestamp),
  1268  	}
  1269  	// Set baseFee and GasLimit if we are on an EIP-1559 chain
  1270  	if w.chainConfig.IsLondon(header.Number) {
  1271  		if w.chainConfig.Scroll.BaseFeeEnabled() {
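        			// Per EIP-1559, the base fee is unchanged when the parent block used exactly
        			// its gas target, and moves by at most 12.5% per block in proportion to how
        			// far parent gas usage deviated from that target.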
  1272  			header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header())
  1273  		} else {
  1274  			// When EIP-2718 or EIP-1559 is disabled, we do not set baseFeePerGas in the RPC response.
  1275  			// Setting BaseFee to nil here lets external SDKs compute l2geth's RLP encoding;
  1276  			// otherwise l2geth's BaseFee would not be known from the outside.
  1277  			header.BaseFee = nil
  1278  		}
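        		// At the London transition block, scale the parent gas limit by the
        		// elasticity multiplier, so the previous limit becomes the new gas target.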
  1279  		if !w.chainConfig.IsLondon(parent.Number()) {
  1280  			parentGasLimit := parent.GasLimit() * params.ElasticityMultiplier
  1281  			header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
  1282  		}
  1283  	}
  1284  	// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
  1285  	if w.isRunning() {
  1286  		if w.coinbase == (common.Address{}) {
  1287  			log.Error("Refusing to mine without etherbase")
  1288  			return
  1289  		}
  1290  		header.Coinbase = w.coinbase
  1291  	}
  1292  	if err := w.engine.Prepare(w.chain, header); err != nil {
  1293  		log.Error("Failed to prepare header for mining", "err", err)
  1294  		return
  1295  	}
  1296  	// If we care about the DAO hard-fork, check whether to override the extra-data or not
  1297  	if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil {
  1298  		// Check whether the block is among the fork extra-override range
  1299  		limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
  1300  		if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
  1301  			// Depending on whether we support or oppose the fork, override differently
  1302  			if w.chainConfig.DAOForkSupport {
  1303  				header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
  1304  			} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
  1305  				header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
  1306  			}
  1307  		}
  1308  	}
  1309  	// Could potentially happen if starting to mine in an odd state.
  1310  	err := w.makeCurrent(parent, header)
  1311  	if err != nil {
  1312  		log.Error("Failed to create mining context", "err", err)
  1313  		return
  1314  	}
  1315  	// Create the current work task and check any fork transitions needed
  1316  	env := w.current
  1317  	if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 {
  1318  		misc.ApplyDAOHardFork(env.state)
  1319  	}
  1320  	// Accumulate the uncles for the current block
  1321  	uncles := make([]*types.Header, 0, 2)
  1322  	commitUncles := func(blocks map[common.Hash]*types.Block) {
  1323  		// Clean up stale uncle blocks first
  1324  		for hash, uncle := range blocks {
  1325  			if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
  1326  				delete(blocks, hash)
  1327  			}
  1328  		}
  1329  		for hash, uncle := range blocks {
  1330  			if len(uncles) == 2 {
  1331  				break
  1332  			}
  1333  			if err := w.commitUncle(env, uncle.Header()); err != nil {
  1334  				log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
  1335  			} else {
  1336  				log.Debug("Committing new uncle to block", "hash", hash)
  1337  				uncles = append(uncles, uncle.Header())
  1338  			}
  1339  		}
  1340  	}
  1341  	// Prefer locally generated uncles
  1342  	commitUncles(w.localUncles)
  1343  	commitUncles(w.remoteUncles)
  1344  
  1345  	// Create an empty block based on a temporary copy of the state, so sealing
  1346  	// can start in advance without waiting for block execution to finish.
  1347  	if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
  1348  		w.commit(uncles, nil, false, tstart)
  1349  	}
  1350  	// Fetch pending L1 messages
  1351  	var l1Messages []types.L1MessageTx
  1352  	if w.chainConfig.Scroll.ShouldIncludeL1Messages() {
  1353  		l1Messages = w.collectPendingL1Messages(env.nextL1MsgIndex)
  1354  	}
  1355  	// Fill the block with all available pending transactions.
  1356  	pending := w.eth.TxPool().Pending(true)
  1357  	// Short circuit if there are no available pending transactions. But if empty
  1358  	// precommit is already disabled, keep going: no empty block was sealed above,
  1359  	// and a block is still needed to keep the network live.
  1360  	if len(pending) == 0 && len(l1Messages) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
  1361  		w.updateSnapshot()
  1362  		return
  1363  	}
  1364  	// Split the pending transactions into locals and remotes
  1365  	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
  1366  	for _, account := range w.eth.TxPool().Locals() {
  1367  		if txs := remoteTxs[account]; len(txs) > 0 {
  1368  			delete(remoteTxs, account)
  1369  			localTxs[account] = txs
  1370  		}
  1371  	}
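        	// Commit in priority order: L1 messages first, then local transactions, then
        	// remote ones. Later stages are skipped once the circuit capacity is reached.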
  1372  	var skipCommit, circuitCapacityReached bool
  1373  	if w.chainConfig.Scroll.ShouldIncludeL1Messages() && len(l1Messages) > 0 {
  1374  		log.Trace("Processing L1 messages for inclusion", "count", len(l1Messages))
  1375  		txs, err := types.NewL1MessagesByQueueIndex(l1Messages)
  1376  		if err != nil {
  1377  			log.Error("Failed to create L1 message set", "l1Messages", l1Messages, "err", err)
  1378  			return
  1379  		}
  1380  		skipCommit, circuitCapacityReached = w.commitTransactions(txs, w.coinbase, interrupt)
  1381  		if skipCommit {
  1382  			return
  1383  		}
  1384  	}
  1385  	if len(localTxs) > 0 && !circuitCapacityReached {
  1386  		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs, header.BaseFee)
  1387  		skipCommit, circuitCapacityReached = w.commitTransactions(txs, w.coinbase, interrupt)
  1388  		if skipCommit {
  1389  			return
  1390  		}
  1391  	}
  1392  	if len(remoteTxs) > 0 && !circuitCapacityReached {
  1393  		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs, header.BaseFee)
  1394  		// no need to capture `circuitCapacityReached` here: there are no further `commitTransactions`
  1395  		// calls after this one, so the assignment would have no effect (flagged by `ineffassign`)
  1396  		skipCommit, _ = w.commitTransactions(txs, w.coinbase, interrupt)
  1397  		if skipCommit {
  1398  			return
  1399  		}
  1400  	}
  1401  
  1402  	// do not produce empty blocks
  1403  	if w.current.tcount == 0 {
  1404  		return
  1405  	}
  1406  
  1407  	w.commit(uncles, w.fullTaskHook, true, tstart)
  1408  }
  1409  
  1410  // commit runs any post-transaction state modifications, assembles the final block
  1411  // and commits new work if consensus engine is running.
  1412  func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
  1413  	// set w.current.accRows for an empty (but non-genesis) block
  1414  	if (w.current.header.Number.Uint64() != 0) &&
  1415  		(w.current.accRows == nil || len(*w.current.accRows) == 0) && w.isRunning() {
  1416  		log.Trace(
  1417  			"Worker apply ccc for empty block",
  1418  			"id", w.circuitCapacityChecker.ID,
  1419  			"number", w.current.header.Number,
  1420  			"hash", w.current.header.Hash().String(),
  1421  		)
  1422  		traces, err := w.current.traceEnv.GetBlockTrace(types.NewBlockWithHeader(w.current.header))
  1423  		if err != nil {
  1424  			return err
  1425  		}
  1426  		// Truncate ExecutionResults & TxStorageTraces: their lengths were declared earlier
  1427  		// with a dummy tx, so they must be cleared again for an empty block.
  1428  		traces.ExecutionResults = traces.ExecutionResults[:0]
  1429  		traces.TxStorageTraces = traces.TxStorageTraces[:0]
  1430  		accRows, err := w.circuitCapacityChecker.ApplyBlock(traces)
  1431  		if err != nil {
  1432  			return err
  1433  		}
  1434  		log.Trace(
  1435  			"Worker apply ccc for empty block result",
  1436  			"id", w.circuitCapacityChecker.ID,
  1437  			"number", w.current.header.Number,
  1438  			"hash", w.current.header.Hash().String(),
  1439  			"accRows", accRows,
  1440  		)
  1441  		w.current.accRows = accRows
  1442  	}
  1443  
  1444  	// Deep copy receipts here to avoid interaction between different tasks.
  1445  	receipts := copyReceipts(w.current.receipts)
  1446  	s := w.current.state.Copy()
  1447  	block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts)
  1448  	if err != nil {
  1449  		return err
  1450  	}
  1451  	if w.isRunning() {
  1452  		if interval != nil {
  1453  			interval()
  1454  		}
  1455  		select {
  1456  		case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now(), accRows: w.current.accRows, nextL1MsgIndex: w.current.nextL1MsgIndex}:
  1457  			w.unconfirmed.Shift(block.NumberU64() - 1)
  1458  			log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
  1459  				"uncles", len(uncles), "txs", w.current.tcount,
  1460  				"gas", block.GasUsed(), "fees", totalFees(block, receipts),
  1461  				"elapsed", common.PrettyDuration(time.Since(start)))
  1462  
  1463  		case <-w.exitCh:
  1464  			log.Info("Worker has exited")
  1465  		}
  1466  	}
  1467  	if update {
  1468  		w.updateSnapshot()
  1469  	}
  1470  	return nil
  1471  }
  1472  
  1473  // copyReceipts makes a deep copy of the given receipts.
  1474  func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
  1475  	result := make([]*types.Receipt, len(receipts))
  1476  	for i, l := range receipts {
  1477  		cpy := *l
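        		// this copies the receipt struct itself; pointer-typed fields such as Logs
        		// still alias the originals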
  1478  		result[i] = &cpy
  1479  	}
  1480  	return result
  1481  }
  1482  
  1483  // postSideBlock fires a side chain event; it is only for use in testing.
  1484  func (w *worker) postSideBlock(event core.ChainSideEvent) {
  1485  	select {
  1486  	case w.chainSideCh <- event:
  1487  	case <-w.exitCh:
  1488  	}
  1489  }
  1490  
  1491  // totalFees computes the total consumed miner fees in ETH. Block transactions and receipts must be in the same order.
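        // For example, a transaction using 21,000 gas at an effective tip of 2 gwei
        // contributes 42,000 gwei (0.000042 ETH) to the total.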
  1492  func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float {
  1493  	feesWei := new(big.Int)
  1494  	for i, tx := range block.Transactions() {
  1495  		minerFee, _ := tx.EffectiveGasTip(block.BaseFee())
  1496  		feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee))
  1497  	}
  1498  	return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))
  1499  }