github.com/klaytn/klaytn@v1.12.1/work/worker.go (about)

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2015 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from miner/worker.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package work
    22  
    23  import (
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/klaytn/klaytn/blockchain"
    30  	"github.com/klaytn/klaytn/blockchain/state"
    31  	"github.com/klaytn/klaytn/blockchain/types"
    32  	"github.com/klaytn/klaytn/blockchain/vm"
    33  	"github.com/klaytn/klaytn/common"
    34  	"github.com/klaytn/klaytn/consensus"
    35  	"github.com/klaytn/klaytn/consensus/misc"
    36  	"github.com/klaytn/klaytn/event"
    37  	klaytnmetrics "github.com/klaytn/klaytn/metrics"
    38  	"github.com/klaytn/klaytn/params"
    39  	"github.com/klaytn/klaytn/reward"
    40  	"github.com/klaytn/klaytn/storage/database"
    41  	"github.com/rcrowley/go-metrics"
    42  )
    43  
const (
	// resultQueueSize is the size of the channel receiving sealing results from agents.
	resultQueueSize  = 10
	miningLogAtDepth = 5

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10
	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10
	// maxResendTxSize is the total number of transactions resent to peers per cycle
	// in order to prevent the txs from missing.
	maxResendTxSize = 1000
)
    58  
var (
	// Metrics for miner: counters/gauges for tx selection outcomes during block assembly.
	timeLimitReachedCounter = metrics.NewRegisteredCounter("miner/timelimitreached", nil)
	tooLongTxCounter        = metrics.NewRegisteredCounter("miner/toolongtx", nil)
	ResultChGauge           = metrics.NewRegisteredGauge("miner/resultch", nil)
	resentTxGauge           = metrics.NewRegisteredGauge("miner/tx/resend/gauge", nil)
	usedAllTxsCounter       = metrics.NewRegisteredCounter("miner/usedalltxs", nil)
	checkedTxsGauge         = metrics.NewRegisteredGauge("miner/checkedtxs", nil)
	tCountGauge             = metrics.NewRegisteredGauge("miner/tcount", nil)
	nonceTooLowTxsGauge     = metrics.NewRegisteredGauge("miner/nonce/low/txs", nil)
	nonceTooHighTxsGauge    = metrics.NewRegisteredGauge("miner/nonce/high/txs", nil)
	gasLimitReachedTxsGauge = metrics.NewRegisteredGauge("miner/limitreached/gas/txs", nil)
	strangeErrorTxsCounter  = metrics.NewRegisteredCounter("miner/strangeerror/txs", nil)

	// Timers for the main phases of block production (commit, execute, finalize).
	blockBaseFee              = metrics.NewRegisteredGauge("miner/block/mining/basefee", nil)
	blockMiningTimer          = klaytnmetrics.NewRegisteredHybridTimer("miner/block/mining/time", nil)
	blockMiningExecuteTxTimer = klaytnmetrics.NewRegisteredHybridTimer("miner/block/execute/time", nil)
	blockMiningCommitTxTimer  = klaytnmetrics.NewRegisteredHybridTimer("miner/block/commit/time", nil)
	blockMiningFinalizeTimer  = klaytnmetrics.NewRegisteredHybridTimer("miner/block/finalize/time", nil)

	// Trie access timers, split by account vs. storage and by operation.
	accountReadTimer   = klaytnmetrics.NewRegisteredHybridTimer("miner/block/account/reads", nil)
	accountHashTimer   = klaytnmetrics.NewRegisteredHybridTimer("miner/block/account/hashes", nil)
	accountUpdateTimer = klaytnmetrics.NewRegisteredHybridTimer("miner/block/account/updates", nil)
	accountCommitTimer = klaytnmetrics.NewRegisteredHybridTimer("miner/block/account/commits", nil)

	storageReadTimer   = klaytnmetrics.NewRegisteredHybridTimer("miner/block/storage/reads", nil)
	storageHashTimer   = klaytnmetrics.NewRegisteredHybridTimer("miner/block/storage/hashes", nil)
	storageUpdateTimer = klaytnmetrics.NewRegisteredHybridTimer("miner/block/storage/updates", nil)
	storageCommitTimer = klaytnmetrics.NewRegisteredHybridTimer("miner/block/storage/commits", nil)

	snapshotAccountReadTimer = metrics.NewRegisteredTimer("miner/snapshot/account/reads", nil)
	snapshotStorageReadTimer = metrics.NewRegisteredTimer("miner/snapshot/storage/reads", nil)
	snapshotCommitTimer      = metrics.NewRegisteredTimer("miner/snapshot/commits", nil)
	calcDeferredRewardTimer  = metrics.NewRegisteredTimer("reward/distribute/calcdeferredreward", nil)
)
    94  
// Agent can register themself with the worker
type Agent interface {
	// Work returns the channel on which the worker pushes new Tasks to the agent.
	Work() chan<- *Task
	// SetReturnCh sets the channel on which the agent delivers its Results back to the worker.
	SetReturnCh(chan<- *Result)
	// Stop halts the agent.
	Stop()
	// Start launches the agent.
	Start()
	// GetHashRate reports the agent's current hash rate.
	GetHashRate() int64
}
   103  
// Task is the workers current environment and holds
// all of the current state information
type Task struct {
	config *params.ChainConfig
	signer types.Signer // signer used to recover tx senders for this cycle

	stateMu sync.RWMutex   // protects state
	state   *state.StateDB // apply state changes here
	tcount  int            // tx count in cycle

	Block *types.Block // the new block

	header   *types.Header        // header under construction for the new block
	txs      []*types.Transaction // transactions committed so far in this cycle
	receipts []*types.Receipt     // receipts matching txs, index-aligned

	createdAt time.Time // when this task was created; set by NewTask
}
   122  
// Result pairs a sealed block with the Task that produced it; agents send
// Results back to the worker over its receive channel.
type Result struct {
	Task  *Task
	Block *types.Block
}
   127  
// worker is the main object which takes care of applying messages to the new state
type worker struct {
	config *params.ChainConfig
	engine consensus.Engine

	mu sync.Mutex // protects agents, extra, and start/stop transitions

	// update loop
	mux          *event.TypeMux
	txsCh        chan blockchain.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan blockchain.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan blockchain.ChainSideEvent
	chainSideSub event.Subscription
	wg           sync.WaitGroup

	agents map[Agent]struct{} // registered sealing agents
	recv   chan *Result       // sealing results delivered by agents

	backend Backend
	chain   BlockChain
	proc    blockchain.Validator
	chainDB database.DBManager

	extra []byte // extra data stamped into produced block headers

	currentMu  sync.Mutex // protects current
	current    *Task      // the work environment being assembled
	rewardbase common.Address

	// snapshot of the last assembled pending block/state, served to readers
	// while mining to avoid contention on currentMu.
	snapshotMu    sync.RWMutex
	snapshotBlock *types.Block
	snapshotState *state.StateDB

	// atomic status counters
	mining int32 // 1 while mining is enabled
	atWork int32 // number of tasks currently handed to agents

	nodetype common.ConnType
}
   169  
   170  func newWorker(config *params.ChainConfig, engine consensus.Engine, rewardbase common.Address, backend Backend, mux *event.TypeMux, nodetype common.ConnType, TxResendUseLegacy bool) *worker {
   171  	worker := &worker{
   172  		config:      config,
   173  		engine:      engine,
   174  		backend:     backend,
   175  		mux:         mux,
   176  		txsCh:       make(chan blockchain.NewTxsEvent, txChanSize),
   177  		chainHeadCh: make(chan blockchain.ChainHeadEvent, chainHeadChanSize),
   178  		chainSideCh: make(chan blockchain.ChainSideEvent, chainSideChanSize),
   179  		chainDB:     backend.ChainDB(),
   180  		recv:        make(chan *Result, resultQueueSize),
   181  		chain:       backend.BlockChain(),
   182  		proc:        backend.BlockChain().Validator(),
   183  		agents:      make(map[Agent]struct{}),
   184  		nodetype:    nodetype,
   185  		rewardbase:  rewardbase,
   186  	}
   187  
   188  	// Subscribe NewTxsEvent for tx pool
   189  	worker.txsSub = backend.TxPool().SubscribeNewTxsEvent(worker.txsCh)
   190  	// Subscribe events for blockchain
   191  	worker.chainHeadSub = backend.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
   192  	worker.chainSideSub = backend.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)
   193  	go worker.update()
   194  
   195  	go worker.wait(TxResendUseLegacy)
   196  	return worker
   197  }
   198  
   199  func (self *worker) setExtra(extra []byte) {
   200  	self.mu.Lock()
   201  	defer self.mu.Unlock()
   202  	self.extra = extra
   203  }
   204  
   205  func (self *worker) pending() (*types.Block, *state.StateDB) {
   206  	if atomic.LoadInt32(&self.mining) == 0 {
   207  		// return a snapshot to avoid contention on currentMu mutex
   208  		self.snapshotMu.RLock()
   209  		defer self.snapshotMu.RUnlock()
   210  		if self.snapshotState == nil {
   211  			return nil, nil
   212  		}
   213  		return self.snapshotBlock, self.snapshotState.Copy()
   214  	}
   215  
   216  	self.currentMu.Lock()
   217  	defer self.currentMu.Unlock()
   218  	self.current.stateMu.Lock()
   219  	defer self.current.stateMu.Unlock()
   220  	return self.current.Block, self.current.state.Copy()
   221  }
   222  
   223  func (self *worker) pendingBlock() *types.Block {
   224  	if atomic.LoadInt32(&self.mining) == 0 {
   225  		// return a snapshot to avoid contention on currentMu mutex
   226  		self.snapshotMu.RLock()
   227  		defer self.snapshotMu.RUnlock()
   228  		return self.snapshotBlock
   229  	}
   230  
   231  	self.currentMu.Lock()
   232  	defer self.currentMu.Unlock()
   233  	if self.current == nil {
   234  		return nil
   235  	}
   236  	return self.current.Block
   237  }
   238  
   239  func (self *worker) start() {
   240  	self.mu.Lock()
   241  	defer self.mu.Unlock()
   242  
   243  	atomic.StoreInt32(&self.mining, 1)
   244  
   245  	// istanbul BFT
   246  	if istanbul, ok := self.engine.(consensus.Istanbul); ok {
   247  		istanbul.Start(self.chain, self.chain.CurrentBlock, self.chain.HasBadBlock)
   248  	}
   249  
   250  	// spin up agents
   251  	for agent := range self.agents {
   252  		agent.Start()
   253  	}
   254  }
   255  
// stop halts mining: it waits for in-flight work, stops all agents and the
// Istanbul engine (if any), and clears the mining/atWork counters.
func (self *worker) stop() {
	// Wait for outstanding goroutines tracked by wg before taking the lock.
	self.wg.Wait()

	self.mu.Lock()
	defer self.mu.Unlock()
	// Only agents that were actually started (mining == 1) need stopping.
	if atomic.LoadInt32(&self.mining) == 1 {
		for agent := range self.agents {
			agent.Stop()
		}
	}

	// istanbul BFT
	if istanbul, ok := self.engine.(consensus.Istanbul); ok {
		istanbul.Stop()
	}

	// Reset status counters; atWork is zeroed because pending results are moot.
	atomic.StoreInt32(&self.mining, 0)
	atomic.StoreInt32(&self.atWork, 0)
}
   275  
   276  func (self *worker) register(agent Agent) {
   277  	self.mu.Lock()
   278  	defer self.mu.Unlock()
   279  	self.agents[agent] = struct{}{}
   280  	agent.SetReturnCh(self.recv)
   281  }
   282  
   283  func (self *worker) unregister(agent Agent) {
   284  	self.mu.Lock()
   285  	defer self.mu.Unlock()
   286  	delete(self.agents, agent)
   287  	agent.Stop()
   288  }
   289  
   290  func (self *worker) handleTxsCh(quitByErr chan bool) {
   291  	defer self.txsSub.Unsubscribe()
   292  
   293  	for {
   294  		select {
   295  		// Handle NewTxsEvent
   296  		case <-self.txsCh:
   297  			if atomic.LoadInt32(&self.mining) != 0 {
   298  				// If we're mining, but nothing is being processed, wake on new transactions
   299  				if self.config.Clique != nil && self.config.Clique.Period == 0 {
   300  					self.commitNewWork()
   301  				}
   302  			}
   303  
   304  		case <-quitByErr:
   305  			return
   306  		}
   307  	}
   308  }
   309  
// update is the worker's main event loop. It reacts to chain head events by
// committing new work, ignores side-chain events (Istanbul always yields a
// canonical chain), and shuts the tx handler goroutine down when any
// subscription errors out.
func (self *worker) update() {
	defer self.chainHeadSub.Unsubscribe()
	defer self.chainSideSub.Unsubscribe()

	// Tx events are handled in a separate goroutine; quitByErr is buffered
	// so the sends in the error cases below never block.
	quitByErr := make(chan bool, 1)
	go self.handleTxsCh(quitByErr)

	for {
		// A real event arrived, process interesting content
		select {
		// Handle ChainHeadEvent
		case <-self.chainHeadCh:
			// istanbul BFT
			if h, ok := self.engine.(consensus.Handler); ok {
				h.NewChainHead()
			}
			self.commitNewWork()

			// TODO-Klaytn-Issue264 If we are using istanbul BFT, then we always have a canonical chain.
			//         Later we may be able to refine below code.
			// Handle ChainSideEvent
		case <-self.chainSideCh:

			// System stopped: any subscription error tears the loop down and
			// signals the tx handler goroutine to exit as well.
		case <-self.txsSub.Err():
			quitByErr <- true
			return
		case <-self.chainHeadSub.Err():
			quitByErr <- true
			return
		case <-self.chainSideSub.Err():
			quitByErr <- true
			return
		}
	}
}
   346  
// wait consumes sealing results from agents. On consensus nodes it writes
// each sealed block to the chain, broadcasts it, and posts chain events; on
// non-consensus nodes it (optionally, legacy mode) resends a bounded batch of
// pending transactions so they are not lost.
func (self *worker) wait(TxResendUseLegacy bool) {
	for {
		mustCommitNewWork := true
		for result := range self.recv {
			atomic.AddInt32(&self.atWork, -1)
			ResultChGauge.Update(ResultChGauge.Value() - 1)
			if result == nil {
				continue
			}

			// TODO-Klaytn drop or missing tx
			if self.nodetype != common.CONSENSUSNODE {
				if !TxResendUseLegacy {
					continue
				}
				pending, err := self.backend.TxPool().Pending()
				if err != nil {
					logger.Error("Failed to fetch pending transactions", "err", err)
					continue
				}

				if len(pending) > 0 {
					accounts := len(pending)
					// Split the maxResendTxSize budget evenly across accounts,
					// resending at least one tx per account.
					resendTxSize := maxResendTxSize / accounts
					if resendTxSize == 0 {
						resendTxSize = 1
					}
					var resendTxs []*types.Transaction
					for _, sortedTxs := range pending {
						if len(sortedTxs) >= resendTxSize {
							resendTxs = append(resendTxs, sortedTxs[:resendTxSize]...)
						} else {
							resendTxs = append(resendTxs, sortedTxs...)
						}
					}
					if len(resendTxs) > 0 {
						resentTxGauge.Update(int64(len(resendTxs)))
						self.backend.ReBroadcastTxs(resendTxs)
					}
				}
				continue
			}

			block := result.Block
			work := result.Task

			// Update the block hash in all logs since it is now available and not when the
			// receipt/log of individual transactions were created.
			for _, r := range work.receipts {
				for _, l := range r.Logs {
					l.BlockHash = block.Hash()
				}
			}
			work.stateMu.Lock()
			for _, log := range work.state.Logs() {
				log.BlockHash = block.Hash()
			}

			start := time.Now()
			// NOTE: this deliberately shadows the loop variable `result` — from
			// here on `result` is the WriteBlockWithState status, not *Result.
			result, err := self.chain.WriteBlockWithState(block, work.receipts, work.state)
			work.stateMu.Unlock()
			if err != nil {
				if err == blockchain.ErrKnownBlock {
					logger.Debug("Tried to insert already known block", "num", block.NumberU64(), "hash", block.Hash().String())
				} else {
					logger.Error("Failed writing block to chain", "err", err)
				}
				continue
			}
			blockWriteTime := time.Since(start)

			// TODO-Klaytn-Issue264 If we are using istanbul BFT, then we always have a canonical chain.
			//         Later we may be able to refine below code.

			// check if canon block and write transactions
			if result.Status == blockchain.CanonStatTy {
				// implicit by posting ChainHeadEvent
				mustCommitNewWork = false
			}

			// Broadcast the block and announce chain insertion event
			self.mux.Post(blockchain.NewMinedBlockEvent{Block: block})

			var events []interface{}

			work.stateMu.RLock()
			logs := work.state.Logs()
			work.stateMu.RUnlock()

			events = append(events, blockchain.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
			if result.Status == blockchain.CanonStatTy {
				events = append(events, blockchain.ChainHeadEvent{Block: block})
			}

			// update governance CurrentSet if it is at an epoch block
			if err := self.engine.CreateSnapshot(self.chain, block.NumberU64(), block.Hash(), nil); err != nil {
				logger.Error("Failed to call snapshot", "err", err)
			}

			// update governance parameters
			if istanbul, ok := self.engine.(consensus.Istanbul); ok {
				if err := istanbul.UpdateParam(block.NumberU64()); err != nil {
					logger.Error("Failed to update governance parameters", "err", err)
				}
			}

			logger.Info("Successfully wrote mined block", "num", block.NumberU64(),
				"hash", block.Hash(), "txs", len(block.Transactions()), "elapsed", blockWriteTime)
			self.chain.PostChainEvents(events, logs)

			// TODO-Klaytn-Issue264 If we are using istanbul BFT, then we always have a canonical chain.
			//         Later we may be able to refine below code.
			if mustCommitNewWork {
				self.commitNewWork()
			}
		}
	}
}
   465  
   466  // push sends a new work task to currently live work agents.
   467  func (self *worker) push(work *Task) {
   468  	if atomic.LoadInt32(&self.mining) != 1 {
   469  		return
   470  	}
   471  	for agent := range self.agents {
   472  		atomic.AddInt32(&self.atWork, 1)
   473  		if ch := agent.Work(); ch != nil {
   474  			ch <- work
   475  		}
   476  	}
   477  }
   478  
   479  // makeCurrent creates a new environment for the current cycle.
   480  func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error {
   481  	stateDB, err := self.chain.PrunableStateAt(parent.Root(), parent.NumberU64())
   482  	if err != nil {
   483  		return err
   484  	}
   485  	work := NewTask(self.config, types.MakeSigner(self.config, header.Number), stateDB, header)
   486  	if self.nodetype != common.CONSENSUSNODE {
   487  		// set the current block and header as pending block and header to support APIs requesting a pending block.
   488  		work.Block = parent
   489  		work.header = parent.Header()
   490  	}
   491  
   492  	// Keep track of transactions which return errors so they can be removed
   493  	work.tcount = 0
   494  	self.current = work
   495  	return nil
   496  }
   497  
// commitNewWork assembles a new block on top of the current chain head:
// it fetches pending transactions (consensus nodes only), derives the next
// base fee after the Magma fork, paces block timestamps to
// params.BlockGenerationInterval, executes/finalizes the block, records
// mining metrics, and pushes the resulting task to agents for sealing.
func (self *worker) commitNewWork() {
	var pending map[common.Address]types.Transactions
	var err error
	if self.nodetype == common.CONSENSUSNODE {
		// Check any fork transitions needed
		pending, err = self.backend.TxPool().Pending()
		if err != nil {
			logger.Error("Failed to fetch pending transactions", "err", err)
			return
		}
	}

	self.mu.Lock()
	defer self.mu.Unlock()
	self.currentMu.Lock()
	defer self.currentMu.Unlock()

	parent := self.chain.CurrentBlock()
	nextBlockNum := new(big.Int).Add(parent.Number(), common.Big1)
	var nextBaseFee *big.Int
	if self.nodetype == common.CONSENSUSNODE {
		if self.config.IsMagmaForkEnabled(nextBlockNum) {
			// NOTE-klaytn NextBlockBaseFee needs the header of parent, self.chain.CurrentBlock
			// So above code, TxPool().Pending(), is separated with this and can be refactored later.
			nextBaseFee = misc.NextMagmaBlockBaseFee(parent.Header(), self.config.Governance.KIP71)
			pending = types.FilterTransactionWithBaseFee(pending, nextBaseFee)
		}
	}

	// TODO-Klaytn drop or missing tx
	tstart := time.Now()
	tstamp := tstart.Unix()
	if self.nodetype == common.CONSENSUSNODE {
		parentTimestamp := parent.Time().Int64()
		ideal := time.Unix(parentTimestamp+params.BlockGenerationInterval, 0)
		// If a timestamp of this block is faster than the ideal timestamp,
		// wait for a while and get a new timestamp
		if tstart.Before(ideal) {
			wait := ideal.Sub(tstart)
			logger.Debug("Mining too far in the future", "wait", common.PrettyDuration(wait))
			time.Sleep(wait)

			tstart = time.Now()    // refresh for metrics
			tstamp = tstart.Unix() // refresh for block timestamp
		} else if tstart.After(ideal) {
			logger.Info("Mining start for new block is later than expected",
				"nextBlockNum", nextBlockNum,
				"delay", tstart.Sub(ideal),
				"parentBlockTimestamp", parentTimestamp,
				"nextBlockTimestamp", tstamp,
			)
		}
	}

	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     nextBlockNum,
		Extra:      self.extra,
		Time:       big.NewInt(tstamp),
	}
	if self.config.IsMagmaForkEnabled(nextBlockNum) {
		header.BaseFee = nextBaseFee
	}
	if err := self.engine.Prepare(self.chain, header); err != nil {
		logger.Error("Failed to prepare header for mining", "err", err)
		return
	}
	// Could potentially happen if starting to mine in an odd state.
	err = self.makeCurrent(parent, header)
	if err != nil {
		logger.Error("Failed to create mining context", "err", err)
		return
	}

	// Obtain current work's state lock after we receive new work assignment.
	self.current.stateMu.Lock()
	defer self.current.stateMu.Unlock()

	// Create the current work task
	work := self.current
	if self.nodetype == common.CONSENSUSNODE {
		txs := types.NewTransactionsByTimeAndNonce(self.current.signer, pending)
		work.commitTransactions(self.mux, txs, self.chain, self.rewardbase)
		finishedCommitTx := time.Now()

		// Create the new block to seal with the consensus engine
		if work.Block, err = self.engine.Finalize(self.chain, header, work.state, work.txs, work.receipts); err != nil {
			logger.Error("Failed to finalize block for sealing", "err", err)
			return
		}
		finishedFinalize := time.Now()

		// We only care about logging if we're actually mining.
		if atomic.LoadInt32(&self.mining) == 1 {
			// Update the metrics subsystem with all the measurements
			accountReadTimer.Update(work.state.AccountReads)
			accountHashTimer.Update(work.state.AccountHashes)
			accountUpdateTimer.Update(work.state.AccountUpdates)
			accountCommitTimer.Update(work.state.AccountCommits)

			storageReadTimer.Update(work.state.StorageReads)
			storageHashTimer.Update(work.state.StorageHashes)
			storageUpdateTimer.Update(work.state.StorageUpdates)
			storageCommitTimer.Update(work.state.StorageCommits)

			snapshotAccountReadTimer.Update(work.state.SnapshotAccountReads)
			snapshotStorageReadTimer.Update(work.state.SnapshotStorageReads)
			snapshotCommitTimer.Update(work.state.SnapshotCommits)

			calcDeferredRewardTimer.Update(reward.CalcDeferredRewardTimer)

			// Total trie time is subtracted from commit time below to isolate
			// pure EVM execution time for the execute-tx metric.
			trieAccess := work.state.AccountReads + work.state.AccountHashes + work.state.AccountUpdates + work.state.AccountCommits
			trieAccess += work.state.StorageReads + work.state.StorageHashes + work.state.StorageUpdates + work.state.StorageCommits

			tCountGauge.Update(int64(work.tcount))
			blockMiningTime := time.Since(tstart)
			commitTxTime := finishedCommitTx.Sub(tstart)
			finalizeTime := finishedFinalize.Sub(finishedCommitTx)

			if header.BaseFee != nil {
				blockBaseFee.Update(header.BaseFee.Int64() / int64(params.Ston))
			}
			blockMiningTimer.Update(blockMiningTime)
			blockMiningCommitTxTimer.Update(commitTxTime)
			blockMiningExecuteTxTimer.Update(commitTxTime - trieAccess)
			blockMiningFinalizeTimer.Update(finalizeTime)
			logger.Info("Commit new mining work",
				"number", work.Block.Number(), "hash", work.Block.Hash(),
				"txs", work.tcount, "elapsed", common.PrettyDuration(blockMiningTime),
				"commitTime", common.PrettyDuration(commitTxTime), "finalizeTime", common.PrettyDuration(finalizeTime))
		}
	}

	// Hand the task to agents and refresh the pending-block snapshot.
	self.push(work)
	self.updateSnapshot()
}
   634  
   635  func (self *worker) updateSnapshot() {
   636  	self.snapshotMu.Lock()
   637  	defer self.snapshotMu.Unlock()
   638  
   639  	self.snapshotBlock = types.NewBlock(
   640  		self.current.header,
   641  		self.current.txs,
   642  		self.current.receipts,
   643  	)
   644  	self.snapshotState = self.current.state.Copy()
   645  }
   646  
   647  func (env *Task) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByTimeAndNonce, bc BlockChain, rewardbase common.Address) {
   648  	coalescedLogs := env.ApplyTransactions(txs, bc, rewardbase)
   649  
   650  	if len(coalescedLogs) > 0 || env.tcount > 0 {
   651  		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
   652  		// logs by filling in the block hash when the block was mined by the local miner. This can
   653  		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
   654  		cpy := make([]*types.Log, len(coalescedLogs))
   655  		for i, l := range coalescedLogs {
   656  			cpy[i] = new(types.Log)
   657  			*cpy[i] = *l
   658  		}
   659  		go func(logs []*types.Log, tcount int) {
   660  			if len(logs) > 0 {
   661  				mux.Post(blockchain.PendingLogsEvent{Logs: logs})
   662  			}
   663  			if tcount > 0 {
   664  				mux.Post(blockchain.PendingStateEvent{})
   665  			}
   666  		}(cpy, env.tcount)
   667  	}
   668  }
   669  
// ApplyTransactions executes transactions from txs against the task state
// until the supply is exhausted, the block gas limit is hit, or
// params.BlockGenerationTimeLimit elapses. A watchdog goroutine cancels the
// currently running EVM on timeout (the first transaction of a block is
// always allowed to finish). Returns the coalesced logs of all successfully
// applied transactions.
func (env *Task) ApplyTransactions(txs *types.TransactionsByTimeAndNonce, bc BlockChain, rewardbase common.Address) []*types.Log {
	var coalescedLogs []*types.Log

	// Limit the execution time of all transactions in a block
	var abort int32 = 0       // To break the below commitTransaction for loop when timed out
	chDone := make(chan bool) // To stop the goroutine below when processing txs is completed

	// chEVM is used to notify the below goroutine of the running EVM so it can call evm.Cancel
	// when timed out.  We use a buffered channel to prevent the main EVM execution routine
	// from being blocked due to the channel communication.
	chEVM := make(chan *vm.EVM, 1)

	go func() {
		blockTimer := time.NewTimer(params.BlockGenerationTimeLimit)
		timeout := false
		var evm *vm.EVM

		for {
			select {
			case <-blockTimer.C:
				timeout = true
				atomic.StoreInt32(&abort, 1)

			case <-chDone:
				// Everything is done. Stop this goroutine.
				return

			case evm = <-chEVM:
			}

			if timeout && evm != nil {
				// Allow the first transaction to complete although it exceeds the time limit.
				if env.tcount > 0 {
					// The total time limit reached, thus we stop the currently running EVM.
					evm.Cancel(vm.CancelByTotalTimeLimit)
				}
				evm = nil
			}
		}
	}()

	vmConfig := &vm.Config{
		RunningEVM: chEVM,
	}

	// Per-call counters fed into the miner gauges at the end.
	var numTxsChecked int64 = 0
	var numTxsNonceTooLow int64 = 0
	var numTxsNonceTooHigh int64 = 0
	var numTxsGasLimitReached int64 = 0
CommitTransactionLoop:
	for atomic.LoadInt32(&abort) == 0 {
		// Retrieve the next transaction and abort if all done
		tx := txs.Peek()
		if tx == nil {
			// To indicate that it does not have enough transactions for params.BlockGenerationTimeLimit.
			if numTxsChecked > 0 {
				usedAllTxsCounter.Inc(1)
			}
			break
		}
		numTxsChecked++
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance is the transaction pool.
		//
		// We use the eip155 signer regardless of the current hf.
		from, _ := types.Sender(env.signer, tx)

		// NOTE-Klaytn Since Klaytn is always in EIP155, the below replay protection code is not needed.
		// TODO-Klaytn-RemoveLater Remove the code commented below.
		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do.
		//if tx.Protected() && !env.config.IsEIP155(env.header.Number) {
		//	logger.Trace("Ignoring reply protected transaction", "hash", tx.Hash())
		//	//logger.Error("#### worker.commitTransaction","tx.protected",tx.Protected(),"tx.hash",tx.Hash(),"nonce",tx.Nonce(),"to",tx.To())
		//	txs.Pop()
		//	continue
		//}
		// Start executing the transaction
		env.state.SetTxContext(tx.Hash(), common.Hash{}, env.tcount)

		err, logs := env.commitTransaction(tx, bc, rewardbase, vmConfig)
		switch err {
		case blockchain.ErrGasLimitReached:
			// Pop the current out-of-gas transaction without shifting in the next from the account
			logger.Trace("Gas limit exceeded for current block", "sender", from)
			numTxsGasLimitReached++
			txs.Pop()

		case blockchain.ErrNonceTooLow:
			// New head notification data race between the transaction pool and miner, shift
			logger.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			numTxsNonceTooLow++
			txs.Shift()

		case blockchain.ErrNonceTooHigh:
			// Reorg notification data race between the transaction pool and miner, skip account =
			logger.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
			numTxsNonceTooHigh++
			txs.Pop()

		case vm.ErrTotalTimeLimitReached:
			logger.Warn("Transaction aborted due to time limit", "hash", tx.Hash().String())
			timeLimitReachedCounter.Inc(1)
			if env.tcount == 0 {
				logger.Error("A single transaction exceeds total time limit", "hash", tx.Hash().String())
				tooLongTxCounter.Inc(1)
			}
			// NOTE-Klaytn Exit for loop immediately without checking abort variable again.
			break CommitTransactionLoop

		case blockchain.ErrTxTypeNotSupported:
			// Pop the unsupported transaction without shifting in the next from the account
			logger.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
			txs.Pop()

		case nil:
			// Everything ok, collect the logs and shift in the next transaction from the same account
			coalescedLogs = append(coalescedLogs, logs...)
			env.tcount++
			txs.Shift()

		default:
			// Strange error, discard the transaction and get the next in line (note, the
			// nonce-too-high clause will prevent us from executing in vain).
			logger.Warn("Transaction failed, account skipped", "sender", from, "hash", tx.Hash().String(), "err", err)
			strangeErrorTxsCounter.Inc(1)
			txs.Shift()
		}
	}

	// Update the number of transactions checked and dropped during ApplyTransactions.
	checkedTxsGauge.Update(numTxsChecked)
	nonceTooLowTxsGauge.Update(numTxsNonceTooLow)
	nonceTooHighTxsGauge.Update(numTxsNonceTooHigh)
	gasLimitReachedTxsGauge.Update(numTxsGasLimitReached)

	// Stop the goroutine that has been handling the timer.
	chDone <- true

	return coalescedLogs
}
   811  
   812  func (env *Task) commitTransaction(tx *types.Transaction, bc BlockChain, rewardbase common.Address, vmConfig *vm.Config) (error, []*types.Log) {
   813  	snap := env.state.Snapshot()
   814  
   815  	receipt, _, err := bc.ApplyTransaction(env.config, &rewardbase, env.state, env.header, tx, &env.header.GasUsed, vmConfig)
   816  	if err != nil {
   817  		if err != vm.ErrInsufficientBalance && err != vm.ErrTotalTimeLimitReached {
   818  			tx.MarkUnexecutable(true)
   819  		}
   820  		env.state.RevertToSnapshot(snap)
   821  		return err, nil
   822  	}
   823  	env.txs = append(env.txs, tx)
   824  	env.receipts = append(env.receipts, receipt)
   825  
   826  	return nil, receipt.Logs
   827  }
   828  
   829  func NewTask(config *params.ChainConfig, signer types.Signer, statedb *state.StateDB, header *types.Header) *Task {
   830  	return &Task{
   831  		config:    config,
   832  		signer:    signer,
   833  		state:     statedb,
   834  		header:    header,
   835  		createdAt: time.Now(),
   836  	}
   837  }
   838  
// Transactions returns the transactions committed to the task so far.
func (env *Task) Transactions() []*types.Transaction { return env.txs }

// Receipts returns the receipts produced for the task's committed transactions.
func (env *Task) Receipts() []*types.Receipt         { return env.receipts }