github.com/klaytn/klaytn@v1.10.2/blockchain/tx_pool.go

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2014 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from core/tx_pool.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package blockchain
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"math"
    27  	"math/big"
    28  	"sort"
    29  	"sync"
    30  	"time"
    31  
    32  	"github.com/klaytn/klaytn/blockchain/state"
    33  	"github.com/klaytn/klaytn/blockchain/types"
    34  	"github.com/klaytn/klaytn/common"
    35  	"github.com/klaytn/klaytn/common/prque"
    36  	"github.com/klaytn/klaytn/consensus/misc"
    37  	"github.com/klaytn/klaytn/event"
    38  	"github.com/klaytn/klaytn/kerrors"
    39  	"github.com/klaytn/klaytn/params"
    40  	"github.com/rcrowley/go-metrics"
    41  )
    42  
    43  const (
    44  	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    45  	chainHeadChanSize = 10
    46  
    47  	// txSlotSize is used to calculate how many data slots a single transaction
    48  	// takes up based on its size. The slots are used as DoS protection, ensuring
    49  	// that validating a new transaction remains a constant operation (in reality
    50  	// O(maxslots), where max slots are 4 currently).
    51  	txSlotSize = 32 * 1024
    52  
    53  	// MaxTxDataSize is the maximum size a single transaction can have. This field has
    54  	// non-trivial consequences: larger transactions are significantly harder and
    55  	// more expensive to propagate; larger transactions also take more resources
    56  	// to validate whether they fit into the pool or not.
    57  	// TODO-klaytn: Change the name to clarify what it means. It means the max length of the transaction.
    58  	MaxTxDataSize = 4 * txSlotSize // 128KB
    59  
    60  	// demoteUnexecutablesFullValidationTxLimit is the number of txs that will be fully validated in demoteUnexecutables.
    61  	demoteUnexecutablesFullValidationTxLimit = 1000
    62  	// txMsgChSize is the size of the channel buffering the lists of transactions to be queued.
    63  	txMsgChSize = 100
    64  )
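
        // A minimal sketch of the slot accounting implied by txSlotSize: a transaction
        // occupies its encoded size divided by txSlotSize, rounded up, so a tx capped at
        // MaxTxDataSize takes at most 4 slots. The helper numSlots used later in this file
        // is assumed to follow this shape; numSlotsSketch below is a hypothetical stand-in.
        //
        //	func numSlotsSketch(tx *types.Transaction) int {
        //		return int((uint64(tx.Size()) + txSlotSize - 1) / txSlotSize)
        //	}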
    65  
    66  var (
    67  	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
    68  	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
    69  
    70  	txPoolIsFullErr = fmt.Errorf("txpool is full")
    71  
    72  	errNotAllowedAnchoringTx = errors.New("locally anchoring chaindata tx is not allowed in this node")
    73  )
    74  
    75  var (
    76  	// Metrics for the pending pool
    77  	pendingDiscardCounter   = metrics.NewRegisteredCounter("txpool/pending/discard", nil)
    78  	pendingReplaceCounter   = metrics.NewRegisteredCounter("txpool/pending/replace", nil)
    79  	pendingRateLimitCounter = metrics.NewRegisteredCounter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
    80  	pendingNofundsCounter   = metrics.NewRegisteredCounter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
    81  
    82  	// Metrics for the queued pool
    83  	queuedDiscardCounter   = metrics.NewRegisteredCounter("txpool/queued/discard", nil)
    84  	queuedReplaceCounter   = metrics.NewRegisteredCounter("txpool/queued/replace", nil)
    85  	queuedRateLimitCounter = metrics.NewRegisteredCounter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
    86  	queuedNofundsCounter   = metrics.NewRegisteredCounter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
    87  
    88  	// General tx metrics
    89  	invalidTxCounter     = metrics.NewRegisteredCounter("txpool/invalid", nil)
    90  	underpricedTxCounter = metrics.NewRegisteredCounter("txpool/underpriced", nil)
    91  	refusedTxCounter     = metrics.NewRegisteredCounter("txpool/refuse", nil)
    92  	slotsGauge           = metrics.NewRegisteredGauge("txpool/slots", nil)
    93  )
    94  
    95  // TxStatus is the current status of a transaction as seen by the pool.
    96  type TxStatus uint
    97  
    98  const (
    99  	TxStatusUnknown TxStatus = iota
   100  	TxStatusQueued
   101  	TxStatusPending
   102  	// for Les
   103  	TxStatusIncluded
   104  )
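
        // A minimal sketch of how the TxStatus values returned by (*TxPool).Status are
        // typically interpreted; the surrounding variables are assumptions.
        //
        //	switch status {
        //	case TxStatusPending:
        //		// executable, a candidate for the next block
        //	case TxStatusQueued:
        //		// known but not yet executable (e.g. a nonce gap)
        //	default:
        //		// TxStatusUnknown: not currently in the pool
        //	}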
   105  
   106  // blockChain provides the state of blockchain and current gas limit to do
   107  // some pre checks in tx pool and event subscribers.
   108  type blockChain interface {
   109  	CurrentBlock() *types.Block
   110  	GetBlock(hash common.Hash, number uint64) *types.Block
   111  	StateAt(root common.Hash) (*state.StateDB, error)
   112  
   113  	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
   114  }
   115  
   116  // TxPoolConfig are the configuration parameters of the transaction pool.
   117  type TxPoolConfig struct {
   118  	NoLocals           bool          // Whether local transaction handling should be disabled
   119  	AllowLocalAnchorTx bool          // If true, the txpool allows locally submitted chaindata anchoring transactions
   120  	DenyRemoteTx       bool          // Denies remote transactions received from other peers
   121  	Journal            string        // Journal of local transactions to survive node restarts
   122  	JournalInterval    time.Duration // Time interval to regenerate the local transaction journal
   123  
   124  	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
   125  	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
   126  
   127  	ExecSlotsAccount    uint64 // Number of executable transaction slots guaranteed per account
   128  	ExecSlotsAll        uint64 // Maximum number of executable transaction slots for all accounts
   129  	NonExecSlotsAccount uint64 // Maximum number of non-executable transaction slots permitted per account
   130  	NonExecSlotsAll     uint64 // Maximum number of non-executable transaction slots for all accounts
   131  
   132  	KeepLocals bool          // Disables removing timed-out local transactions
   133  	Lifetime   time.Duration // Maximum amount of time non-executable transactions are queued
   134  
   135  	NoAccountCreation            bool // Whether account creation transactions should be disabled
   136  	EnableSpamThrottlerAtRuntime bool // Enable txpool spam throttler at runtime
   137  }
   138  
   139  // DefaultTxPoolConfig contains the default configurations for the transaction
   140  // pool.
   141  var DefaultTxPoolConfig = TxPoolConfig{
   142  	Journal:         "transactions.rlp",
   143  	JournalInterval: time.Hour,
   144  
   145  	PriceLimit: 1,
   146  	PriceBump:  10,
   147  
   148  	ExecSlotsAccount:    16,
   149  	ExecSlotsAll:        4096,
   150  	NonExecSlotsAccount: 64,
   151  	NonExecSlotsAll:     1024,
   152  
   153  	KeepLocals: false,
   154  	Lifetime:   5 * time.Minute,
   155  }
   156  
   157  // sanitize checks the provided user configurations and changes anything that's
   158  // unreasonable or unworkable.
   159  func (config *TxPoolConfig) sanitize() TxPoolConfig {
   160  	conf := *config
   161  	if conf.JournalInterval < time.Second {
   162  		logger.Error("Sanitizing invalid txpool journal time", "provided", conf.JournalInterval, "updated", time.Second)
   163  		conf.JournalInterval = time.Second
   164  	}
   165  	if conf.PriceLimit < 1 {
   166  		logger.Error("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
   167  		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
   168  	}
   169  	if conf.PriceBump < 1 {
   170  		logger.Error("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
   171  		conf.PriceBump = DefaultTxPoolConfig.PriceBump
   172  	}
   173  	return conf
   174  }
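
        // A minimal configuration sketch: callers usually start from DefaultTxPoolConfig and
        // override individual fields; out-of-range values such as a zero PriceBump are then
        // corrected by sanitize inside NewTxPool. The overridden values are only examples.
        //
        //	cfg := DefaultTxPoolConfig
        //	cfg.ExecSlotsAll = 8192
        //	cfg.Lifetime = 10 * time.Minute
        //	cfg.PriceBump = 0 // reset to DefaultTxPoolConfig.PriceBump by sanitize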
   175  
   176  // TxPool contains all currently known transactions. Transactions
   177  // enter the pool when they are received from the network or submitted
   178  // locally. They exit the pool when they are included in the blockchain.
   179  //
   180  // The pool separates processable transactions (which can be applied to the
   181  // current state) and future transactions. Transactions move between those
   182  // two states over time as they are received and processed.
   183  type TxPool struct {
   184  	config       TxPoolConfig
   185  	chainconfig  *params.ChainConfig
   186  	chain        blockChain
   187  	gasPrice     *big.Int
   188  	txFeed       event.Feed
   189  	scope        event.SubscriptionScope
   190  	chainHeadCh  chan ChainHeadEvent
   191  	chainHeadSub event.Subscription
   192  	signer       types.Signer
   193  	mu           sync.RWMutex
   194  
   195  	currentBlockNumber uint64                    // Current block number
   196  	currentState       *state.StateDB            // Current state in the blockchain head
   197  	pendingNonce       map[common.Address]uint64 // Pending nonce tracking virtual nonces
   198  
   199  	locals  *accountSet // Set of local transactions to exempt from eviction rules
   200  	journal *txJournal  // Journal of local transactions to back up to disk
   201  
   202  	// TODO-Klaytn
   203  	txMu sync.RWMutex
   204  
   205  	pending map[common.Address]*txList   // All currently processable transactions
   206  	queue   map[common.Address]*txList   // Queued but non-processable transactions
   207  	beats   map[common.Address]time.Time // Last heartbeat from each known account
   208  	all     *txLookup                    // All transactions to allow lookups
   209  	priced  *txPricedList                // All transactions sorted by price
   210  
   211  	wg sync.WaitGroup // for shutdown sync
   212  
   213  	txMsgCh chan types.Transactions
   214  
   215  	eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions.
   216  	eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions.
   217  	magma   bool // Fork indicator whether the Magma hardfork (dynamic base fee) is enabled.
   218  }
   219  
   220  // NewTxPool creates a new transaction pool to gather, sort and filter inbound
   221  // transactions from the network.
   222  func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
   223  	// Sanitize the input to ensure no vulnerable gas prices are set
   224  	config = (&config).sanitize()
   225  
   226  	// Create the transaction pool with its initial settings
   227  	pool := &TxPool{
   228  		config:       config,
   229  		chainconfig:  chainconfig,
   230  		chain:        chain,
   231  		signer:       types.LatestSignerForChainID(chainconfig.ChainID),
   232  		pending:      make(map[common.Address]*txList),
   233  		queue:        make(map[common.Address]*txList),
   234  		beats:        make(map[common.Address]time.Time),
   235  		all:          newTxLookup(),
   236  		pendingNonce: make(map[common.Address]uint64),
   237  		chainHeadCh:  make(chan ChainHeadEvent, chainHeadChanSize),
   238  		gasPrice:     new(big.Int).SetUint64(chainconfig.UnitPrice),
   239  		txMsgCh:      make(chan types.Transactions, txMsgChSize),
   240  	}
   241  	pool.locals = newAccountSet(pool.signer)
   242  	pool.priced = newTxPricedList(pool.all)
   243  	pool.reset(nil, chain.CurrentBlock().Header())
   244  
   245  	// If local transactions and journaling is enabled, load from disk
   246  	if !config.NoLocals && config.Journal != "" {
   247  		pool.journal = newTxJournal(config.Journal)
   248  
   249  		if err := pool.journal.load(pool.AddLocals); err != nil {
   250  			logger.Error("Failed to load transaction journal", "err", err)
   251  		}
   252  		if err := pool.journal.rotate(pool.local(), pool.signer); err != nil {
   253  			logger.Error("Failed to rotate transaction journal", "err", err)
   254  		}
   255  	}
   256  	// Subscribe events from blockchain
   257  	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
   258  
   259  	// Start the event loop and return
   260  	pool.wg.Add(2)
   261  	go pool.loop()
   262  	go pool.handleTxMsg()
   263  
   264  	if config.EnableSpamThrottlerAtRuntime {
   265  		if err := pool.StartSpamThrottler(DefaultSpamThrottlerConfig); err != nil {
   266  			logger.Error("Failed to start spam throttler", "err", err)
   267  		}
   268  	}
   269  
   270  	return pool
   271  }
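
        // A minimal construction sketch, assuming chainConfig is a *params.ChainConfig and
        // bc is a value satisfying the blockChain interface (e.g. the node's chain backend);
        // both names are placeholders.
        //
        //	pool := NewTxPool(DefaultTxPoolConfig, chainConfig, bc)
        //	defer pool.Stop()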
   272  
   273  // loop is the transaction pool's main event loop, waiting for and reacting to
   274  // outside blockchain events as well as for various reporting and transaction
   275  // eviction events.
   276  func (pool *TxPool) loop() {
   277  	defer pool.wg.Done()
   278  
   279  	// Start the stats reporting and transaction eviction tickers
   280  	var prevPending, prevQueued, prevStales int
   281  
   282  	report := time.NewTicker(statsReportInterval)
   283  	defer report.Stop()
   284  
   285  	evict := time.NewTicker(evictionInterval)
   286  	defer evict.Stop()
   287  
   288  	journal := time.NewTicker(pool.config.JournalInterval)
   289  	defer journal.Stop()
   290  
   291  	// Track the previous head headers for transaction reorgs
   292  	head := pool.chain.CurrentBlock()
   293  
   294  	// Keep waiting for and reacting to the various events
   295  	for {
   296  		select {
   297  		// Handle ChainHeadEvent
   298  		case ev := <-pool.chainHeadCh:
   299  			if ev.Block != nil {
   300  				pool.mu.Lock()
   301  				currBlock := pool.chain.CurrentBlock()
   302  				if ev.Block.Root() != currBlock.Root() {
   303  					pool.mu.Unlock()
   304  					logger.Debug("block from ChainHeadEvent is different from the CurrentBlock",
   305  						"receivedNum", ev.Block.NumberU64(), "receivedHash", ev.Block.Hash().String(),
   306  						"currNum", currBlock.NumberU64(), "currHash", currBlock.Hash().String())
   307  					continue
   308  				}
   309  				pool.reset(head.Header(), ev.Block.Header())
   310  				head = ev.Block
   311  				pool.mu.Unlock()
   312  			}
   313  		// Handle unsubscription caused by system shutdown
   314  		case <-pool.chainHeadSub.Err():
   315  			return
   316  
   317  		// Handle stats reporting ticks
   318  		case <-report.C:
   319  			pool.mu.RLock()
   320  			pending, queued := pool.stats()
   321  			stales := pool.priced.stales
   322  			pool.mu.RUnlock()
   323  
   324  			if pending != prevPending || queued != prevQueued || stales != prevStales {
   325  				logger.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
   326  				prevPending, prevQueued, prevStales = pending, queued, stales
   327  				txPoolPendingGauge.Update(int64(pending))
   328  				txPoolQueueGauge.Update(int64(queued))
   329  			}
   330  
   331  		// Handle inactive account transaction eviction
   332  		case <-evict.C:
   333  			pool.mu.Lock()
   334  			for addr, beat := range pool.beats {
   335  				// Skip local transactions from the eviction mechanism
   336  				if pool.config.KeepLocals && pool.locals.contains(addr) {
   337  					delete(pool.beats, addr)
   338  					continue
   339  				}
   340  
   341  				// Any non-locals old enough should be removed
   342  				if time.Since(beat) > pool.config.Lifetime {
   343  					if pool.queue[addr] != nil {
   344  						for _, tx := range pool.queue[addr].Flatten() {
   345  							pool.removeTx(tx.Hash(), true)
   346  						}
   347  					}
   348  					delete(pool.beats, addr)
   349  				}
   350  			}
   351  			pool.mu.Unlock()
   352  
   353  		// Handle local transaction journal rotation
   354  		case <-journal.C:
   355  			if pool.journal != nil {
   356  				pool.mu.Lock()
   357  				if err := pool.journal.rotate(pool.local(), pool.signer); err != nil {
   358  					logger.Error("Failed to rotate local tx journal", "err", err)
   359  				}
   360  				pool.mu.Unlock()
   361  			}
   362  		}
   363  	}
   364  }
   365  
   366  // lockedReset is a wrapper around reset to allow calling it in a thread safe
   367  // manner. This method is only ever used in the tester!
   368  func (pool *TxPool) lockedReset(oldHead, newHead *types.Header) {
   369  	pool.mu.Lock()
   370  	defer pool.mu.Unlock()
   371  
   372  	pool.reset(oldHead, newHead)
   373  }
   374  
   375  // reset retrieves the current state of the blockchain and ensures the content
   376  // of the transaction pool is valid with regard to the chain state.
   377  func (pool *TxPool) reset(oldHead, newHead *types.Header) {
   378  	// If we're reorging an old state, reinject all dropped transactions
   379  	var reinject types.Transactions
   380  
   381  	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
   382  		// If the reorg is too deep, avoid doing it (will happen during fast sync)
   383  		oldNum := oldHead.Number.Uint64()
   384  		newNum := newHead.Number.Uint64()
   385  
   386  		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
   387  			logger.Debug("Skipping deep transaction reorg", "depth", depth)
   388  		} else {
   389  			// Reorg seems shallow enough to pull in all transactions into memory
   390  			var discarded, included types.Transactions
   391  
   392  			var (
   393  				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
   394  				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
   395  			)
   396  			if rem == nil {
   397  				// This can happen when setHead is performed.
   398  				// In that case we can simply discard the old head from the chain and replace it with newHead.
   399  
   400  				// If newNum >= oldNum, then it's not a case of setHead.
   401  				if newNum >= oldNum {
   402  					logger.Error("Transaction pool reset with missing oldhead",
   403  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
   404  					return
   405  				} else {
   406  					// When setHead is performed, oldHead becomes larger than newHead, since newHead is the rewound block number.
   407  					// If that is the case, we don't have the lost transactions anymore, and
   408  					// there's nothing to add
   409  					logger.Warn("Skipping transaction reset caused by setHead",
   410  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
   411  				}
   412  			} else {
   413  				for rem.NumberU64() > add.NumberU64() {
   414  					discarded = append(discarded, rem.Transactions()...)
   415  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
   416  						logger.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
   417  						return
   418  					}
   419  				}
   420  				for add.NumberU64() > rem.NumberU64() {
   421  					included = append(included, add.Transactions()...)
   422  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
   423  						logger.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
   424  						return
   425  					}
   426  				}
   427  				for rem.Hash() != add.Hash() {
   428  					discarded = append(discarded, rem.Transactions()...)
   429  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
   430  						logger.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
   431  						return
   432  					}
   433  					included = append(included, add.Transactions()...)
   434  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
   435  						logger.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
   436  						return
   437  					}
   438  				}
   439  				reinject = types.TxDifference(discarded, included)
   440  			}
   441  		}
   442  	}
   443  	// Initialize the internal state to the current head
   444  	if newHead == nil {
   445  		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
   446  	}
   447  	stateDB, err := pool.chain.StateAt(newHead.Root)
   448  	if err != nil {
   449  		logger.Error("Failed to reset txpool state", "err", err)
   450  		return
   451  	}
   452  	pool.currentState = stateDB
   453  	pool.pendingNonce = make(map[common.Address]uint64)
   454  	pool.currentBlockNumber = newHead.Number.Uint64()
   455  
   456  	// Inject any transactions discarded due to reorgs
   457  	logger.Debug("Reinjecting stale transactions", "count", len(reinject))
   458  	senderCacher.recover(pool.signer, reinject)
   459  
   460  	// pool.mu.Lock()
   461  	// defer pool.mu.Unlock()
   462  
   463  	pool.addTxsLocked(reinject, false)
   464  
   465  	// validate the pool of pending transactions, this will remove
   466  	// any transactions that have been included in the block or
   467  	// have been invalidated because of another transaction (e.g.
   468  	// higher gas price)
   469  	pool.demoteUnexecutables()
   470  
   471  	pool.txMu.Lock()
   472  	// Update all accounts to the latest known pending nonce
   473  	for addr, list := range pool.pending {
   474  		txs := list.Flatten()
   475  		if len(txs) > 0 {
   476  			// Heavy but will be cached and is needed by the miner anyway
   477  			pool.setPendingNonce(addr, txs[len(txs)-1].Nonce()+1)
   478  		}
   479  	}
   480  	pool.txMu.Unlock()
   481  	// Check the queue and move transactions over to the pending if possible
   482  	// or remove those that have become invalid
   483  	pool.promoteExecutables(nil)
   484  
   485  	// Update all fork indicator by next pending block number.
   486  	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
   487  
   488  	// Enable Ethereum tx type transactions
   489  	pool.eip2718 = pool.chainconfig.IsEthTxTypeForkEnabled(next)
   490  	pool.eip1559 = pool.chainconfig.IsEthTxTypeForkEnabled(next)
   491  	// Enable dynamic base fee
   492  	pool.magma = pool.chainconfig.IsMagmaForkEnabled(next)
   493  
   494  	// The gas price of the tx pool needs to be updated after the Magma hardfork.
   495  	if pool.magma {
   496  		pool.gasPrice = misc.NextMagmaBlockBaseFee(newHead, pool.chainconfig.Governance.KIP71)
   497  	}
   498  }
   499  
   500  // Stop terminates the transaction pool.
   501  func (pool *TxPool) Stop() {
   502  	// Unsubscribe all subscriptions registered from txpool
   503  	pool.scope.Close()
   504  
   505  	// Unsubscribe subscriptions registered from blockchain
   506  	pool.chainHeadSub.Unsubscribe()
   507  	pool.wg.Wait()
   508  
   509  	if pool.journal != nil {
   510  		pool.journal.close()
   511  	}
   512  
   513  	pool.StopSpamThrottler()
   514  	logger.Info("Transaction pool stopped")
   515  }
   516  
   517  // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
   518  // starts sending event to the given channel.
   519  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
   520  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   521  }
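
        // A minimal subscription sketch, assuming NewTxsEvent carries the pooled transactions
        // in its Txs field; the channel is buffered so a slow consumer does not block the feed.
        //
        //	txCh := make(chan NewTxsEvent, 256)
        //	sub := pool.SubscribeNewTxsEvent(txCh)
        //	defer sub.Unsubscribe()
        //	for ev := range txCh {
        //		for _, tx := range ev.Txs {
        //			_ = tx // handle newly pooled transaction
        //		}
        //	}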
   522  
   523  // GasPrice returns the current gas price enforced by the transaction pool.
   524  func (pool *TxPool) GasPrice() *big.Int {
   525  	pool.mu.RLock()
   526  	defer pool.mu.RUnlock()
   527  
   528  	return new(big.Int).Set(pool.gasPrice)
   529  }
   530  
   531  // SetGasPrice updates the gas price of the transaction pool for new transactions, and drops all old transactions.
   532  func (pool *TxPool) SetGasPrice(price *big.Int) {
   533  	if pool.magma {
   534  		logger.Info("Ignoring SetGasPrice after Magma fork")
   535  		return
   536  	}
   537  	if pool.gasPrice.Cmp(price) != 0 {
   538  		pool.mu.Lock()
   539  
   540  		logger.Info("TxPool.SetGasPrice", "before", pool.gasPrice, "after", price)
   541  
   542  		pool.gasPrice = price
   543  		pool.pending = make(map[common.Address]*txList)
   544  		pool.queue = make(map[common.Address]*txList)
   545  		pool.beats = make(map[common.Address]time.Time)
   546  		pool.all = newTxLookup()
   547  		pool.pendingNonce = make(map[common.Address]uint64)
   548  		pool.locals = newAccountSet(pool.signer)
   549  		pool.priced = newTxPricedList(pool.all)
   550  
   551  		pool.mu.Unlock()
   552  	}
   553  }
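
        // A minimal sketch of a unit-price update before the Magma hardfork; after Magma the
        // call is ignored and the pool follows the dynamic base fee computed in reset.
        //
        //	current := pool.GasPrice()
        //	pool.SetGasPrice(new(big.Int).Mul(current, big.NewInt(2))) // no-op after Magma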
   554  
   555  // Stats retrieves the current pool stats, namely the number of pending and the
   556  // number of queued (non-executable) transactions.
   557  func (pool *TxPool) Stats() (int, int) {
   558  	pool.mu.RLock()
   559  	defer pool.mu.RUnlock()
   560  
   561  	return pool.stats()
   562  }
   563  
   564  // stats retrieves the current pool stats, namely the number of pending and the
   565  // number of queued (non-executable) transactions.
   566  func (pool *TxPool) stats() (int, int) {
   567  	pending := 0
   568  	for _, list := range pool.pending {
   569  		pending += list.Len()
   570  	}
   571  	queued := 0
   572  	for _, list := range pool.queue {
   573  		queued += list.Len()
   574  	}
   575  	return pending, queued
   576  }
   577  
   578  // Content retrieves the data content of the transaction pool, returning all the
   579  // pending as well as queued transactions, grouped by account and sorted by nonce.
   580  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   581  	pool.mu.Lock()
   582  	defer pool.mu.Unlock()
   583  	pool.txMu.Lock()
   584  	defer pool.txMu.Unlock()
   585  
   586  	pending := make(map[common.Address]types.Transactions)
   587  	for addr, list := range pool.pending {
   588  		pending[addr] = list.Flatten()
   589  	}
   590  	queued := make(map[common.Address]types.Transactions)
   591  	for addr, list := range pool.queue {
   592  		queued[addr] = list.Flatten()
   593  	}
   594  	return pending, queued
   595  }
   596  
   597  // Pending retrieves all currently processable transactions, grouped by origin
   598  // account and sorted by nonce. The returned transaction set is a copy and can be
   599  // freely modified by calling code.
   600  func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
   601  	pool.mu.Lock()
   602  	defer pool.mu.Unlock()
   603  	pool.txMu.Lock()
   604  	defer pool.txMu.Unlock()
   605  
   606  	pending := make(map[common.Address]types.Transactions)
   607  	for addr, list := range pool.pending {
   608  		pending[addr] = list.Flatten()
   609  	}
   610  	return pending, nil
   611  }
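
        // A minimal sketch of consuming the snapshot returned by Pending; the copy can be
        // modified freely without affecting the pool.
        //
        //	pending, _ := pool.Pending()
        //	for addr, txs := range pending {
        //		logger.Debug("pending account", "addr", addr, "txs", len(txs))
        //	}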
   612  
   613  // CachedPendingTxsByCount retrieves approximately the requested number of currently
   614  // processable transactions, grouped by origin account and sorted by nonce.
   615  func (pool *TxPool) CachedPendingTxsByCount(count int) types.Transactions {
   616  	if count <= 0 {
   617  		return nil
   618  	}
   619  
   620  	// Start by taking up to half of the requested count from each account and halve the
   621  	// per-account quota as accounts fill it, so that as many accounts as possible contribute.
   622  	txPerAddr := count / 2
   623  	if txPerAddr == 0 {
   624  		txPerAddr = 1
   625  	}
   626  
   627  	pending := make(types.Transactions, 0, count)
   628  
   629  	pool.mu.Lock()
   630  	defer pool.mu.Unlock()
   631  	pool.txMu.Lock()
   632  	defer pool.txMu.Unlock()
   633  
   634  	if len(pool.pending) == 0 {
   635  		return nil
   636  	}
   637  
   638  	for _, list := range pool.pending {
   639  		pendingPerAccount := list.CachedTxsFlattenByCount(txPerAddr)
   640  
   641  		pending = append(pending, pendingPerAccount...)
   642  		if len(pending) >= count {
   643  			break
   644  		}
   645  
   646  		if len(pendingPerAccount) >= txPerAddr {
   647  			if txPerAddr > 1 {
   648  				txPerAddr = txPerAddr / 2
   649  			}
   650  		}
   651  	}
   652  	return pending
   653  }
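
        // A minimal sketch of requesting a bounded batch of pending transactions, e.g. when
        // assembling work without flattening every account list; the count is an example.
        //
        //	batch := pool.CachedPendingTxsByCount(200)
        //	logger.Debug("collected pending txs", "requested", 200, "got", len(batch))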
   654  
   655  // local retrieves all currently known local transactions, grouped by origin
   656  // account and sorted by nonce. The returned transaction set is a copy and can be
   657  // freely modified by calling code.
   658  func (pool *TxPool) local() map[common.Address]types.Transactions {
   659  	txs := make(map[common.Address]types.Transactions)
   660  	for addr := range pool.locals.accounts {
   661  		if pending := pool.pending[addr]; pending != nil {
   662  			txs[addr] = append(txs[addr], pending.Flatten()...)
   663  		}
   664  		if queued := pool.queue[addr]; queued != nil {
   665  			txs[addr] = append(txs[addr], queued.Flatten()...)
   666  		}
   667  	}
   668  	return txs
   669  }
   670  
   671  // validateTx checks whether a transaction is valid according to the consensus
   672  // rules and adheres to some heuristic limits of the local node (price and size).
   673  func (pool *TxPool) validateTx(tx *types.Transaction) error {
   674  	// Accept only legacy transactions until EIP-2718/2930 activates.
   675  	if !pool.eip2718 && tx.IsEthTypedTransaction() {
   676  		return ErrTxTypeNotSupported
   677  	}
   678  	// Reject dynamic fee transactions until EIP-1559 activates.
   679  	if !pool.eip1559 && tx.Type() == types.TxTypeEthereumDynamicFee {
   680  		return ErrTxTypeNotSupported
   681  	}
   682  
   683  	gasFeePayer := uint64(0)
   684  
   685  	// Check chain Id first.
   686  	if tx.Protected() && tx.ChainId().Cmp(pool.chainconfig.ChainID) != 0 {
   687  		return ErrInvalidChainId
   688  	}
   689  
   690  	// NOTE-Klaytn Drop transactions with unexpected gasPrice
   691  	// If the transaction is a dynamic fee tx, compare its GasFeeCap (MaxFeePerGas) and GasTipCap with the tx pool's gasPrice to check that they have the same value.
   692  	if tx.Type() == types.TxTypeEthereumDynamicFee {
   693  		// Sanity check for extremely large numbers
   694  		if tx.GasTipCap().BitLen() > 256 {
   695  			return ErrTipVeryHigh
   696  		}
   697  
   698  		if tx.GasFeeCap().BitLen() > 256 {
   699  			return ErrFeeCapVeryHigh
   700  		}
   701  
   702  		// Ensure gasFeeCap is greater than or equal to gasTipCap.
   703  		if tx.GasFeeCap().Cmp(tx.GasTipCap()) < 0 {
   704  			return ErrTipAboveFeeCap
   705  		}
   706  
   707  		if pool.magma {
   708  			// Ensure transaction's gasFeeCap is greater than or equal to transaction pool's gasPrice(baseFee).
   709  			if pool.gasPrice.Cmp(tx.GasFeeCap()) > 0 {
   710  				logger.Trace("fail to validate maxFeePerGas", "pool.gasPrice", pool.gasPrice, "maxFeePerGas", tx.GasFeeCap())
   711  				return ErrFeeCapBelowBaseFee
   712  			}
   713  		} else {
   714  
   715  			if pool.gasPrice.Cmp(tx.GasTipCap()) != 0 {
   716  				logger.Trace("fail to validate maxPriorityFeePerGas", "unitprice", pool.gasPrice, "maxPriorityFeePerGas", tx.GasTipCap())
   717  				return ErrInvalidGasTipCap
   718  			}
   719  
   720  			if pool.gasPrice.Cmp(tx.GasFeeCap()) != 0 {
   721  				logger.Trace("fail to validate maxFeePerGas", "unitprice", pool.gasPrice, "maxFeePerGas", tx.GasFeeCap())
   722  				return ErrInvalidGasFeeCap
   723  			}
   724  		}
   725  
   726  	} else {
   727  		if pool.magma {
   728  			if pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
   729  				// Ensure transaction's gasPrice is greater than or equal to transaction pool's gasPrice(baseFee).
   730  				logger.Trace("fail to validate gasprice", "pool.gasPrice", pool.gasPrice, "tx.gasPrice", tx.GasPrice())
   731  				return ErrGasPriceBelowBaseFee
   732  			}
   733  		} else {
   734  			// Unitprice policy before magma hardfork
   735  			if pool.gasPrice.Cmp(tx.GasPrice()) != 0 {
   736  				logger.Trace("fail to validate unitprice", "unitPrice", pool.gasPrice, "txUnitPrice", tx.GasPrice())
   737  				return ErrInvalidUnitPrice
   738  			}
   739  		}
   740  	}
   741  
   742  	// Reject transactions over MaxTxDataSize to prevent DoS attacks
   743  	if uint64(tx.Size()) > MaxTxDataSize {
   744  		return ErrOversizedData
   745  	}
   746  
   747  	// Transactions can't be negative. This may never happen using RLP decoded
   748  	// transactions but may occur if you create a transaction using the RPC.
   749  	if tx.Value().Sign() < 0 {
   750  		return ErrNegativeValue
   751  	}
   752  
   753  	// Make sure the transaction is signed properly
   754  	gasFrom, err := tx.ValidateSender(pool.signer, pool.currentState, pool.currentBlockNumber)
   755  	if err != nil {
   756  		return err
   757  	}
   758  	from := tx.ValidatedSender()
   759  
   760  	// Ensure the transaction adheres to nonce ordering
   761  	if pool.getNonce(from) > tx.Nonce() {
   762  		return ErrNonceTooLow
   763  	}
   764  
   765  	// Transactor should have enough funds to cover the costs
   766  	// cost == V + GP * GL
   767  	senderBalance := pool.getBalance(from)
   768  	if tx.IsFeeDelegatedTransaction() {
   769  		// balance check for fee-delegated tx
   770  		gasFeePayer, err = tx.ValidateFeePayer(pool.signer, pool.currentState, pool.currentBlockNumber)
   771  		if err != nil {
   772  			return ErrInvalidFeePayer
   773  		}
   774  		feePayer := tx.ValidatedFeePayer()
   775  		feePayerBalance := pool.getBalance(feePayer)
   776  		feeRatio, isRatioTx := tx.FeeRatio()
   777  		if isRatioTx {
   778  			// Check fee ratio range
   779  			if !feeRatio.IsValid() {
   780  				return kerrors.ErrFeeRatioOutOfRange
   781  			}
   782  
   783  			feeByFeePayer, feeBySender := types.CalcFeeWithRatio(feeRatio, tx.Fee())
   784  
   785  			if senderBalance.Cmp(new(big.Int).Add(tx.Value(), feeBySender)) < 0 {
   786  				logger.Trace("[tx_pool] insufficient funds for feeBySender", "from", from, "balance", senderBalance, "feeBySender", feeBySender)
   787  				return ErrInsufficientFundsFrom
   788  			}
   789  
   790  			if feePayerBalance.Cmp(feeByFeePayer) < 0 {
   791  				logger.Trace("[tx_pool] insufficient funds for feeByFeePayer", "feePayer", feePayer, "balance", feePayerBalance, "feeByFeePayer", feeByFeePayer)
   792  				return ErrInsufficientFundsFeePayer
   793  			}
   794  		} else {
   795  			if senderBalance.Cmp(tx.Value()) < 0 {
   796  				logger.Trace("[tx_pool] insufficient funds for cost(value)", "from", from, "balance", senderBalance, "value", tx.Value())
   797  				return ErrInsufficientFundsFrom
   798  			}
   799  
   800  			if feePayerBalance.Cmp(tx.Fee()) < 0 {
   801  				logger.Trace("[tx_pool] insufficient funds for cost(gas * price)", "feePayer", feePayer, "balance", feePayerBalance, "fee", tx.Fee())
   802  				return ErrInsufficientFundsFeePayer
   803  			}
   804  		}
   805  		// additional balance check in case of sender = feepayer
   806  		// since a single account has to bear the both cost(feepayer_cost + sender_cost),
   807  		// it is necessary to check whether the balance is equal to the sum of the cost.
   808  		if from == feePayer && senderBalance.Cmp(tx.Cost()) < 0 {
   809  			logger.Trace("[tx_pool] insufficient funds for cost(gas * price + value)", "from", from, "balance", senderBalance, "cost", tx.Cost())
   810  			return ErrInsufficientFundsFrom
   811  		}
   812  	} else {
   813  		// balance check for non-fee-delegated tx
   814  		if senderBalance.Cmp(tx.Cost()) < 0 {
   815  			logger.Trace("[tx_pool] insufficient funds for cost(gas * price + value)", "from", from, "balance", senderBalance, "cost", tx.Cost())
   816  			return ErrInsufficientFundsFrom
   817  		}
   818  	}
   819  
   820  	intrGas, err := tx.IntrinsicGas(pool.currentBlockNumber)
   821  	if err != nil {
   822  		return err
   823  	}
   824  	intrGas += gasFrom + gasFeePayer
   825  	if tx.Gas() < intrGas {
   826  		return ErrIntrinsicGas
   827  	}
   828  
   829  	// "tx.Validate()" conducts additional validation for each new txType.
   830  	// It validates the humanReadable address when this tx has "true" in the humanReadable field,
   831  	// validates the accountKey when this tx creates or updates an account,
   832  	// validates the existence of the address which will be created through this tx,
   833  	// and validates whether a contract account is executable.
   834  	if err := tx.Validate(pool.currentState, pool.currentBlockNumber); err != nil {
   835  		return err
   836  	}
   837  
   838  	return nil
   839  }
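
        // A minimal error-handling sketch: the sentinel errors surfaced by validateTx propagate
        // through AddLocal/AddRemote(s), so callers can branch on them; tx is a placeholder.
        //
        //	if err := pool.AddLocal(tx); errors.Is(err, ErrNonceTooLow) {
        //		// the sender has already executed a transaction with this nonce
        //	}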
   840  
   841  // getMaxTxFromQueueWhenNonceIsMissing finds and returns the transaction with the max nonce in the queue when the given tx has a missing nonce.
   842  // Otherwise it returns the given tx itself.
   843  func (pool *TxPool) getMaxTxFromQueueWhenNonceIsMissing(tx *types.Transaction, from *common.Address) *types.Transaction {
   844  	txs := pool.queue[*from].txs
   845  
   846  	maxTx := tx
   847  	if txs.Get(tx.Nonce()) != nil {
   848  		return maxTx
   849  	}
   850  
   851  	for _, t := range txs.items {
   852  		if maxTx.Nonce() < t.Nonce() {
   853  			maxTx = t
   854  		}
   855  	}
   856  	return maxTx
   857  }
   858  
   859  // add validates a transaction and inserts it into the non-executable queue for
   860  // later pending promotion and execution. If the transaction is a replacement for
   861  // an already pending or queued one, it overwrites the previous and returns this
   862  // so outer code doesn't uselessly call promote.
   863  //
   864  // If a newly added transaction is marked as local, its sending account will be
   865  // whitelisted, preventing any associated transaction from being dropped out of
   866  // the pool due to pricing constraints.
   867  func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
   868  	// If the transaction is already known, discard it
   869  	hash := tx.Hash()
   870  	if pool.all.Get(hash) != nil {
   871  		logger.Trace("Discarding already known transaction", "hash", hash)
   872  		return false, fmt.Errorf("known transaction: %x", hash)
   873  	}
   874  	// If the transaction fails basic validation, discard it
   875  	if err := pool.validateTx(tx); err != nil {
   876  		logger.Trace("Discarding invalid transaction", "hash", hash, "err", err)
   877  		invalidTxCounter.Inc(1)
   878  		return false, err
   879  	}
   880  
   881  	// If the transaction pool is full and new Tx is valid,
   882  	// (1) discard a new Tx if there is no room for the account of the Tx
   883  	// (2) remove an old Tx with the largest nonce from queue to make a room for a new Tx with missing nonce
   884  	// (3) discard a new Tx if the new Tx does not have a missing nonce
   885  	// (4) discard underpriced transactions
   886  	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.ExecSlotsAll+pool.config.NonExecSlotsAll {
   887  		// (1) discard a new Tx if there is no room for the account of the Tx
   888  		from, _ := types.Sender(pool.signer, tx)
   889  		if pool.queue[from] == nil {
   890  			logger.Trace("Rejecting a new Tx, because TxPool is full and there is no room for the account", "hash", tx.Hash(), "account", from)
   891  			refusedTxCounter.Inc(1)
   892  			return false, fmt.Errorf("txpool is full: %d", uint64(pool.all.Count()))
   893  		}
   894  
   895  		maxTx := pool.getMaxTxFromQueueWhenNonceIsMissing(tx, &from)
   896  		if maxTx != tx {
   897  			// (2) remove an old Tx with the largest nonce from queue to make a room for a new Tx with missing nonce
   898  			pool.removeTx(maxTx.Hash(), true)
   899  			logger.Trace("Removing an old Tx with the max nonce to insert a new Tx with missing nonce, because TxPool is full", "account", from, "new nonce(previously missing)", tx.Nonce(), "removed max nonce", maxTx.Nonce())
   900  		} else {
   901  			// (3) discard a new Tx if the new Tx does not have a missing nonce
   902  			logger.Trace("Rejecting a new Tx, because TxPool is full and a new TX does not have missing nonce", "hash", tx.Hash())
   903  			refusedTxCounter.Inc(1)
   904  			return false, fmt.Errorf("txpool is full and the new tx does not have missing nonce: %d", uint64(pool.all.Count()))
   905  		}
   906  
   907  		// (4) discard underpriced transactions
   908  		// If the new transaction is underpriced, don't accept it
   909  		if !local && pool.priced.Underpriced(tx, pool.locals) {
   910  			logger.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
   911  			underpricedTxCounter.Inc(1)
   912  			return false, ErrUnderpriced
   913  		}
   914  		// New transaction is better than our worst ones, make room for it
   915  		drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.ExecSlotsAll+pool.config.NonExecSlotsAll)+numSlots(tx), pool.locals)
   916  		for _, tx := range drop {
   917  			logger.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
   918  			underpricedTxCounter.Inc(1)
   919  			pool.removeTx(tx.Hash(), false)
   920  		}
   921  	}
   922  	// If the transaction is replacing an already pending one, do directly
   923  	from, _ := types.Sender(pool.signer, tx) // already validated
   924  	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
   925  		// Nonce already pending, check if required price bump is met
   926  		inserted, old := list.Add(tx, pool.config.PriceBump, pool.magma)
   927  		if !inserted {
   928  			pendingDiscardCounter.Inc(1)
   929  			return false, ErrAlreadyNonceExistInPool
   930  		}
   931  		// New transaction is better, replace old one
   932  		if old != nil {
   933  			pool.all.Remove(old.Hash())
   934  			pool.priced.Removed()
   935  			pendingReplaceCounter.Inc(1)
   936  		}
   937  		pool.all.Add(tx)
   938  		pool.priced.Put(tx)
   939  		pool.journalTx(from, tx)
   940  
   941  		logger.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
   942  
   943  		// We've directly injected a replacement transaction, notify subsystems
   944  		go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}})
   945  
   946  		return old != nil, nil
   947  	}
   948  	// New transaction isn't replacing a pending one, push into queue
   949  	replace, err := pool.enqueueTx(hash, tx)
   950  	if err != nil {
   951  		return false, err
   952  	}
   953  	// Mark local addresses and journal local transactions
   954  	if local {
   955  		pool.locals.add(from)
   956  	}
   957  	pool.journalTx(from, tx)
   958  
   959  	logger.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
   960  	return replace, nil
   961  }
   962  
   963  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   964  //
   965  // Note, this method assumes the pool lock is held!
   966  func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
   967  	// Try to insert the transaction into the future queue
   968  	from, _ := types.Sender(pool.signer, tx) // already validated
   969  	if pool.queue[from] == nil {
   970  		pool.queue[from] = newTxList(false)
   971  	}
   972  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump, pool.magma)
   973  	if !inserted {
   974  		// An older transaction was better, discard this
   975  		queuedDiscardCounter.Inc(1)
   976  		return false, ErrAlreadyNonceExistInPool
   977  	}
   978  	// Discard any previous transaction and mark this
   979  	if old != nil {
   980  		pool.all.Remove(old.Hash())
   981  		pool.priced.Removed()
   982  		queuedReplaceCounter.Inc(1)
   983  	}
   984  	if pool.all.Get(hash) == nil {
   985  		pool.all.Add(tx)
   986  		pool.priced.Put(tx)
   987  	}
   988  
   989  	pool.checkAndSetBeat(from)
   990  	return old != nil, nil
   991  }
   992  
   993  // journalTx adds the specified transaction to the local disk journal if it is
   994  // deemed to have been sent from a local account.
   995  func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
   996  	// Only journal if it's enabled and the transaction is local
   997  	if pool.journal == nil || !pool.locals.contains(from) {
   998  		return
   999  	}
  1000  	if err := pool.journal.insert(tx); err != nil {
  1001  		logger.Error("Failed to journal local transaction", "err", err)
  1002  	}
  1003  }
  1004  
  1005  // promoteTx adds a transaction to the pending (processable) list of transactions
  1006  // and returns whether it was inserted or an older transaction was better.
  1007  //
  1008  // Note, this method assumes the pool lock is held!
  1009  func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
  1010  	// Try to insert the transaction into the pending queue
  1011  	if pool.pending[addr] == nil {
  1012  		pool.pending[addr] = newTxList(true)
  1013  	}
  1014  	list := pool.pending[addr]
  1015  
  1016  	inserted, old := list.Add(tx, pool.config.PriceBump, pool.magma)
  1017  	if !inserted {
  1018  		// An older transaction was better, discard this
  1019  		pool.all.Remove(hash)
  1020  		pool.priced.Removed()
  1021  
  1022  		pendingDiscardCounter.Inc(1)
  1023  		return false
  1024  	}
  1025  	// Otherwise discard any previous transaction and mark this
  1026  	if old != nil {
  1027  		pool.all.Remove(old.Hash())
  1028  		pool.priced.Removed()
  1029  
  1030  		pendingReplaceCounter.Inc(1)
  1031  	}
  1032  	// Failsafe to work around direct pending inserts (tests)
  1033  	if pool.all.Get(hash) == nil {
  1034  		pool.all.Add(tx)
  1035  		pool.priced.Put(tx)
  1036  	}
  1037  	// Set the potentially new pending nonce and notify any subsystems of the new tx
  1038  	pool.beats[addr] = time.Now()
  1039  	pool.setPendingNonce(addr, tx.Nonce()+1)
  1040  
  1041  	return true
  1042  }
  1043  
  1044  // HandleTxMsg transfers transactions to a channel where handleTxMsg calls AddRemotes
  1045  // to handle them. This is done so that callers do not wait for the results of TxPool.AddRemotes.
  1046  func (pool *TxPool) HandleTxMsg(txs types.Transactions) {
  1047  	if pool.config.DenyRemoteTx {
  1048  		return
  1049  	}
  1050  
  1051  	// Filter spam txs based on to-address of failed txs
  1052  	spamThrottler := GetSpamThrottler()
  1053  	if spamThrottler != nil {
  1054  		pool.mu.RLock()
  1055  		poolSize := uint64(pool.all.Count())
  1056  		pool.mu.RUnlock()
  1057  
  1058  		// Activate spam throttler when pool has enough txs
  1059  		if poolSize > uint64(spamThrottler.config.ActivateTxPoolSize) {
  1060  			allowTxs, throttleTxs := spamThrottler.classifyTxs(txs)
  1061  
  1062  			for _, tx := range throttleTxs {
  1063  				select {
  1064  				case spamThrottler.throttleCh <- tx:
  1065  				default:
  1066  					logger.Trace("drop a tx when throttleTxs channel is full", "txHash", tx.Hash())
  1067  					throttlerDropCount.Inc(1)
  1068  				}
  1069  			}
  1070  
  1071  			txs = allowTxs
  1072  		}
  1073  	}
  1074  
  1075  	// TODO-Klaytn: Consider removing the next line and move the above logic to `addTx` or `AddRemotes`
  1076  	senderCacher.recover(pool.signer, txs)
  1077  	pool.txMsgCh <- txs
  1078  }
  1079  
  1080  func (pool *TxPool) throttleLoop(spamThrottler *throttler) {
  1081  	ticker := time.Tick(time.Second)
  1082  	throttleNum := int(spamThrottler.config.ThrottleTPS)
  1083  
  1084  	for {
  1085  		select {
  1086  		case <-spamThrottler.quitCh:
  1087  			logger.Info("Stop spam throttler loop")
  1088  			return
  1089  
  1090  		case <-ticker:
  1091  			txs := types.Transactions{}
  1092  
  1093  			iterNum := len(spamThrottler.throttleCh)
  1094  			if iterNum > throttleNum {
  1095  				iterNum = throttleNum
  1096  			}
  1097  
  1098  			for i := 0; i < iterNum; i++ {
  1099  				tx := <-spamThrottler.throttleCh
  1100  				txs = append(txs, tx)
  1101  			}
  1102  
  1103  			if len(txs) > 0 {
  1104  				pool.AddRemotes(txs)
  1105  			}
  1106  		}
  1107  	}
  1108  }
  1109  
  1110  func (pool *TxPool) StartSpamThrottler(conf *ThrottlerConfig) error {
  1111  	spamThrottlerMu.Lock()
  1112  	defer spamThrottlerMu.Unlock()
  1113  
  1114  	if spamThrottler != nil {
  1115  		return errors.New("spam throttler was already running")
  1116  	}
  1117  
  1118  	if conf == nil {
  1119  		conf = DefaultSpamThrottlerConfig
  1120  	}
  1121  
  1122  	if err := validateConfig(conf); err != nil {
  1123  		return err
  1124  	}
  1125  
  1126  	t := &throttler{
  1127  		config:     conf,
  1128  		candidates: make(map[common.Address]int),
  1129  		throttled:  make(map[common.Address]int),
  1130  		allowed:    make(map[common.Address]bool),
  1131  		mu:         new(sync.RWMutex),
  1132  		threshold:  conf.InitialThreshold,
  1133  		throttleCh: make(chan *types.Transaction, conf.ThrottleTPS*5),
  1134  		quitCh:     make(chan struct{}),
  1135  	}
  1136  
  1137  	go pool.throttleLoop(t)
  1138  
  1139  	spamThrottler = t
  1140  	logger.Info("Start spam throttler", "config", *conf)
  1141  	return nil
  1142  }
  1143  
  1144  func (pool *TxPool) StopSpamThrottler() {
  1145  	spamThrottlerMu.Lock()
  1146  	defer spamThrottlerMu.Unlock()
  1147  
  1148  	if spamThrottler != nil {
  1149  		close(spamThrottler.quitCh)
  1150  	}
  1151  
  1152  	spamThrottler = nil
  1153  	candidateSizeGauge.Update(0)
  1154  	throttledSizeGauge.Update(0)
  1155  	allowedSizeGauge.Update(0)
  1156  	throttlerUpdateTimeGauge.Update(0)
  1157  	throttlerDropCount.Clear()
  1158  }
  1159  
  1160  // handleTxMsg calls TxPool.AddRemotes by retrieving transactions from TxPool.txMsgCh.
  1161  func (pool *TxPool) handleTxMsg() {
  1162  	defer pool.wg.Done()
  1163  
  1164  	for {
  1165  		select {
  1166  		case txs := <-pool.txMsgCh:
  1167  			pool.AddRemotes(txs)
  1168  		case <-pool.chainHeadSub.Err():
  1169  			return
  1170  		}
  1171  	}
  1172  }
  1173  
  1174  // AddLocal enqueues a single transaction into the pool if it is valid, marking
  1175  // the sender as a local one in the meantime, ensuring it goes around the local
  1176  // pricing constraints.
  1177  func (pool *TxPool) AddLocal(tx *types.Transaction) error {
  1178  	if tx.Type().IsChainDataAnchoring() && !pool.config.AllowLocalAnchorTx {
  1179  		return errNotAllowedAnchoringTx
  1180  	}
  1181  
  1182  	pool.mu.RLock()
  1183  	poolSize := uint64(pool.all.Count())
  1184  	pool.mu.RUnlock()
  1185  	if poolSize >= pool.config.ExecSlotsAll+pool.config.NonExecSlotsAll {
  1186  		return fmt.Errorf("txpool is full: %d", poolSize)
  1187  	}
  1188  	return pool.addTx(tx, !pool.config.NoLocals)
  1189  }
  1190  
  1191  // AddRemote enqueues a single transaction into the pool if it is valid. If the
  1192  // sender is not among the locally tracked ones, full pricing constraints will
  1193  // apply.
  1194  func (pool *TxPool) AddRemote(tx *types.Transaction) error {
  1195  	return pool.addTx(tx, false)
  1196  }
  1197  
  1198  // AddLocals enqueues a batch of transactions into the pool if they are valid,
  1199  // marking the senders as local ones in the meantime, ensuring they go around
  1200  // the local pricing constraints.
  1201  func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
  1202  	return pool.checkAndAddTxs(txs, !pool.config.NoLocals)
  1203  }
  1204  
  1205  // AddRemotes enqueues a batch of transactions into the pool if they are valid.
  1206  // If the senders are not among the locally tracked ones, full pricing constraints
  1207  // will apply.
  1208  func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
  1209  	return pool.checkAndAddTxs(txs, false)
  1210  }
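
        // A minimal batch-submission sketch: the returned errors are positional, so errs[i]
        // reports the outcome of txs[i] (including txPoolIsFullErr for truncated ones).
        //
        //	errs := pool.AddRemotes(txs)
        //	for i, err := range errs {
        //		if err != nil {
        //			logger.Debug("tx rejected by pool", "hash", txs[i].Hash(), "err", err)
        //		}
        //	}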
  1211  
  1212  // checkAndAddTxs compares the number of given transactions with the remaining capacity of the TxPool.
  1213  // If the given transactions exceed the capacity of the TxPool, it truncates the given transactions
  1214  // so that they fit into the TxPool's capacity, returning txPoolIsFullErr for the dropped ones.
  1215  func (pool *TxPool) checkAndAddTxs(txs []*types.Transaction, local bool) []error {
  1216  	pool.mu.RLock()
  1217  	poolSize := uint64(pool.all.Count())
  1218  	pool.mu.RUnlock()
  1219  	poolCapacity := int(pool.config.ExecSlotsAll + pool.config.NonExecSlotsAll - poolSize)
  1220  	numTxs := len(txs)
  1221  
  1222  	if poolCapacity < numTxs {
  1223  		txs = txs[:poolCapacity]
  1224  	}
  1225  
  1226  	errs := pool.addTxs(txs, local)
  1227  
  1228  	if poolCapacity < numTxs {
  1229  		for i := 0; i < numTxs-poolCapacity; i++ {
  1230  			errs = append(errs, txPoolIsFullErr)
  1231  		}
  1232  	}
  1233  
  1234  	return errs
  1235  }
  1236  
  1237  // addTx enqueues a single transaction into the pool if it is valid.
  1238  func (pool *TxPool) addTx(tx *types.Transaction, local bool) error {
  1239  	senderCacher.recover(pool.signer, []*types.Transaction{tx})
  1240  
  1241  	pool.mu.Lock()
  1242  	defer pool.mu.Unlock()
  1243  
  1244  	// Try to inject the transaction and update any state
  1245  	replace, err := pool.add(tx, local)
  1246  	if err != nil {
  1247  		return err
  1248  	}
  1249  	// If we added a new transaction, run promotion checks and return
  1250  	if !replace {
  1251  		from, _ := types.Sender(pool.signer, tx) // already validated
  1252  		pool.promoteExecutables([]common.Address{from})
  1253  	}
  1254  	return nil
  1255  }
  1256  
  1257  // addTxs attempts to queue a batch of transactions if they are valid.
  1258  func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
  1259  	senderCacher.recover(pool.signer, txs)
  1260  
  1261  	pool.mu.Lock()
  1262  	defer pool.mu.Unlock()
  1263  
  1264  	return pool.addTxsLocked(txs, local)
  1265  }
  1266  
  1267  // addTxsLocked attempts to queue a batch of transactions if they are valid,
  1268  // whilst assuming the transaction pool lock is already held.
  1269  func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
  1270  	// Add the batch of transaction, tracking the accepted ones
  1271  	dirty := make(map[common.Address]struct{})
  1272  	errs := make([]error, len(txs))
  1273  
  1274  	for i, tx := range txs {
  1275  		var replace bool
  1276  		if replace, errs[i] = pool.add(tx, local); errs[i] == nil {
  1277  			if !replace {
  1278  				from, _ := types.Sender(pool.signer, tx) // already validated
  1279  				dirty[from] = struct{}{}
  1280  			}
  1281  		}
  1282  	}
  1283  
  1284  	// Only reprocess the internal state if something was actually added
  1285  	if len(dirty) > 0 {
  1286  		addrs := make([]common.Address, 0, len(dirty))
  1287  		for addr := range dirty {
  1288  			addrs = append(addrs, addr)
  1289  		}
  1290  		pool.promoteExecutables(addrs)
  1291  	}
  1292  	return errs
  1293  }
  1294  
  1295  // Status returns the status (unknown/pending/queued) of a batch of transactions
  1296  // identified by their hashes.
  1297  func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
  1298  	pool.mu.RLock()
  1299  	defer pool.mu.RUnlock()
  1300  
  1301  	status := make([]TxStatus, len(hashes))
  1302  	for i, hash := range hashes {
  1303  		if tx := pool.all.Get(hash); tx != nil {
  1304  			from, _ := types.Sender(pool.signer, tx) // already validated
  1305  			if pool.pending[from] != nil && pool.pending[from].txs.items[tx.Nonce()] != nil {
  1306  				status[i] = TxStatusPending
  1307  			} else {
  1308  				status[i] = TxStatusQueued
  1309  			}
  1310  		}
  1311  	}
  1312  	return status
  1313  }
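
        // A minimal lookup sketch combining Status and Get for a single hash; hash is a
        // placeholder for a previously observed transaction hash.
        //
        //	st := pool.Status([]common.Hash{hash})[0]
        //	if tx := pool.Get(hash); tx != nil {
        //		logger.Debug("tx found in pool", "hash", hash, "status", st)
        //	}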
  1314  
  1315  // Get returns a transaction if it is contained in the pool
  1316  // and nil otherwise.
  1317  func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
  1318  	return pool.all.Get(hash)
  1319  }
  1320  
  1321  // checkAndSetBeat sets the heartbeat of the account if it does not already have one.
  1322  func (pool *TxPool) checkAndSetBeat(addr common.Address) {
  1323  	_, exist := pool.beats[addr]
  1324  
  1325  	if !exist {
  1326  		pool.beats[addr] = time.Now()
  1327  	}
  1328  }
  1329  
  1330  // removeTx removes a single transaction from the queue, moving all subsequent
  1331  // transactions back to the future queue.
  1332  func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
  1333  	// Fetch the transaction we wish to delete
  1334  	tx := pool.all.Get(hash)
  1335  	if tx == nil {
  1336  		return
  1337  	}
  1338  	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
  1339  
  1340  	// Remove it from the list of known transactions
  1341  	pool.all.Remove(hash)
  1342  	if outofbound {
  1343  		pool.priced.Removed()
  1344  	}
  1345  	// Remove the transaction from the pending lists and reset the account nonce
  1346  	if pending := pool.pending[addr]; pending != nil {
  1347  		if removed, invalids := pending.Remove(tx); removed {
  1348  			// If no more pending transactions are left, remove the list
  1349  			if pending.Empty() {
  1350  				delete(pool.pending, addr)
  1351  			}
  1352  			// Postpone any invalidated transactions
  1353  			for _, tx := range invalids {
  1354  				pool.enqueueTx(tx.Hash(), tx)
  1355  			}
  1356  			pool.updatePendingNonce(addr, tx.Nonce())
  1357  			return
  1358  		}
  1359  	}
  1360  	// Transaction is in the future queue
  1361  	if future := pool.queue[addr]; future != nil {
  1362  		future.Remove(tx)
  1363  		if future.Empty() {
  1364  			delete(pool.queue, addr)
  1365  		}
  1366  	}
  1367  }
  1368  
  1369  // promoteExecutables moves transactions that have become processable from the
  1370  // future queue to the set of pending transactions. During this process, all
  1371  // invalidated transactions (low nonce, low balance) are deleted.
  1372  func (pool *TxPool) promoteExecutables(accounts []common.Address) {
  1373  	pool.txMu.Lock()
  1374  	defer pool.txMu.Unlock()
  1375  	// Track the promoted transactions to broadcast them at once
  1376  	var promoted []*types.Transaction
  1377  
  1378  	// Gather all the accounts potentially needing updates
  1379  	if accounts == nil {
  1380  		accounts = make([]common.Address, 0, len(pool.queue))
  1381  		for addr := range pool.queue {
  1382  			accounts = append(accounts, addr)
  1383  		}
  1384  	}
  1385  
  1386  	// Iterate over all accounts and promote any executable transactions
  1387  	for _, addr := range accounts {
  1388  		list := pool.queue[addr]
  1389  		if list == nil {
  1390  			continue // Just in case someone calls with a non-existent account
  1391  		}
  1392  		// Drop all transactions that are deemed too old (low nonce)
  1393  		for _, tx := range list.Forward(pool.getNonce(addr)) {
  1394  			hash := tx.Hash()
  1395  			logger.Trace("Removed old queued transaction", "hash", hash)
  1396  			pool.all.Remove(hash)
  1397  			pool.priced.Removed()
  1398  		}
  1399  		// Drop all transactions that are too costly (low balance)
  1400  		drops, _ := list.Filter(addr, pool)
  1401  		for _, tx := range drops {
  1402  			hash := tx.Hash()
  1403  			logger.Trace("Removed unpayable queued transaction", "hash", hash)
  1404  			pool.all.Remove(hash)
  1405  			pool.priced.Removed()
  1406  			queuedNofundsCounter.Inc(1)
  1407  		}
  1408  
  1409  		// Gather all executable transactions and promote them
  1410  		var readyTxs types.Transactions
  1411  		if pool.magma {
  1412  			readyTxs = list.ReadyWithGasPrice(pool.getPendingNonce(addr), pool.gasPrice)
  1413  		} else {
  1414  			readyTxs = list.Ready(pool.getPendingNonce(addr))
  1415  		}
  1416  		for _, tx := range readyTxs {
  1417  			hash := tx.Hash()
  1418  			if pool.promoteTx(addr, hash, tx) {
  1419  				logger.Trace("Promoting queued transaction", "hash", hash)
  1420  				promoted = append(promoted, tx)
  1421  			}
  1422  		}
  1423  
  1424  		// Drop all transactions over the allowed limit
  1425  		if !pool.locals.contains(addr) {
  1426  			for _, tx := range list.Cap(int(pool.config.NonExecSlotsAccount)) {
  1427  				hash := tx.Hash()
  1428  				pool.all.Remove(hash)
  1429  				pool.priced.Removed()
  1430  				queuedRateLimitCounter.Inc(1)
  1431  				logger.Trace("Removed cap-exceeding queued transaction", "hash", hash)
  1432  			}
  1433  		}
  1434  		// Delete the entire queue entry if it became empty.
  1435  		if list.Empty() {
  1436  			delete(pool.queue, addr)
  1437  		}
  1438  	}
  1439  	// Notify subsystem for new promoted transactions.
  1440  	if len(promoted) > 0 {
  1441  		pool.txFeed.Send(NewTxsEvent{promoted})
  1442  	}
  1443  	// If the pending limit is overflowed, start equalizing allowances
  1444  	pending := uint64(0)
  1445  	for _, list := range pool.pending {
  1446  		pending += uint64(list.Len())
  1447  	}
  1448  
  1449  	if pending > pool.config.ExecSlotsAll {
  1450  		pendingBeforeCap := pending
  1451  		// Assemble a spam order to penalize large transactors first
  1452  		spammers := prque.New()
  1453  		for addr, list := range pool.pending {
  1454  			// Only evict transactions from high rollers
  1455  			if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.ExecSlotsAccount {
  1456  				spammers.Push(addr, int64(list.Len()))
  1457  			}
  1458  		}
  1459  		// Gradually drop transactions from offenders
  1460  		offenders := []common.Address{}
  1461  		for pending > pool.config.ExecSlotsAll && !spammers.Empty() {
  1462  			// Retrieve the next offender (local addresses were never added to the queue)
  1463  			offender, _ := spammers.Pop()
  1464  			offenders = append(offenders, offender.(common.Address))
  1465  
  1466  			// Equalize the offenders' pending counts until all are the same or the pool is below the limit
  1467  			if len(offenders) > 1 {
  1468  				// Calculate the equalization threshold for all current offenders
  1469  				threshold := pool.pending[offender.(common.Address)].Len()
  1470  
  1471  				// Iteratively reduce all offenders until below limit or threshold reached
  1472  				for pending > pool.config.ExecSlotsAll && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
  1473  					for i := 0; i < len(offenders)-1; i++ {
  1474  						list := pool.pending[offenders[i]]
  1475  						for _, tx := range list.Cap(list.Len() - 1) {
  1476  							// Drop the transaction from the global pools too
  1477  							hash := tx.Hash()
  1478  							pool.all.Remove(hash)
  1479  							pool.priced.Removed()
  1480  
  1481  							// Update the account nonce to the dropped transaction
  1482  							pool.updatePendingNonce(offenders[i], tx.Nonce())
  1483  							logger.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1484  						}
  1485  						pending--
  1486  					}
  1487  				}
  1488  			}
  1489  		}
  1490  		// If still above threshold, reduce to limit or min allowance
  1491  		if pending > pool.config.ExecSlotsAll && len(offenders) > 0 {
  1492  			for pending > pool.config.ExecSlotsAll && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.ExecSlotsAccount {
  1493  				for _, addr := range offenders {
  1494  					list := pool.pending[addr]
  1495  					for _, tx := range list.Cap(list.Len() - 1) {
  1496  						// Drop the transaction from the global pools too
  1497  						hash := tx.Hash()
  1498  						pool.all.Remove(hash)
  1499  						pool.priced.Removed()
  1500  
  1501  						// Update the account nonce to the dropped transaction
  1502  						pool.updatePendingNonce(addr, tx.Nonce())
  1503  						logger.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1504  					}
  1505  					pending--
  1506  				}
  1507  			}
  1508  		}
  1509  		pendingRateLimitCounter.Inc(int64(pendingBeforeCap - pending))
  1510  	}
  1511  	// If we've queued more transactions than the hard limit, drop oldest ones
  1512  	queued := uint64(0)
  1513  	for _, list := range pool.queue {
  1514  		queued += uint64(list.Len())
  1515  	}
  1516  
  1517  	if queued > pool.config.NonExecSlotsAll {
  1518  		// Sort all accounts with queued transactions by heartbeat
  1519  		addresses := make(addresssByHeartbeat, 0, len(pool.queue))
  1520  		for addr := range pool.queue {
  1521  			if !pool.locals.contains(addr) { // don't drop locals
  1522  				addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
  1523  			}
  1524  		}
  1525  		sort.Sort(addresses)
  1526  
  1527  		// Drop transactions until the total is below the limit or only locals remain
  1528  		for drop := queued - pool.config.NonExecSlotsAll; drop > 0 && len(addresses) > 0; {
  1529  			addr := addresses[len(addresses)-1]
  1530  			list := pool.queue[addr.address]
  1531  
  1532  			addresses = addresses[:len(addresses)-1]
  1533  
  1534  			// Drop the account's entire queue if it does not exceed the remaining overflow
  1535  			if size := uint64(list.Len()); size <= drop {
  1536  				for _, tx := range list.Flatten() {
  1537  					pool.removeTx(tx.Hash(), true)
  1538  				}
  1539  				drop -= size
  1540  				queuedRateLimitCounter.Inc(int64(size))
  1541  				continue
  1542  			}
  1543  			// Otherwise drop only last few transactions
  1544  			txs := list.Flatten()
  1545  			for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
  1546  				pool.removeTx(txs[i].Hash(), true)
  1547  				drop--
  1548  				queuedRateLimitCounter.Inc(1)
  1549  			}
  1550  		}
  1551  	}
  1552  }
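
        // Illustrative sketch (editor's addition): the fairness cap above shaves one
        // transaction at a time from the largest non-local senders first, so heavy
        // senders are pulled down toward each other before lighter ones are touched.
        // A simplified model of that policy on plain per-account counts (hypothetical
        // helper; the local-account exemption and nonce bookkeeping are omitted):
        func capPendingCounts(counts []uint64, limitAll, limitPerAccount uint64) {
        	total := uint64(0)
        	for _, c := range counts {
        		total += c
        	}
        	for total > limitAll {
        		// Pick the currently largest account.
        		largest := 0
        		for i, c := range counts {
        			if c > counts[largest] {
        				largest = i
        			}
        		}
        		if counts[largest] <= limitPerAccount {
        			break // every account is within its individual allowance; stop shaving
        		}
        		counts[largest]--
        		total--
        	}
        }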
  1553  
  1554  // demoteUnexecutables removes invalid and processed transactions from the pool's
  1555  // executable/pending queue; any subsequent transactions that become unexecutable
  1556  // are moved back into the future queue.
  1557  func (pool *TxPool) demoteUnexecutables() {
  1558  	pool.txMu.Lock()
  1559  	defer pool.txMu.Unlock()
  1560  
  1561  	// full-validation count. demoteUnexecutables does full-validation for a limited number of txs.
  1562  	cnt := 0
  1563  	// Iterate over all accounts and demote any non-executable transactions
  1564  	for addr, list := range pool.pending {
  1565  		nonce := pool.getNonce(addr)
  1566  		var drops, invalids types.Transactions
  1567  
  1568  		// Drop all transactions that are deemed too old (low nonce)
  1569  		for _, tx := range list.Forward(nonce) {
  1570  			hash := tx.Hash()
  1571  			logger.Trace("Removed old pending transaction", "hash", hash)
  1572  			pool.all.Remove(hash)
  1573  			pool.priced.Removed()
  1574  		}
  1575  
  1576  		// demoteUnexecutables fully validates only a limited number of txs; beyond that limit it falls back to the cheaper nonce-only check.
  1577  		// The logic below counts txs loosely, favoring efficiency and simplicity over precision.
  1578  		if cnt < demoteUnexecutablesFullValidationTxLimit {
  1579  			cnt += list.Len()
  1580  			drops, invalids = list.Filter(addr, pool)
  1581  		} else {
  1582  			drops, invalids = list.FilterUnexecutable()
  1583  		}
  1584  
  1585  		// Drop all transactions that are unexecutable, and queue any invalids back for later
  1586  		for _, tx := range drops {
  1587  			hash := tx.Hash()
  1588  			logger.Trace("Removed unexecutable pending transaction", "hash", hash)
  1589  			pool.all.Remove(hash)
  1590  			pool.priced.Removed()
  1591  			pendingNofundsCounter.Inc(1)
  1592  		}
  1593  
  1594  		for _, tx := range invalids {
  1595  			hash := tx.Hash()
  1596  			logger.Trace("Demoting pending transaction", "hash", hash)
  1597  			pool.enqueueTx(hash, tx)
  1598  		}
  1599  		// If there's a gap in front, warn (should never happen) and postpone all transactions
  1600  		if list.Len() > 0 && list.txs.Get(nonce) == nil {
  1601  			for _, tx := range list.Cap(0) {
  1602  				hash := tx.Hash()
  1603  				logger.Error("Demoting invalidated transaction", "hash", hash)
  1604  				pool.enqueueTx(hash, tx)
  1605  			}
  1606  		}
  1607  
  1608  		// Enqueue a transaction if its gas price is lower than the tx pool's gas price.
  1609  		// All transactions with a nonce greater than the enqueued transaction's are moved to the queue as well.
  1610  		if pool.magma && list.Len() > 0 {
  1611  			for _, tx := range list.Flatten() {
  1612  				hash := tx.Hash()
  1613  				if tx.GasPrice().Cmp(pool.gasPrice) < 0 {
  1614  					logger.Trace("Demoting the tx whose gas price is lower than the baseFee, along with all txs of higher nonces", "txhash", hash)
  1615  					removed, invalids := list.Remove(tx) // delete all transactions satisfying the nonce value > tx.Nonce()
  1616  					if removed {
  1617  						for _, invalidTx := range invalids {
  1618  							pool.enqueueTx(invalidTx.Hash(), invalidTx)
  1619  						}
  1620  						pool.enqueueTx(hash, tx)
  1621  					}
  1622  					break
  1623  				}
  1624  			}
  1625  		}
  1626  
  1627  		// Delete the entire queue entry if it became empty.
  1628  		if list.Empty() {
  1629  			delete(pool.pending, addr)
  1630  		}
  1631  	}
  1632  }
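
        // Illustrative sketch (editor's addition): under magma, the loop above walks an
        // account's pending txs in nonce order, finds the first one whose gas price falls
        // below the pool's gas price, and demotes it together with every higher-nonce tx.
        // The cut-point selection, modeled on plain values (hypothetical helper):
        func demotionCutIndex(gasPrices []*big.Int, poolGasPrice *big.Int) int {
        	for i, gp := range gasPrices { // gasPrices ordered by ascending nonce
        		if gp.Cmp(poolGasPrice) < 0 {
        			return i // this tx and everything after it would be demoted
        		}
        	}
        	return len(gasPrices) // nothing to demote
        }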
  1633  
  1634  // getNonce returns the nonce of the account from the cache. If it is not in the cache, it gets the nonce from the stateDB.
  1635  func (pool *TxPool) getNonce(addr common.Address) uint64 {
  1636  	return pool.currentState.GetNonce(addr)
  1637  }
  1638  
  1639  // getBalance returns the balance of the account from the cache. If it is not in the cache, it gets the balance from the stateDB.
  1640  func (pool *TxPool) getBalance(addr common.Address) *big.Int {
  1641  	return pool.currentState.GetBalance(addr)
  1642  }
  1643  
  1644  // GetPendingNonce is the external API for reading an account's pending nonce.
  1645  // It acquires the pool lock, so use getPendingNonce instead when already inside the txpool.
  1646  func (pool *TxPool) GetPendingNonce(addr common.Address) uint64 {
  1647  	pool.mu.Lock()
  1648  	defer pool.mu.Unlock()
  1649  
  1650  	return pool.getPendingNonce(addr)
  1651  }
  1652  
  1653  // getPendingNonce returns the canonical nonce for the managed or unmanaged account.
  1654  func (pool *TxPool) getPendingNonce(addr common.Address) uint64 {
  1655  	cNonce := pool.getNonce(addr)
  1656  	if pNonce, exist := pool.pendingNonce[addr]; !exist || pNonce < cNonce {
  1657  		pool.pendingNonce[addr] = cNonce
  1658  	}
  1659  
  1660  	return pool.pendingNonce[addr]
  1661  }
  1662  
  1663  // setPendingNonce sets the new canonical nonce for the managed state.
  1664  func (pool *TxPool) setPendingNonce(addr common.Address, nonce uint64) {
  1665  	pool.pendingNonce[addr] = nonce
  1666  }
  1667  
  1668  // updatePendingNonce lowers the account's pending nonce to that of a dropped transaction, if it is currently higher.
  1669  func (pool *TxPool) updatePendingNonce(addr common.Address, nonce uint64) {
  1670  	if pool.getPendingNonce(addr) > nonce {
  1671  		pool.setPendingNonce(addr, nonce)
  1672  	}
  1673  }
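
        // Illustrative sketch (editor's addition): the tracked pending nonce is only
        // pushed upward explicitly via setPendingNonce and pulled back down via
        // updatePendingNonce when a pending transaction is dropped, while getPendingNonce
        // never reports less than the state nonce. Hypothetical in-package usage,
        // assuming the pool lock is already held:
        func examplePendingNonceTracking(pool *TxPool, addr common.Address) {
        	base := pool.getPendingNonce(addr) // at least the current state nonce
        	pool.setPendingNonce(addr, base+3) // e.g. after promoting txs base, base+1, base+2
        	pool.updatePendingNonce(addr, base+1)
        	// getPendingNonce(addr) now reports base+1, so the dropped slot can be refilled.
        }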
  1674  
  1675  // addressByHeartbeat is an account address tagged with its last activity timestamp.
  1676  type addressByHeartbeat struct {
  1677  	address   common.Address
  1678  	heartbeat time.Time
  1679  }
  1680  
  1681  type addresssByHeartbeat []addressByHeartbeat
  1682  
  1683  func (a addresssByHeartbeat) Len() int           { return len(a) }
  1684  func (a addresssByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  1685  func (a addresssByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
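
        // Illustrative sketch (editor's addition): addresssByHeartbeat implements
        // sort.Interface with the oldest heartbeat first, and the eviction loop in
        // promoteExecutables pops candidates from the end of the sorted slice.
        // Hypothetical standalone usage picking the least recently active account:
        func leastRecentlyActive(beats map[common.Address]time.Time) common.Address {
        	byBeat := make(addresssByHeartbeat, 0, len(beats))
        	for addr, t := range beats {
        		byBeat = append(byBeat, addressByHeartbeat{addr, t})
        	}
        	sort.Sort(byBeat)
        	if len(byBeat) == 0 {
        		return common.Address{}
        	}
        	return byBeat[0].address // the oldest heartbeat sorts first
        }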
  1686  
  1687  // accountSet is simply a set of addresses to check for existence, and a signer
  1688  // capable of deriving addresses from transactions.
  1689  type accountSet struct {
  1690  	accounts map[common.Address]struct{}
  1691  	signer   types.Signer
  1692  }
  1693  
  1694  // newAccountSet creates a new address set with an associated signer for sender
  1695  // derivations.
  1696  func newAccountSet(signer types.Signer) *accountSet {
  1697  	return &accountSet{
  1698  		accounts: make(map[common.Address]struct{}),
  1699  		signer:   signer,
  1700  	}
  1701  }
  1702  
  1703  // contains checks if a given address is contained within the set.
  1704  func (as *accountSet) contains(addr common.Address) bool {
  1705  	_, exist := as.accounts[addr]
  1706  	return exist
  1707  }
  1708  
  1709  // containsTx checks if the sender of a given tx is within the set. If the sender
  1710  // cannot be derived, this method returns false.
  1711  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  1712  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1713  		return as.contains(addr)
  1714  	}
  1715  	return false
  1716  }
  1717  
  1718  // add inserts a new address into the set to track.
  1719  func (as *accountSet) add(addr common.Address) {
  1720  	as.accounts[addr] = struct{}{}
  1721  }
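
        // Illustrative sketch (editor's addition): the pool's locals field uses an
        // accountSet like this to remember local senders. Hypothetical usage, assuming a
        // signer and a signed transaction are available:
        func exampleAccountSet(signer types.Signer, tx *types.Transaction) bool {
        	locals := newAccountSet(signer)
        	if addr, err := types.Sender(signer, tx); err == nil {
        		locals.add(addr)
        	}
        	return locals.containsTx(tx) // true iff the sender could be derived and added above
        }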
  1722  
  1723  // txLookup is used internally by TxPool to track transactions while allowing lookup without
  1724  // mutex contention.
  1725  //
  1726  // Note, although this type is properly protected against concurrent access, it
  1727  // is **not** a type that should ever be mutated or even exposed outside of the
  1728  // transaction pool, since its internal state is tightly coupled with the pools
  1729  // transaction pool, since its internal state is tightly coupled with the pool's
  1730  // peeking into the pool in TxPool.Get without having to acquire the widely scoped
  1731  // TxPool.mu mutex.
  1732  type txLookup struct {
  1733  	all   map[common.Hash]*types.Transaction
  1734  	slots int
  1735  	lock  sync.RWMutex
  1736  }
  1737  
  1738  // newTxLookup returns a new txLookup structure.
  1739  func newTxLookup() *txLookup {
  1740  	slotsGauge.Update(int64(0))
  1741  	return &txLookup{
  1742  		all: make(map[common.Hash]*types.Transaction),
  1743  	}
  1744  }
  1745  
  1746  // Slots returns the current number of slots used in the lookup.
  1747  func (t *txLookup) Slots() int {
  1748  	t.lock.RLock()
  1749  	defer t.lock.RUnlock()
  1750  
  1751  	return t.slots
  1752  }
  1753  
  1754  // Range calls f on each key and value present in the map.
  1755  func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
  1756  	t.lock.RLock()
  1757  	defer t.lock.RUnlock()
  1758  
  1759  	for key, value := range t.all {
  1760  		if !f(key, value) {
  1761  			break
  1762  		}
  1763  	}
  1764  }
  1765  
  1766  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1767  func (t *txLookup) Get(hash common.Hash) *types.Transaction {
  1768  	t.lock.RLock()
  1769  	defer t.lock.RUnlock()
  1770  
  1771  	return t.all[hash]
  1772  }
  1773  
  1774  // Count returns the current number of items in the lookup.
  1775  func (t *txLookup) Count() int {
  1776  	t.lock.RLock()
  1777  	defer t.lock.RUnlock()
  1778  
  1779  	return len(t.all)
  1780  }
  1781  
  1782  // Add adds a transaction to the lookup.
  1783  func (t *txLookup) Add(tx *types.Transaction) {
  1784  	t.lock.Lock()
  1785  	defer t.lock.Unlock()
  1786  
  1787  	t.slots += numSlots(tx)
  1788  	slotsGauge.Update(int64(t.slots))
  1789  
  1790  	t.all[tx.Hash()] = tx
  1791  }
  1792  
  1793  // Remove removes a transaction from the lookup.
  1794  func (t *txLookup) Remove(hash common.Hash) {
  1795  	t.lock.Lock()
  1796  	defer t.lock.Unlock()
  1797  
  1798  	t.slots -= numSlots(t.all[hash])
  1799  	slotsGauge.Update(int64(t.slots))
  1800  
  1801  	delete(t.all, hash)
  1802  }
  1803  
  1804  // numSlots calculates the number of slots needed for a single transaction.
  1805  func numSlots(tx *types.Transaction) int {
  1806  	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
  1807  }
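
        // Illustrative sketch (editor's addition): numSlots is a ceiling division by
        // txSlotSize (32 KiB), e.g. a 100 KiB (102400-byte) transaction occupies
        // (102400+32768-1)/32768 = 4 slots. Expressed as a hypothetical check:
        func exampleNumSlotsArithmetic() bool {
        	return (102400+txSlotSize-1)/txSlotSize == 4
        }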