github.com/MetalBlockchain/subnet-evm@v0.4.9/core/tx_pool.go

     1  // (c) 2019-2020, Ava Labs, Inc.
     2  //
     3  // This file is a derived work, based on the go-ethereum library whose original
     4  // notices appear below.
     5  //
     6  // It is distributed under a license compatible with the licensing terms of the
     7  // original code from which it is derived.
     8  //
     9  // Much love to the original authors for their work.
    10  // **********
    11  // Copyright 2014 The go-ethereum Authors
    12  // This file is part of the go-ethereum library.
    13  //
    14  // The go-ethereum library is free software: you can redistribute it and/or modify
    15  // it under the terms of the GNU Lesser General Public License as published by
    16  // the Free Software Foundation, either version 3 of the License, or
    17  // (at your option) any later version.
    18  //
    19  // The go-ethereum library is distributed in the hope that it will be useful,
    20  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    21  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    22  // GNU Lesser General Public License for more details.
    23  //
    24  // You should have received a copy of the GNU Lesser General Public License
    25  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    26  
    27  package core
    28  
    29  import (
    30  	"errors"
    31  	"fmt"
    32  	"math"
    33  	"math/big"
    34  	"sort"
    35  	"sync"
    36  	"sync/atomic"
    37  	"time"
    38  
    39  	"github.com/MetalBlockchain/subnet-evm/commontype"
    40  	"github.com/MetalBlockchain/subnet-evm/consensus/dummy"
    41  	"github.com/MetalBlockchain/subnet-evm/core/state"
    42  	"github.com/MetalBlockchain/subnet-evm/core/types"
    43  	"github.com/MetalBlockchain/subnet-evm/metrics"
    44  	"github.com/MetalBlockchain/subnet-evm/params"
    45  	"github.com/MetalBlockchain/subnet-evm/precompile"
    46  	"github.com/ethereum/go-ethereum/common"
    47  	"github.com/ethereum/go-ethereum/common/prque"
    48  	"github.com/ethereum/go-ethereum/event"
    49  	"github.com/ethereum/go-ethereum/log"
    50  )
    51  
    52  const (
    53  	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    54  	chainHeadChanSize = 10
    55  
    56  	// txSlotSize is used to calculate how many data slots a single transaction
    57  	// takes up based on its size. The slots are used as DoS protection, ensuring
    58  	// that validating a new transaction remains a constant operation (in reality
    59  // O(maxslots), where maxslots is currently 4).
    60  	txSlotSize = 32 * 1024
    61  
    62  	// txMaxSize is the maximum size a single transaction can have. This field has
    63  	// non-trivial consequences: larger transactions are significantly harder and
    64  	// more expensive to propagate; larger transactions also take more resources
    65  	// to validate whether they fit into the pool or not.
    66  	//
    67  	// Note: the max contract size is 24KB
    68  	txMaxSize = 4 * txSlotSize // 128KB
    69  )
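
// The slot count of a transaction follows from these constants. numSlots is
// defined elsewhere in this package; the hypothetical helper below is only a
// minimal sketch of the ceiling division it implies.
func exampleSlotCount(txSize uint64) uint64 {
	// Hypothetical sketch: every started 32KB chunk occupies one slot, so a
	// transaction of maximal size (txMaxSize) occupies exactly 4 slots.
	return (txSize + txSlotSize - 1) / txSlotSize
}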
    70  
    71  var (
    72  // ErrAlreadyKnown is returned if the transaction is already contained
    73  	// within the pool.
    74  	ErrAlreadyKnown = errors.New("already known")
    75  
    76  	// ErrInvalidSender is returned if the transaction contains an invalid signature.
    77  	ErrInvalidSender = errors.New("invalid sender")
    78  
    79  	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
    80  	// configured for the transaction pool.
    81  	ErrUnderpriced = errors.New("transaction underpriced")
    82  
    83  	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
    84  	// another remote transaction.
    85  	ErrTxPoolOverflow = errors.New("txpool is full")
    86  
    87  	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
    88  	// with a different one without the required price bump.
    89  	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
    90  
    91  	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
    92  	// maximum allowance of the current block.
    93  	ErrGasLimit = errors.New("exceeds block gas limit")
    94  
    95  	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
    96  	// transaction with a negative value.
    97  	ErrNegativeValue = errors.New("negative value")
    98  
    99  	// ErrOversizedData is returned if the input data of a transaction is greater
   100  	// than some meaningful limit a user might use. This is not a consensus error
   101  	// making the transaction invalid, but rather a DoS protection.
   102  	ErrOversizedData = errors.New("oversized data")
   103  )
   104  
   105  var (
   106  	evictionInterval      = time.Minute      // Time interval to check for evictable transactions
   107  	statsReportInterval   = 8 * time.Second  // Time interval to report transaction pool stats
   108  	baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after SubnetEVM is enabled
   109  )
   110  
   111  var (
   112  	// Metrics for the pending pool
   113  	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
   114  	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
   115  	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
   116  	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
   117  
   118  	// Metrics for the queued pool
   119  	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
   120  	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
   121  	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
   122  	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
   123  	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
   124  
   125  	// General tx metrics
   126  	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
   127  	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
   128  	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
   129  	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
   130  	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
   131  	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
   132  	// txpool reorgs.
   133  	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
   134  	// reorgDurationTimer measures how long a txpool reorg takes.
   135  	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
   136  	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
   137  	// that this number is pretty low, since txpool reorgs happen very frequently.
   138  	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
   139  
   140  	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
   141  	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
   142  	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
   143  	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
   144  
   145  	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
   146  )
   147  
   148  // TxStatus is the current status of a transaction as seen by the pool.
   149  type TxStatus uint
   150  
   151  const (
   152  	TxStatusUnknown TxStatus = iota
   153  	TxStatusQueued
   154  	TxStatusPending
   155  )
   156  
   157  // blockChain provides the state of the blockchain and the current gas limit
   158  // for pre-checks in the tx pool and for event subscribers.
   159  type blockChain interface {
   160  	CurrentBlock() *types.Block
   161  	GetBlock(hash common.Hash, number uint64) *types.Block
   162  	StateAt(root common.Hash) (*state.StateDB, error)
   163  	SenderCacher() *TxSenderCacher
   164  	GetFeeConfigAt(parent *types.Header) (commontype.FeeConfig, *big.Int, error)
   165  
   166  	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
   167  }
   168  
   169  // TxPoolConfig are the configuration parameters of the transaction pool.
   170  type TxPoolConfig struct {
   171  	Locals    []common.Address // Addresses that should be treated by default as local
   172  	NoLocals  bool             // Whether local transaction handling should be disabled
   173  	Journal   string           // Journal of local transactions to survive node restarts
   174  	Rejournal time.Duration    // Time interval to regenerate the local transaction journal
   175  
   176  	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
   177  	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
   178  
   179  	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
   180  	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
   181  	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
   182  	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
   183  
   184  	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
   185  }
   186  
   187  // DefaultTxPoolConfig contains the default configurations for the transaction
   188  // pool.
   189  var DefaultTxPoolConfig = TxPoolConfig{
   190  	Journal:   "transactions.rlp",
   191  	Rejournal: time.Hour,
   192  
   193  	PriceLimit: 1,
   194  	PriceBump:  10,
   195  
   196  	AccountSlots: 16,
   197  	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
   198  	AccountQueue: 64,
   199  	GlobalQueue:  1024,
   200  
   201  	Lifetime: 3 * time.Hour,
   202  }
   203  
   204  // sanitize checks the provided user configurations and changes anything that's
   205  // unreasonable or unworkable.
   206  func (config *TxPoolConfig) sanitize() TxPoolConfig {
   207  	conf := *config
   208  	if conf.Rejournal < time.Second {
   209  		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
   210  		conf.Rejournal = time.Second
   211  	}
   212  	if conf.PriceLimit < 1 {
   213  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
   214  		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
   215  	}
   216  	if conf.PriceBump < 1 {
   217  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
   218  		conf.PriceBump = DefaultTxPoolConfig.PriceBump
   219  	}
   220  	if conf.AccountSlots < 1 {
   221  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
   222  		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
   223  	}
   224  	if conf.GlobalSlots < 1 {
   225  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
   226  		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
   227  	}
   228  	if conf.AccountQueue < 1 {
   229  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
   230  		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
   231  	}
   232  	if conf.GlobalQueue < 1 {
   233  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
   234  		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
   235  	}
   236  	if conf.Lifetime < 1 {
   237  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
   238  		conf.Lifetime = DefaultTxPoolConfig.Lifetime
   239  	}
   240  	return conf
   241  }
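
// newCustomPoolConfig is a hypothetical helper (a minimal sketch, not part of
// the upstream API) showing how DefaultTxPoolConfig is typically copied and
// tweaked before being handed to NewTxPool; sanitize clamps anything
// unworkable. The field values here are illustrative assumptions only.
func newCustomPoolConfig() TxPoolConfig {
	cfg := DefaultTxPoolConfig
	cfg.NoLocals = true            // assumption: treat every transaction as remote
	cfg.PriceLimit = 1_000_000_000 // assumption: 1 gwei minimum gas price
	cfg.GlobalQueue = 512          // assumption: smaller non-executable buffer
	return (&cfg).sanitize()       // clamps zero/invalid fields back to defaults
}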
   242  
   243  // TxPool contains all currently known transactions. Transactions
   244  // enter the pool when they are received from the network or submitted
   245  // locally. They exit the pool when they are included in the blockchain.
   246  //
   247  // The pool separates processable transactions (which can be applied to the
   248  // current state) and future transactions. Transactions move between those
   249  // two states over time as they are received and processed.
   250  type TxPool struct {
   251  	config      TxPoolConfig
   252  	chainconfig *params.ChainConfig
   253  	chain       blockChain
   254  	gasPrice    *big.Int
   255  	minimumFee  *big.Int
   256  	txFeed      event.Feed
   257  	headFeed    event.Feed
   258  	reorgFeed   event.Feed
   259  	scope       event.SubscriptionScope
   260  	signer      types.Signer
   261  	mu          sync.RWMutex
   262  
   263  	istanbul bool // Fork indicator whether we are in the istanbul stage.
   264  	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
   265  	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.
   266  
   267  	currentHead *types.Header
   268  	// [currentState] is the state of the blockchain head. It is reset whenever
   269  	// head changes.
   270  	currentState *state.StateDB
   271  	// [currentStateLock] is required to allow concurrent access to address nonces
   272  	// and balances during reorgs and gossip handling.
   273  	currentStateLock sync.Mutex
   274  
   275  	pendingNonces *txNoncer // Pending state tracking virtual nonces
   276  	currentMaxGas uint64    // Current gas limit for transaction caps
   277  
   278  	locals  *accountSet // Set of local transactions to exempt from eviction rules
   279  	journal *txJournal  // Journal of local transactions to back up to disk
   280  
   281  	pending map[common.Address]*txList   // All currently processable transactions
   282  	queue   map[common.Address]*txList   // Queued but non-processable transactions
   283  	beats   map[common.Address]time.Time // Last heartbeat from each known account
   284  	all     *txLookup                    // All transactions to allow lookups
   285  	priced  *txPricedList                // All transactions sorted by price
   286  
   287  	chainHeadCh         chan ChainHeadEvent
   288  	chainHeadSub        event.Subscription
   289  	reqResetCh          chan *txpoolResetRequest
   290  	reqPromoteCh        chan *accountSet
   291  	queueTxEventCh      chan *types.Transaction
   292  	reorgDoneCh         chan chan struct{}
   293  	reorgShutdownCh     chan struct{} // requests shutdown of scheduleReorgLoop
   294  	generalShutdownChan chan struct{} // closed when the transaction pool is stopped. Any goroutine can listen
   295  	// to this to be notified if it should shut down.
   296  	wg         sync.WaitGroup // tracks loop, scheduleReorgLoop
   297  	initDoneCh chan struct{}  // is closed once the pool is initialized (for tests)
   298  
   299  	changesSinceReorg int // A counter for how many drops we've performed in-between reorgs.
   300  }
   301  
   302  type txpoolResetRequest struct {
   303  	oldHead, newHead *types.Header
   304  }
   305  
   306  // NewTxPool creates a new transaction pool to gather, sort and filter inbound
   307  // transactions from the network.
   308  func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
   309  	// Sanitize the input to ensure no vulnerable gas prices are set
   310  	config = (&config).sanitize()
   311  
   312  	// Create the transaction pool with its initial settings
   313  	pool := &TxPool{
   314  		config:              config,
   315  		chainconfig:         chainconfig,
   316  		chain:               chain,
   317  		signer:              types.LatestSigner(chainconfig),
   318  		pending:             make(map[common.Address]*txList),
   319  		queue:               make(map[common.Address]*txList),
   320  		beats:               make(map[common.Address]time.Time),
   321  		all:                 newTxLookup(),
   322  		chainHeadCh:         make(chan ChainHeadEvent, chainHeadChanSize),
   323  		reqResetCh:          make(chan *txpoolResetRequest),
   324  		reqPromoteCh:        make(chan *accountSet),
   325  		queueTxEventCh:      make(chan *types.Transaction),
   326  		reorgDoneCh:         make(chan chan struct{}),
   327  		reorgShutdownCh:     make(chan struct{}),
   328  		initDoneCh:          make(chan struct{}),
   329  		generalShutdownChan: make(chan struct{}),
   330  		gasPrice:            new(big.Int).SetUint64(config.PriceLimit),
   331  	}
   332  	pool.locals = newAccountSet(pool.signer)
   333  	for _, addr := range config.Locals {
   334  		log.Info("Setting new local account", "address", addr)
   335  		pool.locals.add(addr)
   336  	}
   337  	pool.priced = newTxPricedList(pool.all)
   338  	pool.reset(nil, chain.CurrentBlock().Header())
   339  
   340  	// Start the reorg loop early so it can handle requests generated during journal loading.
   341  	pool.wg.Add(1)
   342  	go pool.scheduleReorgLoop()
   343  
   344  	// If local transactions and journaling is enabled, load from disk
   345  	if !config.NoLocals && config.Journal != "" {
   346  		pool.journal = newTxJournal(config.Journal)
   347  
   348  		if err := pool.journal.load(pool.AddLocals); err != nil {
   349  			log.Warn("Failed to load transaction journal", "err", err)
   350  		}
   351  		if err := pool.journal.rotate(pool.local()); err != nil {
   352  			log.Warn("Failed to rotate transaction journal", "err", err)
   353  		}
   354  	}
   355  
   356  	// Subscribe events from blockchain and start the main event loop.
   357  	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
   358  	pool.wg.Add(1)
   359  	go pool.loop()
   360  
   361  	pool.startPeriodicFeeUpdate()
   362  
   363  	return pool
   364  }
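
// A minimal lifecycle sketch, assuming some value `chain` that satisfies the
// blockChain interface above (how to obtain one is chain-specific and omitted)
// and a chain config such as params.TestChainConfig:
//
//	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, chain)
//	defer pool.Stop()
//	errs := pool.AddLocals(txs) // one error slot per submitted transaction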
   365  
   366  // loop is the transaction pool's main event loop, waiting for and reacting to
   367  // outside blockchain events as well as for various reporting and transaction
   368  // eviction events.
   369  func (pool *TxPool) loop() {
   370  	defer pool.wg.Done()
   371  
   372  	var (
   373  		prevPending, prevQueued, prevStales int
   374  		// Start the stats reporting and transaction eviction tickers
   375  		report  = time.NewTicker(statsReportInterval)
   376  		evict   = time.NewTicker(evictionInterval)
   377  		journal = time.NewTicker(pool.config.Rejournal)
   378  		// Track the previous head headers for transaction reorgs
   379  		head = pool.chain.CurrentBlock()
   380  	)
   381  	defer report.Stop()
   382  	defer evict.Stop()
   383  	defer journal.Stop()
   384  
   385  	// Notify tests that the init phase is done
   386  	close(pool.initDoneCh)
   387  	for {
   388  		select {
   389  		// Handle ChainHeadEvent
   390  		case ev := <-pool.chainHeadCh:
   391  			if ev.Block != nil {
   392  				pool.requestReset(head.Header(), ev.Block.Header())
   393  				head = ev.Block
   394  				pool.headFeed.Send(NewTxPoolHeadEvent{Block: head})
   395  			}
   396  
   397  		// System shutdown.
   398  		case <-pool.chainHeadSub.Err():
   399  			close(pool.reorgShutdownCh)
   400  			return
   401  
   402  		// Handle stats reporting ticks
   403  		case <-report.C:
   404  			pool.mu.RLock()
   405  			pending, queued := pool.stats()
   406  			pool.mu.RUnlock()
   407  			stales := int(atomic.LoadInt64(&pool.priced.stales))
   408  
   409  			if pending != prevPending || queued != prevQueued || stales != prevStales {
   410  				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
   411  				prevPending, prevQueued, prevStales = pending, queued, stales
   412  			}
   413  
   414  		// Handle inactive account transaction eviction
   415  		case <-evict.C:
   416  			pool.mu.Lock()
   417  			for addr := range pool.queue {
   418  				// Skip local transactions from the eviction mechanism
   419  				if pool.locals.contains(addr) {
   420  					continue
   421  				}
   422  				// Any non-locals old enough should be removed
   423  				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
   424  					list := pool.queue[addr].Flatten()
   425  					for _, tx := range list {
   426  						pool.removeTx(tx.Hash(), true)
   427  					}
   428  					queuedEvictionMeter.Mark(int64(len(list)))
   429  				}
   430  			}
   431  			pool.mu.Unlock()
   432  
   433  		// Handle local transaction journal rotation
   434  		case <-journal.C:
   435  			if pool.journal != nil {
   436  				pool.mu.Lock()
   437  				if err := pool.journal.rotate(pool.local()); err != nil {
   438  					log.Warn("Failed to rotate local tx journal", "err", err)
   439  				}
   440  				pool.mu.Unlock()
   441  			}
   442  		}
   443  	}
   444  }
   445  
   446  // Stop terminates the transaction pool.
   447  func (pool *TxPool) Stop() {
   448  	// Unsubscribe all subscriptions registered from txpool
   449  	pool.scope.Close()
   450  
   451  	close(pool.generalShutdownChan)
   452  	// Unsubscribe subscriptions registered from blockchain
   453  	pool.chainHeadSub.Unsubscribe()
   454  	pool.wg.Wait()
   455  
   456  	if pool.journal != nil {
   457  		pool.journal.close()
   458  	}
   459  	log.Info("Transaction pool stopped")
   460  }
   461  
   462  // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
   463  // starts sending event to the given channel.
   464  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
   465  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   466  }
   467  
   468  // SubscribeNewHeadEvent registers a subscription of NewHeadEvent and
   469  // starts sending event to the given channel.
   470  func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- NewTxPoolHeadEvent) event.Subscription {
   471  	return pool.scope.Track(pool.headFeed.Subscribe(ch))
   472  }
   473  
   474  // SubscribeNewReorgEvent registers a subscription of NewReorgEvent and
   475  // starts sending event to the given channel.
   476  func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- NewTxPoolReorgEvent) event.Subscription {
   477  	return pool.scope.Track(pool.reorgFeed.Subscribe(ch))
   478  }
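
// exampleDrainNewTxs is a hypothetical sketch of consuming the NewTxsEvent
// feed exposed above; the buffer size and quit channel are illustrative
// assumptions, not part of the original file.
func exampleDrainNewTxs(pool *TxPool, quit <-chan struct{}) {
	ch := make(chan NewTxsEvent, 16) // assumption: modest buffer for bursts
	sub := pool.SubscribeNewTxsEvent(ch)
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-ch:
			log.Trace("pool announced transactions", "count", len(ev.Txs))
		case <-sub.Err(): // subscription torn down, e.g. the pool stopped
			return
		case <-quit:
			return
		}
	}
}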
   479  
   480  // GasPrice returns the current gas price enforced by the transaction pool.
   481  func (pool *TxPool) GasPrice() *big.Int {
   482  	pool.mu.RLock()
   483  	defer pool.mu.RUnlock()
   484  
   485  	return new(big.Int).Set(pool.gasPrice)
   486  }
   487  
   488  // SetGasPrice updates the minimum price required by the transaction pool for a
   489  // new transaction, and drops all transactions below this threshold.
   490  func (pool *TxPool) SetGasPrice(price *big.Int) {
   491  	pool.mu.Lock()
   492  	defer pool.mu.Unlock()
   493  
   494  	old := pool.gasPrice
   495  	pool.gasPrice = price
   496  	// if the min miner fee increased, remove transactions below the new threshold
   497  	if price.Cmp(old) > 0 {
   498  		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
   499  		drop := pool.all.RemotesBelowTip(price)
   500  		for _, tx := range drop {
   501  			pool.removeTx(tx.Hash(), false)
   502  		}
   503  		pool.priced.Removed(len(drop))
   504  	}
   505  
   506  	log.Info("Transaction pool price threshold updated", "price", price)
   507  }
   508  
   509  func (pool *TxPool) SetMinFee(minFee *big.Int) {
   510  	pool.mu.Lock()
   511  	defer pool.mu.Unlock()
   512  
   513  	pool.minimumFee = minFee
   514  }
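
// exampleRaisePriceFloor is a hypothetical sketch: per SetGasPrice above,
// raising the pool's floor drops all remote transactions whose tip falls
// below the new threshold. Doubling is an arbitrary illustrative choice.
func exampleRaisePriceFloor(pool *TxPool) {
	old := pool.GasPrice() // returns a copy, safe to reuse
	pool.SetGasPrice(new(big.Int).Mul(old, big.NewInt(2)))
}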
   515  
   516  // Nonce returns the next nonce of an account, with all transactions executable
   517  // by the pool already applied on top.
   518  func (pool *TxPool) Nonce(addr common.Address) uint64 {
   519  	pool.mu.RLock()
   520  	defer pool.mu.RUnlock()
   521  
   522  	return pool.pendingNonces.get(addr)
   523  }
   524  
   525  // Stats retrieves the current pool stats, namely the number of pending and the
   526  // number of queued (non-executable) transactions.
   527  func (pool *TxPool) Stats() (int, int) {
   528  	pool.mu.RLock()
   529  	defer pool.mu.RUnlock()
   530  
   531  	return pool.stats()
   532  }
   533  
   534  // stats retrieves the current pool stats, namely the number of pending and the
   535  // number of queued (non-executable) transactions.
   536  func (pool *TxPool) stats() (int, int) {
   537  	pending := 0
   538  	for _, list := range pool.pending {
   539  		pending += list.Len()
   540  	}
   541  	queued := 0
   542  	for _, list := range pool.queue {
   543  		queued += list.Len()
   544  	}
   545  	return pending, queued
   546  }
   547  
   548  // Content retrieves the data content of the transaction pool, returning all the
   549  // pending as well as queued transactions, grouped by account and sorted by nonce.
   550  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   551  	pool.mu.Lock()
   552  	defer pool.mu.Unlock()
   553  
   554  	pending := make(map[common.Address]types.Transactions)
   555  	for addr, list := range pool.pending {
   556  		pending[addr] = list.Flatten()
   557  	}
   558  	queued := make(map[common.Address]types.Transactions)
   559  	for addr, list := range pool.queue {
   560  		queued[addr] = list.Flatten()
   561  	}
   562  	return pending, queued
   563  }
   564  
   565  // ContentFrom retrieves the data content of the transaction pool, returning the
   566  // pending as well as queued transactions of this address, grouped by nonce.
   567  func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
   568  	pool.mu.RLock()
   569  	defer pool.mu.RUnlock()
   570  
   571  	var pending types.Transactions
   572  	if list, ok := pool.pending[addr]; ok {
   573  		pending = list.Flatten()
   574  	}
   575  	var queued types.Transactions
   576  	if list, ok := pool.queue[addr]; ok {
   577  		queued = list.Flatten()
   578  	}
   579  	return pending, queued
   580  }
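
// examplePoolSnapshot is a hypothetical sketch combining the read-only
// inspection helpers above; ContentFrom returns nonce-sorted copies that the
// caller may freely modify.
func examplePoolSnapshot(pool *TxPool, addr common.Address) {
	pending, queued := pool.Stats()
	log.Trace("pool totals", "pending", pending, "queued", queued)

	pTxs, qTxs := pool.ContentFrom(addr)
	log.Trace("account view", "addr", addr, "pending", len(pTxs), "queued", len(qTxs),
		"nextNonce", pool.Nonce(addr)) // next nonce with pool txs applied on top
}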
   581  
   582  // Pending retrieves all currently processable transactions, grouped by origin
   583  // account and sorted by nonce. The returned transaction set is a copy and can be
   584  // freely modified by calling code.
   585  //
   586  // The enforceTips parameter can be used to do an extra filtering on the pending
   587  // transactions and only return those whose **effective** tip is large enough in
   588  // the next pending execution environment.
   589  func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
   590  	pool.mu.Lock()
   591  	defer pool.mu.Unlock()
   592  
   593  	pending := make(map[common.Address]types.Transactions)
   594  	for addr, list := range pool.pending {
   595  		txs := list.Flatten()
   596  
   597  		// If the miner requests tip enforcement, cap the lists now
   598  		if enforceTips && !pool.locals.contains(addr) {
   599  			for i, tx := range txs {
   600  				if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
   601  					txs = txs[:i]
   602  					break
   603  				}
   604  			}
   605  		}
   606  		if len(txs) > 0 {
   607  			pending[addr] = txs
   608  		}
   609  	}
   610  	return pending
   611  }
   612  
   613  // PendingSize returns the number of pending txs in the tx pool.
   614  func (pool *TxPool) PendingSize() int {
   615  	pending := pool.Pending(true)
   616  	count := 0
   617  	for _, txs := range pending {
   618  		count += len(txs)
   619  	}
   620  	return count
   621  }
   622  
   623  // PendingFrom returns the same set of transactions that would be returned from Pending restricted to only
   624  // transactions from [addrs].
   625  func (pool *TxPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address]types.Transactions {
   626  	pool.mu.Lock()
   627  	defer pool.mu.Unlock()
   628  
   629  	pending := make(map[common.Address]types.Transactions)
   630  	for _, addr := range addrs {
   631  		list, ok := pool.pending[addr]
   632  		if !ok {
   633  			continue
   634  		}
   635  		txs := list.Flatten()
   636  
   637  		// If the miner requests tip enforcement, cap the lists now
   638  		if enforceTips && !pool.locals.contains(addr) {
   639  			for i, tx := range txs {
   640  				if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
   641  					txs = txs[:i]
   642  					break
   643  				}
   644  			}
   645  		}
   646  		if len(txs) > 0 {
   647  			pending[addr] = txs
   648  		}
   649  	}
   650  	return pending
   651  }
   652  
   653  // Locals retrieves the accounts currently considered local by the pool.
   654  func (pool *TxPool) Locals() []common.Address {
   655  	pool.mu.Lock()
   656  	defer pool.mu.Unlock()
   657  
   658  	return pool.locals.flatten()
   659  }
   660  
   661  // local retrieves all currently known local transactions, grouped by origin
   662  // account and sorted by nonce. The returned transaction set is a copy and can be
   663  // freely modified by calling code.
   664  func (pool *TxPool) local() map[common.Address]types.Transactions {
   665  	txs := make(map[common.Address]types.Transactions)
   666  	for addr := range pool.locals.accounts {
   667  		if pending := pool.pending[addr]; pending != nil {
   668  			txs[addr] = append(txs[addr], pending.Flatten()...)
   669  		}
   670  		if queued := pool.queue[addr]; queued != nil {
   671  			txs[addr] = append(txs[addr], queued.Flatten()...)
   672  		}
   673  	}
   674  	return txs
   675  }
   676  
   677  // checkTxState checks transaction validity against the current state.
   678  func (pool *TxPool) checkTxState(from common.Address, tx *types.Transaction) error {
   679  	pool.currentStateLock.Lock()
   680  	defer pool.currentStateLock.Unlock()
   681  
   682  	// cost == V + GP * GL
   683  	if balance, cost := pool.currentState.GetBalance(from), tx.Cost(); balance.Cmp(cost) < 0 {
   684  		return fmt.Errorf("%w: address %s have (%d) want (%d)", ErrInsufficientFunds, from.Hex(), balance, cost)
   685  	}
   686  
   687  	txNonce := tx.Nonce()
   688  	// Ensure the transaction adheres to nonce ordering
   689  	if currentNonce := pool.currentState.GetNonce(from); currentNonce > txNonce {
   690  		return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)",
   691  			ErrNonceTooLow, from.Hex(), currentNonce, txNonce)
   692  	}
   693  
   694  	// If the tx allow list is enabled, return an error if the from address is not allow listed.
   695  	headTimestamp := big.NewInt(int64(pool.currentHead.Time))
   696  	if pool.chainconfig.IsTxAllowList(headTimestamp) {
   697  		txAllowListRole := precompile.GetTxAllowListStatus(pool.currentState, from)
   698  		if !txAllowListRole.IsEnabled() {
   699  			return fmt.Errorf("%w: %s", precompile.ErrSenderAddressNotAllowListed, from)
   700  		}
   701  	}
   702  	return nil
   703  }
   704  
   705  // validateTx checks whether a transaction is valid according to the consensus
   706  // rules and adheres to some heuristic limits of the local node (price and size).
   707  func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
   708  	// Accept only legacy transactions until EIP-2718/2930 activates.
   709  	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
   710  		return ErrTxTypeNotSupported
   711  	}
   712  	// Reject dynamic fee transactions until EIP-1559 activates.
   713  	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
   714  		return ErrTxTypeNotSupported
   715  	}
   716  	// Reject transactions over defined size to prevent DOS attacks
   717  	if txSize := uint64(tx.Size()); txSize > txMaxSize {
   718  		return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, txSize, txMaxSize)
   719  	}
   720  	// Transactions can't be negative. This may never happen using RLP decoded
   721  	// transactions but may occur if you create a transaction using the RPC.
   722  	if tx.Value().Sign() < 0 {
   723  		return ErrNegativeValue
   724  	}
   725  	// Ensure the transaction doesn't exceed the current block limit gas.
   726  	if txGas := tx.Gas(); pool.currentMaxGas < txGas {
   727  		return fmt.Errorf("%w: tx gas (%d) > current max gas (%d)", ErrGasLimit, txGas, pool.currentMaxGas)
   728  	}
   729  	// Sanity check for extremely large numbers
   730  	if tx.GasFeeCap().BitLen() > 256 {
   731  		return ErrFeeCapVeryHigh
   732  	}
   733  	if tx.GasTipCap().BitLen() > 256 {
   734  		return ErrTipVeryHigh
   735  	}
   736  	// Ensure gasFeeCap is greater than or equal to gasTipCap.
   737  	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
   738  		return ErrTipAboveFeeCap
   739  	}
   740  	// Make sure the transaction is signed properly.
   741  	from, err := types.Sender(pool.signer, tx)
   742  	if err != nil {
   743  		return ErrInvalidSender
   744  	}
   745  	// Drop non-local transactions under our own minimal accepted gas price or tip
   746  	if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
   747  		return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice)
   748  	}
   749  	// Drop the transaction if the gas fee cap is below the pool's minimum fee
   750  	if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 {
   751  		return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee)
   752  	}
   753  
   754  	// Ensure the sender has sufficient funds, the tx adheres to nonce ordering
   755  	// and, if the tx allow list is enabled, the sender is allow listed.
   756  	if err := pool.checkTxState(from, tx); err != nil {
   757  		return err
   758  	}
   759  
   760  	// Ensure the transaction has more gas than the basic tx fee.
   761  	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
   762  	if err != nil {
   763  		return err
   764  	}
   765  	if txGas := tx.Gas(); txGas < intrGas {
   766  		return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas)
   767  	}
   768  	return nil
   769  }
   770  
   771  // add validates a transaction and inserts it into the non-executable queue for later
   772  // pending promotion and execution. If the transaction is a replacement for an already
   773  // pending or queued one, it overwrites the previous transaction if its price is higher.
   774  //
   775  // If a newly added transaction is marked as local, its sending account will be
   776  // added to the allowlist, preventing any associated transaction from being dropped
   777  // out of the pool due to pricing constraints.
   778  func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
   779  	// If the transaction is already known, discard it
   780  	hash := tx.Hash()
   781  	if pool.all.Get(hash) != nil {
   782  		log.Trace("Discarding already known transaction", "hash", hash)
   783  		knownTxMeter.Mark(1)
   784  		return false, ErrAlreadyKnown
   785  	}
   786  	// Determine the local flag: if the tx comes from a local source, or from
   787  	// the network but the sender was previously marked local, treat it as local.
   788  	isLocal := local || pool.locals.containsTx(tx)
   789  
   790  	// If the transaction fails basic validation, discard it
   791  	if err := pool.validateTx(tx, isLocal); err != nil {
   792  		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
   793  		invalidTxMeter.Mark(1)
   794  		return false, err
   795  	}
   796  	// If the transaction pool is full, discard underpriced transactions
   797  	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
   798  		// If the new transaction is underpriced, don't accept it
   799  		if !isLocal && pool.priced.Underpriced(tx) {
   800  			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   801  			underpricedTxMeter.Mark(1)
   802  			return false, ErrUnderpriced
   803  		}
   804  		// We're about to replace a transaction. The reorg does a more thorough
   805  		// analysis of what to remove and how, but it runs async. We don't want to
   806  		// do too many replacements between reorg-runs, so we cap the number of
   807  		// replacements to 25% of the slots
   808  		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
   809  			throttleTxMeter.Mark(1)
   810  			return false, ErrTxPoolOverflow
   811  		}
   812  
   813  		// New transaction is better than our worst ones, make room for it.
   814  		// If it's a local transaction, forcibly discard all available transactions.
   815  		// Otherwise, if we can't make enough room for the new one, abort the operation.
   816  		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
   817  
   818  		// Special case: we still can't make room for the new remote one.
   819  		if !isLocal && !success {
   820  			log.Trace("Discarding overflown transaction", "hash", hash)
   821  			overflowedTxMeter.Mark(1)
   822  			return false, ErrTxPoolOverflow
   823  		}
   824  		// Bump the counter of rejections-since-reorg
   825  		pool.changesSinceReorg += len(drop)
   826  		// Kick out the underpriced remote transactions.
   827  		for _, tx := range drop {
   828  			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   829  			underpricedTxMeter.Mark(1)
   830  			pool.removeTx(tx.Hash(), false)
   831  		}
   832  	}
   833  	// Try to replace an existing transaction in the pending pool
   834  	from, _ := types.Sender(pool.signer, tx) // already validated
   835  	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
   836  		// Nonce already pending, check if required price bump is met
   837  		inserted, old := list.Add(tx, pool.config.PriceBump)
   838  		if !inserted {
   839  			pendingDiscardMeter.Mark(1)
   840  			return false, ErrReplaceUnderpriced
   841  		}
   842  		// New transaction is better, replace old one
   843  		if old != nil {
   844  			pool.all.Remove(old.Hash())
   845  			pool.priced.Removed(1)
   846  			pendingReplaceMeter.Mark(1)
   847  		}
   848  		pool.all.Add(tx, isLocal)
   849  		pool.priced.Put(tx, isLocal)
   850  		pool.journalTx(from, tx)
   851  		pool.queueTxEvent(tx)
   852  		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
   853  
   854  		// Successful promotion, bump the heartbeat
   855  		pool.beats[from] = time.Now()
   856  		return old != nil, nil
   857  	}
   858  	// New transaction isn't replacing a pending one, push into queue
   859  	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
   860  	if err != nil {
   861  		return false, err
   862  	}
   863  	// Mark local addresses and journal local transactions
   864  	if local && !pool.locals.contains(from) {
   865  		log.Info("Setting new local account", "address", from)
   866  		pool.locals.add(from)
   867  		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
   868  	}
   869  	if isLocal {
   870  		localGauge.Inc(1)
   871  	}
   872  	pool.journalTx(from, tx)
   873  
   874  	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
   875  	return replaced, nil
   876  }
   877  
   878  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   879  //
   880  // Note, this method assumes the pool lock is held!
   881  func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
   882  	// Try to insert the transaction into the future queue
   883  	from, _ := types.Sender(pool.signer, tx) // already validated
   884  	if pool.queue[from] == nil {
   885  		pool.queue[from] = newTxList(false)
   886  	}
   887  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
   888  	if !inserted {
   889  		// An older transaction was better, discard this
   890  		queuedDiscardMeter.Mark(1)
   891  		return false, ErrReplaceUnderpriced
   892  	}
   893  	// Discard any previous transaction and mark this
   894  	if old != nil {
   895  		pool.all.Remove(old.Hash())
   896  		pool.priced.Removed(1)
   897  		queuedReplaceMeter.Mark(1)
   898  	} else {
   899  		// Nothing was replaced, bump the queued counter
   900  		queuedGauge.Inc(1)
   901  	}
   902  	// If the transaction isn't in the lookup set but is expected to be there,
   903  	// log an error.
   904  	if pool.all.Get(hash) == nil && !addAll {
   905  		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
   906  	}
   907  	if addAll {
   908  		pool.all.Add(tx, local)
   909  		pool.priced.Put(tx, local)
   910  	}
   911  	// If we never record the heartbeat, do it right now.
   912  	if _, exist := pool.beats[from]; !exist {
   913  		pool.beats[from] = time.Now()
   914  	}
   915  	return old != nil, nil
   916  }
   917  
   918  // journalTx adds the specified transaction to the local disk journal if it is
   919  // deemed to have been sent from a local account.
   920  func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
   921  	// Only journal if it's enabled and the transaction is local
   922  	if pool.journal == nil || !pool.locals.contains(from) {
   923  		return
   924  	}
   925  	if err := pool.journal.insert(tx); err != nil {
   926  		log.Warn("Failed to journal local transaction", "err", err)
   927  	}
   928  }
   929  
   930  // promoteTx adds a transaction to the pending (processable) list of transactions
   931  // and returns whether it was inserted or an older one was better.
   932  //
   933  // Note, this method assumes the pool lock is held!
   934  func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
   935  	// Try to insert the transaction into the pending queue
   936  	if pool.pending[addr] == nil {
   937  		pool.pending[addr] = newTxList(true)
   938  	}
   939  	list := pool.pending[addr]
   940  
   941  	inserted, old := list.Add(tx, pool.config.PriceBump)
   942  	if !inserted {
   943  		// An older transaction was better, discard this
   944  		pool.all.Remove(hash)
   945  		pool.priced.Removed(1)
   946  		pendingDiscardMeter.Mark(1)
   947  		return false
   948  	}
   949  	// Otherwise discard any previous transaction and mark this
   950  	if old != nil {
   951  		pool.all.Remove(old.Hash())
   952  		pool.priced.Removed(1)
   953  		pendingReplaceMeter.Mark(1)
   954  	} else {
   955  		// Nothing was replaced, bump the pending counter
   956  		pendingGauge.Inc(1)
   957  	}
   958  	// Set the potentially new pending nonce and notify any subsystems of the new tx
   959  	pool.pendingNonces.set(addr, tx.Nonce()+1)
   960  
   961  	// Successful promotion, bump the heartbeat
   962  	pool.beats[addr] = time.Now()
   963  	return true
   964  }
   965  
   966  // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
   967  // senders as local ones, ensuring they bypass the local pricing constraints.
   968  //
   969  // This method is used to add transactions from the RPC API and performs synchronous pool
   970  // reorganization and event propagation.
   971  func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
   972  	return pool.addTxs(txs, !pool.config.NoLocals, true)
   973  }
   974  
   975  // AddLocal enqueues a single local transaction into the pool if it is valid. This is
   976  // a convenience wrapper around AddLocals.
   977  func (pool *TxPool) AddLocal(tx *types.Transaction) error {
   978  	errs := pool.AddLocals([]*types.Transaction{tx})
   979  	return errs[0]
   980  }
   981  
   982  // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
   983  // senders are not among the locally tracked ones, full pricing constraints will apply.
   984  //
   985  // This method is used to add transactions from the p2p network and does not wait for pool
   986  // reorganization and internal event propagation.
   987  func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
   988  	return pool.addTxs(txs, false, false)
   989  }
   990  
   991  // AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
   992  func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
   993  	return pool.addTxs(txs, false, true)
   994  }
   995  
   996  // addRemoteSync is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
   997  func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
   998  	errs := pool.AddRemotesSync([]*types.Transaction{tx})
   999  	return errs[0]
  1000  }
  1001  
  1002  // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
  1003  // wrapper around AddRemotes.
  1004  //
  1005  // Deprecated: use AddRemotes
  1006  func (pool *TxPool) AddRemote(tx *types.Transaction) error {
  1007  	errs := pool.AddRemotes([]*types.Transaction{tx})
  1008  	return errs[0]
  1009  }
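
// exampleSubmitRemotes is a hypothetical sketch of the error-handling shape
// shared by the batch add APIs above: the returned slice carries one error
// per input transaction, in order.
func exampleSubmitRemotes(pool *TxPool, txs []*types.Transaction) int {
	accepted := 0
	for i, err := range pool.AddRemotes(txs) {
		switch {
		case err == nil:
			accepted++
		case errors.Is(err, ErrAlreadyKnown):
			// benign: the transaction reached the pool through another path first
		default:
			log.Debug("transaction rejected", "hash", txs[i].Hash(), "err", err)
		}
	}
	return accepted
}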
  1010  
  1011  // addTxs attempts to queue a batch of transactions if they are valid.
  1012  func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
  1013  	// Filter out known ones without obtaining the pool lock or recovering signatures
  1014  	var (
  1015  		errs = make([]error, len(txs))
  1016  		news = make([]*types.Transaction, 0, len(txs))
  1017  	)
  1018  	for i, tx := range txs {
  1019  		// If the transaction is known, pre-set the error slot
  1020  		if pool.all.Get(tx.Hash()) != nil {
  1021  			errs[i] = ErrAlreadyKnown
  1022  			knownTxMeter.Mark(1)
  1023  			continue
  1024  		}
  1025  		// Exclude transactions with invalid signatures as soon as
  1026  		// possible and cache senders in transactions before
  1027  		// obtaining lock
  1028  		_, err := types.Sender(pool.signer, tx)
  1029  		if err != nil {
  1030  			errs[i] = ErrInvalidSender
  1031  			invalidTxMeter.Mark(1)
  1032  			continue
  1033  		}
  1034  		// Accumulate all unknown transactions for deeper processing
  1035  		news = append(news, tx)
  1036  	}
  1037  	if len(news) == 0 {
  1038  		return errs
  1039  	}
  1040  
  1041  	// Process all the new transactions and merge any errors into the original slice
  1042  	pool.mu.Lock()
  1043  	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
  1044  	pool.mu.Unlock()
  1045  
  1046  	nilSlot := 0
  1047  	for _, err := range newErrs {
  1048  		for errs[nilSlot] != nil {
  1049  			nilSlot++
  1050  		}
  1051  		errs[nilSlot] = err
  1052  		nilSlot++
  1053  	}
  1054  	// Reorg the pool internals if needed and return
  1055  	done := pool.requestPromoteExecutables(dirtyAddrs)
  1056  	if sync {
  1057  		<-done
  1058  	}
  1059  	return errs
  1060  }
  1061  
  1062  // addTxsLocked attempts to queue a batch of transactions if they are valid.
  1063  // The transaction pool lock must be held.
  1064  func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
  1065  	dirty := newAccountSet(pool.signer)
  1066  	errs := make([]error, len(txs))
  1067  	for i, tx := range txs {
  1068  		replaced, err := pool.add(tx, local)
  1069  		errs[i] = err
  1070  		if err == nil && !replaced {
  1071  			dirty.addTx(tx)
  1072  		}
  1073  	}
  1074  	validTxMeter.Mark(int64(len(dirty.accounts)))
  1075  	return errs, dirty
  1076  }
  1077  
  1078  // Status returns the status (unknown/pending/queued) of a batch of transactions
  1079  // identified by their hashes.
  1080  func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
  1081  	status := make([]TxStatus, len(hashes))
  1082  	for i, hash := range hashes {
  1083  		tx := pool.Get(hash)
  1084  		if tx == nil {
  1085  			continue
  1086  		}
  1087  		from, _ := types.Sender(pool.signer, tx) // already validated
  1088  		pool.mu.RLock()
  1089  		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
  1090  			status[i] = TxStatusPending
  1091  		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
  1092  			status[i] = TxStatusQueued
  1093  		}
  1094  		// implicit else: the tx may have been included in a block between
  1095  		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct.
  1096  		pool.mu.RUnlock()
  1097  	}
  1098  	return status
  1099  }
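
// exampleTrackStatus is a hypothetical sketch mapping Status results back to
// the queried hashes; TxStatusUnknown covers transactions that were dropped
// or already included in a block.
func exampleTrackStatus(pool *TxPool, hashes []common.Hash) {
	for i, st := range pool.Status(hashes) {
		switch st {
		case TxStatusPending:
			log.Trace("executable", "hash", hashes[i])
		case TxStatusQueued:
			log.Trace("queued (future)", "hash", hashes[i])
		default: // TxStatusUnknown
			log.Trace("not in pool", "hash", hashes[i])
		}
	}
}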
  1100  
  1101  // Get returns a transaction if it is contained in the pool and nil otherwise.
  1102  func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
  1103  	return pool.all.Get(hash)
  1104  }
  1105  
  1106  // Has returns an indicator whether txpool has a transaction cached with the
  1107  // given hash.
  1108  func (pool *TxPool) Has(hash common.Hash) bool {
  1109  	return pool.all.Get(hash) != nil
  1110  }
  1111  
  1112  // HasLocal returns an indicator whether txpool has a local transaction cached with
  1113  // the given hash.
  1114  func (pool *TxPool) HasLocal(hash common.Hash) bool {
  1115  	return pool.all.GetLocal(hash) != nil
  1116  }
  1117  
  1118  func (pool *TxPool) RemoveTx(hash common.Hash) {
  1119  	pool.mu.Lock()
  1120  	defer pool.mu.Unlock()
  1121  
  1122  	pool.removeTx(hash, true)
  1123  }
  1124  
  1125  // removeTx removes a single transaction from the queue, moving all subsequent
  1126  // transactions back to the future queue.
  1127  func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
  1128  	// Fetch the transaction we wish to delete
  1129  	tx := pool.all.Get(hash)
  1130  	if tx == nil {
  1131  		return
  1132  	}
  1133  	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
  1134  
  1135  	// Remove it from the list of known transactions
  1136  	pool.all.Remove(hash)
  1137  	if outofbound {
  1138  		pool.priced.Removed(1)
  1139  	}
  1140  	if pool.locals.contains(addr) {
  1141  		localGauge.Dec(1)
  1142  	}
  1143  	// Remove the transaction from the pending lists and reset the account nonce
  1144  	if pending := pool.pending[addr]; pending != nil {
  1145  		if removed, invalids := pending.Remove(tx); removed {
  1146  			// If no more pending transactions are left, remove the list
  1147  			if pending.Empty() {
  1148  				delete(pool.pending, addr)
  1149  			}
  1150  			// Postpone any invalidated transactions
  1151  			for _, tx := range invalids {
  1152  				// Internal shuffle shouldn't touch the lookup set.
  1153  				pool.enqueueTx(tx.Hash(), tx, false, false)
  1154  			}
  1155  			// Update the account nonce if needed
  1156  			pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1157  			// Reduce the pending counter
  1158  			pendingGauge.Dec(int64(1 + len(invalids)))
  1159  			return
  1160  		}
  1161  	}
  1162  	// Transaction is in the future queue
  1163  	if future := pool.queue[addr]; future != nil {
  1164  		if removed, _ := future.Remove(tx); removed {
  1165  			// Reduce the queued counter
  1166  			queuedGauge.Dec(1)
  1167  		}
  1168  		if future.Empty() {
  1169  			delete(pool.queue, addr)
  1170  			delete(pool.beats, addr)
  1171  		}
  1172  	}
  1173  }
  1174  
  1175  // requestReset requests a pool reset to the new head block.
  1176  // The returned channel is closed when the reset has occurred.
  1177  func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
  1178  	select {
  1179  	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
  1180  		return <-pool.reorgDoneCh
  1181  	case <-pool.reorgShutdownCh:
  1182  		return pool.reorgShutdownCh
  1183  	}
  1184  }
  1185  
  1186  // requestPromoteExecutables requests transaction promotion checks for the given addresses.
  1187  // The returned channel is closed when the promotion checks have occurred.
  1188  func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
  1189  	select {
  1190  	case pool.reqPromoteCh <- set:
  1191  		return <-pool.reorgDoneCh
  1192  	case <-pool.reorgShutdownCh:
  1193  		return pool.reorgShutdownCh
  1194  	}
  1195  }
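
// Caller-side shape of the request/done pattern shared by both helpers above,
// as a minimal sketch: the returned channel is closed once the batched reorg
// run that absorbed the request has finished.
//
//	done := pool.requestPromoteExecutables(dirty)
//	<-done // block until promotion checks for the dirty accounts have run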
  1196  
  1197  // queueTxEvent enqueues a transaction event to be sent in the next reorg run.
  1198  func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
  1199  	select {
  1200  	case pool.queueTxEventCh <- tx:
  1201  	case <-pool.reorgShutdownCh:
  1202  	}
  1203  }
  1204  
  1205  // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
  1206  // call those methods directly, but should request them to be run using requestReset and
  1207  // requestPromoteExecutables instead.
  1208  func (pool *TxPool) scheduleReorgLoop() {
  1209  	defer pool.wg.Done()
  1210  
  1211  	var (
  1212  		curDone       chan struct{} // non-nil while runReorg is active
  1213  		nextDone      = make(chan struct{})
  1214  		launchNextRun bool
  1215  		reset         *txpoolResetRequest
  1216  		dirtyAccounts *accountSet
  1217  		queuedEvents  = make(map[common.Address]*txSortedMap)
  1218  	)
  1219  	for {
  1220  		// Launch next background reorg if needed
  1221  		if curDone == nil && launchNextRun {
  1222  			// Run the background reorg and announcements
  1223  			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
  1224  
  1225  			// Prepare everything for the next round of reorg
  1226  			curDone, nextDone = nextDone, make(chan struct{})
  1227  			launchNextRun = false
  1228  
  1229  			reset, dirtyAccounts = nil, nil
  1230  			queuedEvents = make(map[common.Address]*txSortedMap)
  1231  		}
  1232  
  1233  		select {
  1234  		case req := <-pool.reqResetCh:
  1235  			// Reset request: update head if request is already pending.
  1236  			if reset == nil {
  1237  				reset = req
  1238  			} else {
  1239  				reset.newHead = req.newHead
  1240  			}
  1241  			launchNextRun = true
  1242  			pool.reorgDoneCh <- nextDone
  1243  
  1244  		case req := <-pool.reqPromoteCh:
  1245  			// Promote request: update address set if request is already pending.
  1246  			if dirtyAccounts == nil {
  1247  				dirtyAccounts = req
  1248  			} else {
  1249  				dirtyAccounts.merge(req)
  1250  			}
  1251  			launchNextRun = true
  1252  			pool.reorgDoneCh <- nextDone
  1253  
  1254  		case tx := <-pool.queueTxEventCh:
  1255  			// Queue up the event, but don't schedule a reorg. It's up to the caller to
  1256  			// request one later if they want the events sent.
  1257  			addr, _ := types.Sender(pool.signer, tx)
  1258  			if _, ok := queuedEvents[addr]; !ok {
  1259  				queuedEvents[addr] = newTxSortedMap()
  1260  			}
  1261  			queuedEvents[addr].Put(tx)
  1262  
  1263  		case <-curDone:
  1264  			curDone = nil
  1265  
  1266  		case <-pool.reorgShutdownCh:
  1267  			// Wait for current run to finish.
  1268  			if curDone != nil {
  1269  				<-curDone
  1270  			}
  1271  			close(nextDone)
  1272  			return
  1273  		}
  1274  	}
  1275  }
  1276  
  1277  // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
  1278  func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
  1279  	defer func(t0 time.Time) {
  1280  		reorgDurationTimer.Update(time.Since(t0))
  1281  	}(time.Now())
  1282  	defer close(done)
  1283  
  1284  	var promoteAddrs []common.Address
  1285  	if dirtyAccounts != nil && reset == nil {
  1286  		// Only dirty accounts need to be promoted, unless we're resetting.
  1287  		// For resets, all addresses in the tx queue will be promoted and
  1288  		// the flatten operation can be avoided.
  1289  		promoteAddrs = dirtyAccounts.flatten()
  1290  	}
  1291  	pool.mu.Lock()
  1292  	if reset != nil {
  1293  		// Reset from the old head to the new, rescheduling any reorged transactions
  1294  		pool.reset(reset.oldHead, reset.newHead)
  1295  
  1296  		// Nonces were reset, discard any events that became stale
  1297  		for addr := range events {
  1298  			events[addr].Forward(pool.pendingNonces.get(addr))
  1299  			if events[addr].Len() == 0 {
  1300  				delete(events, addr)
  1301  			}
  1302  		}
  1303  		// Reset needs promote for all addresses
  1304  		promoteAddrs = make([]common.Address, 0, len(pool.queue))
  1305  		for addr := range pool.queue {
  1306  			promoteAddrs = append(promoteAddrs, addr)
  1307  		}
  1308  	}
  1309  	// Check for pending transactions for every account that sent new ones
  1310  	promoted := pool.promoteExecutables(promoteAddrs)
  1311  
  1312  	// If a new block appeared, validate the pool of pending transactions. This will
  1313  	// remove any transaction that has been included in the block or was invalidated
  1314  	// because of another transaction (e.g. higher gas price).
  1315  	if reset != nil {
  1316  		pool.demoteUnexecutables()
  1317  		if reset.newHead != nil && pool.chainconfig.IsSubnetEVM(new(big.Int).SetUint64(reset.newHead.Time)) {
  1318  			if err := pool.updateBaseFeeAt(reset.newHead); err != nil {
  1319  				log.Error("failed to update base fee in tx pool", "error", err)
  1320  			}
  1321  		}
  1322  
  1323  		// Update all accounts to the latest known pending nonce
  1324  		nonces := make(map[common.Address]uint64, len(pool.pending))
  1325  		for addr, list := range pool.pending {
  1326  			highestPending := list.LastElement()
  1327  			nonces[addr] = highestPending.Nonce() + 1
  1328  		}
  1329  		pool.pendingNonces.setAll(nonces)
  1330  	}
  1331  	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
  1332  	pool.truncatePending()
  1333  	pool.truncateQueue()
  1334  
  1335  	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
  1336  	pool.changesSinceReorg = 0 // Reset change counter
  1337  	pool.mu.Unlock()
  1338  
  1339  	if reset != nil && reset.newHead != nil {
  1340  		pool.reorgFeed.Send(NewTxPoolReorgEvent{reset.newHead})
  1341  	}
  1342  
  1343  	// Notify subsystems for newly added transactions
  1344  	for _, tx := range promoted {
  1345  		addr, _ := types.Sender(pool.signer, tx)
  1346  		if _, ok := events[addr]; !ok {
  1347  			events[addr] = newTxSortedMap()
  1348  		}
  1349  		events[addr].Put(tx)
  1350  	}
  1351  	if len(events) > 0 {
  1352  		var txs []*types.Transaction
  1353  		for _, set := range events {
  1354  			txs = append(txs, set.Flatten()...)
  1355  		}
  1356  		pool.txFeed.Send(NewTxsEvent{txs})
  1357  	}
  1358  }
  1359  
  1360  // reset retrieves the current state of the blockchain and ensures the content
  1361  // of the transaction pool is valid with regard to the chain state.
  1362  func (pool *TxPool) reset(oldHead, newHead *types.Header) {
  1363  	// If we're reorging an old state, reinject all dropped transactions
  1364  	var reinject types.Transactions
  1365  
  1366  	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
  1367  		// If the reorg is too deep, avoid doing it (will happen during fast sync)
  1368  		oldNum := oldHead.Number.Uint64()
  1369  		newNum := newHead.Number.Uint64()
  1370  
  1371  		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
  1372  			log.Debug("Skipping deep transaction reorg", "depth", depth)
  1373  		} else {
  1374  			// Reorg seems shallow enough to pull in all transactions into memory
  1375  			var discarded, included types.Transactions
  1376  			var (
  1377  				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  1378  				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
  1379  			)
  1380  			if rem == nil {
  1381  				// This can happen if a setHead is performed, where we simply discard the old
  1382  				// head from the chain.
  1383  				// If that is the case, we don't have the lost transactions any more, and
  1384  				// there's nothing to add
  1385  				if newNum >= oldNum {
  1386  					// If we reorged to the same or a higher number, then it's not a case of setHead
  1387  					log.Warn("Transaction pool reset with missing oldhead",
  1388  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1389  					return
  1390  				}
  1391  				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
  1392  				log.Debug("Skipping transaction reset caused by setHead",
  1393  					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1394  				// We still need to update the current state so that the lost transactions can be re-added by the user
  1395  			} else {
  1396  				for rem.NumberU64() > add.NumberU64() {
  1397  					discarded = append(discarded, rem.Transactions()...)
  1398  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1399  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1400  						return
  1401  					}
  1402  				}
  1403  				for add.NumberU64() > rem.NumberU64() {
  1404  					included = append(included, add.Transactions()...)
  1405  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1406  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1407  						return
  1408  					}
  1409  				}
  1410  				for rem.Hash() != add.Hash() {
  1411  					discarded = append(discarded, rem.Transactions()...)
  1412  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1413  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1414  						return
  1415  					}
  1416  					included = append(included, add.Transactions()...)
  1417  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1418  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1419  						return
  1420  					}
  1421  				}
  1422  				reinject = types.TxDifference(discarded, included)
  1423  			}
  1424  		}
  1425  	}
  1426  	// Initialize the internal state to the current head
  1427  	if newHead == nil {
  1428  		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
  1429  	}
  1430  	statedb, err := pool.chain.StateAt(newHead.Root)
  1431  	if err != nil {
  1432  		log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
  1433  		return
  1434  	}
  1435  	pool.currentHead = newHead
  1436  	pool.currentStateLock.Lock()
  1437  	pool.currentState = statedb
  1438  	pool.currentStateLock.Unlock()
  1439  	pool.pendingNonces = newTxNoncer(statedb)
  1440  	pool.currentMaxGas = newHead.GasLimit
  1441  
  1442  	// When we reset the txPool we should explicitly check whether the fee config for the minimum
  1443  	// base fee has changed, so that we can correctly drop txs paying less than minBaseFee.
  1444  	// TODO: this should be checking IsSubnetEVM, since we also support minimumFee for SubnetEVM
  1445  	// without requiring that FeeConfigManager be enabled.
  1446  	// This is already being set by SetMinFee when gas price updater starts.
  1447  	// However tests are currently failing if we change this check to IsSubnetEVM.
  1448  	if pool.chainconfig.IsFeeConfigManager(new(big.Int).SetUint64(newHead.Time)) {
  1449  		feeConfig, _, err := pool.chain.GetFeeConfigAt(newHead)
  1450  		if err != nil {
  1451  			log.Error("Failed to get fee config state", "err", err, "root", newHead.Root)
  1452  			return
  1453  		}
  1454  		pool.minimumFee = feeConfig.MinBaseFee
  1455  	}
  1456  
  1457  	// Inject any transactions discarded due to reorgs
  1458  	log.Debug("Reinjecting stale transactions", "count", len(reinject))
  1459  	pool.chain.SenderCacher().Recover(pool.signer, reinject)
  1460  	pool.addTxsLocked(reinject, false)
  1461  
  1462  	// Update all fork indicators by the next pending block number.
  1463  	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
  1464  	pool.istanbul = pool.chainconfig.IsIstanbul(next)
  1465  
  1466  	isSubnetEVM := pool.chainconfig.IsSubnetEVM(new(big.Int).SetUint64(newHead.Time))
  1467  	pool.eip2718 = isSubnetEVM
  1468  	pool.eip1559 = isSubnetEVM
  1469  }
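
// Reinjection in reset relies on types.TxDifference(discarded, included):
// only transactions that were on the old chain but are absent from the new
// one are re-queued. A rough, behavior-equivalent sketch of that set
// difference (illustrative only; exampleTxDifference is a hypothetical helper):
func exampleTxDifference(discarded, included types.Transactions) types.Transactions {
	seen := make(map[common.Hash]struct{}, len(included))
	for _, tx := range included {
		seen[tx.Hash()] = struct{}{}
	}
	keep := make(types.Transactions, 0, len(discarded))
	for _, tx := range discarded {
		if _, ok := seen[tx.Hash()]; !ok {
			keep = append(keep, tx) // on the old chain only: safe to reinject
		}
	}
	return keep
}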
  1470  
  1471  // promoteExecutables moves transactions that have become processable from the
  1472  // future queue to the set of pending transactions. During this process, all
  1473  // invalidated transactions (low nonce, low balance) are deleted.
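// For example (illustrative values), with queued nonces {2,3,7}, a state
// nonce of 3 and a pending nonce of 3: Forward(3) drops 2 as stale, Ready(3)
// promotes 3, and 7 stays queued until the nonce gap is filled.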
  1474  func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
  1475  	pool.currentStateLock.Lock()
  1476  	defer pool.currentStateLock.Unlock()
  1477  
  1478  	// Track the promoted transactions to broadcast them at once
  1479  	var promoted []*types.Transaction
  1480  
  1481  	// Iterate over all accounts and promote any executable transactions
  1482  	for _, addr := range accounts {
  1483  		list := pool.queue[addr]
  1484  		if list == nil {
  1485  			continue // Just in case someone calls with a non-existent account
  1486  		}
  1487  		// Drop all transactions that are deemed too old (low nonce)
  1488  		forwards := list.Forward(pool.currentState.GetNonce(addr))
  1489  		for _, tx := range forwards {
  1490  			hash := tx.Hash()
  1491  			pool.all.Remove(hash)
  1492  		}
  1493  		log.Trace("Removed old queued transactions", "count", len(forwards))
  1494  		// Drop all transactions that are too costly (low balance or out of gas)
  1495  		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
  1496  		for _, tx := range drops {
  1497  			hash := tx.Hash()
  1498  			pool.all.Remove(hash)
  1499  		}
  1500  		log.Trace("Removed unpayable queued transactions", "count", len(drops))
  1501  		queuedNofundsMeter.Mark(int64(len(drops)))
  1502  
  1503  		// Gather all executable transactions and promote them
  1504  		readies := list.Ready(pool.pendingNonces.get(addr))
  1505  		for _, tx := range readies {
  1506  			hash := tx.Hash()
  1507  			if pool.promoteTx(addr, hash, tx) {
  1508  				promoted = append(promoted, tx)
  1509  			}
  1510  		}
  1511  		log.Trace("Promoted queued transactions", "count", len(promoted))
  1512  		queuedGauge.Dec(int64(len(readies)))
  1513  
  1514  		// Drop all transactions over the allowed limit
  1515  		var caps types.Transactions
  1516  		if !pool.locals.contains(addr) {
  1517  			caps = list.Cap(int(pool.config.AccountQueue))
  1518  			for _, tx := range caps {
  1519  				hash := tx.Hash()
  1520  				pool.all.Remove(hash)
  1521  				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
  1522  			}
  1523  			queuedRateLimitMeter.Mark(int64(len(caps)))
  1524  		}
  1525  		// Mark all the items dropped as removed
  1526  		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
  1527  		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1528  		if pool.locals.contains(addr) {
  1529  			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1530  		}
  1531  		// Delete the entire queue entry if it became empty.
  1532  		if list.Empty() {
  1533  			delete(pool.queue, addr)
  1534  			delete(pool.beats, addr)
  1535  		}
  1536  	}
  1537  	return promoted
  1538  }
  1539  
  1540  // truncatePending removes transactions from the pending queue if the pool is above the
  1541  // pending limit. The algorithm tries to reduce transaction counts by an approximately
  1542  // equal number for all accounts with many pending transactions.
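// For example (illustrative numbers), with GlobalSlots = 8, AccountSlots = 2
// and pending counts A=6, B=4, C=2: A is first capped down to B's length
// (A=4), then A and B are reduced in lockstep to 3 each, leaving 8 pending
// transactions in total; C is untouched since it never exceeds AccountSlots.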
  1543  func (pool *TxPool) truncatePending() {
  1544  	pending := uint64(0)
  1545  	for _, list := range pool.pending {
  1546  		pending += uint64(list.Len())
  1547  	}
  1548  	if pending <= pool.config.GlobalSlots {
  1549  		return
  1550  	}
  1551  
  1552  	pendingBeforeCap := pending
  1553  	// Assemble a spam order to penalize large transactors first
  1554  	spammers := prque.New(nil)
  1555  	for addr, list := range pool.pending {
  1556  		// Only evict transactions from high rollers
  1557  		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
  1558  			spammers.Push(addr, int64(list.Len()))
  1559  		}
  1560  	}
  1561  	// Gradually drop transactions from offenders
  1562  	offenders := []common.Address{}
  1563  	for pending > pool.config.GlobalSlots && !spammers.Empty() {
  1564  		// Retrieve the next offender if not local address
  1565  		offender, _ := spammers.Pop()
  1566  		offenders = append(offenders, offender.(common.Address))
  1567  
  1568  		// Equalize balances until all the same or below threshold
  1569  		if len(offenders) > 1 {
  1570  			// Calculate the equalization threshold for all current offenders
  1571  			threshold := pool.pending[offender.(common.Address)].Len()
  1572  
  1573  			// Iteratively reduce all offenders until below limit or threshold reached
  1574  			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
  1575  				for i := 0; i < len(offenders)-1; i++ {
  1576  					list := pool.pending[offenders[i]]
  1577  
  1578  					caps := list.Cap(list.Len() - 1)
  1579  					for _, tx := range caps {
  1580  						// Drop the transaction from the global pools too
  1581  						hash := tx.Hash()
  1582  						pool.all.Remove(hash)
  1583  
  1584  						// Update the account nonce to the dropped transaction
  1585  						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
  1586  						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1587  					}
  1588  					pool.priced.Removed(len(caps))
  1589  					pendingGauge.Dec(int64(len(caps)))
  1590  					if pool.locals.contains(offenders[i]) {
  1591  						localGauge.Dec(int64(len(caps)))
  1592  					}
  1593  					pending--
  1594  				}
  1595  			}
  1596  		}
  1597  	}
  1598  
  1599  	// If still above threshold, reduce to limit or min allowance
  1600  	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
  1601  		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
  1602  			for _, addr := range offenders {
  1603  				list := pool.pending[addr]
  1604  
  1605  				caps := list.Cap(list.Len() - 1)
  1606  				for _, tx := range caps {
  1607  					// Drop the transaction from the global pools too
  1608  					hash := tx.Hash()
  1609  					pool.all.Remove(hash)
  1610  
  1611  					// Update the account nonce to the dropped transaction
  1612  					pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1613  					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1614  				}
  1615  				pool.priced.Removed(len(caps))
  1616  				pendingGauge.Dec(int64(len(caps)))
  1617  				if pool.locals.contains(addr) {
  1618  					localGauge.Dec(int64(len(caps)))
  1619  				}
  1620  				pending--
  1621  			}
  1622  		}
  1623  	}
  1624  	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
  1625  }
  1626  
  1627  // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
  1628  func (pool *TxPool) truncateQueue() {
  1629  	queued := uint64(0)
  1630  	for _, list := range pool.queue {
  1631  		queued += uint64(list.Len())
  1632  	}
  1633  	if queued <= pool.config.GlobalQueue {
  1634  		return
  1635  	}
  1636  
  1637  	// Sort all accounts with queued transactions by heartbeat
  1638  	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
  1639  	for addr := range pool.queue {
  1640  		if !pool.locals.contains(addr) { // don't drop locals
  1641  			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
  1642  		}
  1643  	}
  1644  	sort.Sort(sort.Reverse(addresses))
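	// After the reverse sort the most recently active accounts come first, so
	// evicting from the tail of the slice targets the longest-inactive account.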
  1645  
  1646  	// Drop transactions until the total is below the limit or only locals remain
  1647  	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
  1648  		addr := addresses[len(addresses)-1]
  1649  		list := pool.queue[addr.address]
  1650  
  1651  		addresses = addresses[:len(addresses)-1]
  1652  
  1653  		// Drop all of the account's transactions if they fit within the remaining overflow
  1654  		if size := uint64(list.Len()); size <= drop {
  1655  			for _, tx := range list.Flatten() {
  1656  				pool.removeTx(tx.Hash(), true)
  1657  			}
  1658  			drop -= size
  1659  			queuedRateLimitMeter.Mark(int64(size))
  1660  			continue
  1661  		}
  1662  		// Otherwise drop only last few transactions
  1663  		txs := list.Flatten()
  1664  		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
  1665  			pool.removeTx(txs[i].Hash(), true)
  1666  			drop--
  1667  			queuedRateLimitMeter.Mark(1)
  1668  		}
  1669  	}
  1670  }
  1671  
  1672  // demoteUnexecutables removes invalid and processed transactions from the pools
  1673  // executable/pending queue and any subsequent transactions that become unexecutable
  1674  // are moved back into the future queue.
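// For example (illustrative values), with pending nonces {3,4,5,6} and a new
// state nonce of 5: Forward(5) drops 3 and 4 as already executed; if the
// account can no longer cover tx 5, Filter drops it and returns 6 as
// invalidated, and 6 is moved back into the future queue.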
  1675  //
  1676  // Note: transactions are not marked as removed in the priced list because re-heaping
  1677  // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
  1678  // to trigger a re-heap in this function.
  1679  func (pool *TxPool) demoteUnexecutables() {
  1680  	pool.currentStateLock.Lock()
  1681  	defer pool.currentStateLock.Unlock()
  1682  
  1683  	// Iterate over all accounts and demote any non-executable transactions
  1684  	for addr, list := range pool.pending {
  1685  		nonce := pool.currentState.GetNonce(addr)
  1686  
  1687  		// Drop all transactions that are deemed too old (low nonce)
  1688  		olds := list.Forward(nonce)
  1689  		for _, tx := range olds {
  1690  			hash := tx.Hash()
  1691  			pool.all.Remove(hash)
  1692  			log.Trace("Removed old pending transaction", "hash", hash)
  1693  		}
  1694  		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
  1695  		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
  1696  		for _, tx := range drops {
  1697  			hash := tx.Hash()
  1698  			log.Trace("Removed unpayable pending transaction", "hash", hash)
  1699  			pool.all.Remove(hash)
  1700  		}
  1701  		pendingNofundsMeter.Mark(int64(len(drops)))
  1702  
  1703  		for _, tx := range invalids {
  1704  			hash := tx.Hash()
  1705  			log.Trace("Demoting pending transaction", "hash", hash)
  1706  
  1707  			// Internal shuffle shouldn't touch the lookup set.
  1708  			pool.enqueueTx(hash, tx, false, false)
  1709  		}
  1710  		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1711  		if pool.locals.contains(addr) {
  1712  			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1713  		}
  1714  		// If there's a gap in front, alert (should never happen) and postpone all transactions
  1715  		if list.Len() > 0 && list.txs.Get(nonce) == nil {
  1716  			gapped := list.Cap(0)
  1717  			for _, tx := range gapped {
  1718  				hash := tx.Hash()
  1719  				log.Error("Demoting invalidated transaction", "hash", hash)
  1720  
  1721  				// Internal shuffle shouldn't touch the lookup set.
  1722  				pool.enqueueTx(hash, tx, false, false)
  1723  			}
  1724  			// This might happen in a reorg, so log it to the metering
  1725  			pendingGauge.Dec(int64(len(gapped)))
  1726  		}
  1727  		// Delete the entire pending entry if it became empty.
  1728  		if list.Empty() {
  1729  			delete(pool.pending, addr)
  1730  		}
  1731  	}
  1732  }
  1733  
  1734  func (pool *TxPool) startPeriodicFeeUpdate() {
  1735  	if pool.chainconfig.SubnetEVMTimestamp == nil {
  1736  		return
  1737  	}
  1738  
  1739  	// Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
  1740  	// when starting up in Subnet EVM before the base fee is updated.
  1741  	if time.Now().After(time.Unix(pool.chainconfig.SubnetEVMTimestamp.Int64(), 0)) {
  1742  		pool.updateBaseFee()
  1743  	}
  1744  
  1745  	pool.wg.Add(1)
  1746  	go pool.periodicBaseFeeUpdate()
  1747  }
  1748  
  1749  func (pool *TxPool) periodicBaseFeeUpdate() {
  1750  	defer pool.wg.Done()
  1751  
  1752  	// Sleep until it's time to start the periodic base fee update, or until the tx pool shuts down
  1753  	select {
  1754  	case <-time.After(time.Until(time.Unix(pool.chainconfig.SubnetEVMTimestamp.Int64(), 0))):
  1755  	case <-pool.generalShutdownChan:
  1756  		return // Return early if shutting down
  1757  	}
  1758  
  1759  	// Update the base fee every [baseFeeUpdateInterval]
  1760  	// and shut down when [generalShutdownChan] is closed by Stop()
  1761  	for {
  1762  		select {
  1763  		case <-time.After(baseFeeUpdateInterval):
  1764  			pool.updateBaseFee()
  1765  		case <-pool.generalShutdownChan:
  1766  			return
  1767  		}
  1768  	}
  1769  }
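
// The loop above arms a fresh timer on every iteration via time.After. An
// equivalent formulation with time.Ticker (a sketch under hypothetical names
// interval/update/quit; not part of the pool):
func examplePeriodicUpdate(interval time.Duration, update func(), quit chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop() // release the ticker's resources on exit
	for {
		select {
		case <-ticker.C:
			update()
		case <-quit:
			return
		}
	}
}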
  1770  
  1771  func (pool *TxPool) updateBaseFee() {
  1772  	pool.mu.Lock()
  1773  	defer pool.mu.Unlock()
  1774  
  1775  	err := pool.updateBaseFeeAt(pool.currentHead)
  1776  	if err != nil {
  1777  		log.Error("failed to update base fee", "currentHead", pool.currentHead.Hash(), "err", err)
  1778  	}
  1779  }
  1780  
  1781  // updateBaseFeeAt updates the pool's base fee estimate at the given head; it assumes pool.mu is held.
  1782  func (pool *TxPool) updateBaseFeeAt(head *types.Header) error {
  1783  	feeConfig, _, err := pool.chain.GetFeeConfigAt(head)
  1784  	if err != nil {
  1785  		return err
  1786  	}
  1787  	_, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, feeConfig, head, uint64(time.Now().Unix()))
  1788  	if err != nil {
  1789  		return err
  1790  	}
  1791  	pool.priced.SetBaseFee(baseFeeEstimate)
  1792  	return nil
  1793  }
  1794  
  1795  // addressByHeartbeat is an account address tagged with its last activity timestamp.
  1796  type addressByHeartbeat struct {
  1797  	address   common.Address
  1798  	heartbeat time.Time
  1799  }
  1800  
  1801  type addressesByHeartbeat []addressByHeartbeat
  1802  
  1803  func (a addressesByHeartbeat) Len() int           { return len(a) }
  1804  func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  1805  func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
  1806  
  1807  // accountSet is simply a set of addresses to check for existence, and a signer
  1808  // capable of deriving addresses from transactions.
  1809  type accountSet struct {
  1810  	accounts map[common.Address]struct{}
  1811  	signer   types.Signer
  1812  	cache    *[]common.Address
  1813  }
  1814  
  1815  // newAccountSet creates a new address set with an associated signer for sender
  1816  // derivations.
  1817  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  1818  	as := &accountSet{
  1819  		accounts: make(map[common.Address]struct{}),
  1820  		signer:   signer,
  1821  	}
  1822  	for _, addr := range addrs {
  1823  		as.add(addr)
  1824  	}
  1825  	return as
  1826  }
  1827  
  1828  // contains checks if a given address is contained within the set.
  1829  func (as *accountSet) contains(addr common.Address) bool {
  1830  	_, exist := as.accounts[addr]
  1831  	return exist
  1832  }
  1833  
  1834  // containsTx checks if the sender of a given tx is within the set. If the sender
  1835  // cannot be derived, this method returns false.
  1836  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  1837  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1838  		return as.contains(addr)
  1839  	}
  1840  	return false
  1841  }
  1842  
  1843  // add inserts a new address into the set to track.
  1844  func (as *accountSet) add(addr common.Address) {
  1845  	as.accounts[addr] = struct{}{}
  1846  	as.cache = nil
  1847  }
  1848  
  1849  // addTx adds the sender of tx into the set.
  1850  func (as *accountSet) addTx(tx *types.Transaction) {
  1851  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1852  		as.add(addr)
  1853  	}
  1854  }
  1855  
  1856  // flatten returns the list of addresses within this set, also caching it for later
  1857  // reuse. The returned slice should not be changed!
  1858  func (as *accountSet) flatten() []common.Address {
  1859  	if as.cache == nil {
  1860  		accounts := make([]common.Address, 0, len(as.accounts))
  1861  		for account := range as.accounts {
  1862  			accounts = append(accounts, account)
  1863  		}
  1864  		as.cache = &accounts
  1865  	}
  1866  	return *as.cache
  1867  }
  1868  
  1869  // merge adds all addresses from the 'other' set into 'as'.
  1870  func (as *accountSet) merge(other *accountSet) {
  1871  	for addr := range other.accounts {
  1872  		as.accounts[addr] = struct{}{}
  1873  	}
  1874  	as.cache = nil
  1875  }
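
// Minimal usage sketch for accountSet (illustrative; the addresses and the
// exampleAccountSet name are made up):
func exampleAccountSet(signer types.Signer) []common.Address {
	as := newAccountSet(signer, common.HexToAddress("0x01"))
	other := newAccountSet(signer)
	other.add(common.HexToAddress("0x02"))
	as.merge(other)     // union of both sets; invalidates the flatten cache
	return as.flatten() // {0x01, 0x02}, in map-iteration order
}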
  1876  
  1877  // txLookup is used internally by TxPool to track transactions while allowing
  1878  // lookup without mutex contention.
  1879  //
  1880  // Note, although this type is properly protected against concurrent access, it
  1881  // is **not** a type that should ever be mutated or even exposed outside of the
  1882  // transaction pool, since its internal state is tightly coupled with the pools
  1883  // transaction pool, since its internal state is tightly coupled with the pool's
  1884  // peeking into the pool in TxPool.Get without having to acquire the widely scoped
  1885  // TxPool.mu mutex.
  1886  //
  1887  // This lookup set also tracks the notion of "local transactions", which is useful
  1888  // for building upper-level structures.
  1889  type txLookup struct {
  1890  	slots   int
  1891  	lock    sync.RWMutex
  1892  	locals  map[common.Hash]*types.Transaction
  1893  	remotes map[common.Hash]*types.Transaction
  1894  }
  1895  
  1896  // newTxLookup returns a new txLookup structure.
  1897  func newTxLookup() *txLookup {
  1898  	return &txLookup{
  1899  		locals:  make(map[common.Hash]*types.Transaction),
  1900  		remotes: make(map[common.Hash]*types.Transaction),
  1901  	}
  1902  }
  1903  
  1904  // Range calls f on each key and value present in the map. The callback should
  1905  // return true to continue the iteration and false to stop it. Callers specify
  1906  // which set (locals, remotes, or both) to iterate over.
  1907  func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
  1908  	t.lock.RLock()
  1909  	defer t.lock.RUnlock()
  1910  
  1911  	if local {
  1912  		for key, value := range t.locals {
  1913  			if !f(key, value, true) {
  1914  				return
  1915  			}
  1916  		}
  1917  	}
  1918  	if remote {
  1919  		for key, value := range t.remotes {
  1920  			if !f(key, value, false) {
  1921  				return
  1922  			}
  1923  		}
  1924  	}
  1925  }
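
// Example use of Range (a sketch mirroring RemotesBelowTip below;
// exampleCountRemotesAtOrAboveTip is a hypothetical helper): count the
// remote transactions whose tip is at or above a given threshold.
func exampleCountRemotesAtOrAboveTip(t *txLookup, threshold *big.Int) int {
	n := 0
	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		if tx.GasTipCapIntCmp(threshold) >= 0 {
			n++
		}
		return true // keep iterating over the whole set
	}, false, true) // skip locals, iterate remotes
	return n
}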
  1926  
  1927  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1928  func (t *txLookup) Get(hash common.Hash) *types.Transaction {
  1929  	t.lock.RLock()
  1930  	defer t.lock.RUnlock()
  1931  
  1932  	if tx := t.locals[hash]; tx != nil {
  1933  		return tx
  1934  	}
  1935  	return t.remotes[hash]
  1936  }
  1937  
  1938  // GetLocal returns a transaction if it exists in the lookup, or nil if not found.
  1939  func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction {
  1940  	t.lock.RLock()
  1941  	defer t.lock.RUnlock()
  1942  
  1943  	return t.locals[hash]
  1944  }
  1945  
  1946  // GetRemote returns a transaction if it exists in the lookup, or nil if not found.
  1947  func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction {
  1948  	t.lock.RLock()
  1949  	defer t.lock.RUnlock()
  1950  
  1951  	return t.remotes[hash]
  1952  }
  1953  
  1954  // Count returns the current number of transactions in the lookup.
  1955  func (t *txLookup) Count() int {
  1956  	t.lock.RLock()
  1957  	defer t.lock.RUnlock()
  1958  
  1959  	return len(t.locals) + len(t.remotes)
  1960  }
  1961  
  1962  // LocalCount returns the current number of local transactions in the lookup.
  1963  func (t *txLookup) LocalCount() int {
  1964  	t.lock.RLock()
  1965  	defer t.lock.RUnlock()
  1966  
  1967  	return len(t.locals)
  1968  }
  1969  
  1970  // RemoteCount returns the current number of remote transactions in the lookup.
  1971  func (t *txLookup) RemoteCount() int {
  1972  	t.lock.RLock()
  1973  	defer t.lock.RUnlock()
  1974  
  1975  	return len(t.remotes)
  1976  }
  1977  
  1978  // Slots returns the current number of slots used in the lookup.
  1979  func (t *txLookup) Slots() int {
  1980  	t.lock.RLock()
  1981  	defer t.lock.RUnlock()
  1982  
  1983  	return t.slots
  1984  }
  1985  
  1986  // Add adds a transaction to the lookup.
  1987  func (t *txLookup) Add(tx *types.Transaction, local bool) {
  1988  	t.lock.Lock()
  1989  	defer t.lock.Unlock()
  1990  
  1991  	t.slots += numSlots(tx)
  1992  	slotsGauge.Update(int64(t.slots))
  1993  
  1994  	if local {
  1995  		t.locals[tx.Hash()] = tx
  1996  	} else {
  1997  		t.remotes[tx.Hash()] = tx
  1998  	}
  1999  }
  2000  
  2001  // Remove removes a transaction from the lookup.
  2002  func (t *txLookup) Remove(hash common.Hash) {
  2003  	t.lock.Lock()
  2004  	defer t.lock.Unlock()
  2005  
  2006  	tx, ok := t.locals[hash]
  2007  	if !ok {
  2008  		tx, ok = t.remotes[hash]
  2009  	}
  2010  	if !ok {
  2011  		log.Error("No transaction found to be deleted", "hash", hash)
  2012  		return
  2013  	}
  2014  	t.slots -= numSlots(tx)
  2015  	slotsGauge.Update(int64(t.slots))
  2016  
  2017  	delete(t.locals, hash)
  2018  	delete(t.remotes, hash)
  2019  }
  2020  
  2021  // RemoteToLocals migrates the transactions belonging to the given locals into the
  2022  // locals set. It assumes the given locals set is thread-safe to use.
  2023  func (t *txLookup) RemoteToLocals(locals *accountSet) int {
  2024  	t.lock.Lock()
  2025  	defer t.lock.Unlock()
  2026  
  2027  	var migrated int
  2028  	for hash, tx := range t.remotes {
  2029  		if locals.containsTx(tx) {
  2030  			t.locals[hash] = tx
  2031  			delete(t.remotes, hash)
  2032  			migrated++
  2033  		}
  2034  	}
  2035  	return migrated
  2036  }
  2037  
  2038  // RemotesBelowTip finds all remote transactions below the given tip threshold.
  2039  func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
  2040  	found := make(types.Transactions, 0, 128)
  2041  	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
  2042  		if tx.GasTipCapIntCmp(threshold) < 0 {
  2043  			found = append(found, tx)
  2044  		}
  2045  		return true
  2046  	}, false, true) // Only iterate remotes
  2047  	return found
  2048  }
  2049  
  2050  // numSlots calculates the number of slots needed for a single transaction.
  2051  func numSlots(tx *types.Transaction) int {
  2052  	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
  2053  }
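
// Worked example: with txSlotSize = 32KB, a 100KB transaction occupies
// int((102400 + 32767) / 32768) = 4 slots, while a plain 200-byte transfer
// occupies 1; txMaxSize therefore caps any single transaction at 4 slots.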