github.com/ethereum/go-ethereum@v1.16.1/core/txpool/legacypool/legacypool.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package legacypool implements the normal EVM execution transaction pool.
    18  package legacypool
    19  
    20  import (
    21  	"errors"
    22  	"maps"
    23  	"math"
    24  	"math/big"
    25  	"slices"
    26  	"sort"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/common"
    32  	"github.com/ethereum/go-ethereum/common/prque"
    33  	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
    34  	"github.com/ethereum/go-ethereum/core"
    35  	"github.com/ethereum/go-ethereum/core/state"
    36  	"github.com/ethereum/go-ethereum/core/txpool"
    37  	"github.com/ethereum/go-ethereum/core/types"
    38  	"github.com/ethereum/go-ethereum/event"
    39  	"github.com/ethereum/go-ethereum/log"
    40  	"github.com/ethereum/go-ethereum/metrics"
    41  	"github.com/ethereum/go-ethereum/params"
    42  	"github.com/ethereum/go-ethereum/rlp"
    43  	"github.com/holiman/uint256"
    44  )
    45  
    46  const (
    47  	// txSlotSize is used to calculate how many data slots a single transaction
    48  	// takes up based on its size. The slots are used as DoS protection, ensuring
    49  // that validating a new transaction remains a constant-time operation (in
    50  // reality O(maxslots), where maxslots is currently 4).
    51  	txSlotSize = 32 * 1024
    52  
    53  	// txMaxSize is the maximum size a single transaction can have. This field has
    54  	// non-trivial consequences: larger transactions are significantly harder and
    55  	// more expensive to propagate; larger transactions also take more resources
    56  	// to validate whether they fit into the pool or not.
    57  	txMaxSize = 4 * txSlotSize // 128KB
    58  )
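
        // Editorial note with an illustrative sketch (not part of the original
        // file): slot accounting rounds a transaction's size up to whole
        // txSlotSize units, so a 200-byte transfer costs one slot while a
        // maximum-size 128KB transaction costs four. A minimal sketch of the
        // arithmetic, assuming the package's numSlots helper computes exactly
        // this:
        //
        //	slots := int((tx.Size() + txSlotSize - 1) / txSlotSize) // ceil(size / 32KB)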
    59  
    60  var (
    61  	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
    62  	// another remote transaction.
    63  	ErrTxPoolOverflow = errors.New("txpool is full")
    64  
    65  // ErrOutOfOrderTxFromDelegated is returned when a transaction with a gapped
    66  // nonce is received from an account with a delegation or pending delegation.
    67  	ErrOutOfOrderTxFromDelegated = errors.New("gapped-nonce tx from delegated accounts")
    68  
    69  	// ErrAuthorityReserved is returned if a transaction has an authorization
    70  	// signed by an address which already has in-flight transactions known to the
    71  	// pool.
    72  	ErrAuthorityReserved = errors.New("authority already reserved")
    73  
    74  	// ErrFutureReplacePending is returned if a future transaction replaces a pending
    75  	// one. Future transactions should only be able to replace other future transactions.
    76  	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
    77  )
    78  
    79  var (
    80  	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
    81  	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
    82  )
    83  
    84  var (
    85  	// Metrics for the pending pool
    86  	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
    87  	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
    88  	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
    89  	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
    90  
    91  	// Metrics for the queued pool
    92  	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
    93  	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
    94  	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
    95  	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
    96  	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
    97  
    98  	// General tx metrics
    99  	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
   100  	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
   101  	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
   102  	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
   103  	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
   104  
   105  	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
   106  	// txpool reorgs.
   107  	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
   108  // reorgDurationTimer measures how long a txpool reorg takes.
   109  	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
   110  	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
   111  	// that this number is pretty low, since txpool reorgs happen very frequently.
   112  	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
   113  
   114  	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
   115  	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
   116  	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
   117  
   118  	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
   119  )
   120  
   121  // BlockChain defines the minimal set of methods needed to back a tx pool with
   122  // a chain. It exists to allow mocking out the live chain in tests.
   123  type BlockChain interface {
   124  	// Config retrieves the chain's fork configuration.
   125  	Config() *params.ChainConfig
   126  
   127  	// CurrentBlock returns the current head of the chain.
   128  	CurrentBlock() *types.Header
   129  
   130  	// GetBlock retrieves a specific block, used during pool resets.
   131  	GetBlock(hash common.Hash, number uint64) *types.Block
   132  
   133  	// StateAt returns a state database for a given root hash (generally the head).
   134  	StateAt(root common.Hash) (*state.StateDB, error)
   135  }
   136  
   137  // Config are the configuration parameters of the transaction pool.
   138  type Config struct {
   139  	Locals    []common.Address // Addresses that should be treated by default as local
   140  	NoLocals  bool             // Whether local transaction handling should be disabled
   141  	Journal   string           // Journal of local transactions to survive node restarts
   142  	Rejournal time.Duration    // Time interval to regenerate the local transaction journal
   143  
   144  	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
   145  	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
   146  
   147  	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
   148  	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
   149  	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
   150  	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
   151  
   152  	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
   153  }
   154  
   155  // DefaultConfig contains the default configurations for the transaction pool.
   156  var DefaultConfig = Config{
   157  	Journal:   "transactions.rlp",
   158  	Rejournal: time.Hour,
   159  
   160  	PriceLimit: 1,
   161  	PriceBump:  10,
   162  
   163  	AccountSlots: 16,
   164  	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
   165  	AccountQueue: 64,
   166  	GlobalQueue:  1024,
   167  
   168  	Lifetime: 3 * time.Hour,
   169  }
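
        // A minimal usage sketch (illustrative, not part of the original file):
        // callers typically copy DefaultConfig and override individual fields;
        // New runs sanitize, so out-of-range values are repaired with a warning
        // rather than rejected.
        //
        //	cfg := legacypool.DefaultConfig
        //	cfg.PriceLimit = 2     // require a 2 wei minimum tip
        //	cfg.AccountQueue = 128 // permit more queued txs per account
        //	pool := legacypool.New(cfg, chain) // chain implements BlockChain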
   170  
   171  // sanitize checks the provided user configurations and changes anything that's
   172  // unreasonable or unworkable.
   173  func (config *Config) sanitize() Config {
   174  	conf := *config
   175  	if conf.PriceLimit < 1 {
   176  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
   177  		conf.PriceLimit = DefaultConfig.PriceLimit
   178  	}
   179  	if conf.PriceBump < 1 {
   180  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
   181  		conf.PriceBump = DefaultConfig.PriceBump
   182  	}
   183  	if conf.AccountSlots < 1 {
   184  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
   185  		conf.AccountSlots = DefaultConfig.AccountSlots
   186  	}
   187  	if conf.GlobalSlots < 1 {
   188  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
   189  		conf.GlobalSlots = DefaultConfig.GlobalSlots
   190  	}
   191  	if conf.AccountQueue < 1 {
   192  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
   193  		conf.AccountQueue = DefaultConfig.AccountQueue
   194  	}
   195  	if conf.GlobalQueue < 1 {
   196  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
   197  		conf.GlobalQueue = DefaultConfig.GlobalQueue
   198  	}
   199  	if conf.Lifetime < 1 {
   200  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
   201  		conf.Lifetime = DefaultConfig.Lifetime
   202  	}
   203  	return conf
   204  }
   205  
   206  // LegacyPool contains all currently known transactions. Transactions
   207  // enter the pool when they are received from the network or submitted
   208  // locally. They exit the pool when they are included in the blockchain.
   209  //
   210  // The pool separates processable transactions (which can be applied to the
   211  // current state) and future transactions. Transactions move between those
   212  // two states over time as they are received and processed.
   213  //
   214  // In addition to tracking transactions, the pool also tracks a set of pending SetCode
   215  // authorizations (EIP-7702). This helps minimize the number of transactions that can be
   216  // trivially churned in the pool. As a standard rule, any account with a deployed
   217  // delegation or an in-flight authorization to deploy a delegation will only be allowed a
   218  // single transaction slot instead of the standard number. This is due to the possibility
   219  // of the account being swept by an unrelated account.
   220  //
   221  // Because SetCode transactions can have many authorizations included, we avoid explicitly
   222  // checking their validity to save the state lookup. So long as the encompassing
   223  // transaction is valid, the authorization will be accepted and tracked by the pool. In
   224  // case the pool is tracking a pending / queued transaction from a specific account, it
   225  // will reject new transactions carrying authorizations from that account while those
   226  // standard in-flight transactions remain.
   227  type LegacyPool struct {
   228  	config      Config
   229  	chainconfig *params.ChainConfig
   230  	chain       BlockChain
   231  	gasTip      atomic.Pointer[uint256.Int]
   232  	txFeed      event.Feed
   233  	signer      types.Signer
   234  	mu          sync.RWMutex
   235  
   236  	currentHead   atomic.Pointer[types.Header] // Current head of the blockchain
   237  	currentState  *state.StateDB               // Current state in the blockchain head
   238  	pendingNonces *noncer                      // Pending state tracking virtual nonces
   239  	reserver      txpool.Reserver              // Address reserver to ensure exclusivity across subpools
   240  
   241  	pending map[common.Address]*list     // All currently processable transactions
   242  	queue   map[common.Address]*list     // Queued but non-processable transactions
   243  	beats   map[common.Address]time.Time // Last heartbeat from each known account
   244  	all     *lookup                      // All transactions to allow lookups
   245  	priced  *pricedList                  // All transactions sorted by price
   246  
   247  	reqResetCh      chan *txpoolResetRequest
   248  	reqPromoteCh    chan *accountSet
   249  	queueTxEventCh  chan *types.Transaction
   250  	reorgDoneCh     chan chan struct{}
   251  	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
   252  	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
   253  	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)
   254  
   255  	changesSinceReorg int // A counter for how many drops we've performed between reorg runs.
   256  }
   257  
   258  type txpoolResetRequest struct {
   259  	oldHead, newHead *types.Header
   260  }
   261  
   262  // New creates a new transaction pool to gather, sort and filter inbound
   263  // transactions from the network.
   264  func New(config Config, chain BlockChain) *LegacyPool {
   265  	// Sanitize the input to ensure no vulnerable gas prices are set
   266  	config = (&config).sanitize()
   267  
   268  	// Create the transaction pool with its initial settings
   269  	pool := &LegacyPool{
   270  		config:          config,
   271  		chain:           chain,
   272  		chainconfig:     chain.Config(),
   273  		signer:          types.LatestSigner(chain.Config()),
   274  		pending:         make(map[common.Address]*list),
   275  		queue:           make(map[common.Address]*list),
   276  		beats:           make(map[common.Address]time.Time),
   277  		all:             newLookup(),
   278  		reqResetCh:      make(chan *txpoolResetRequest),
   279  		reqPromoteCh:    make(chan *accountSet),
   280  		queueTxEventCh:  make(chan *types.Transaction),
   281  		reorgDoneCh:     make(chan chan struct{}),
   282  		reorgShutdownCh: make(chan struct{}),
   283  		initDoneCh:      make(chan struct{}),
   284  	}
   285  	pool.priced = newPricedList(pool.all)
   286  
   287  	return pool
   288  }
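
        // Wiring sketch (illustrative): a pool built with New is only operational
        // after Init, which needs a minimum gas tip, the current chain head and an
        // address reserver; the reserver here is assumed to be provided by the
        // enclosing txpool orchestrator.
        //
        //	pool := legacypool.New(legacypool.DefaultConfig, chain)
        //	if err := pool.Init(1, chain.CurrentBlock(), reserver); err != nil {
        //		log.Crit("Failed to initialize legacy pool", "err", err)
        //	}
        //	defer pool.Close()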
   289  
   290  // Filter returns whether the given transaction can be consumed by the legacy
   291  // pool, specifically, whether it is a Legacy, AccessList, DynamicFee or SetCode transaction.
   292  func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
   293  	switch tx.Type() {
   294  	case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.SetCodeTxType:
   295  		return true
   296  	default:
   297  		return false
   298  	}
   299  }
   300  
   301  // Init sets the gas price needed to keep a transaction in the pool and the chain
   302  // head to allow balance / nonce checks. Afterwards, the internal goroutines
   303  // are spun up and the pool is deemed operational.
   304  func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reserver) error {
   305  	// Set the address reserver to request exclusive access to pooled accounts
   306  	pool.reserver = reserver
   307  
   308  	// Set the basic pool parameters
   309  	pool.gasTip.Store(uint256.NewInt(gasTip))
   310  
   311  	// Initialize the state with the head block, or fall back to an empty one
   312  	// in case the head state is not available (might occur when the node is
   313  	// not fully synced).
   314  	statedb, err := pool.chain.StateAt(head.Root)
   315  	if err != nil {
   316  		statedb, err = pool.chain.StateAt(types.EmptyRootHash)
   317  	}
   318  	if err != nil {
   319  		return err
   320  	}
   321  	pool.currentHead.Store(head)
   322  	pool.currentState = statedb
   323  	pool.pendingNonces = newNoncer(statedb)
   324  
   325  	pool.wg.Add(1)
   326  	go pool.scheduleReorgLoop()
   327  
   328  	pool.wg.Add(1)
   329  	go pool.loop()
   330  	return nil
   331  }
   332  
   333  // loop is the transaction pool's main event loop, waiting for and reacting to
   334  // outside blockchain events as well as for various reporting and transaction
   335  // eviction events.
   336  func (pool *LegacyPool) loop() {
   337  	defer pool.wg.Done()
   338  
   339  	var (
   340  		prevPending, prevQueued, prevStales int
   341  
   342  		// Start the stats reporting and transaction eviction tickers
   343  		report = time.NewTicker(statsReportInterval)
   344  		evict  = time.NewTicker(evictionInterval)
   345  	)
   346  	defer report.Stop()
   347  	defer evict.Stop()
   348  
   349  	// Notify tests that the init phase is done
   350  	close(pool.initDoneCh)
   351  	for {
   352  		select {
   353  		// Handle pool shutdown
   354  		case <-pool.reorgShutdownCh:
   355  			return
   356  
   357  		// Handle stats reporting ticks
   358  		case <-report.C:
   359  			pool.mu.RLock()
   360  			pending, queued := pool.stats()
   361  			pool.mu.RUnlock()
   362  			stales := int(pool.priced.stales.Load())
   363  
   364  			if pending != prevPending || queued != prevQueued || stales != prevStales {
   365  				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
   366  				prevPending, prevQueued, prevStales = pending, queued, stales
   367  			}
   368  
   369  		// Handle inactive account transaction eviction
   370  		case <-evict.C:
   371  			pool.mu.Lock()
   372  			for addr := range pool.queue {
   373  				// Any account inactive for longer than the configured lifetime is evicted
   374  				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
   375  					list := pool.queue[addr].Flatten()
   376  					for _, tx := range list {
   377  						pool.removeTx(tx.Hash(), true, true)
   378  					}
   379  					queuedEvictionMeter.Mark(int64(len(list)))
   380  				}
   381  			}
   382  			pool.mu.Unlock()
   383  		}
   384  	}
   385  }
   386  
   387  // Close terminates the transaction pool.
   388  func (pool *LegacyPool) Close() error {
   389  	// Terminate the pool reorger and return
   390  	close(pool.reorgShutdownCh)
   391  	pool.wg.Wait()
   392  
   393  	log.Info("Transaction pool stopped")
   394  	return nil
   395  }
   396  
   397  // Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
   398  // kept in sync with the main transaction pool's internal state.
   399  func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
   400  	wait := pool.requestReset(oldHead, newHead)
   401  	<-wait
   402  }
   403  
   404  // SubscribeTransactions registers a subscription for new transaction events,
   405  // supporting feeding only newly seen or also resurrected transactions.
   406  func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
   407  	// The legacy pool has a very messed up internal shuffling, so it's kind of
   408  	// hard to separate newly discovered transactions from resurrected ones. This
   409  	// is because new txs and resurrected ones are both added to the queue and
   410  	// reorgs run lazily, so separating the two would need a marker.
   411  	return pool.txFeed.Subscribe(ch)
   412  }
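
        // Consumption sketch (illustrative): subscribers receive batched
        // core.NewTxsEvent values from runReorg; per the note above, the reorgs
        // flag is accepted but not acted upon by this pool.
        //
        //	ch := make(chan core.NewTxsEvent, 128)
        //	sub := pool.SubscribeTransactions(ch, false)
        //	defer sub.Unsubscribe()
        //	for ev := range ch {
        //		log.Info("New executable transactions", "count", len(ev.Txs))
        //	}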
   413  
   414  // SetGasTip updates the minimum gas tip required by the transaction pool for a
   415  // new transaction, and drops all transactions below this threshold.
   416  func (pool *LegacyPool) SetGasTip(tip *big.Int) {
   417  	pool.mu.Lock()
   418  	defer pool.mu.Unlock()
   419  
   420  	var (
   421  		newTip = uint256.MustFromBig(tip)
   422  		old    = pool.gasTip.Load()
   423  	)
   424  	pool.gasTip.Store(newTip)
   425  	// If the min miner fee increased, remove transactions below the new threshold
   426  	if newTip.Cmp(old) > 0 {
   427  		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
   428  		drop := pool.all.TxsBelowTip(tip)
   429  		for _, tx := range drop {
   430  			pool.removeTx(tx.Hash(), false, true)
   431  		}
   432  		pool.priced.Removed(len(drop))
   433  	}
   434  	log.Info("Legacy pool tip threshold updated", "tip", newTip)
   435  }
   436  
   437  // Nonce returns the next nonce of an account, with all transactions executable
   438  // by the pool already applied on top.
   439  func (pool *LegacyPool) Nonce(addr common.Address) uint64 {
   440  	pool.mu.RLock()
   441  	defer pool.mu.RUnlock()
   442  
   443  	return pool.pendingNonces.get(addr)
   444  }
   445  
   446  // Stats retrieves the current pool stats, namely the number of pending and the
   447  // number of queued (non-executable) transactions.
   448  func (pool *LegacyPool) Stats() (int, int) {
   449  	pool.mu.RLock()
   450  	defer pool.mu.RUnlock()
   451  
   452  	return pool.stats()
   453  }
   454  
   455  // stats retrieves the current pool stats, namely the number of pending and the
   456  // number of queued (non-executable) transactions.
   457  func (pool *LegacyPool) stats() (int, int) {
   458  	pending := 0
   459  	for _, list := range pool.pending {
   460  		pending += list.Len()
   461  	}
   462  	queued := 0
   463  	for _, list := range pool.queue {
   464  		queued += list.Len()
   465  	}
   466  	return pending, queued
   467  }
   468  
   469  // Content retrieves the data content of the transaction pool, returning all the
   470  // pending as well as queued transactions, grouped by account and sorted by nonce.
   471  func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
   472  	pool.mu.Lock()
   473  	defer pool.mu.Unlock()
   474  
   475  	pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
   476  	for addr, list := range pool.pending {
   477  		pending[addr] = list.Flatten()
   478  	}
   479  	queued := make(map[common.Address][]*types.Transaction, len(pool.queue))
   480  	for addr, list := range pool.queue {
   481  		queued[addr] = list.Flatten()
   482  	}
   483  	return pending, queued
   484  }
   485  
   486  // ContentFrom retrieves the data content of the transaction pool, returning the
   487  // pending as well as queued transactions of this address, grouped by nonce.
   488  func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
   489  	pool.mu.RLock()
   490  	defer pool.mu.RUnlock()
   491  
   492  	var pending []*types.Transaction
   493  	if list, ok := pool.pending[addr]; ok {
   494  		pending = list.Flatten()
   495  	}
   496  	var queued []*types.Transaction
   497  	if list, ok := pool.queue[addr]; ok {
   498  		queued = list.Flatten()
   499  	}
   500  	return pending, queued
   501  }
   502  
   503  // Pending retrieves all currently processable transactions, grouped by origin
   504  // account and sorted by nonce.
   505  //
   506  // The transactions can also be pre-filtered by the dynamic fee components to
   507  // reduce allocations and load on downstream subsystems.
   508  func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
   509  	// If only blob transactions are requested, this pool is unsuitable as it
   510  	// contains none, don't even bother.
   511  	if filter.OnlyBlobTxs {
   512  		return nil
   513  	}
   514  	pool.mu.Lock()
   515  	defer pool.mu.Unlock()
   516  
   517  	// Convert the new uint256.Int types to the old big.Int ones used by the legacy pool
   518  	var (
   519  		minTipBig  *big.Int
   520  		baseFeeBig *big.Int
   521  	)
   522  	if filter.MinTip != nil {
   523  		minTipBig = filter.MinTip.ToBig()
   524  	}
   525  	if filter.BaseFee != nil {
   526  		baseFeeBig = filter.BaseFee.ToBig()
   527  	}
   528  	pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
   529  	for addr, list := range pool.pending {
   530  		txs := list.Flatten()
   531  
   532  		// If the miner requests tip enforcement, cap the lists now
   533  		if minTipBig != nil {
   534  			for i, tx := range txs {
   535  				if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 {
   536  					txs = txs[:i]
   537  					break
   538  				}
   539  			}
   540  		}
   541  		if len(txs) > 0 {
   542  			lazies := make([]*txpool.LazyTransaction, len(txs))
   543  			for i := 0; i < len(txs); i++ {
   544  				lazies[i] = &txpool.LazyTransaction{
   545  					Pool:      pool,
   546  					Hash:      txs[i].Hash(),
   547  					Tx:        txs[i],
   548  					Time:      txs[i].Time(),
   549  					GasFeeCap: uint256.MustFromBig(txs[i].GasFeeCap()),
   550  					GasTipCap: uint256.MustFromBig(txs[i].GasTipCap()),
   551  					Gas:       txs[i].Gas(),
   552  					BlobGas:   txs[i].BlobGas(),
   553  				}
   554  			}
   555  			pending[addr] = lazies
   556  		}
   557  	}
   558  	return pending
   559  }
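
        // Filtering sketch (illustrative): a block-producing caller passes the
        // next block's base fee and its minimum acceptable tip, so each account's
        // nonce-sorted list is truncated at the first underpriced transaction.
        //
        //	pending := pool.Pending(txpool.PendingFilter{
        //		MinTip:  uint256.NewInt(1),
        //		BaseFee: uint256.MustFromBig(head.BaseFee), // head is assumed in scope
        //	})
        //	for _, lazies := range pending {
        //		_ = lazies // lazy transactions, cheap to pass downstream
        //	}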
   560  
   561  // ValidateTxBasics checks whether a transaction is valid according to the consensus
   562  // rules, but does not perform state-dependent checks such as sufficient balance.
   563  // This check is meant as an early check which only needs to be performed once,
   564  // and does not require the pool mutex to be held.
   565  func (pool *LegacyPool) ValidateTxBasics(tx *types.Transaction) error {
   566  	opts := &txpool.ValidationOptions{
   567  		Config: pool.chainconfig,
   568  		Accept: 0 |
   569  			1<<types.LegacyTxType |
   570  			1<<types.AccessListTxType |
   571  			1<<types.DynamicFeeTxType |
   572  			1<<types.SetCodeTxType,
   573  		MaxSize: txMaxSize,
   574  		MinTip:  pool.gasTip.Load().ToBig(),
   575  	}
   576  	return txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts)
   577  }
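
        // Editorial note: Accept above is a per-type bitmask, bit i admitting
        // transaction type i; blob transactions are deliberately absent. A sketch
        // of the membership test (assumed to mirror what ValidateTransaction does):
        //
        //	accepted := opts.Accept&(1<<tx.Type()) != 0 // false for types.BlobTxType here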
   578  
   579  // validateTx checks whether a transaction is valid according to the consensus
   580  // rules and adheres to some heuristic limits of the local node (price and size).
   581  func (pool *LegacyPool) validateTx(tx *types.Transaction) error {
   582  	opts := &txpool.ValidationOptionsWithState{
   583  		State: pool.currentState,
   584  
   585  		FirstNonceGap:    nil, // Pool allows arbitrary arrival order, don't invalidate nonce gaps
   586  		UsedAndLeftSlots: nil, // Pool has own mechanism to limit the number of transactions
   587  		ExistingExpenditure: func(addr common.Address) *big.Int {
   588  			if list := pool.pending[addr]; list != nil {
   589  				return list.totalcost.ToBig()
   590  			}
   591  			return new(big.Int)
   592  		},
   593  		ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
   594  			if list := pool.pending[addr]; list != nil {
   595  				if tx := list.txs.Get(nonce); tx != nil {
   596  					return tx.Cost()
   597  				}
   598  			}
   599  			return nil
   600  		},
   601  	}
   602  	if err := txpool.ValidateTransactionWithState(tx, pool.signer, opts); err != nil {
   603  		return err
   604  	}
   605  	return pool.validateAuth(tx)
   606  }
   607  
   608  // checkDelegationLimit determines if the tx sender is delegated or has a
   609  // pending delegation, and if so, ensures they have at most one in-flight
   610  // **executable** transaction, e.g. disallow stacked and gapped transactions
   611  // from the account.
   612  func (pool *LegacyPool) checkDelegationLimit(tx *types.Transaction) error {
   613  	from, _ := types.Sender(pool.signer, tx) // validated
   614  
   615  	// Short circuit if the sender has neither delegation nor pending delegation.
   616  	if pool.currentState.GetCodeHash(from) == types.EmptyCodeHash && !pool.all.hasAuth(from) {
   617  		return nil
   618  	}
   619  	pending := pool.pending[from]
   620  	if pending == nil {
   621  		// Transactions with gapped nonces are not supported for delegated accounts
   622  		if pool.pendingNonces.get(from) != tx.Nonce() {
   623  			return ErrOutOfOrderTxFromDelegated
   624  		}
   625  		return nil
   626  	}
   627  	// Transaction replacement is supported
   628  	if pending.Contains(tx.Nonce()) {
   629  		return nil
   630  	}
   631  	return txpool.ErrInflightTxLimitReached
   632  }
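
        // Worked example (illustrative, hypothetical txA5/txA6 values): for a
        // delegated account A with pending nonce 5 and no pending list entry,
        //
        //	pool.checkDelegationLimit(txA6) // nonce 6: ErrOutOfOrderTxFromDelegated
        //	pool.checkDelegationLimit(txA5) // nonce 5: nil, accepted
        //
        // Once nonce 5 is pending, a further tx is only accepted as a replacement
        // of nonce 5; anything else returns txpool.ErrInflightTxLimitReached.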
   633  
   634  // validateAuth verifies that the transaction complies with code authorization
   635  // restrictions brought by SetCode transaction type.
   636  func (pool *LegacyPool) validateAuth(tx *types.Transaction) error {
   637  	// Allow at most one in-flight tx for delegated accounts or those with a
   638  	// pending authorization.
   639  	if err := pool.checkDelegationLimit(tx); err != nil {
   640  		return err
   641  	}
   642  	// For symmetry, allow at most one in-flight tx for any authority with a
   643  	// pending transaction.
   644  	if auths := tx.SetCodeAuthorities(); len(auths) > 0 {
   645  		for _, auth := range auths {
   646  			var count int
   647  			if pending := pool.pending[auth]; pending != nil {
   648  				count += pending.Len()
   649  			}
   650  			if queue := pool.queue[auth]; queue != nil {
   651  				count += queue.Len()
   652  			}
   653  			if count > 1 {
   654  				return ErrAuthorityReserved
   655  			}
   656  			// Because there is no exclusive lock held between different subpools
   657  			// when processing transactions, the SetCode transaction may be accepted
   658  			// while other transactions with the same sender address are also
   659  			// accepted simultaneously in the other pools.
   660  			//
   661  			// This scenario is considered acceptable, as the rule primarily ensures
   662  			// that attackers cannot easily stack a SetCode transaction when the sender
   663  			// is reserved by other pools.
   664  			if pool.reserver.Has(auth) {
   665  				return ErrAuthorityReserved
   666  			}
   667  		}
   668  	}
   669  	return nil
   670  }
   671  
   672  // add validates a transaction and inserts it into the non-executable queue for later
   673  // pending promotion and execution. If the transaction is a replacement for an already
   674  // pending or queued one, it overwrites the previous transaction if the required price bump is met.
   675  func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) {
   676  	// If the transaction is already known, discard it
   677  	hash := tx.Hash()
   678  	if pool.all.Get(hash) != nil {
   679  		log.Trace("Discarding already known transaction", "hash", hash)
   680  		knownTxMeter.Mark(1)
   681  		return false, txpool.ErrAlreadyKnown
   682  	}
   683  
   684  	// If the transaction fails basic validation, discard it
   685  	if err := pool.validateTx(tx); err != nil {
   686  		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
   687  		invalidTxMeter.Mark(1)
   688  		return false, err
   689  	}
   690  	// already validated by this point
   691  	from, _ := types.Sender(pool.signer, tx)
   692  
   693  	// If the address is not yet known, request exclusivity to track the account
   694  	// only by this subpool until all transactions are evicted
   695  	var (
   696  		_, hasPending = pool.pending[from]
   697  		_, hasQueued  = pool.queue[from]
   698  	)
   699  	if !hasPending && !hasQueued {
   700  		if err := pool.reserver.Hold(from); err != nil {
   701  			return false, err
   702  		}
   703  		defer func() {
   704  			// If the transaction is rejected by some post-validation check, remove
   705  			// the lock on the reservation set.
   706  			//
   707  			// Note, `err` here is the named error return, which will be initialized
   708  			// by a return statement before running deferred methods. Take care with
   709  			// removing or subscoping err as it will break this clause.
   710  			if err != nil {
   711  				pool.reserver.Release(from)
   712  			}
   713  		}()
   714  	}
   715  	// If the transaction pool is full, discard underpriced transactions
   716  	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
   717  		// If the new transaction is underpriced, don't accept it
   718  		if pool.priced.Underpriced(tx) {
   719  			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   720  			underpricedTxMeter.Mark(1)
   721  			return false, txpool.ErrUnderpriced
   722  		}
   723  
   724  		// We're about to replace a transaction. The reorg does a more thorough
   725  		// analysis of what to remove and how, but it runs async. We don't want to
   726  		// do too many replacements between reorg-runs, so we cap the number of
   727  		// replacements to 25% of the slots
   728  		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
   729  			throttleTxMeter.Mark(1)
   730  			return false, ErrTxPoolOverflow
   731  		}
   732  
   733  		// The new transaction is better than our worst ones, so make room for it.
   734  		// If we can't make enough room for the new one, abort the operation.
   735  		drop, success := pool.priced.Discard(pool.all.Slots() - int(pool.config.GlobalSlots+pool.config.GlobalQueue) + numSlots(tx))
   736  
   737  		// Special case: we still can't make room for the new remote one.
   738  		if !success {
   739  			log.Trace("Discarding overflown transaction", "hash", hash)
   740  			overflowedTxMeter.Mark(1)
   741  			return false, ErrTxPoolOverflow
   742  		}
   743  
   744  		// If the new transaction is a future transaction, it should never churn pending transactions
   745  		if pool.isGapped(from, tx) {
   746  			var replacesPending bool
   747  			for _, dropTx := range drop {
   748  				dropSender, _ := types.Sender(pool.signer, dropTx)
   749  				if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
   750  					replacesPending = true
   751  					break
   752  				}
   753  			}
   754  			// Add all transactions back to the priced queue
   755  			if replacesPending {
   756  				for _, dropTx := range drop {
   757  					pool.priced.Put(dropTx)
   758  				}
   759  				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
   760  				return false, ErrFutureReplacePending
   761  			}
   762  		}
   763  
   764  		// Kick out the underpriced remote transactions.
   765  		for _, tx := range drop {
   766  			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   767  			underpricedTxMeter.Mark(1)
   768  
   769  			sender, _ := types.Sender(pool.signer, tx)
   770  			dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if it's the last one from the account
   771  
   772  			pool.changesSinceReorg += dropped
   773  		}
   774  	}
   775  
   776  	// Try to replace an existing transaction in the pending pool
   777  	if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
   778  		// Nonce already pending, check if required price bump is met
   779  		inserted, old := list.Add(tx, pool.config.PriceBump)
   780  		if !inserted {
   781  			pendingDiscardMeter.Mark(1)
   782  			return false, txpool.ErrReplaceUnderpriced
   783  		}
   784  		// New transaction is better, replace old one
   785  		if old != nil {
   786  			pool.all.Remove(old.Hash())
   787  			pool.priced.Removed(1)
   788  			pendingReplaceMeter.Mark(1)
   789  		}
   790  		pool.all.Add(tx)
   791  		pool.priced.Put(tx)
   792  		pool.queueTxEvent(tx)
   793  		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
   794  
   795  		// Successful promotion, bump the heartbeat
   796  		pool.beats[from] = time.Now()
   797  		return old != nil, nil
   798  	}
   799  	// New transaction isn't replacing a pending one, push into queue
   800  	replaced, err = pool.enqueueTx(hash, tx, true)
   801  	if err != nil {
   802  		return false, err
   803  	}
   804  
   805  	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
   806  	return replaced, nil
   807  }
   808  
   809  // isGapped reports whether the given transaction has a nonce gap that cannot be filled by already-queued transactions (i.e. it is not immediately executable).
   810  func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
   811  	// Short circuit if transaction falls within the scope of the pending list
   812  	// or matches the next pending nonce which can be promoted as an executable
   813  	// transaction afterwards. Note, the tx staleness is already checked in
   814  	// 'validateTx' function previously.
   815  	next := pool.pendingNonces.get(from)
   816  	if tx.Nonce() <= next {
   817  		return false
   818  	}
   819  	// The transaction has a nonce gap with the pending list; it's only considered
   820  	// executable if the transactions in the queue can fill up the nonce gap.
   821  	queue, ok := pool.queue[from]
   822  	if !ok {
   823  		return true
   824  	}
   825  	for nonce := next; nonce < tx.Nonce(); nonce++ {
   826  		if !queue.Contains(nonce) {
   827  			return true // txs in queue can't fill up the nonce gap
   828  		}
   829  	}
   830  	return false
   831  }
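
        // Worked example (illustrative): with a pending nonce of 3 and queued
        // nonces {3, 4}, a transaction with nonce 5 is not gapped (the queue fills
        // the gap), while nonce 6 is gapped because nonce 5 is missing.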
   832  
   833  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   834  //
   835  // Note, this method assumes the pool lock is held!
   836  func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAll bool) (bool, error) {
   837  	// Try to insert the transaction into the future queue
   838  	from, _ := types.Sender(pool.signer, tx) // already validated
   839  	if pool.queue[from] == nil {
   840  		pool.queue[from] = newList(false)
   841  	}
   842  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
   843  	if !inserted {
   844  		// An older transaction was better, discard this
   845  		queuedDiscardMeter.Mark(1)
   846  		return false, txpool.ErrReplaceUnderpriced
   847  	}
   848  	// Discard any previous transaction and mark this
   849  	if old != nil {
   850  		pool.all.Remove(old.Hash())
   851  		pool.priced.Removed(1)
   852  		queuedReplaceMeter.Mark(1)
   853  	} else {
   854  		// Nothing was replaced, bump the queued counter
   855  		queuedGauge.Inc(1)
   856  	}
   857  	// If the transaction isn't in the lookup set but is expected to be there,
   858  	// log an error.
   859  	if pool.all.Get(hash) == nil && !addAll {
   860  		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
   861  	}
   862  	if addAll {
   863  		pool.all.Add(tx)
   864  		pool.priced.Put(tx)
   865  	}
   866  	// If we've never recorded a heartbeat for this account, do it now.
   867  	if _, exist := pool.beats[from]; !exist {
   868  		pool.beats[from] = time.Now()
   869  	}
   870  	return old != nil, nil
   871  }
   872  
   873  // promoteTx adds a transaction to the pending (processable) list of transactions
   874  // and returns whether it was inserted or an older transaction was better.
   875  //
   876  // Note, this method assumes the pool lock is held!
   877  func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
   878  	// Try to insert the transaction into the pending queue
   879  	if pool.pending[addr] == nil {
   880  		pool.pending[addr] = newList(true)
   881  	}
   882  	list := pool.pending[addr]
   883  
   884  	inserted, old := list.Add(tx, pool.config.PriceBump)
   885  	if !inserted {
   886  		// An older transaction was better, discard this
   887  		pool.all.Remove(hash)
   888  		pool.priced.Removed(1)
   889  		pendingDiscardMeter.Mark(1)
   890  		return false
   891  	}
   892  	// Otherwise discard any previous transaction and mark this
   893  	if old != nil {
   894  		pool.all.Remove(old.Hash())
   895  		pool.priced.Removed(1)
   896  		pendingReplaceMeter.Mark(1)
   897  	} else {
   898  		// Nothing was replaced, bump the pending counter
   899  		pendingGauge.Inc(1)
   900  	}
   901  	// Set the potentially new pending nonce and notify any subsystems of the new tx
   902  	pool.pendingNonces.set(addr, tx.Nonce()+1)
   903  
   904  	// Successful promotion, bump the heartbeat
   905  	pool.beats[addr] = time.Now()
   906  	return true
   907  }
   908  
   909  // addRemotes enqueues a batch of transactions into the pool if they are valid.
   910  // Full pricing constraints will apply.
   911  //
   912  // This method is used to add transactions from the p2p network and does not wait for pool
   913  // reorganization and internal event propagation.
   914  func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
   915  	return pool.Add(txs, false)
   916  }
   917  
   918  // addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
   919  // wrapper around addRemotes.
   920  func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
   921  	return pool.addRemotes([]*types.Transaction{tx})[0]
   922  }
   923  
   924  // addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
   925  func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
   926  	return pool.Add(txs, true)
   927  }
   928  
   929  // addRemoteSync is like addRemote, but waits for pool reorganization. Tests use this method.
   930  func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
   931  	return pool.Add([]*types.Transaction{tx}, true)[0]
   932  }
   933  
   934  // Add enqueues a batch of transactions into the pool if they are valid.
   935  //
   936  // Note, if sync is set the method will block until all internal maintenance
   937  // related to the add is finished. Only use this during tests for determinism.
   938  func (pool *LegacyPool) Add(txs []*types.Transaction, sync bool) []error {
   939  	// Filter out known ones without obtaining the pool lock or recovering signatures
   940  	var (
   941  		errs = make([]error, len(txs))
   942  		news = make([]*types.Transaction, 0, len(txs))
   943  	)
   944  	for i, tx := range txs {
   945  		// If the transaction is known, pre-set the error slot
   946  		if pool.all.Get(tx.Hash()) != nil {
   947  			errs[i] = txpool.ErrAlreadyKnown
   948  			knownTxMeter.Mark(1)
   949  			continue
   950  		}
   951  		// Exclude transactions with basic errors, e.g. invalid signatures and
   952  		// insufficient intrinsic gas, as soon as possible and cache the senders
   953  		// in the transactions before obtaining the lock
   954  		if err := pool.ValidateTxBasics(tx); err != nil {
   955  			errs[i] = err
   956  			log.Trace("Discarding invalid transaction", "hash", tx.Hash(), "err", err)
   957  			invalidTxMeter.Mark(1)
   958  			continue
   959  		}
   960  		// Accumulate all unknown transactions for deeper processing
   961  		news = append(news, tx)
   962  	}
   963  	if len(news) == 0 {
   964  		return errs
   965  	}
   966  
   967  	// Process all the new transactions and merge any errors into the original slice
   968  	pool.mu.Lock()
   969  	newErrs, dirtyAddrs := pool.addTxsLocked(news)
   970  	pool.mu.Unlock()
   971  
   972  	var nilSlot = 0
   973  	for _, err := range newErrs {
   974  		for errs[nilSlot] != nil {
   975  			nilSlot++
   976  		}
   977  		errs[nilSlot] = err
   978  		nilSlot++
   979  	}
   980  	// Reorg the pool internals if needed and return
   981  	done := pool.requestPromoteExecutables(dirtyAddrs)
   982  	if sync {
   983  		<-done
   984  	}
   985  	return errs
   986  }
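
        // Submission sketch (illustrative): network ingress uses the asynchronous
        // form, while tests pass sync=true for determinism, as noted above.
        //
        //	for i, err := range pool.Add(txs, false) {
        //		if err != nil && !errors.Is(err, txpool.ErrAlreadyKnown) {
        //			log.Debug("Transaction rejected", "hash", txs[i].Hash(), "err", err)
        //		}
        //	}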
   987  
   988  // addTxsLocked attempts to queue a batch of transactions if they are valid.
   989  // The transaction pool lock must be held.
   990  func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction) ([]error, *accountSet) {
   991  	dirty := newAccountSet(pool.signer)
   992  	errs := make([]error, len(txs))
   993  	for i, tx := range txs {
   994  		replaced, err := pool.add(tx)
   995  		errs[i] = err
   996  		if err == nil && !replaced {
   997  			dirty.addTx(tx)
   998  		}
   999  	}
  1000  	validTxMeter.Mark(int64(len(dirty.accounts)))
  1001  	return errs, dirty
  1002  }
  1003  
  1004  // Status returns the status (unknown/pending/queued) of a transaction
  1005  // identified by its hash.
  1006  func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
  1007  	tx := pool.get(hash)
  1008  	if tx == nil {
  1009  		return txpool.TxStatusUnknown
  1010  	}
  1011  	from, _ := types.Sender(pool.signer, tx) // already validated
  1012  
  1013  	pool.mu.RLock()
  1014  	defer pool.mu.RUnlock()
  1015  
  1016  	if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
  1017  		return txpool.TxStatusPending
  1018  	} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
  1019  		return txpool.TxStatusQueued
  1020  	}
  1021  	return txpool.TxStatusUnknown
  1022  }
  1023  
  1024  // Get returns a transaction if it is contained in the pool and nil otherwise.
  1025  func (pool *LegacyPool) Get(hash common.Hash) *types.Transaction {
  1026  	return pool.get(hash)
  1027  }
  1032  
  1033  // get returns a transaction if it is contained in the pool and nil otherwise.
  1034  func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
  1035  	return pool.all.Get(hash)
  1036  }
  1037  
  1038  // GetRLP returns an RLP-encoded transaction if it is contained in the pool.
  1039  func (pool *LegacyPool) GetRLP(hash common.Hash) []byte {
  1040  	tx := pool.all.Get(hash)
  1041  	if tx == nil {
  1042  		return nil
  1043  	}
  1044  	encoded, err := rlp.EncodeToBytes(tx)
  1045  	if err != nil {
  1046  		log.Error("Failed to encode transaction in legacy pool", "hash", hash, "err", err)
  1047  		return nil
  1048  	}
  1049  	return encoded
  1050  }
  1051  
  1052  // GetMetadata returns the transaction type and transaction size for the
  1053  // given transaction hash.
  1054  func (pool *LegacyPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
  1055  	tx := pool.all.Get(hash)
  1056  	if tx == nil {
  1057  		return nil
  1058  	}
  1059  	return &txpool.TxMetadata{
  1060  		Type: tx.Type(),
  1061  		Size: tx.Size(),
  1062  	}
  1063  }
  1064  
  1065  // Has reports whether the txpool has a transaction cached with the
  1066  // given hash.
  1067  func (pool *LegacyPool) Has(hash common.Hash) bool {
  1068  	return pool.all.Get(hash) != nil
  1069  }
  1070  
  1071  // removeTx removes a single transaction from the queue, moving all subsequent
  1072  // transactions back to the future queue.
  1073  //
  1074  // If unreserve is false, the account will not be relinquished to the main txpool
  1075  // even if there are no more references to it. This is used to handle a race where
  1076  // a tx being added evicts a previously scheduled tx from the same account, which
  1077  // could lead to a premature release of the lock.
  1078  //
  1079  // Returns the number of transactions removed from the pending queue.
  1080  func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int {
  1081  	// Fetch the transaction we wish to delete
  1082  	tx := pool.all.Get(hash)
  1083  	if tx == nil {
  1084  		return 0
  1085  	}
  1086  	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
  1087  
  1088  	// If after deletion there are no more transactions belonging to this account,
  1089  	// relinquish the address reservation. It's a bit convoluted to do this via a
  1090  	// defer, but it's safer versus the many return pathways.
  1091  	if unreserve {
  1092  		defer func() {
  1093  			var (
  1094  				_, hasPending = pool.pending[addr]
  1095  				_, hasQueued  = pool.queue[addr]
  1096  			)
  1097  			if !hasPending && !hasQueued {
  1098  				pool.reserver.Release(addr)
  1099  			}
  1100  		}()
  1101  	}
  1102  	// Remove it from the list of known transactions
  1103  	pool.all.Remove(hash)
  1104  	if outofbound {
  1105  		pool.priced.Removed(1)
  1106  	}
  1107  	// Remove the transaction from the pending lists and reset the account nonce
  1108  	if pending := pool.pending[addr]; pending != nil {
  1109  		if removed, invalids := pending.Remove(tx); removed {
  1110  			// If no more pending transactions are left, remove the list
  1111  			if pending.Empty() {
  1112  				delete(pool.pending, addr)
  1113  			}
  1114  			// Postpone any invalidated transactions
  1115  			for _, tx := range invalids {
  1116  				// Internal shuffle shouldn't touch the lookup set.
  1117  				pool.enqueueTx(tx.Hash(), tx, false)
  1118  			}
  1119  			// Update the account nonce if needed
  1120  			pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1121  			// Reduce the pending counter
  1122  			pendingGauge.Dec(int64(1 + len(invalids)))
  1123  			return 1 + len(invalids)
  1124  		}
  1125  	}
  1126  	// Transaction is in the future queue
  1127  	if future := pool.queue[addr]; future != nil {
  1128  		if removed, _ := future.Remove(tx); removed {
  1129  			// Reduce the queued counter
  1130  			queuedGauge.Dec(1)
  1131  		}
  1132  		if future.Empty() {
  1133  			delete(pool.queue, addr)
  1134  			delete(pool.beats, addr)
  1135  		}
  1136  	}
  1137  	return 0
  1138  }
  1139  
  1140  // requestReset requests a pool reset to the new head block.
  1141  // The returned channel is closed when the reset has occurred.
  1142  func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
  1143  	select {
  1144  	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
  1145  		return <-pool.reorgDoneCh
  1146  	case <-pool.reorgShutdownCh:
  1147  		return pool.reorgShutdownCh
  1148  	}
  1149  }
  1150  
  1151  // requestPromoteExecutables requests transaction promotion checks for the given addresses.
  1152  // The returned channel is closed when the promotion checks have occurred.
  1153  func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} {
  1154  	select {
  1155  	case pool.reqPromoteCh <- set:
  1156  		return <-pool.reorgDoneCh
  1157  	case <-pool.reorgShutdownCh:
  1158  		return pool.reorgShutdownCh
  1159  	}
  1160  }
  1161  
  1162  // queueTxEvent enqueues a transaction event to be sent in the next reorg run.
  1163  func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) {
  1164  	select {
  1165  	case pool.queueTxEventCh <- tx:
  1166  	case <-pool.reorgShutdownCh:
  1167  	}
  1168  }
  1169  
  1170  // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
  1171  // call those methods directly; instead, request runs using requestReset and
  1172  // requestPromoteExecutables.
  1173  func (pool *LegacyPool) scheduleReorgLoop() {
  1174  	defer pool.wg.Done()
  1175  
  1176  	var (
  1177  		curDone       chan struct{} // non-nil while runReorg is active
  1178  		nextDone      = make(chan struct{})
  1179  		launchNextRun bool
  1180  		reset         *txpoolResetRequest
  1181  		dirtyAccounts *accountSet
  1182  		queuedEvents  = make(map[common.Address]*SortedMap)
  1183  	)
  1184  	for {
  1185  		// Launch next background reorg if needed
  1186  		if curDone == nil && launchNextRun {
  1187  			// Run the background reorg and announcements
  1188  			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
  1189  
  1190  			// Prepare everything for the next round of reorg
  1191  			curDone, nextDone = nextDone, make(chan struct{})
  1192  			launchNextRun = false
  1193  
  1194  			reset, dirtyAccounts = nil, nil
  1195  			queuedEvents = make(map[common.Address]*SortedMap)
  1196  		}
  1197  
  1198  		select {
  1199  		case req := <-pool.reqResetCh:
  1200  			// Reset request: update head if request is already pending.
  1201  			if reset == nil {
  1202  				reset = req
  1203  			} else {
  1204  				reset.newHead = req.newHead
  1205  			}
  1206  			launchNextRun = true
  1207  			pool.reorgDoneCh <- nextDone
  1208  
  1209  		case req := <-pool.reqPromoteCh:
  1210  			// Promote request: update address set if request is already pending.
  1211  			if dirtyAccounts == nil {
  1212  				dirtyAccounts = req
  1213  			} else {
  1214  				dirtyAccounts.merge(req)
  1215  			}
  1216  			launchNextRun = true
  1217  			pool.reorgDoneCh <- nextDone
  1218  
  1219  		case tx := <-pool.queueTxEventCh:
  1220  			// Queue up the event, but don't schedule a reorg. It's up to the caller to
  1221  			// request one later if they want the events sent.
  1222  			addr, _ := types.Sender(pool.signer, tx)
  1223  			if _, ok := queuedEvents[addr]; !ok {
  1224  				queuedEvents[addr] = NewSortedMap()
  1225  			}
  1226  			queuedEvents[addr].Put(tx)
  1227  
  1228  		case <-curDone:
  1229  			curDone = nil
  1230  
  1231  		case <-pool.reorgShutdownCh:
  1232  			// Wait for current run to finish.
  1233  			if curDone != nil {
  1234  				<-curDone
  1235  			}
  1236  			close(nextDone)
  1237  			return
  1238  		}
  1239  	}
  1240  }
  1241  
  1242  // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
  1243  func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*SortedMap) {
  1244  	defer func(t0 time.Time) {
  1245  		reorgDurationTimer.Update(time.Since(t0))
  1246  	}(time.Now())
  1247  	defer close(done)
  1248  
  1249  	var promoteAddrs []common.Address
  1250  	if dirtyAccounts != nil && reset == nil {
  1251  		// Only dirty accounts need to be promoted, unless we're resetting.
  1252  		// For resets, all addresses in the tx queue will be promoted and
  1253  		// the flatten operation can be avoided.
  1254  		promoteAddrs = dirtyAccounts.flatten()
  1255  	}
  1256  	pool.mu.Lock()
  1257  	if reset != nil {
  1258  		// Reset from the old head to the new, rescheduling any reorged transactions
  1259  		pool.reset(reset.oldHead, reset.newHead)
  1260  
  1261  		// Nonces were reset, discard any events that became stale
  1262  		for addr := range events {
  1263  			events[addr].Forward(pool.pendingNonces.get(addr))
  1264  			if events[addr].Len() == 0 {
  1265  				delete(events, addr)
  1266  			}
  1267  		}
  1268  		// Reset needs promote for all addresses
  1269  		promoteAddrs = make([]common.Address, 0, len(pool.queue))
  1270  		for addr := range pool.queue {
  1271  			promoteAddrs = append(promoteAddrs, addr)
  1272  		}
  1273  	}
  1274  	// Check for pending transactions for every account that sent new ones
  1275  	promoted := pool.promoteExecutables(promoteAddrs)
  1276  
  1277  	// If a new block appeared, validate the pool of pending transactions. This will
  1278  	// remove any transaction that has been included in the block or was invalidated
  1279  	// because of another transaction (e.g. higher gas price).
  1280  	if reset != nil {
  1281  		pool.demoteUnexecutables()
  1282  		if reset.newHead != nil {
  1283  			if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
  1284  				pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead)
  1285  				pool.priced.SetBaseFee(pendingBaseFee)
  1286  			} else {
  1287  				pool.priced.Reheap()
  1288  			}
  1289  		}
  1290  		// Update all accounts to the latest known pending nonce
  1291  		nonces := make(map[common.Address]uint64, len(pool.pending))
  1292  		for addr, list := range pool.pending {
  1293  			highestPending := list.LastElement()
  1294  			nonces[addr] = highestPending.Nonce() + 1
  1295  		}
  1296  		pool.pendingNonces.setAll(nonces)
  1297  	}
  1298  	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
  1299  	pool.truncatePending()
  1300  	pool.truncateQueue()
  1301  
  1302  	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
  1303  	pool.changesSinceReorg = 0 // Reset change counter
  1304  	pool.mu.Unlock()
  1305  
  1306  	// Notify subsystems for newly added transactions
  1307  	for _, tx := range promoted {
  1308  		addr, _ := types.Sender(pool.signer, tx)
  1309  		if _, ok := events[addr]; !ok {
  1310  			events[addr] = NewSortedMap()
  1311  		}
  1312  		events[addr].Put(tx)
  1313  	}
  1314  	if len(events) > 0 {
  1315  		var txs []*types.Transaction
  1316  		for _, set := range events {
  1317  			txs = append(txs, set.Flatten()...)
  1318  		}
  1319  		pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
  1320  	}
  1321  }
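        // Order of operations in runReorg above (descriptive summary only):
        //
        //	reset(oldHead, newHead)  // on reset requests: realign pool to the new head
        //	promoteExecutables(...)  // move newly executable txs from queue to pending
        //	demoteUnexecutables()    // after a reset: move stale pending txs back to queue
        //	truncatePending/Queue()  // enforce the global pending and queue limits
        //	txFeed.Send(...)         // announce promoted and queued transactions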
  1322  
  1323  // reset retrieves the current state of the blockchain and ensures the content
  1324  // of the transaction pool is valid with regard to the chain state.
  1325  func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
  1326  	// If we're reorging an old state, reinject all dropped transactions
  1327  	var reinject types.Transactions
  1328  
  1329  	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
  1330  		// If the reorg is too deep, avoid doing it (will happen during fast sync)
  1331  		oldNum := oldHead.Number.Uint64()
  1332  		newNum := newHead.Number.Uint64()
  1333  
  1334  		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
  1335  			log.Debug("Skipping deep transaction reorg", "depth", depth)
  1336  		} else {
  1337  			// Reorg seems shallow enough to pull in all transactions into memory
  1338  			var (
  1339  				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  1340  				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
  1341  			)
  1342  			if rem == nil {
  1343  				// This can happen if a setHead is performed, where we simply discard the old
  1344  				// head from the chain.
  1345  				// If that is the case, we don't have the lost transactions anymore, and
  1346  				// there's nothing to add
  1347  				if newNum >= oldNum {
  1348  					// If we reorged to the same or a higher number, it's not a setHead case
  1349  					log.Warn("Transaction pool reset with missing old head",
  1350  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1351  					return
  1352  				}
  1353  				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
  1354  				log.Debug("Skipping transaction reset caused by setHead",
  1355  					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1356  				// We still need to update the current state so that the lost transactions can be re-added by the user
  1357  			} else {
  1358  				if add == nil {
  1359  					// If the new head block is unavailable, something happened between
  1360  					// the firing of the new-head event and now: most likely a reorg
  1361  					// caused by a sync reversion or an explicit setHead back to an
  1362  					// earlier block.
  1363  					log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
  1364  					return
  1365  				}
  1366  				var discarded, included types.Transactions
  1367  				for rem.NumberU64() > add.NumberU64() {
  1368  					discarded = append(discarded, rem.Transactions()...)
  1369  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1370  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1371  						return
  1372  					}
  1373  				}
  1374  				for add.NumberU64() > rem.NumberU64() {
  1375  					included = append(included, add.Transactions()...)
  1376  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1377  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1378  						return
  1379  					}
  1380  				}
  1381  				for rem.Hash() != add.Hash() {
  1382  					discarded = append(discarded, rem.Transactions()...)
  1383  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1384  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1385  						return
  1386  					}
  1387  					included = append(included, add.Transactions()...)
  1388  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1389  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1390  						return
  1391  					}
  1392  				}
  1393  				lost := make([]*types.Transaction, 0, len(discarded))
  1394  				for _, tx := range types.TxDifference(discarded, included) {
  1395  					if pool.Filter(tx) {
  1396  						lost = append(lost, tx)
  1397  					}
  1398  				}
  1399  				reinject = lost
  1400  			}
  1401  		}
  1402  	}
  1403  	// Initialize the internal state to the current head
  1404  	if newHead == nil {
  1405  		newHead = pool.chain.CurrentBlock() // Special case during testing
  1406  	}
  1407  	statedb, err := pool.chain.StateAt(newHead.Root)
  1408  	if err != nil {
  1409  		log.Error("Failed to reset txpool state", "err", err)
  1410  		return
  1411  	}
  1412  	pool.currentHead.Store(newHead)
  1413  	pool.currentState = statedb
  1414  	pool.pendingNonces = newNoncer(statedb)
  1415  
  1416  	// Inject any transactions discarded due to reorgs
  1417  	log.Debug("Reinjecting stale transactions", "count", len(reinject))
  1418  	core.SenderCacher().Recover(pool.signer, reinject)
  1419  	pool.addTxsLocked(reinject)
  1420  }
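        // Illustrative fork walk for the reset above: given an old chain
        // ...A->B1->C1 and a new chain ...A->B2->C2, the loops collect
        // discarded = txs(C1)+txs(B1) and included = txs(C2)+txs(B2), walking
        // both sides back until they meet at the common ancestor A. The
        // difference (filtered for txs the pool still accepts) is what gets
        // re-injected.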
  1421  
  1422  // promoteExecutables moves transactions that have become processable from the
  1423  // future queue to the set of pending transactions. During this process, all
  1424  // invalidated transactions (low nonce, low balance) are deleted.
  1425  func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
  1426  	// Track the promoted transactions to broadcast them at once
  1427  	var promoted []*types.Transaction
  1428  
  1429  	// Iterate over all accounts and promote any executable transactions
  1430  	gasLimit := pool.currentHead.Load().GasLimit
  1431  	for _, addr := range accounts {
  1432  		list := pool.queue[addr]
  1433  		if list == nil {
  1434  			continue // Just in case someone calls with a non-existent account
  1435  		}
  1436  		// Drop all transactions that are deemed too old (low nonce)
  1437  		forwards := list.Forward(pool.currentState.GetNonce(addr))
  1438  		for _, tx := range forwards {
  1439  			pool.all.Remove(tx.Hash())
  1440  		}
  1441  		log.Trace("Removed old queued transactions", "count", len(forwards))
  1442  		// Drop all transactions that are too costly (low balance or out of gas)
  1443  		drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
  1444  		for _, tx := range drops {
  1445  			pool.all.Remove(tx.Hash())
  1446  		}
  1447  		log.Trace("Removed unpayable queued transactions", "count", len(drops))
  1448  		queuedNofundsMeter.Mark(int64(len(drops)))
  1449  
  1450  		// Gather all executable transactions and promote them
  1451  		readies := list.Ready(pool.pendingNonces.get(addr))
  1452  		for _, tx := range readies {
  1453  			hash := tx.Hash()
  1454  			if pool.promoteTx(addr, hash, tx) {
  1455  				promoted = append(promoted, tx)
  1456  			}
  1457  		}
  1458  		log.Trace("Promoted queued transactions", "count", len(promoted))
  1459  		queuedGauge.Dec(int64(len(readies)))
  1460  
  1461  		// Drop all transactions over the allowed limit
  1462  		var caps = list.Cap(int(pool.config.AccountQueue))
  1463  		for _, tx := range caps {
  1464  			hash := tx.Hash()
  1465  			pool.all.Remove(hash)
  1466  			log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
  1467  		}
  1468  		queuedRateLimitMeter.Mark(int64(len(caps)))
  1469  		// Mark all the items dropped as removed
  1470  		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
  1471  		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1472  
  1473  		// Delete the entire queue entry if it became empty.
  1474  		if list.Empty() {
  1475  			delete(pool.queue, addr)
  1476  			delete(pool.beats, addr)
  1477  			if _, ok := pool.pending[addr]; !ok {
  1478  				pool.reserver.Release(addr)
  1479  			}
  1480  		}
  1481  	}
  1482  	return promoted
  1483  }
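        // Per-account promotion steps applied above (descriptive summary):
        //
        //	list.Forward(stateNonce)      // drop txs with an already-used nonce
        //	list.Filter(balance, gasLim)  // drop txs the account can no longer afford
        //	list.Ready(pendingNonce)      // promote the gapless executable prefix
        //	list.Cap(AccountQueue)        // enforce the per-account queue limit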
  1484  
  1485  // truncatePending removes transactions from the pending queue if the pool is above the
  1486  // pending limit. The algorithm tries to reduce transaction counts by an approximately
  1487  // equal number for all accounts with many pending transactions.
  1488  func (pool *LegacyPool) truncatePending() {
  1489  	pending := uint64(0)
  1490  
  1491  	// Assemble a spam order to penalize large transactors first
  1492  	spammers := prque.New[uint64, common.Address](nil)
  1493  	for addr, list := range pool.pending {
  1494  		// Only evict transactions from high rollers
  1495  		length := uint64(list.Len())
  1496  		pending += length
  1497  		if length > pool.config.AccountSlots {
  1498  			spammers.Push(addr, length)
  1499  		}
  1500  	}
  1501  	if pending <= pool.config.GlobalSlots {
  1502  		return
  1503  	}
  1504  	pendingBeforeCap := pending
  1505  
  1506  	// Gradually drop transactions from offenders
  1507  	offenders := []common.Address{}
  1508  	for pending > pool.config.GlobalSlots && !spammers.Empty() {
  1509  		// Retrieve the next offender
  1510  		offender, _ := spammers.Pop()
  1511  		offenders = append(offenders, offender)
  1512  
  1513  		// Equalize transaction counts until all are the same or below the threshold
  1514  		if len(offenders) > 1 {
  1515  			// Calculate the equalization threshold for all current offenders
  1516  			threshold := pool.pending[offender].Len()
  1517  
  1518  			// Iteratively reduce all offenders until below limit or threshold reached
  1519  			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
  1520  				for i := 0; i < len(offenders)-1; i++ {
  1521  					list := pool.pending[offenders[i]]
  1522  
  1523  					caps := list.Cap(list.Len() - 1)
  1524  					for _, tx := range caps {
  1525  						// Drop the transaction from the global pools too
  1526  						hash := tx.Hash()
  1527  						pool.all.Remove(hash)
  1528  
  1529  						// Update the account nonce to the dropped transaction
  1530  						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
  1531  						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1532  					}
  1533  					pool.priced.Removed(len(caps))
  1534  					pendingGauge.Dec(int64(len(caps)))
  1535  
  1536  					pending--
  1537  				}
  1538  			}
  1539  		}
  1540  	}
  1541  
  1542  	// If still above threshold, reduce to limit or min allowance
  1543  	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
  1544  		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
  1545  			for _, addr := range offenders {
  1546  				list := pool.pending[addr]
  1547  
  1548  				caps := list.Cap(list.Len() - 1)
  1549  				for _, tx := range caps {
  1550  					// Drop the transaction from the global pools too
  1551  					hash := tx.Hash()
  1552  					pool.all.Remove(hash)
  1553  
  1554  					// Update the account nonce to the dropped transaction
  1555  					pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1556  					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1557  				}
  1558  				pool.priced.Removed(len(caps))
  1559  				pendingGauge.Dec(int64(len(caps)))
  1560  				pending--
  1561  			}
  1562  		}
  1563  	}
  1564  	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
  1565  }
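        // Worked example for the capping above (hypothetical numbers): with
        // GlobalSlots = 10 and offenders holding 8, 6 and 4 pending txs, the
        // first phase trims the 8-tx account to the 6-tx level, then both
        // 6-tx accounts in lock-step towards the 4-tx level; with 12 txs still
        // pooled, the final phase reduces all offenders together until the
        // total no longer exceeds the limit or each reaches AccountSlots.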
  1566  
  1567  // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
  1568  func (pool *LegacyPool) truncateQueue() {
  1569  	queued := uint64(0)
  1570  	for _, list := range pool.queue {
  1571  		queued += uint64(list.Len())
  1572  	}
  1573  	if queued <= pool.config.GlobalQueue {
  1574  		return
  1575  	}
  1576  
  1577  	// Sort all accounts with queued transactions by heartbeat
  1578  	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
  1579  	for addr := range pool.queue {
  1580  		addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
  1581  	}
  1582  	sort.Sort(sort.Reverse(addresses))
  1583  
  1584  	// Drop transactions until the total is below the limit
  1585  	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
  1586  		addr := addresses[len(addresses)-1]
  1587  		list := pool.queue[addr.address]
  1588  
  1589  		addresses = addresses[:len(addresses)-1]
  1590  
  1591  		// Drop all of the account's transactions if they fit within the remaining overflow
  1592  		if size := uint64(list.Len()); size <= drop {
  1593  			for _, tx := range list.Flatten() {
  1594  				pool.removeTx(tx.Hash(), true, true)
  1595  			}
  1596  			drop -= size
  1597  			queuedRateLimitMeter.Mark(int64(size))
  1598  			continue
  1599  		}
  1600  		// Otherwise drop only the last few transactions
  1601  		txs := list.Flatten()
  1602  		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
  1603  			pool.removeTx(txs[i].Hash(), true, true)
  1604  			drop--
  1605  			queuedRateLimitMeter.Mark(1)
  1606  		}
  1607  	}
  1608  }
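        // Eviction order note: the heartbeat sort above is reversed and
        // consumed from the tail, so accounts that have been inactive the
        // longest lose their queued transactions first.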
  1609  
  1610  // demoteUnexecutables removes invalid and processed transactions from the pool's
  1611  // executable/pending queue; any subsequent transactions that become unexecutable
  1612  // are moved back into the future queue.
  1613  //
  1614  // Note: transactions are not marked as removed in the priced list because re-heaping
  1615  // is always explicitly triggered by SetBaseFee, and it would be unnecessary and
  1616  // wasteful to trigger a re-heap in this function.
  1617  func (pool *LegacyPool) demoteUnexecutables() {
  1618  	// Iterate over all accounts and demote any non-executable transactions
  1619  	gasLimit := pool.currentHead.Load().GasLimit
  1620  	for addr, list := range pool.pending {
  1621  		nonce := pool.currentState.GetNonce(addr)
  1622  
  1623  		// Drop all transactions that are deemed too old (low nonce)
  1624  		olds := list.Forward(nonce)
  1625  		for _, tx := range olds {
  1626  			hash := tx.Hash()
  1627  			pool.all.Remove(hash)
  1628  			log.Trace("Removed old pending transaction", "hash", hash)
  1629  		}
  1630  		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
  1631  		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
  1632  		for _, tx := range drops {
  1633  			hash := tx.Hash()
  1634  			pool.all.Remove(hash)
  1635  			log.Trace("Removed unpayable pending transaction", "hash", hash)
  1636  		}
  1637  		pendingNofundsMeter.Mark(int64(len(drops)))
  1638  
  1639  		for _, tx := range invalids {
  1640  			hash := tx.Hash()
  1641  			log.Trace("Demoting pending transaction", "hash", hash)
  1642  
  1643  			// Internal shuffle shouldn't touch the lookup set.
  1644  			pool.enqueueTx(hash, tx, false)
  1645  		}
  1646  		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1647  
  1648  		// If there's a gap in front, alert (should never happen) and postpone all transactions
  1649  		if list.Len() > 0 && list.txs.Get(nonce) == nil {
  1650  			gapped := list.Cap(0)
  1651  			for _, tx := range gapped {
  1652  				hash := tx.Hash()
  1653  				log.Warn("Demoting invalidated transaction", "hash", hash)
  1654  
  1655  				// Internal shuffle shouldn't touch the lookup set.
  1656  				pool.enqueueTx(hash, tx, false)
  1657  			}
  1658  			pendingGauge.Dec(int64(len(gapped)))
  1659  		}
  1660  		// Delete the entire pending entry if it became empty.
  1661  		if list.Empty() {
  1662  			delete(pool.pending, addr)
  1663  			if _, ok := pool.queue[addr]; !ok {
  1664  				pool.reserver.Release(addr)
  1665  			}
  1666  		}
  1667  	}
  1668  }
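        // Per-account demotion steps applied above (descriptive summary):
        //
        //	list.Forward(stateNonce)      // drop already-mined or stale txs
        //	list.Filter(balance, gasLim)  // drop unpayable txs, requeue their successors
        //	list.Cap(0)                   // on an unexpected nonce gap: requeue everything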
  1669  
  1670  // addressByHeartbeat is an account address tagged with its last activity timestamp.
  1671  type addressByHeartbeat struct {
  1672  	address   common.Address
  1673  	heartbeat time.Time
  1674  }
  1675  
  1676  type addressesByHeartbeat []addressByHeartbeat
  1677  
  1678  func (a addressesByHeartbeat) Len() int           { return len(a) }
  1679  func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  1680  func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
  1681  
  1682  // accountSet is simply a set of addresses to check for existence, and a signer
  1683  // capable of deriving addresses from transactions.
  1684  type accountSet struct {
  1685  	accounts map[common.Address]struct{}
  1686  	signer   types.Signer
  1687  	cache    []common.Address
  1688  }
  1689  
  1690  // newAccountSet creates a new address set with an associated signer for sender
  1691  // derivations.
  1692  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  1693  	as := &accountSet{
  1694  		accounts: make(map[common.Address]struct{}, len(addrs)),
  1695  		signer:   signer,
  1696  	}
  1697  	for _, addr := range addrs {
  1698  		as.add(addr)
  1699  	}
  1700  	return as
  1701  }
  1702  
  1703  // add inserts a new address into the set to track.
  1704  func (as *accountSet) add(addr common.Address) {
  1705  	as.accounts[addr] = struct{}{}
  1706  	as.cache = nil
  1707  }
  1708  
  1709  // addTx adds the sender of tx into the set.
  1710  func (as *accountSet) addTx(tx *types.Transaction) {
  1711  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1712  		as.add(addr)
  1713  	}
  1714  }
  1715  
  1716  // flatten returns the list of addresses within this set, also caching it for later
  1717  // reuse. The returned slice should not be changed!
  1718  func (as *accountSet) flatten() []common.Address {
  1719  	if as.cache == nil {
  1720  		as.cache = slices.Collect(maps.Keys(as.accounts))
  1721  	}
  1722  	return as.cache
  1723  }
  1724  
  1725  // merge adds all addresses from the 'other' set into 'as'.
  1726  func (as *accountSet) merge(other *accountSet) {
  1727  	maps.Copy(as.accounts, other.accounts)
  1728  	as.cache = nil
  1729  }
  1730  
  1731  // lookup is used internally by LegacyPool to track transactions while allowing
  1732  // lookup without mutex contention.
  1733  //
  1734  // Note, although this type is properly protected against concurrent access, it
  1735  // is **not** a type that should ever be mutated or even exposed outside of the
  1736  // transaction pool, since its internal state is tightly coupled with the pool's
  1737  // internal mechanisms. The sole purpose of the type is to permit out-of-band
  1738  // peeking into the pool in LegacyPool.Get without having to acquire the widely scoped
  1739  // LegacyPool.mu mutex.
  1740  type lookup struct {
  1741  	slots int
  1742  	lock  sync.RWMutex
  1743  	txs   map[common.Hash]*types.Transaction
  1744  
  1745  	auths map[common.Address][]common.Hash // All accounts with a pooled authorization
  1746  }
  1747  
  1748  // newLookup returns a new lookup structure.
  1749  func newLookup() *lookup {
  1750  	return &lookup{
  1751  		txs:   make(map[common.Hash]*types.Transaction),
  1752  		auths: make(map[common.Address][]common.Hash),
  1753  	}
  1754  }
  1755  
  1756  // Range calls f on each hash and transaction present in the lookup. The
  1757  // callback passed in should return true to continue the iteration, or false
  1758  // to stop it early.
  1759  func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
  1760  	t.lock.RLock()
  1761  	defer t.lock.RUnlock()
  1762  
  1763  	for key, value := range t.txs {
  1764  		if !f(key, value) {
  1765  			return
  1766  		}
  1767  	}
  1768  }
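        // Hypothetical usage sketch for Range (minTip and the surrounding
        // names are illustrative only, not part of the pool):
        //
        //	count := 0
        //	pool.all.Range(func(hash common.Hash, tx *types.Transaction) bool {
        //		if tx.GasTipCapIntCmp(minTip) >= 0 {
        //			count++
        //		}
        //		return true // keep iterating over the whole lookup
        //	})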
  1769  
  1770  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1771  func (t *lookup) Get(hash common.Hash) *types.Transaction {
  1772  	t.lock.RLock()
  1773  	defer t.lock.RUnlock()
  1774  
  1775  	return t.txs[hash]
  1776  }
  1777  
  1778  // Count returns the current number of transactions in the lookup.
  1779  func (t *lookup) Count() int {
  1780  	t.lock.RLock()
  1781  	defer t.lock.RUnlock()
  1782  
  1783  	return len(t.txs)
  1784  }
  1785  
  1786  // Slots returns the current number of slots used in the lookup.
  1787  func (t *lookup) Slots() int {
  1788  	t.lock.RLock()
  1789  	defer t.lock.RUnlock()
  1790  
  1791  	return t.slots
  1792  }
  1793  
  1794  // Add adds a transaction to the lookup.
  1795  func (t *lookup) Add(tx *types.Transaction) {
  1796  	t.lock.Lock()
  1797  	defer t.lock.Unlock()
  1798  
  1799  	t.slots += numSlots(tx)
  1800  	slotsGauge.Update(int64(t.slots))
  1801  
  1802  	t.txs[tx.Hash()] = tx
  1803  	t.addAuthorities(tx)
  1804  }
  1805  
  1806  // Remove removes a transaction from the lookup.
  1807  func (t *lookup) Remove(hash common.Hash) {
  1808  	t.lock.Lock()
  1809  	defer t.lock.Unlock()
  1810  
  1811  	tx, ok := t.txs[hash]
  1812  	if !ok {
  1813  		log.Error("No transaction found to be deleted", "hash", hash)
  1814  		return
  1815  	}
  1816  	t.removeAuthorities(tx)
  1817  	t.slots -= numSlots(tx)
  1818  	slotsGauge.Update(int64(t.slots))
  1819  
  1820  	delete(t.txs, hash)
  1821  }
  1822  
  1823  // Clear resets the lookup structure, removing all stored entries.
  1824  func (t *lookup) Clear() {
  1825  	t.lock.Lock()
  1826  	defer t.lock.Unlock()
  1827  
  1828  	t.slots = 0
  1829  	t.txs = make(map[common.Hash]*types.Transaction)
  1830  	t.auths = make(map[common.Address][]common.Hash)
  1831  }
  1832  
  1833  // TxsBelowTip finds all remote transactions below the given tip threshold.
  1834  func (t *lookup) TxsBelowTip(threshold *big.Int) types.Transactions {
  1835  	found := make(types.Transactions, 0, 128)
  1836  	t.Range(func(hash common.Hash, tx *types.Transaction) bool {
  1837  		if tx.GasTipCapIntCmp(threshold) < 0 {
  1838  			found = append(found, tx)
  1839  		}
  1840  		return true
  1841  	})
  1842  	return found
  1843  }
  1844  
  1845  // addAuthorities tracks the supplied tx in relation to each authority it
  1846  // specifies.
  1847  func (t *lookup) addAuthorities(tx *types.Transaction) {
  1848  	for _, addr := range tx.SetCodeAuthorities() {
  1849  		list, ok := t.auths[addr]
  1850  		if !ok {
  1851  			list = []common.Hash{}
  1852  		}
  1853  		if slices.Contains(list, tx.Hash()) {
  1854  			// Don't add duplicates.
  1855  			continue
  1856  		}
  1857  		list = append(list, tx.Hash())
  1858  		t.auths[addr] = list
  1859  	}
  1860  }
  1861  
  1862  // removeAuthorities stops tracking the supplied tx in relation to its
  1863  // authorities.
  1864  func (t *lookup) removeAuthorities(tx *types.Transaction) {
  1865  	hash := tx.Hash()
  1866  	for _, addr := range tx.SetCodeAuthorities() {
  1867  		list := t.auths[addr]
  1868  		// Remove tx from tracker.
  1869  		if i := slices.Index(list, hash); i >= 0 {
  1870  			list = append(list[:i], list[i+1:]...)
  1871  		} else {
  1872  			log.Error("Authority with untracked tx", "addr", addr, "hash", hash)
  1873  		}
  1874  		if len(list) == 0 {
  1875  			// If list is newly empty, delete it entirely.
  1876  			delete(t.auths, addr)
  1877  			continue
  1878  		}
  1879  		t.auths[addr] = list
  1880  	}
  1881  }
  1882  
  1883  // hasAuth returns a flag indicating whether there are pending authorizations
  1884  // from the specified address.
  1885  func (t *lookup) hasAuth(addr common.Address) bool {
  1886  	t.lock.RLock()
  1887  	defer t.lock.RUnlock()
  1888  
  1889  	return len(t.auths[addr]) > 0
  1890  }
  1891  
  1892  // numSlots calculates the number of slots needed for a single transaction.
  1893  func numSlots(tx *types.Transaction) int {
  1894  	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
  1895  }
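        // For example, a 10KB transaction occupies a single 32KB slot, while a
        // 100KB one takes ceil(100/32) = 4 slots; txMaxSize ensures no
        // transaction ever needs more than 4 slots.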
  1896  
  1897  // Clear implements txpool.SubPool, removing all tracked txs from the pool
  1898  // and rotating the journal.
  1899  //
  1900  // Note, do not use this in production / live code. In live code, the pool is
  1901  // meant to reset on a separate thread to avoid DoS vectors.
  1902  func (pool *LegacyPool) Clear() {
  1903  	pool.mu.Lock()
  1904  	defer pool.mu.Unlock()
  1905  
  1906  	// Unreserve each tracked account. Ideally, we could just clear the
  1907  	// reservation map in the parent txpool context. However, if we clear in
  1908  	// parent context, to avoid exposing the subpool lock, we have to lock the
  1909  	// reservations and then lock each subpool.
  1910  	//
  1911  	// This creates the potential for a deadlock situation:
  1912  	//
  1913  	// * TxPool.Clear locks the reservations
  1914  	// * a new transaction is received which locks the subpool mutex
  1915  	// * TxPool.Clear attempts to lock subpool mutex
  1916  	//
  1917  	// The transaction addition may attempt to reserve the sender addr which
  1918  	// can't happen until Clear releases the reservation lock. Clear cannot
  1919  	// acquire the subpool lock until the transaction addition is completed.
  1920  
  1921  	for addr := range pool.pending {
  1922  		if _, ok := pool.queue[addr]; !ok {
  1923  			pool.reserver.Release(addr)
  1924  		}
  1925  	}
  1926  	for addr := range pool.queue {
  1927  		pool.reserver.Release(addr)
  1928  	}
  1929  	pool.all.Clear()
  1930  	pool.priced.Reheap()
  1931  	pool.pending = make(map[common.Address]*list)
  1932  	pool.queue = make(map[common.Address]*list)
  1933  	pool.pendingNonces = newNoncer(pool.currentState)
  1934  }
  1935  
  1936  // HasPendingAuth returns a flag indicating whether there are pending
  1937  // authorizations from the specific address cached in the pool.
  1938  func (pool *LegacyPool) HasPendingAuth(addr common.Address) bool {
  1939  	return pool.all.hasAuth(addr)
  1940  }