github.com/ethereum/go-ethereum@v1.14.4-0.20240516095835-473ee8fc07a3/core/txpool/legacypool/legacypool.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package legacypool implements the normal EVM execution transaction pool.
    18  package legacypool
    19  
    20  import (
    21  	"errors"
    22  	"math"
    23  	"math/big"
    24  	"sort"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/common/prque"
    31  	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
    32  	"github.com/ethereum/go-ethereum/core"
    33  	"github.com/ethereum/go-ethereum/core/state"
    34  	"github.com/ethereum/go-ethereum/core/txpool"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/event"
    37  	"github.com/ethereum/go-ethereum/log"
    38  	"github.com/ethereum/go-ethereum/metrics"
    39  	"github.com/ethereum/go-ethereum/params"
    40  	"github.com/holiman/uint256"
    41  )
    42  
    43  const (
    44  	// txSlotSize is used to calculate how many data slots a single transaction
    45  	// takes up based on its size. The slots are used as DoS protection, ensuring
    46  	// that validating a new transaction remains a constant operation (in reality
    47  	// O(maxslots), where maxslots is currently 4).
    48  	txSlotSize = 32 * 1024
    49  
    50  	// txMaxSize is the maximum size a single transaction can have. This field has
    51  	// non-trivial consequences: larger transactions are significantly harder and
    52  	// more expensive to propagate; larger transactions also take more resources
    53  	// to validate whether they fit into the pool or not.
    54  	txMaxSize = 4 * txSlotSize // 128KB
    55  )
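
        // Illustrative sketch of the slot arithmetic used throughout this file. The
        // numSlots helper (defined further down) conventionally rounds the transaction
        // size up to whole slots, roughly:
        //
        //	slots = (tx.Size() + txSlotSize - 1) / txSlotSize
        //
        // so a ~200 byte transfer occupies a single slot, while a transaction close
        // to txMaxSize occupies the full four slots.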
    56  
    57  var (
    58  	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
    59  	// another remote transaction.
    60  	ErrTxPoolOverflow = errors.New("txpool is full")
    61  )
    62  
    63  var (
    64  	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
    65  	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
    66  )
    67  
    68  var (
    69  	// Metrics for the pending pool
    70  	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
    71  	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
    72  	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
    73  	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
    74  
    75  	// Metrics for the queued pool
    76  	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
    77  	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
    78  	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
    79  	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
    80  	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
    81  
    82  	// General tx metrics
    83  	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
    84  	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
    85  	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
    86  	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
    87  	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
    88  
    89  	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
    90  	// txpool reorgs.
    91  	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
    92  	// reorgDurationTimer measures how long a txpool reorg takes.
    93  	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
    94  	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
    95  	// that this number is pretty low, since txpool reorgs happen very frequently.
    96  	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
    97  
    98  	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
    99  	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
   100  	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
   101  	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
   102  
   103  	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
   104  )
   105  
   106  // BlockChain defines the minimal set of methods needed to back a tx pool with
   107  // a chain. It exists to allow the live chain to be mocked out in tests.
   108  type BlockChain interface {
   109  	// Config retrieves the chain's fork configuration.
   110  	Config() *params.ChainConfig
   111  
   112  	// CurrentBlock returns the current head of the chain.
   113  	CurrentBlock() *types.Header
   114  
   115  	// GetBlock retrieves a specific block, used during pool resets.
   116  	GetBlock(hash common.Hash, number uint64) *types.Block
   117  
   118  	// StateAt returns a state database for a given root hash (generally the head).
   119  	StateAt(root common.Hash) (*state.StateDB, error)
   120  }
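
        // Because only these four methods are needed, tests can back the pool with a
        // small stand-in instead of a full blockchain. A minimal sketch (the type and
        // field names below are illustrative, not part of this package):
        //
        //	type mockChain struct {
        //		config  *params.ChainConfig
        //		head    *types.Header
        //		statedb *state.StateDB
        //	}
        //
        //	func (c *mockChain) Config() *params.ChainConfig { return c.config }
        //	func (c *mockChain) CurrentBlock() *types.Header  { return c.head }
        //	func (c *mockChain) GetBlock(common.Hash, uint64) *types.Block { return nil }
        //	func (c *mockChain) StateAt(common.Hash) (*state.StateDB, error) { return c.statedb, nil }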
   121  
   122  // Config are the configuration parameters of the transaction pool.
   123  type Config struct {
   124  	Locals    []common.Address // Addresses that should be treated by default as local
   125  	NoLocals  bool             // Whether local transaction handling should be disabled
   126  	Journal   string           // Journal of local transactions to survive node restarts
   127  	Rejournal time.Duration    // Time interval to regenerate the local transaction journal
   128  
   129  	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
   130  	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
   131  
   132  	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
   133  	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
   134  	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
   135  	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
   136  
   137  	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
   138  }
   139  
   140  // DefaultConfig contains the default configurations for the transaction pool.
   141  var DefaultConfig = Config{
   142  	Journal:   "transactions.rlp",
   143  	Rejournal: time.Hour,
   144  
   145  	PriceLimit: 1,
   146  	PriceBump:  10,
   147  
   148  	AccountSlots: 16,
   149  	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
   150  	AccountQueue: 64,
   151  	GlobalQueue:  1024,
   152  
   153  	Lifetime: 3 * time.Hour,
   154  }
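
        // Callers typically start from DefaultConfig and override individual fields
        // rather than building a Config from scratch. A short sketch (the chain value
        // is assumed to satisfy the BlockChain interface above):
        //
        //	cfg := DefaultConfig
        //	cfg.PriceLimit = 2      // insist on a 2 wei minimum tip
        //	cfg.AccountQueue = 128  // allow more queued transactions per account
        //	pool := New(cfg, chain) // out-of-range values are fixed up by sanitize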
   155  
   156  // sanitize checks the provided user configurations and changes anything that's
   157  // unreasonable or unworkable.
   158  func (config *Config) sanitize() Config {
   159  	conf := *config
   160  	if conf.Rejournal < time.Second {
   161  		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
   162  		conf.Rejournal = time.Second
   163  	}
   164  	if conf.PriceLimit < 1 {
   165  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
   166  		conf.PriceLimit = DefaultConfig.PriceLimit
   167  	}
   168  	if conf.PriceBump < 1 {
   169  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
   170  		conf.PriceBump = DefaultConfig.PriceBump
   171  	}
   172  	if conf.AccountSlots < 1 {
   173  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
   174  		conf.AccountSlots = DefaultConfig.AccountSlots
   175  	}
   176  	if conf.GlobalSlots < 1 {
   177  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
   178  		conf.GlobalSlots = DefaultConfig.GlobalSlots
   179  	}
   180  	if conf.AccountQueue < 1 {
   181  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
   182  		conf.AccountQueue = DefaultConfig.AccountQueue
   183  	}
   184  	if conf.GlobalQueue < 1 {
   185  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
   186  		conf.GlobalQueue = DefaultConfig.GlobalQueue
   187  	}
   188  	if conf.Lifetime < 1 {
   189  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
   190  		conf.Lifetime = DefaultConfig.Lifetime
   191  	}
   192  	return conf
   193  }
   194  
   195  // LegacyPool contains all currently known transactions. Transactions
   196  // enter the pool when they are received from the network or submitted
   197  // locally. They exit the pool when they are included in the blockchain.
   198  //
   199  // The pool separates processable transactions (which can be applied to the
   200  // current state) and future transactions. Transactions move between those
   201  // two states over time as they are received and processed.
   202  type LegacyPool struct {
   203  	config      Config
   204  	chainconfig *params.ChainConfig
   205  	chain       BlockChain
   206  	gasTip      atomic.Pointer[uint256.Int]
   207  	txFeed      event.Feed
   208  	signer      types.Signer
   209  	mu          sync.RWMutex
   210  
   211  	currentHead   atomic.Pointer[types.Header] // Current head of the blockchain
   212  	currentState  *state.StateDB               // Current state in the blockchain head
   213  	pendingNonces *noncer                      // Pending state tracking virtual nonces
   214  
   215  	locals  *accountSet // Set of local transactions to exempt from eviction rules
   216  	journal *journal    // Journal of local transactions to back up to disk
   217  
   218  	reserve txpool.AddressReserver       // Address reserver to ensure exclusivity across subpools
   219  	pending map[common.Address]*list     // All currently processable transactions
   220  	queue   map[common.Address]*list     // Queued but non-processable transactions
   221  	beats   map[common.Address]time.Time // Last heartbeat from each known account
   222  	all     *lookup                      // All transactions to allow lookups
   223  	priced  *pricedList                  // All transactions sorted by price
   224  
   225  	reqResetCh      chan *txpoolResetRequest
   226  	reqPromoteCh    chan *accountSet
   227  	queueTxEventCh  chan *types.Transaction
   228  	reorgDoneCh     chan chan struct{}
   229  	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
   230  	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
   231  	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)
   232  
   233  	changesSinceReorg int // A counter for how many drops we've performed between reorg runs.
   234  }
   235  
   236  type txpoolResetRequest struct {
   237  	oldHead, newHead *types.Header
   238  }
   239  
   240  // New creates a new transaction pool to gather, sort and filter inbound
   241  // transactions from the network.
   242  func New(config Config, chain BlockChain) *LegacyPool {
   243  	// Sanitize the input to ensure no vulnerable gas prices are set
   244  	config = (&config).sanitize()
   245  
   246  	// Create the transaction pool with its initial settings
   247  	pool := &LegacyPool{
   248  		config:          config,
   249  		chain:           chain,
   250  		chainconfig:     chain.Config(),
   251  		signer:          types.LatestSigner(chain.Config()),
   252  		pending:         make(map[common.Address]*list),
   253  		queue:           make(map[common.Address]*list),
   254  		beats:           make(map[common.Address]time.Time),
   255  		all:             newLookup(),
   256  		reqResetCh:      make(chan *txpoolResetRequest),
   257  		reqPromoteCh:    make(chan *accountSet),
   258  		queueTxEventCh:  make(chan *types.Transaction),
   259  		reorgDoneCh:     make(chan chan struct{}),
   260  		reorgShutdownCh: make(chan struct{}),
   261  		initDoneCh:      make(chan struct{}),
   262  	}
   263  	pool.locals = newAccountSet(pool.signer)
   264  	for _, addr := range config.Locals {
   265  		log.Info("Setting new local account", "address", addr)
   266  		pool.locals.add(addr)
   267  	}
   268  	pool.priced = newPricedList(pool.all)
   269  
   270  	if !config.NoLocals && config.Journal != "" {
   271  		pool.journal = newTxJournal(config.Journal)
   272  	}
   273  	return pool
   274  }
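
        // New only allocates the in-memory structures; the pool is not operational
        // until Init is called, which normally happens when the pool is registered
        // with the unified txpool. A sketch from the caller's side (blockchain and
        // gasTip are assumed to exist, and the txpool.New signature is assumed from
        // the enclosing txpool package):
        //
        //	legacy := legacypool.New(legacypool.DefaultConfig, blockchain)
        //	pool, err := txpool.New(gasTip, blockchain, []txpool.SubPool{legacy})
        //	if err != nil {
        //		return err
        //	}
        //	defer pool.Close()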
   275  
   276  // Filter returns whether the given transaction can be consumed by the legacy
   277  // pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction.
   278  func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
   279  	switch tx.Type() {
   280  	case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType:
   281  		return true
   282  	default:
   283  		return false
   284  	}
   285  }
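
        // Filter is how the unified txpool routes incoming transactions between its
        // subpools: blob transactions fall through to the default branch here and are
        // handled by the blob pool instead. For example (illustrative):
        //
        //	pool.Filter(types.NewTx(&types.DynamicFeeTx{})) // true
        //	pool.Filter(types.NewTx(&types.BlobTx{}))       // false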
   286  
   287  // Init sets the gas price needed to keep a transaction in the pool and the chain
   288  // head to allow balance / nonce checks. The transaction journal will be loaded
   289  // from disk and filtered based on the provided starting settings. The internal
   290  // goroutines will be spun up and the pool deemed operational afterwards.
   291  func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
   292  	// Set the address reserver to request exclusive access to pooled accounts
   293  	pool.reserve = reserve
   294  
   295  	// Set the basic pool parameters
   296  	pool.gasTip.Store(uint256.NewInt(gasTip))
   297  
   298  	// Initialize the state with the head block, or fall back to an empty one
   299  	// in case the head state is not available (might occur when the node is
   300  	// not fully synced).
   301  	statedb, err := pool.chain.StateAt(head.Root)
   302  	if err != nil {
   303  		statedb, err = pool.chain.StateAt(types.EmptyRootHash)
   304  	}
   305  	if err != nil {
   306  		return err
   307  	}
   308  	pool.currentHead.Store(head)
   309  	pool.currentState = statedb
   310  	pool.pendingNonces = newNoncer(statedb)
   311  
   312  	// Start the reorg loop early, so it can handle requests generated during
   313  	// journal loading.
   314  	pool.wg.Add(1)
   315  	go pool.scheduleReorgLoop()
   316  
   317  	// If local transactions and journaling are enabled, load the journal from disk
   318  	if pool.journal != nil {
   319  		if err := pool.journal.load(pool.addLocals); err != nil {
   320  			log.Warn("Failed to load transaction journal", "err", err)
   321  		}
   322  		if err := pool.journal.rotate(pool.local()); err != nil {
   323  			log.Warn("Failed to rotate transaction journal", "err", err)
   324  		}
   325  	}
   326  	pool.wg.Add(1)
   327  	go pool.loop()
   328  	return nil
   329  }
   330  
   331  // loop is the transaction pool's main event loop, waiting for and reacting to
   332  // outside blockchain events as well as for various reporting and transaction
   333  // eviction events.
   334  func (pool *LegacyPool) loop() {
   335  	defer pool.wg.Done()
   336  
   337  	var (
   338  		prevPending, prevQueued, prevStales int
   339  
   340  		// Start the stats reporting and transaction eviction tickers
   341  		report  = time.NewTicker(statsReportInterval)
   342  		evict   = time.NewTicker(evictionInterval)
   343  		journal = time.NewTicker(pool.config.Rejournal)
   344  	)
   345  	defer report.Stop()
   346  	defer evict.Stop()
   347  	defer journal.Stop()
   348  
   349  	// Notify tests that the init phase is done
   350  	close(pool.initDoneCh)
   351  	for {
   352  		select {
   353  		// Handle pool shutdown
   354  		case <-pool.reorgShutdownCh:
   355  			return
   356  
   357  		// Handle stats reporting ticks
   358  		case <-report.C:
   359  			pool.mu.RLock()
   360  			pending, queued := pool.stats()
   361  			pool.mu.RUnlock()
   362  			stales := int(pool.priced.stales.Load())
   363  
   364  			if pending != prevPending || queued != prevQueued || stales != prevStales {
   365  				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
   366  				prevPending, prevQueued, prevStales = pending, queued, stales
   367  			}
   368  
   369  		// Handle inactive account transaction eviction
   370  		case <-evict.C:
   371  			pool.mu.Lock()
   372  			for addr := range pool.queue {
   373  				// Skip local transactions from the eviction mechanism
   374  				if pool.locals.contains(addr) {
   375  					continue
   376  				}
   377  				// Any non-locals old enough should be removed
   378  				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
   379  					list := pool.queue[addr].Flatten()
   380  					for _, tx := range list {
   381  						pool.removeTx(tx.Hash(), true, true)
   382  					}
   383  					queuedEvictionMeter.Mark(int64(len(list)))
   384  				}
   385  			}
   386  			pool.mu.Unlock()
   387  
   388  		// Handle local transaction journal rotation
   389  		case <-journal.C:
   390  			if pool.journal != nil {
   391  				pool.mu.Lock()
   392  				if err := pool.journal.rotate(pool.local()); err != nil {
   393  					log.Warn("Failed to rotate local tx journal", "err", err)
   394  				}
   395  				pool.mu.Unlock()
   396  			}
   397  		}
   398  	}
   399  }
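
        // Note on eviction granularity: the evict ticker above fires every
        // evictionInterval (one minute), so a non-local queued transaction is dropped
        // somewhere between Lifetime and Lifetime+evictionInterval after the last
        // heartbeat recorded for its account.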
   400  
   401  // Close terminates the transaction pool.
   402  func (pool *LegacyPool) Close() error {
   403  	// Terminate the pool reorger and return
   404  	close(pool.reorgShutdownCh)
   405  	pool.wg.Wait()
   406  
   407  	if pool.journal != nil {
   408  		pool.journal.close()
   409  	}
   410  	log.Info("Transaction pool stopped")
   411  	return nil
   412  }
   413  
   414  // Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
   415  // kept in sync with the main transaction pool's internal state.
   416  func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
   417  	wait := pool.requestReset(oldHead, newHead)
   418  	<-wait
   419  }
   420  
   421  // SubscribeTransactions registers a subscription for new transaction events,
   422  // supporting feeding only newly seen or also resurrected transactions.
   423  func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
   424  	// The legacy pool's internal shuffling makes it hard to separate newly
   425  	// discovered transactions from resurrected ones: both end up in the queue
   426  	// and reorgs run lazily, so distinguishing the two would need an extra
   427  	// marker. The reorgs flag is therefore not acted upon here.
   428  	return pool.txFeed.Subscribe(ch)
   429  }
   430  
   431  // SetGasTip updates the minimum gas tip required by the transaction pool for a
   432  // new transaction, and drops all transactions below this threshold.
   433  func (pool *LegacyPool) SetGasTip(tip *big.Int) {
   434  	pool.mu.Lock()
   435  	defer pool.mu.Unlock()
   436  
   437  	var (
   438  		newTip = uint256.MustFromBig(tip)
   439  		old    = pool.gasTip.Load()
   440  	)
   441  	pool.gasTip.Store(newTip)
   442  	// If the min miner fee increased, remove transactions below the new threshold
   443  	if newTip.Cmp(old) > 0 {
   444  		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
   445  		drop := pool.all.RemotesBelowTip(tip)
   446  		for _, tx := range drop {
   447  			pool.removeTx(tx.Hash(), false, true)
   448  		}
   449  		pool.priced.Removed(len(drop))
   450  	}
   451  	log.Info("Legacy pool tip threshold updated", "tip", newTip)
   452  }
   453  
   454  // Nonce returns the next nonce of an account, with all transactions executable
   455  // by the pool already applied on top.
   456  func (pool *LegacyPool) Nonce(addr common.Address) uint64 {
   457  	pool.mu.RLock()
   458  	defer pool.mu.RUnlock()
   459  
   460  	return pool.pendingNonces.get(addr)
   461  }
   462  
   463  // Stats retrieves the current pool stats, namely the number of pending and the
   464  // number of queued (non-executable) transactions.
   465  func (pool *LegacyPool) Stats() (int, int) {
   466  	pool.mu.RLock()
   467  	defer pool.mu.RUnlock()
   468  
   469  	return pool.stats()
   470  }
   471  
   472  // stats retrieves the current pool stats, namely the number of pending and the
   473  // number of queued (non-executable) transactions.
   474  func (pool *LegacyPool) stats() (int, int) {
   475  	pending := 0
   476  	for _, list := range pool.pending {
   477  		pending += list.Len()
   478  	}
   479  	queued := 0
   480  	for _, list := range pool.queue {
   481  		queued += list.Len()
   482  	}
   483  	return pending, queued
   484  }
   485  
   486  // Content retrieves the data content of the transaction pool, returning all the
   487  // pending as well as queued transactions, grouped by account and sorted by nonce.
   488  func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
   489  	pool.mu.Lock()
   490  	defer pool.mu.Unlock()
   491  
   492  	pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
   493  	for addr, list := range pool.pending {
   494  		pending[addr] = list.Flatten()
   495  	}
   496  	queued := make(map[common.Address][]*types.Transaction, len(pool.queue))
   497  	for addr, list := range pool.queue {
   498  		queued[addr] = list.Flatten()
   499  	}
   500  	return pending, queued
   501  }
   502  
   503  // ContentFrom retrieves the data content of the transaction pool, returning the
   504  // pending as well as queued transactions of this address, grouped by nonce.
   505  func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
   506  	pool.mu.RLock()
   507  	defer pool.mu.RUnlock()
   508  
   509  	var pending []*types.Transaction
   510  	if list, ok := pool.pending[addr]; ok {
   511  		pending = list.Flatten()
   512  	}
   513  	var queued []*types.Transaction
   514  	if list, ok := pool.queue[addr]; ok {
   515  		queued = list.Flatten()
   516  	}
   517  	return pending, queued
   518  }
   519  
   520  // Pending retrieves all currently processable transactions, grouped by origin
   521  // account and sorted by nonce.
   522  //
   523  // The transactions can also be pre-filtered by the dynamic fee components to
   524  // reduce allocations and load on downstream subsystems.
   525  func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
   526  	// If only blob transactions are requested, this pool is unsuitable as it
   527  	// contains none, don't even bother.
   528  	if filter.OnlyBlobTxs {
   529  		return nil
   530  	}
   531  	pool.mu.Lock()
   532  	defer pool.mu.Unlock()
   533  
   534  	// Convert the new uint256.Int types to the old big.Int ones used by the legacy pool
   535  	var (
   536  		minTipBig  *big.Int
   537  		baseFeeBig *big.Int
   538  	)
   539  	if filter.MinTip != nil {
   540  		minTipBig = filter.MinTip.ToBig()
   541  	}
   542  	if filter.BaseFee != nil {
   543  		baseFeeBig = filter.BaseFee.ToBig()
   544  	}
   545  	pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
   546  	for addr, list := range pool.pending {
   547  		txs := list.Flatten()
   548  
   549  		// If the miner requests tip enforcement, cap the lists now
   550  		if minTipBig != nil && !pool.locals.contains(addr) {
   551  			for i, tx := range txs {
   552  				if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 {
   553  					txs = txs[:i]
   554  					break
   555  				}
   556  			}
   557  		}
   558  		if len(txs) > 0 {
   559  			lazies := make([]*txpool.LazyTransaction, len(txs))
   560  			for i := 0; i < len(txs); i++ {
   561  				lazies[i] = &txpool.LazyTransaction{
   562  					Pool:      pool,
   563  					Hash:      txs[i].Hash(),
   564  					Tx:        txs[i],
   565  					Time:      txs[i].Time(),
   566  					GasFeeCap: uint256.MustFromBig(txs[i].GasFeeCap()),
   567  					GasTipCap: uint256.MustFromBig(txs[i].GasTipCap()),
   568  					Gas:       txs[i].Gas(),
   569  					BlobGas:   txs[i].BlobGas(),
   570  				}
   571  			}
   572  			pending[addr] = lazies
   573  		}
   574  	}
   575  	return pending
   576  }
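
        // Example (illustrative) of how a block producer might query the pool while
        // enforcing a 1 gwei tip over the current base fee; head is an assumed
        // *types.Header of the block being built on:
        //
        //	pending := pool.Pending(txpool.PendingFilter{
        //		MinTip:  uint256.NewInt(params.GWei),
        //		BaseFee: uint256.MustFromBig(head.BaseFee),
        //	})
        //	for _, lazies := range pending {
        //		_ = lazies // nonce-sorted transactions of a single account
        //	}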
   577  
   578  // Locals retrieves the accounts currently considered local by the pool.
   579  func (pool *LegacyPool) Locals() []common.Address {
   580  	pool.mu.Lock()
   581  	defer pool.mu.Unlock()
   582  
   583  	return pool.locals.flatten()
   584  }
   585  
   586  // local retrieves all currently known local transactions, grouped by origin
   587  // account and sorted by nonce. The returned transaction set is a copy and can be
   588  // freely modified by calling code.
   589  func (pool *LegacyPool) local() map[common.Address]types.Transactions {
   590  	txs := make(map[common.Address]types.Transactions)
   591  	for addr := range pool.locals.accounts {
   592  		if pending := pool.pending[addr]; pending != nil {
   593  			txs[addr] = append(txs[addr], pending.Flatten()...)
   594  		}
   595  		if queued := pool.queue[addr]; queued != nil {
   596  			txs[addr] = append(txs[addr], queued.Flatten()...)
   597  		}
   598  	}
   599  	return txs
   600  }
   601  
   602  // validateTxBasics checks whether a transaction is valid according to the consensus
   603  // rules, but does not check state-dependent validation such as sufficient balance.
   604  // This check is meant as an early check which only needs to be performed once,
   605  // and does not require the pool mutex to be held.
   606  func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error {
   607  	opts := &txpool.ValidationOptions{
   608  		Config: pool.chainconfig,
   609  		Accept: 0 |
   610  			1<<types.LegacyTxType |
   611  			1<<types.AccessListTxType |
   612  			1<<types.DynamicFeeTxType,
   613  		MaxSize: txMaxSize,
   614  		MinTip:  pool.gasTip.Load().ToBig(),
   615  	}
   616  	if local {
   617  		opts.MinTip = new(big.Int)
   618  	}
   619  	if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil {
   620  		return err
   621  	}
   622  	return nil
   623  }
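
        // The Accept field above is a bitmask over transaction types: a type T is
        // admitted when bit 1<<T is set. With legacy (0), access-list (1) and
        // dynamic-fee (2) transactions enabled, the mask works out to:
        //
        //	1<<0 | 1<<1 | 1<<2 = 0b0111 = 7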
   624  
   625  // validateTx checks whether a transaction is valid according to the consensus
   626  // rules and adheres to some heuristic limits of the local node (price and size).
   627  func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error {
   628  	opts := &txpool.ValidationOptionsWithState{
   629  		State: pool.currentState,
   630  
   631  		FirstNonceGap: nil, // Pool allows arbitrary arrival order, don't invalidate nonce gaps
   632  		UsedAndLeftSlots: func(addr common.Address) (int, int) {
   633  			var have int
   634  			if list := pool.pending[addr]; list != nil {
   635  				have += list.Len()
   636  			}
   637  			if list := pool.queue[addr]; list != nil {
   638  				have += list.Len()
   639  			}
   640  			return have, math.MaxInt
   641  		},
   642  		ExistingExpenditure: func(addr common.Address) *big.Int {
   643  			if list := pool.pending[addr]; list != nil {
   644  				return list.totalcost.ToBig()
   645  			}
   646  			return new(big.Int)
   647  		},
   648  		ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
   649  			if list := pool.pending[addr]; list != nil {
   650  				if tx := list.txs.Get(nonce); tx != nil {
   651  					return tx.Cost()
   652  				}
   653  			}
   654  			return nil
   655  		},
   656  	}
   657  	if err := txpool.ValidateTransactionWithState(tx, pool.signer, opts); err != nil {
   658  		return err
   659  	}
   660  	return nil
   661  }
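
        // Roughly, the callbacks above give the stateful validator enough data to
        // enforce (a sketch of the intent, not the exact downstream formula):
        //
        //	balance >= ExistingExpenditure(from) + tx.Cost() - ExistingCost(from, tx.Nonce())
        //
        // i.e. the sender must be able to fund all already-pending transactions plus
        // the new one, crediting back the cost of any same-nonce transaction it replaces.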
   662  
   663  // add validates a transaction and inserts it into the non-executable queue for later
   664  // pending promotion and execution. If the transaction is a replacement for an already
   665  // pending or queued one, it overwrites the previous transaction if its price is higher.
   666  //
   667  // If a newly added transaction is marked as local, its sending account will be
   668  // added to the allowlist, preventing any associated transaction from being dropped
   669  // out of the pool due to pricing constraints.
   670  func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
   671  	// If the transaction is already known, discard it
   672  	hash := tx.Hash()
   673  	if pool.all.Get(hash) != nil {
   674  		log.Trace("Discarding already known transaction", "hash", hash)
   675  		knownTxMeter.Mark(1)
   676  		return false, txpool.ErrAlreadyKnown
   677  	}
   678  	// Determine the local flag: if the transaction comes from a local source, or from
   679  	// the network with a sender previously marked as local, treat it as a local transaction.
   680  	isLocal := local || pool.locals.containsTx(tx)
   681  
   682  	// If the transaction fails basic validation, discard it
   683  	if err := pool.validateTx(tx, isLocal); err != nil {
   684  		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
   685  		invalidTxMeter.Mark(1)
   686  		return false, err
   687  	}
   688  	// already validated by this point
   689  	from, _ := types.Sender(pool.signer, tx)
   690  
   691  	// If the address is not yet known, request exclusivity to track the account
   692  	// only by this subpool until all transactions are evicted
   693  	var (
   694  		_, hasPending = pool.pending[from]
   695  		_, hasQueued  = pool.queue[from]
   696  	)
   697  	if !hasPending && !hasQueued {
   698  		if err := pool.reserve(from, true); err != nil {
   699  			return false, err
   700  		}
   701  		defer func() {
   702  			// If the transaction is rejected by some post-validation check, remove
   703  			// the lock on the reservation set.
   704  			//
   705  			// Note, `err` here is the named error return, which will be initialized
   706  			// by a return statement before running deferred methods. Take care with
   707  			// removing or subscoping err as it will break this clause.
   708  			if err != nil {
   709  				pool.reserve(from, false)
   710  			}
   711  		}()
   712  	}
   713  	// If the transaction pool is full, discard underpriced transactions
   714  	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
   715  		// If the new transaction is underpriced, don't accept it
   716  		if !isLocal && pool.priced.Underpriced(tx) {
   717  			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   718  			underpricedTxMeter.Mark(1)
   719  			return false, txpool.ErrUnderpriced
   720  		}
   721  
   722  		// We're about to replace a transaction. The reorg does a more thorough
   723  		// analysis of what to remove and how, but it runs async. We don't want to
   724  		// do too many replacements between reorg-runs, so we cap the number of
   725  		// replacements to 25% of the slots
   726  		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
   727  			throttleTxMeter.Mark(1)
   728  			return false, ErrTxPoolOverflow
   729  		}
   730  
   731  		// The new transaction is better than our worst ones, so make room for it.
   732  		// If it's a local transaction, forcibly discard all available transactions.
   733  		// Otherwise, if we can't make enough room for the new one, abort the operation.
   734  		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
   735  
   736  		// Special case: we still can't make room for the new remote transaction.
   737  		if !isLocal && !success {
   738  			log.Trace("Discarding overflown transaction", "hash", hash)
   739  			overflowedTxMeter.Mark(1)
   740  			return false, ErrTxPoolOverflow
   741  		}
   742  
   743  		// If the new transaction is a future transaction, it should never churn pending transactions
   744  		if !isLocal && pool.isGapped(from, tx) {
   745  			var replacesPending bool
   746  			for _, dropTx := range drop {
   747  				dropSender, _ := types.Sender(pool.signer, dropTx)
   748  				if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
   749  					replacesPending = true
   750  					break
   751  				}
   752  			}
   753  			// Add all transactions back to the priced queue
   754  			if replacesPending {
   755  				for _, dropTx := range drop {
   756  					pool.priced.Put(dropTx, false)
   757  				}
   758  				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
   759  				return false, txpool.ErrFutureReplacePending
   760  			}
   761  		}
   762  
   763  		// Kick out the underpriced remote transactions.
   764  		for _, tx := range drop {
   765  			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   766  			underpricedTxMeter.Mark(1)
   767  
   768  			sender, _ := types.Sender(pool.signer, tx)
   769  			dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if it's their last tx
   770  
   771  			pool.changesSinceReorg += dropped
   772  		}
   773  	}
   774  
   775  	// Try to replace an existing transaction in the pending pool
   776  	if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
   777  		// Nonce already pending, check if required price bump is met
   778  		inserted, old := list.Add(tx, pool.config.PriceBump)
   779  		if !inserted {
   780  			pendingDiscardMeter.Mark(1)
   781  			return false, txpool.ErrReplaceUnderpriced
   782  		}
   783  		// New transaction is better, replace old one
   784  		if old != nil {
   785  			pool.all.Remove(old.Hash())
   786  			pool.priced.Removed(1)
   787  			pendingReplaceMeter.Mark(1)
   788  		}
   789  		pool.all.Add(tx, isLocal)
   790  		pool.priced.Put(tx, isLocal)
   791  		pool.journalTx(from, tx)
   792  		pool.queueTxEvent(tx)
   793  		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
   794  
   795  		// Successful promotion, bump the heartbeat
   796  		pool.beats[from] = time.Now()
   797  		return old != nil, nil
   798  	}
   799  	// New transaction isn't replacing a pending one, push into queue
   800  	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
   801  	if err != nil {
   802  		return false, err
   803  	}
   804  	// Mark local addresses and journal local transactions
   805  	if local && !pool.locals.contains(from) {
   806  		log.Info("Setting new local account", "address", from)
   807  		pool.locals.add(from)
   808  		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
   809  	}
   810  	if isLocal {
   811  		localGauge.Inc(1)
   812  	}
   813  	pool.journalTx(from, tx)
   814  
   815  	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
   816  	return replaced, nil
   817  }
   818  
   819  // isGapped reports whether the given transaction cannot become executable due to a nonce gap that queued transactions do not fill.
   820  func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
   821  	// Short circuit if transaction falls within the scope of the pending list
   822  	// or matches the next pending nonce which can be promoted as an executable
   823  	// transaction afterwards. Note, the tx staleness is already checked in
   824  	// 'validateTx' function previously.
   825  	next := pool.pendingNonces.get(from)
   826  	if tx.Nonce() <= next {
   827  		return false
   828  	}
   829  	// The transaction has a nonce gap with pending list, it's only considered
   830  	// as executable if transactions in queue can fill up the nonce gap.
   831  	queue, ok := pool.queue[from]
   832  	if !ok {
   833  		return true
   834  	}
   835  	for nonce := next; nonce < tx.Nonce(); nonce++ {
   836  		if !queue.Contains(nonce) {
   837  			return true // txs in queue can't fill up the nonce gap
   838  		}
   839  	}
   840  	return false
   841  }
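
        // Worked example: if the sender's pending nonce is 5, a new transaction with
        // nonce 8 is only considered non-gapped when the queue already holds nonces
        // 5, 6 and 7; if any of them is missing, isGapped returns true and the new
        // transaction is treated as a future one.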
   842  
   843  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   844  //
   845  // Note, this method assumes the pool lock is held!
   846  func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
   847  	// Try to insert the transaction into the future queue
   848  	from, _ := types.Sender(pool.signer, tx) // already validated
   849  	if pool.queue[from] == nil {
   850  		pool.queue[from] = newList(false)
   851  	}
   852  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
   853  	if !inserted {
   854  		// An older transaction was better, discard this
   855  		queuedDiscardMeter.Mark(1)
   856  		return false, txpool.ErrReplaceUnderpriced
   857  	}
   858  	// Discard any previous transaction and mark this
   859  	if old != nil {
   860  		pool.all.Remove(old.Hash())
   861  		pool.priced.Removed(1)
   862  		queuedReplaceMeter.Mark(1)
   863  	} else {
   864  		// Nothing was replaced, bump the queued counter
   865  		queuedGauge.Inc(1)
   866  	}
   867  	// If the transaction isn't in the lookup set but is expected to be there,
   868  	// log an error.
   869  	if pool.all.Get(hash) == nil && !addAll {
   870  		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
   871  	}
   872  	if addAll {
   873  		pool.all.Add(tx, local)
   874  		pool.priced.Put(tx, local)
   875  	}
   876  	// If we've never recorded a heartbeat for this account, do it now.
   877  	if _, exist := pool.beats[from]; !exist {
   878  		pool.beats[from] = time.Now()
   879  	}
   880  	return old != nil, nil
   881  }
   882  
   883  // journalTx adds the specified transaction to the local disk journal if it is
   884  // deemed to have been sent from a local account.
   885  func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) {
   886  	// Only journal if it's enabled and the transaction is local
   887  	if pool.journal == nil || !pool.locals.contains(from) {
   888  		return
   889  	}
   890  	if err := pool.journal.insert(tx); err != nil {
   891  		log.Warn("Failed to journal local transaction", "err", err)
   892  	}
   893  }
   894  
   895  // promoteTx adds a transaction to the pending (processable) list of transactions
   896  // and returns whether it was inserted or whether an older transaction was better.
   897  //
   898  // Note, this method assumes the pool lock is held!
   899  func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
   900  	// Try to insert the transaction into the pending queue
   901  	if pool.pending[addr] == nil {
   902  		pool.pending[addr] = newList(true)
   903  	}
   904  	list := pool.pending[addr]
   905  
   906  	inserted, old := list.Add(tx, pool.config.PriceBump)
   907  	if !inserted {
   908  		// An older transaction was better, discard this
   909  		pool.all.Remove(hash)
   910  		pool.priced.Removed(1)
   911  		pendingDiscardMeter.Mark(1)
   912  		return false
   913  	}
   914  	// Otherwise discard any previous transaction and mark this
   915  	if old != nil {
   916  		pool.all.Remove(old.Hash())
   917  		pool.priced.Removed(1)
   918  		pendingReplaceMeter.Mark(1)
   919  	} else {
   920  		// Nothing was replaced, bump the pending counter
   921  		pendingGauge.Inc(1)
   922  	}
   923  	// Set the potentially new pending nonce and notify any subsystems of the new tx
   924  	pool.pendingNonces.set(addr, tx.Nonce()+1)
   925  
   926  	// Successful promotion, bump the heartbeat
   927  	pool.beats[addr] = time.Now()
   928  	return true
   929  }
   930  
   931  // addLocals enqueues a batch of transactions into the pool if they are valid, marking the
   932  // senders as local ones so that they bypass the pool's pricing constraints.
   933  //
   934  // This method is used to add transactions from the RPC API and performs synchronous pool
   935  // reorganization and event propagation.
   936  func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
   937  	return pool.Add(txs, !pool.config.NoLocals, true)
   938  }
   939  
   940  // addLocal enqueues a single local transaction into the pool if it is valid. This is
   941  // a convenience wrapper around addLocals.
   942  func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
   943  	return pool.addLocals([]*types.Transaction{tx})[0]
   944  }
   945  
   946  // addRemotes enqueues a batch of transactions into the pool if they are valid. If the
   947  // senders are not among the locally tracked ones, full pricing constraints will apply.
   948  //
   949  // This method is used to add transactions from the p2p network and does not wait for pool
   950  // reorganization and internal event propagation.
   951  func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
   952  	return pool.Add(txs, false, false)
   953  }
   954  
   955  // addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
   956  // wrapper around addRemotes.
   957  func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
   958  	return pool.addRemotes([]*types.Transaction{tx})[0]
   959  }
   960  
   961  // addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
   962  func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
   963  	return pool.Add(txs, false, true)
   964  }
   965  
   966  // addRemoteSync is like addRemote, but waits for pool reorganization. Tests use this method.
   967  func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
   968  	return pool.Add([]*types.Transaction{tx}, false, true)[0]
   969  }
   970  
   971  // Add enqueues a batch of transactions into the pool if they are valid. Depending
   972  // on the local flag, full pricing constraints will or will not be applied.
   973  //
   974  // If sync is set, the method will block until all internal maintenance related
   975  // to the add is finished. Only use this during tests for determinism!
   976  func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
   977  	// Do not treat as local if local transactions have been disabled
   978  	local = local && !pool.config.NoLocals
   979  
   980  	// Filter out known ones without obtaining the pool lock or recovering signatures
   981  	var (
   982  		errs = make([]error, len(txs))
   983  		news = make([]*types.Transaction, 0, len(txs))
   984  	)
   985  	for i, tx := range txs {
   986  		// If the transaction is known, pre-set the error slot
   987  		if pool.all.Get(tx.Hash()) != nil {
   988  			errs[i] = txpool.ErrAlreadyKnown
   989  			knownTxMeter.Mark(1)
   990  			continue
   991  		}
   992  		// Exclude transactions with basic errors (e.g. invalid signatures and
   993  		// insufficient intrinsic gas) as early as possible, and cache the senders
   994  		// in the transactions before obtaining the lock
   995  		if err := pool.validateTxBasics(tx, local); err != nil {
   996  			errs[i] = err
   997  			log.Trace("Discarding invalid transaction", "hash", tx.Hash(), "err", err)
   998  			invalidTxMeter.Mark(1)
   999  			continue
  1000  		}
  1001  		// Accumulate all unknown transactions for deeper processing
  1002  		news = append(news, tx)
  1003  	}
  1004  	if len(news) == 0 {
  1005  		return errs
  1006  	}
  1007  
  1008  	// Process all the new transactions and merge any errors into the original slice
  1009  	pool.mu.Lock()
  1010  	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
  1011  	pool.mu.Unlock()
  1012  
  1013  	var nilSlot = 0
  1014  	for _, err := range newErrs {
  1015  		for errs[nilSlot] != nil {
  1016  			nilSlot++
  1017  		}
  1018  		errs[nilSlot] = err
  1019  		nilSlot++
  1020  	}
  1021  	// Reorg the pool internals if needed and return
  1022  	done := pool.requestPromoteExecutables(dirtyAddrs)
  1023  	if sync {
  1024  		<-done
  1025  	}
  1026  	return errs
  1027  }
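
        // The returned error slice is positional: errs[i] corresponds to txs[i], so
        // callers can attribute failures per transaction. A sketch of typical use:
        //
        //	errs := pool.Add(txs, false, false)
        //	for i, err := range errs {
        //		if err != nil && !errors.Is(err, txpool.ErrAlreadyKnown) {
        //			log.Debug("Rejected transaction", "hash", txs[i].Hash(), "err", err)
        //		}
        //	}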
  1028  
  1029  // addTxsLocked attempts to queue a batch of transactions if they are valid.
  1030  // The transaction pool lock must be held.
  1031  func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
  1032  	dirty := newAccountSet(pool.signer)
  1033  	errs := make([]error, len(txs))
  1034  	for i, tx := range txs {
  1035  		replaced, err := pool.add(tx, local)
  1036  		errs[i] = err
  1037  		if err == nil && !replaced {
  1038  			dirty.addTx(tx)
  1039  		}
  1040  	}
  1041  	validTxMeter.Mark(int64(len(dirty.accounts)))
  1042  	return errs, dirty
  1043  }
  1044  
  1045  // Status returns the status (unknown/pending/queued) of a transaction
  1046  // identified by its hash.
  1047  func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
  1048  	tx := pool.get(hash)
  1049  	if tx == nil {
  1050  		return txpool.TxStatusUnknown
  1051  	}
  1052  	from, _ := types.Sender(pool.signer, tx) // already validated
  1053  
  1054  	pool.mu.RLock()
  1055  	defer pool.mu.RUnlock()
  1056  
  1057  	if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
  1058  		return txpool.TxStatusPending
  1059  	} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
  1060  		return txpool.TxStatusQueued
  1061  	}
  1062  	return txpool.TxStatusUnknown
  1063  }
  1064  
  1065  // Get returns a transaction if it is contained in the pool and nil otherwise.
  1066  func (pool *LegacyPool) Get(hash common.Hash) *types.Transaction {
  1067  	tx := pool.get(hash)
  1068  	if tx == nil {
  1069  		return nil
  1070  	}
  1071  	return tx
  1072  }
  1073  
  1074  // get returns a transaction if it is contained in the pool and nil otherwise.
  1075  func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
  1076  	return pool.all.Get(hash)
  1077  }
  1078  
  1079  // Has reports whether the txpool has a transaction cached with the
  1080  // given hash.
  1081  func (pool *LegacyPool) Has(hash common.Hash) bool {
  1082  	return pool.all.Get(hash) != nil
  1083  }
  1084  
  1085  // removeTx removes a single transaction from the queue, moving all subsequent
  1086  // transactions back to the future queue.
  1087  //
  1088  // If unreserve is false, the account will not be relinquished to the main txpool
  1089  // even if there are no more references to it. This is used to handle a race where
  1090  // a tx being added evicts a previously scheduled tx from the same account, which
  1091  // could otherwise lead to a premature release of the reservation lock.
  1092  //
  1093  // Returns the number of transactions removed from the pending queue.
  1094  func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int {
  1095  	// Fetch the transaction we wish to delete
  1096  	tx := pool.all.Get(hash)
  1097  	if tx == nil {
  1098  		return 0
  1099  	}
  1100  	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
  1101  
  1102  	// If after deletion there are no more transactions belonging to this account,
  1103  	// relinquish the address reservation. It's a bit convoluted to do this via a
  1104  	// defer, but it's safer given the many return pathways.
  1105  	if unreserve {
  1106  		defer func() {
  1107  			var (
  1108  				_, hasPending = pool.pending[addr]
  1109  				_, hasQueued  = pool.queue[addr]
  1110  			)
  1111  			if !hasPending && !hasQueued {
  1112  				pool.reserve(addr, false)
  1113  			}
  1114  		}()
  1115  	}
  1116  	// Remove it from the list of known transactions
  1117  	pool.all.Remove(hash)
  1118  	if outofbound {
  1119  		pool.priced.Removed(1)
  1120  	}
  1121  	if pool.locals.contains(addr) {
  1122  		localGauge.Dec(1)
  1123  	}
  1124  	// Remove the transaction from the pending lists and reset the account nonce
  1125  	if pending := pool.pending[addr]; pending != nil {
  1126  		if removed, invalids := pending.Remove(tx); removed {
  1127  			// If no more pending transactions are left, remove the list
  1128  			if pending.Empty() {
  1129  				delete(pool.pending, addr)
  1130  			}
  1131  			// Postpone any invalidated transactions
  1132  			for _, tx := range invalids {
  1133  				// Internal shuffle shouldn't touch the lookup set.
  1134  				pool.enqueueTx(tx.Hash(), tx, false, false)
  1135  			}
  1136  			// Update the account nonce if needed
  1137  			pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1138  			// Reduce the pending counter
  1139  			pendingGauge.Dec(int64(1 + len(invalids)))
  1140  			return 1 + len(invalids)
  1141  		}
  1142  	}
  1143  	// Transaction is in the future queue
  1144  	if future := pool.queue[addr]; future != nil {
  1145  		if removed, _ := future.Remove(tx); removed {
  1146  			// Reduce the queued counter
  1147  			queuedGauge.Dec(1)
  1148  		}
  1149  		if future.Empty() {
  1150  			delete(pool.queue, addr)
  1151  			delete(pool.beats, addr)
  1152  		}
  1153  	}
  1154  	return 0
  1155  }
  1156  
  1157  // requestReset requests a pool reset to the new head block.
  1158  // The returned channel is closed when the reset has occurred.
  1159  func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
  1160  	select {
  1161  	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
  1162  		return <-pool.reorgDoneCh
  1163  	case <-pool.reorgShutdownCh:
  1164  		return pool.reorgShutdownCh
  1165  	}
  1166  }
  1167  
  1168  // requestPromoteExecutables requests transaction promotion checks for the given addresses.
  1169  // The returned channel is closed when the promotion checks have occurred.
  1170  func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} {
  1171  	select {
  1172  	case pool.reqPromoteCh <- set:
  1173  		return <-pool.reorgDoneCh
  1174  	case <-pool.reorgShutdownCh:
  1175  		return pool.reorgShutdownCh
  1176  	}
  1177  }
  1178  
  1179  // queueTxEvent enqueues a transaction event to be sent in the next reorg run.
  1180  func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) {
  1181  	select {
  1182  	case pool.queueTxEventCh <- tx:
  1183  	case <-pool.reorgShutdownCh:
  1184  	}
  1185  }
  1186  
  1187  // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
  1188  // call those methods directly, but request them to be run via requestReset and
  1189  // requestPromoteExecutables instead.
  1190  func (pool *LegacyPool) scheduleReorgLoop() {
  1191  	defer pool.wg.Done()
  1192  
  1193  	var (
  1194  		curDone       chan struct{} // non-nil while runReorg is active
  1195  		nextDone      = make(chan struct{})
  1196  		launchNextRun bool
  1197  		reset         *txpoolResetRequest
  1198  		dirtyAccounts *accountSet
  1199  		queuedEvents  = make(map[common.Address]*sortedMap)
  1200  	)
  1201  	for {
  1202  		// Launch next background reorg if needed
  1203  		if curDone == nil && launchNextRun {
  1204  			// Run the background reorg and announcements
  1205  			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
  1206  
  1207  			// Prepare everything for the next round of reorg
  1208  			curDone, nextDone = nextDone, make(chan struct{})
  1209  			launchNextRun = false
  1210  
  1211  			reset, dirtyAccounts = nil, nil
  1212  			queuedEvents = make(map[common.Address]*sortedMap)
  1213  		}
  1214  
  1215  		select {
  1216  		case req := <-pool.reqResetCh:
  1217  			// Reset request: update head if request is already pending.
  1218  			if reset == nil {
  1219  				reset = req
  1220  			} else {
  1221  				reset.newHead = req.newHead
  1222  			}
  1223  			launchNextRun = true
  1224  			pool.reorgDoneCh <- nextDone
  1225  
  1226  		case req := <-pool.reqPromoteCh:
  1227  			// Promote request: update address set if request is already pending.
  1228  			if dirtyAccounts == nil {
  1229  				dirtyAccounts = req
  1230  			} else {
  1231  				dirtyAccounts.merge(req)
  1232  			}
  1233  			launchNextRun = true
  1234  			pool.reorgDoneCh <- nextDone
  1235  
  1236  		case tx := <-pool.queueTxEventCh:
  1237  			// Queue up the event, but don't schedule a reorg. It's up to the caller to
  1238  			// request one later if they want the events sent.
  1239  			addr, _ := types.Sender(pool.signer, tx)
  1240  			if _, ok := queuedEvents[addr]; !ok {
  1241  				queuedEvents[addr] = newSortedMap()
  1242  			}
  1243  			queuedEvents[addr].Put(tx)
  1244  
  1245  		case <-curDone:
  1246  			curDone = nil
  1247  
  1248  		case <-pool.reorgShutdownCh:
  1249  			// Wait for current run to finish.
  1250  			if curDone != nil {
  1251  				<-curDone
  1252  			}
  1253  			close(nextDone)
  1254  			return
  1255  		}
  1256  	}
  1257  }
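
        // The handshake above works as follows: requestReset and
        // requestPromoteExecutables receive nextDone via reorgDoneCh, i.e. the channel
        // that the *next* runReorg invocation will close. Waiting on that channel thus
        // guarantees the caller's request has been folded into a reorg run scheduled
        // after the request was accepted, even if another run is currently in flight.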
  1258  
  1259  // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
  1260  func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) {
  1261  	defer func(t0 time.Time) {
  1262  		reorgDurationTimer.Update(time.Since(t0))
  1263  	}(time.Now())
  1264  	defer close(done)
  1265  
  1266  	var promoteAddrs []common.Address
  1267  	if dirtyAccounts != nil && reset == nil {
  1268  		// Only dirty accounts need to be promoted, unless we're resetting.
  1269  		// For resets, all addresses in the tx queue will be promoted and
  1270  		// the flatten operation can be avoided.
  1271  		promoteAddrs = dirtyAccounts.flatten()
  1272  	}
  1273  	pool.mu.Lock()
  1274  	if reset != nil {
  1275  		// Reset from the old head to the new, rescheduling any reorged transactions
  1276  		pool.reset(reset.oldHead, reset.newHead)
  1277  
  1278  		// Nonces were reset, discard any events that became stale
  1279  		for addr := range events {
  1280  			events[addr].Forward(pool.pendingNonces.get(addr))
  1281  			if events[addr].Len() == 0 {
  1282  				delete(events, addr)
  1283  			}
  1284  		}
  1285  		// Reset needs promote for all addresses
  1286  		promoteAddrs = make([]common.Address, 0, len(pool.queue))
  1287  		for addr := range pool.queue {
  1288  			promoteAddrs = append(promoteAddrs, addr)
  1289  		}
  1290  	}
  1291  	// Check for pending transactions for every account that sent new ones
  1292  	promoted := pool.promoteExecutables(promoteAddrs)
  1293  
  1294  	// If a new block appeared, validate the pool of pending transactions. This will
  1295  	// remove any transaction that has been included in the block or was invalidated
  1296  	// because of another transaction (e.g. higher gas price).
  1297  	if reset != nil {
  1298  		pool.demoteUnexecutables()
  1299  		if reset.newHead != nil {
  1300  			if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
  1301  				pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead)
  1302  				pool.priced.SetBaseFee(pendingBaseFee)
  1303  			} else {
  1304  				pool.priced.Reheap()
  1305  			}
  1306  		}
  1307  		// Update all accounts to the latest known pending nonce
  1308  		nonces := make(map[common.Address]uint64, len(pool.pending))
  1309  		for addr, list := range pool.pending {
  1310  			highestPending := list.LastElement()
  1311  			nonces[addr] = highestPending.Nonce() + 1
  1312  		}
  1313  		pool.pendingNonces.setAll(nonces)
  1314  	}
  1315  	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
  1316  	pool.truncatePending()
  1317  	pool.truncateQueue()
  1318  
  1319  	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
  1320  	pool.changesSinceReorg = 0 // Reset change counter
  1321  	pool.mu.Unlock()
  1322  
  1323  	// Notify subsystems for newly added transactions
  1324  	for _, tx := range promoted {
  1325  		addr, _ := types.Sender(pool.signer, tx)
  1326  		if _, ok := events[addr]; !ok {
  1327  			events[addr] = newSortedMap()
  1328  		}
  1329  		events[addr].Put(tx)
  1330  	}
  1331  	if len(events) > 0 {
  1332  		var txs []*types.Transaction
  1333  		for _, set := range events {
  1334  			txs = append(txs, set.Flatten()...)
  1335  		}
  1336  		pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
  1337  	}
  1338  }
  1339  
  1340  // reset retrieves the current state of the blockchain and ensures the content
  1341  // of the transaction pool is valid with regard to the chain state.
  1342  func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
  1343  	// If we're reorging an old state, reinject all dropped transactions
  1344  	var reinject types.Transactions
  1345  
  1346  	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
  1347  		// If the reorg is too deep, avoid doing it (will happen during fast sync)
  1348  		oldNum := oldHead.Number.Uint64()
  1349  		newNum := newHead.Number.Uint64()
  1350  
  1351  		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
  1352  			log.Debug("Skipping deep transaction reorg", "depth", depth)
  1353  		} else {
  1354  			// Reorg seems shallow enough to pull in all transactions into memory
  1355  			var (
  1356  				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  1357  				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
  1358  			)
  1359  			if rem == nil {
  1360  				// This can happen if a setHead is performed, where we simply discard the old
  1361  				// head from the chain.
  1362  				// If that is the case, we don't have the lost transactions anymore, and
  1363  				// there's nothing to add
  1364  				if newNum >= oldNum {
  1365  					// If we reorged to the same or a higher number, then it's not a case of setHead
  1366  					log.Warn("Transaction pool reset with missing old head",
  1367  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1368  					return
  1369  				}
  1370  				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
  1371  				log.Debug("Skipping transaction reset caused by setHead",
  1372  					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1373  				// We still need to update the current state so that the lost transactions can be re-added by the user
  1374  			} else {
  1375  				if add == nil {
  1376  					// If the block for the new head is missing, something happened
  1377  					// between the firing of the newhead event and now: most likely a
  1378  					// reorg caused by sync reversion or an explicit setHead back to an
  1379  					// earlier block.
  1380  					log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
  1381  					return
  1382  				}
  1383  				var discarded, included types.Transactions
  1384  				for rem.NumberU64() > add.NumberU64() {
  1385  					discarded = append(discarded, rem.Transactions()...)
  1386  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1387  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1388  						return
  1389  					}
  1390  				}
  1391  				for add.NumberU64() > rem.NumberU64() {
  1392  					included = append(included, add.Transactions()...)
  1393  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1394  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1395  						return
  1396  					}
  1397  				}
  1398  				for rem.Hash() != add.Hash() {
  1399  					discarded = append(discarded, rem.Transactions()...)
  1400  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1401  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1402  						return
  1403  					}
  1404  					included = append(included, add.Transactions()...)
  1405  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1406  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1407  						return
  1408  					}
  1409  				}
  1410  				lost := make([]*types.Transaction, 0, len(discarded))
  1411  				for _, tx := range types.TxDifference(discarded, included) {
  1412  					if pool.Filter(tx) {
  1413  						lost = append(lost, tx)
  1414  					}
  1415  				}
  1416  				reinject = lost
  1417  			}
  1418  		}
  1419  	}
  1420  	// Initialize the internal state to the current head
  1421  	if newHead == nil {
  1422  		newHead = pool.chain.CurrentBlock() // Special case during testing
  1423  	}
  1424  	statedb, err := pool.chain.StateAt(newHead.Root)
  1425  	if err != nil {
  1426  		log.Error("Failed to reset txpool state", "err", err)
  1427  		return
  1428  	}
  1429  	pool.currentHead.Store(newHead)
  1430  	pool.currentState = statedb
  1431  	pool.pendingNonces = newNoncer(statedb)
  1432  
  1433  	// Inject any transactions discarded due to reorgs
  1434  	log.Debug("Reinjecting stale transactions", "count", len(reinject))
  1435  	core.SenderCacher.Recover(pool.signer, reinject)
  1436  	pool.addTxsLocked(reinject, false)
  1437  }
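
// Illustrative sketch (not part of the upstream file): the re-injection set a
// shallow reorg produces. Transactions mined on the old side of the fork but not
// on the new side are the candidates for re-injection; types.TxDifference computes
// exactly that set, keyed by transaction hash. The helper and values are made up.
func exampleReorgDifference() {
	mkTx := func(nonce uint64) *types.Transaction {
		to := common.Address{}
		return types.NewTx(&types.LegacyTx{Nonce: nonce, To: &to, Gas: 21000, GasPrice: big.NewInt(1), Value: big.NewInt(0)})
	}
	txA, txB, txC := mkTx(0), mkTx(1), mkTx(2)

	discarded := types.Transactions{txA, txB, txC} // mined on the old chain segment
	included := types.Transactions{txB}            // also mined on the new chain segment

	// txA and txC were "lost" in the reorg; reset re-adds them, subject to
	// pool.Filter, which rejects transaction types the legacy pool does not handle.
	for _, tx := range types.TxDifference(discarded, included) {
		log.Info("Would reinject", "hash", tx.Hash(), "nonce", tx.Nonce())
	}
}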
  1438  
  1439  // promoteExecutables moves transactions that have become processable from the
  1440  // future queue to the set of pending transactions. During this process, all
  1441  // invalidated transactions (low nonce, low balance) are deleted.
  1442  func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
  1443  	// Track the promoted transactions to broadcast them at once
  1444  	var promoted []*types.Transaction
  1445  
  1446  	// Iterate over all accounts and promote any executable transactions
  1447  	gasLimit := pool.currentHead.Load().GasLimit
  1448  	for _, addr := range accounts {
  1449  		list := pool.queue[addr]
  1450  		if list == nil {
  1451  			continue // Just in case someone calls with a non-existent account
  1452  		}
  1453  		// Drop all transactions that are deemed too old (low nonce)
  1454  		forwards := list.Forward(pool.currentState.GetNonce(addr))
  1455  		for _, tx := range forwards {
  1456  			hash := tx.Hash()
  1457  			pool.all.Remove(hash)
  1458  		}
  1459  		log.Trace("Removed old queued transactions", "count", len(forwards))
  1460  		// Drop all transactions that are too costly (low balance or out of gas)
  1461  		drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
  1462  		for _, tx := range drops {
  1463  			hash := tx.Hash()
  1464  			pool.all.Remove(hash)
  1465  		}
  1466  		log.Trace("Removed unpayable queued transactions", "count", len(drops))
  1467  		queuedNofundsMeter.Mark(int64(len(drops)))
  1468  
  1469  		// Gather all executable transactions and promote them
  1470  		readies := list.Ready(pool.pendingNonces.get(addr))
  1471  		for _, tx := range readies {
  1472  			hash := tx.Hash()
  1473  			if pool.promoteTx(addr, hash, tx) {
  1474  				promoted = append(promoted, tx)
  1475  			}
  1476  		}
  1477  		log.Trace("Promoted queued transactions", "count", len(promoted))
  1478  		queuedGauge.Dec(int64(len(readies)))
  1479  
  1480  		// Drop all transactions over the allowed limit
  1481  		var caps types.Transactions
  1482  		if !pool.locals.contains(addr) {
  1483  			caps = list.Cap(int(pool.config.AccountQueue))
  1484  			for _, tx := range caps {
  1485  				hash := tx.Hash()
  1486  				pool.all.Remove(hash)
  1487  				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
  1488  			}
  1489  			queuedRateLimitMeter.Mark(int64(len(caps)))
  1490  		}
  1491  		// Mark all the items dropped as removed
  1492  		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
  1493  		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1494  		if pool.locals.contains(addr) {
  1495  			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1496  		}
  1497  		// Delete the entire queue entry if it became empty.
  1498  		if list.Empty() {
  1499  			delete(pool.queue, addr)
  1500  			delete(pool.beats, addr)
  1501  			if _, ok := pool.pending[addr]; !ok {
  1502  				pool.reserve(addr, false)
  1503  			}
  1504  		}
  1505  	}
  1506  	return promoted
  1507  }
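
// Illustrative sketch (not part of the upstream file): the per-account promotion
// pipeline above, on a toy nonce -> tx map instead of the pool's sorted list type,
// with the balance/gas filtering step omitted. Names and values are made up.
func examplePromotionPipeline() {
	queue := map[uint64]string{3: "stale", 5: "a", 6: "b", 8: "gapped"}
	stateNonce, pendingNonce := uint64(5), uint64(5)

	// list.Forward(stateNonce): drop everything below the account's state nonce.
	for nonce := range queue {
		if nonce < stateNonce {
			delete(queue, nonce)
		}
	}
	// list.Ready(pendingNonce): promote the contiguous run starting at the next
	// expected nonce; nonce 8 stays queued because nonce 7 is missing.
	var promoted []uint64
	for nonce := pendingNonce; ; nonce++ {
		if _, ok := queue[nonce]; !ok {
			break
		}
		promoted = append(promoted, nonce)
		delete(queue, nonce)
	}
	var queued []uint64
	for nonce := range queue {
		queued = append(queued, nonce)
	}
	sort.Slice(queued, func(i, j int) bool { return queued[i] < queued[j] })
	log.Info("Toy promotion", "promoted", promoted, "queued", queued) // [5 6] and [8]
}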
  1508  
  1509  // truncatePending removes transactions from the pending queue if the pool is above the
  1510  // pending limit. The algorithm tries to reduce transaction counts by an approximately
  1511  // equal number for all accounts with many pending transactions.
  1512  func (pool *LegacyPool) truncatePending() {
  1513  	pending := uint64(0)
  1514  	for _, list := range pool.pending {
  1515  		pending += uint64(list.Len())
  1516  	}
  1517  	if pending <= pool.config.GlobalSlots {
  1518  		return
  1519  	}
  1520  
  1521  	pendingBeforeCap := pending
  1522  	// Assemble a spam order to penalize large transactors first
  1523  	spammers := prque.New[int64, common.Address](nil)
  1524  	for addr, list := range pool.pending {
  1525  		// Only evict transactions from high rollers
  1526  		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
  1527  			spammers.Push(addr, int64(list.Len()))
  1528  		}
  1529  	}
  1530  	// Gradually drop transactions from offenders
  1531  	offenders := []common.Address{}
  1532  	for pending > pool.config.GlobalSlots && !spammers.Empty() {
  1533  		// Retrieve the next offender if not local address
  1534  		offender, _ := spammers.Pop()
  1535  		offenders = append(offenders, offender)
  1536  
  1537  		// Equalize transaction counts until all are the same or below the threshold
  1538  		if len(offenders) > 1 {
  1539  			// Calculate the equalization threshold for all current offenders
  1540  			threshold := pool.pending[offender].Len()
  1541  
  1542  			// Iteratively reduce all offenders until below limit or threshold reached
  1543  			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
  1544  				for i := 0; i < len(offenders)-1; i++ {
  1545  					list := pool.pending[offenders[i]]
  1546  
  1547  					caps := list.Cap(list.Len() - 1)
  1548  					for _, tx := range caps {
  1549  						// Drop the transaction from the global pools too
  1550  						hash := tx.Hash()
  1551  						pool.all.Remove(hash)
  1552  
  1553  						// Update the account nonce to the dropped transaction
  1554  						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
  1555  						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1556  					}
  1557  					pool.priced.Removed(len(caps))
  1558  					pendingGauge.Dec(int64(len(caps)))
  1559  					if pool.locals.contains(offenders[i]) {
  1560  						localGauge.Dec(int64(len(caps)))
  1561  					}
  1562  					pending--
  1563  				}
  1564  			}
  1565  		}
  1566  	}
  1567  
  1568  	// If still above threshold, reduce to limit or min allowance
  1569  	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
  1570  		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
  1571  			for _, addr := range offenders {
  1572  				list := pool.pending[addr]
  1573  
  1574  				caps := list.Cap(list.Len() - 1)
  1575  				for _, tx := range caps {
  1576  					// Drop the transaction from the global pools too
  1577  					hash := tx.Hash()
  1578  					pool.all.Remove(hash)
  1579  
  1580  					// Update the account nonce to the dropped transaction
  1581  					pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1582  					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1583  				}
  1584  				pool.priced.Removed(len(caps))
  1585  				pendingGauge.Dec(int64(len(caps)))
  1586  				if pool.locals.contains(addr) {
  1587  					localGauge.Dec(int64(len(caps)))
  1588  				}
  1589  				pending--
  1590  			}
  1591  		}
  1592  	}
  1593  	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
  1594  }
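
// Illustrative sketch (not part of the upstream file): a numbers-only walk-through
// of the two eviction phases above. The limits, senders and counts are made up; the
// real code works on pool.pending lists and a priority queue, plain counters stand
// in here and local accounts are ignored.
func exampleTruncatePending() {
	const globalSlots, accountSlots = 6, 1
	counts := map[string]int{"A": 5, "B": 3, "C": 1} // pending tx count per sender
	pending := 9

	// Only senders above accountSlots become "spammers", processed largest first
	// (prque pops the highest-priority entry first). "C" is never touched.
	spammers := []string{"A", "B"}

	// Phase 1: equalize earlier offenders down to each newly popped offender's count.
	var offenders []string
	for _, offender := range spammers {
		if pending <= globalSlots {
			break
		}
		offenders = append(offenders, offender)
		if len(offenders) > 1 {
			threshold := counts[offender]
			for pending > globalSlots && counts[offenders[len(offenders)-2]] > threshold {
				for _, addr := range offenders[:len(offenders)-1] {
					counts[addr]--
					pending--
				}
			}
		}
	}
	// After phase 1: A=3, B=3, C=1, pending=7, still above the limit.

	// Phase 2: shave all offenders in lock step until the pool fits or every
	// offender is down to accountSlots.
	for pending > globalSlots && len(offenders) > 0 && counts[offenders[len(offenders)-1]] > accountSlots {
		for _, addr := range offenders {
			counts[addr]--
			pending--
		}
	}
	log.Info("Toy truncation", "counts", counts, "pending", pending) // A=2 B=2 C=1, pending=5
}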
  1595  
  1596  // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
  1597  func (pool *LegacyPool) truncateQueue() {
  1598  	queued := uint64(0)
  1599  	for _, list := range pool.queue {
  1600  		queued += uint64(list.Len())
  1601  	}
  1602  	if queued <= pool.config.GlobalQueue {
  1603  		return
  1604  	}
  1605  
  1606  	// Sort all accounts with queued transactions by heartbeat
  1607  	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
  1608  	for addr := range pool.queue {
  1609  		if !pool.locals.contains(addr) { // don't drop locals
  1610  			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
  1611  		}
  1612  	}
  1613  	sort.Sort(sort.Reverse(addresses))
  1614  
  1615  	// Drop transactions until the total is below the limit or only locals remain
  1616  	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
  1617  		addr := addresses[len(addresses)-1]
  1618  		list := pool.queue[addr.address]
  1619  
  1620  		addresses = addresses[:len(addresses)-1]
  1621  
  1622  		// Drop the account's entire list if it is no larger than the remaining overflow
  1623  		if size := uint64(list.Len()); size <= drop {
  1624  			for _, tx := range list.Flatten() {
  1625  				pool.removeTx(tx.Hash(), true, true)
  1626  			}
  1627  			drop -= size
  1628  			queuedRateLimitMeter.Mark(int64(size))
  1629  			continue
  1630  		}
  1631  		// Otherwise drop only the last few transactions
  1632  		txs := list.Flatten()
  1633  		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
  1634  			pool.removeTx(txs[i].Hash(), true, true)
  1635  			drop--
  1636  			queuedRateLimitMeter.Mark(1)
  1637  		}
  1638  	}
  1639  }
  1640  
  1641  // demoteUnexecutables removes invalid and processed transactions from the pool's
  1642  // executable/pending queue; any subsequent transactions that become unexecutable
  1643  // are moved back into the future queue.
  1644  //
  1645  // Note: transactions are not marked as removed in the priced list because re-heaping
  1646  // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
  1647  // to trigger a re-heap in this function.
  1648  func (pool *LegacyPool) demoteUnexecutables() {
  1649  	// Iterate over all accounts and demote any non-executable transactions
  1650  	gasLimit := pool.currentHead.Load().GasLimit
  1651  	for addr, list := range pool.pending {
  1652  		nonce := pool.currentState.GetNonce(addr)
  1653  
  1654  		// Drop all transactions that are deemed too old (low nonce)
  1655  		olds := list.Forward(nonce)
  1656  		for _, tx := range olds {
  1657  			hash := tx.Hash()
  1658  			pool.all.Remove(hash)
  1659  			log.Trace("Removed old pending transaction", "hash", hash)
  1660  		}
  1661  		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
  1662  		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
  1663  		for _, tx := range drops {
  1664  			hash := tx.Hash()
  1665  			log.Trace("Removed unpayable pending transaction", "hash", hash)
  1666  			pool.all.Remove(hash)
  1667  		}
  1668  		pendingNofundsMeter.Mark(int64(len(drops)))
  1669  
  1670  		for _, tx := range invalids {
  1671  			hash := tx.Hash()
  1672  			log.Trace("Demoting pending transaction", "hash", hash)
  1673  
  1674  			// Internal shuffle shouldn't touch the lookup set.
  1675  			pool.enqueueTx(hash, tx, false, false)
  1676  		}
  1677  		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1678  		if pool.locals.contains(addr) {
  1679  			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1680  		}
  1681  		// If there's a gap in front, alert (should never happen) and postpone all transactions
  1682  		if list.Len() > 0 && list.txs.Get(nonce) == nil {
  1683  			gapped := list.Cap(0)
  1684  			for _, tx := range gapped {
  1685  				hash := tx.Hash()
  1686  				log.Error("Demoting invalidated transaction", "hash", hash)
  1687  
  1688  				// Internal shuffle shouldn't touch the lookup set.
  1689  				pool.enqueueTx(hash, tx, false, false)
  1690  			}
  1691  			pendingGauge.Dec(int64(len(gapped)))
  1692  		}
  1693  		// Delete the entire pending entry if it became empty.
  1694  		if list.Empty() {
  1695  			delete(pool.pending, addr)
  1696  			if _, ok := pool.queue[addr]; !ok {
  1697  				pool.reserve(addr, false)
  1698  			}
  1699  		}
  1700  	}
  1701  }
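
// Illustrative sketch (not part of the upstream file): the nonce-gap check above in
// isolation. Once Forward has dropped everything below the state nonce, a missing
// transaction at exactly that nonce means nothing left in the list is executable,
// so the whole list is shuffled back into the future queue. Values are made up.
func exampleNonceGap() {
	stateNonce := uint64(5)
	pendingNonces := []uint64{7, 8} // no transaction at nonce 5 -> gap in front

	if len(pendingNonces) > 0 && pendingNonces[0] != stateNonce {
		// The real code calls list.Cap(0) and re-enqueues every returned transaction.
		log.Error("Toy demotion: gap in front of pending run", "expected", stateNonce, "have", pendingNonces[0])
	}
}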
  1702  
  1703  // addressByHeartbeat is an account address tagged with its last activity timestamp.
  1704  type addressByHeartbeat struct {
  1705  	address   common.Address
  1706  	heartbeat time.Time
  1707  }
  1708  
  1709  type addressesByHeartbeat []addressByHeartbeat
  1710  
  1711  func (a addressesByHeartbeat) Len() int           { return len(a) }
  1712  func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  1713  func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
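
// Illustrative sketch (not part of the upstream file): how truncateQueue consumes
// this ordering. Sorting with sort.Reverse puts the most recently active sender
// first, and truncateQueue pops victims from the *end* of the slice, so the sender
// with the oldest heartbeat loses its queued transactions first. The function below
// is hypothetical and its addresses and timestamps are made up.
func exampleHeartbeatOrdering() {
	now := time.Now()
	addrs := addressesByHeartbeat{
		{common.HexToAddress("0x01"), now.Add(-time.Minute)}, // stale
		{common.HexToAddress("0x02"), now},                   // fresh
		{common.HexToAddress("0x03"), now.Add(-time.Second)}, // in between
	}
	sort.Sort(sort.Reverse(addrs))

	for len(addrs) > 0 {
		victim := addrs[len(addrs)-1] // oldest heartbeat first: 0x01, then 0x03, then 0x02
		addrs = addrs[:len(addrs)-1]
		log.Info("Would evict queued transactions", "addr", victim.address)
	}
}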
  1714  
  1715  // accountSet is simply a set of addresses to check for existence, and a signer
  1716  // capable of deriving addresses from transactions.
  1717  type accountSet struct {
  1718  	accounts map[common.Address]struct{}
  1719  	signer   types.Signer
  1720  	cache    *[]common.Address
  1721  }
  1722  
  1723  // newAccountSet creates a new address set with an associated signer for sender
  1724  // derivations.
  1725  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  1726  	as := &accountSet{
  1727  		accounts: make(map[common.Address]struct{}, len(addrs)),
  1728  		signer:   signer,
  1729  	}
  1730  	for _, addr := range addrs {
  1731  		as.add(addr)
  1732  	}
  1733  	return as
  1734  }
  1735  
  1736  // contains checks if a given address is contained within the set.
  1737  func (as *accountSet) contains(addr common.Address) bool {
  1738  	_, exist := as.accounts[addr]
  1739  	return exist
  1740  }
  1741  
  1742  // containsTx checks if the sender of a given tx is within the set. If the sender
  1743  // cannot be derived, this method returns false.
  1744  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  1745  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1746  		return as.contains(addr)
  1747  	}
  1748  	return false
  1749  }
  1750  
  1751  // add inserts a new address into the set to track.
  1752  func (as *accountSet) add(addr common.Address) {
  1753  	as.accounts[addr] = struct{}{}
  1754  	as.cache = nil
  1755  }
  1756  
  1757  // addTx adds the sender of tx into the set.
  1758  func (as *accountSet) addTx(tx *types.Transaction) {
  1759  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1760  		as.add(addr)
  1761  	}
  1762  }
  1763  
  1764  // flatten returns the list of addresses within this set, also caching it for later
  1765  // reuse. The returned slice should not be changed!
  1766  func (as *accountSet) flatten() []common.Address {
  1767  	if as.cache == nil {
  1768  		accounts := make([]common.Address, 0, len(as.accounts))
  1769  		for account := range as.accounts {
  1770  			accounts = append(accounts, account)
  1771  		}
  1772  		as.cache = &accounts
  1773  	}
  1774  	return *as.cache
  1775  }
  1776  
  1777  // merge adds all addresses from the 'other' set into 'as'.
  1778  func (as *accountSet) merge(other *accountSet) {
  1779  	for addr := range other.accounts {
  1780  		as.accounts[addr] = struct{}{}
  1781  	}
  1782  	as.cache = nil
  1783  }
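
// Illustrative sketch (not part of the upstream file): how the set behaves around
// its flatten cache. The addresses are made up and types.HomesteadSigner is used
// only because the constructor accepts any signer.
func exampleAccountSet() {
	a, b := common.HexToAddress("0x01"), common.HexToAddress("0x02")

	as := newAccountSet(types.HomesteadSigner{}, a)
	log.Info("Membership", "a", as.contains(a), "b", as.contains(b)) // true, false

	first := as.flatten()  // computes and caches the address slice
	as.add(b)              // any mutation drops the cache ...
	second := as.flatten() // ... so this re-flattens with both addresses
	log.Info("Flatten sizes", "before", len(first), "after", len(second)) // 1, 2

	as.merge(newAccountSet(types.HomesteadSigner{}, common.HexToAddress("0x03")))
	log.Info("After merge", "size", len(as.flatten())) // 3, cache rebuilt again
}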
  1784  
  1785  // lookup is used internally by LegacyPool to track transactions while allowing
  1786  // lookup without mutex contention.
  1787  //
  1788  // Note, although this type is properly protected against concurrent access, it
  1789  // is **not** a type that should ever be mutated or even exposed outside of the
  1790  // transaction pool, since its internal state is tightly coupled with the pool's
  1791  // internal mechanisms. The sole purpose of the type is to permit out-of-bound
  1792  // peeking into the pool in LegacyPool.Get without having to acquire the widely scoped
  1793  // LegacyPool.mu mutex.
  1794  //
  1795  // This lookup set also tracks the notion of "local transactions", which is useful
  1796  // for building upper-level structure.
  1797  type lookup struct {
  1798  	slots   int
  1799  	lock    sync.RWMutex
  1800  	locals  map[common.Hash]*types.Transaction
  1801  	remotes map[common.Hash]*types.Transaction
  1802  }
  1803  
  1804  // newLookup returns a new lookup structure.
  1805  func newLookup() *lookup {
  1806  	return &lookup{
  1807  		locals:  make(map[common.Hash]*types.Transaction),
  1808  		remotes: make(map[common.Hash]*types.Transaction),
  1809  	}
  1810  }
  1811  
  1812  // Range calls f on each key and value present in the map. The callback passed
  1813  // should return whether iteration should continue.
  1814  // Callers need to specify which set (or both) to iterate over.
  1815  func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
  1816  	t.lock.RLock()
  1817  	defer t.lock.RUnlock()
  1818  
  1819  	if local {
  1820  		for key, value := range t.locals {
  1821  			if !f(key, value, true) {
  1822  				return
  1823  			}
  1824  		}
  1825  	}
  1826  	if remote {
  1827  		for key, value := range t.remotes {
  1828  			if !f(key, value, false) {
  1829  				return
  1830  			}
  1831  		}
  1832  	}
  1833  }
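
// Illustrative sketch (not part of the upstream file): using Range to stop early.
// RemotesBelowTip below iterates the remote set exhaustively; here the callback
// returns false after the first match instead. The transaction values are made up.
func exampleRangeEarlyStop() {
	to := common.Address{}
	cheap := types.NewTx(&types.LegacyTx{Nonce: 0, To: &to, Gas: 21000, GasPrice: big.NewInt(0), Value: big.NewInt(0)})

	l := newLookup()
	l.Add(cheap, false) // track it as a remote transaction

	var first *types.Transaction
	l.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		if tx.GasTipCapIntCmp(big.NewInt(1)) < 0 {
			first = tx
			return false // stop iterating after the first underpriced remote
		}
		return true
	}, false, true) // remotes only, as RemotesBelowTip does below
	if first != nil {
		log.Info("Found underpriced remote", "hash", first.Hash())
	}
}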
  1834  
  1835  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1836  func (t *lookup) Get(hash common.Hash) *types.Transaction {
  1837  	t.lock.RLock()
  1838  	defer t.lock.RUnlock()
  1839  
  1840  	if tx := t.locals[hash]; tx != nil {
  1841  		return tx
  1842  	}
  1843  	return t.remotes[hash]
  1844  }
  1845  
  1846  // GetLocal returns a transaction if it exists in the lookup, or nil if not found.
  1847  func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
  1848  	t.lock.RLock()
  1849  	defer t.lock.RUnlock()
  1850  
  1851  	return t.locals[hash]
  1852  }
  1853  
  1854  // GetRemote returns a transaction if it exists in the lookup, or nil if not found.
  1855  func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
  1856  	t.lock.RLock()
  1857  	defer t.lock.RUnlock()
  1858  
  1859  	return t.remotes[hash]
  1860  }
  1861  
  1862  // Count returns the current number of transactions in the lookup.
  1863  func (t *lookup) Count() int {
  1864  	t.lock.RLock()
  1865  	defer t.lock.RUnlock()
  1866  
  1867  	return len(t.locals) + len(t.remotes)
  1868  }
  1869  
  1870  // LocalCount returns the current number of local transactions in the lookup.
  1871  func (t *lookup) LocalCount() int {
  1872  	t.lock.RLock()
  1873  	defer t.lock.RUnlock()
  1874  
  1875  	return len(t.locals)
  1876  }
  1877  
  1878  // RemoteCount returns the current number of remote transactions in the lookup.
  1879  func (t *lookup) RemoteCount() int {
  1880  	t.lock.RLock()
  1881  	defer t.lock.RUnlock()
  1882  
  1883  	return len(t.remotes)
  1884  }
  1885  
  1886  // Slots returns the current number of slots used in the lookup.
  1887  func (t *lookup) Slots() int {
  1888  	t.lock.RLock()
  1889  	defer t.lock.RUnlock()
  1890  
  1891  	return t.slots
  1892  }
  1893  
  1894  // Add adds a transaction to the lookup.
  1895  func (t *lookup) Add(tx *types.Transaction, local bool) {
  1896  	t.lock.Lock()
  1897  	defer t.lock.Unlock()
  1898  
  1899  	t.slots += numSlots(tx)
  1900  	slotsGauge.Update(int64(t.slots))
  1901  
  1902  	if local {
  1903  		t.locals[tx.Hash()] = tx
  1904  	} else {
  1905  		t.remotes[tx.Hash()] = tx
  1906  	}
  1907  }
  1908  
  1909  // Remove removes a transaction from the lookup.
  1910  func (t *lookup) Remove(hash common.Hash) {
  1911  	t.lock.Lock()
  1912  	defer t.lock.Unlock()
  1913  
  1914  	tx, ok := t.locals[hash]
  1915  	if !ok {
  1916  		tx, ok = t.remotes[hash]
  1917  	}
  1918  	if !ok {
  1919  		log.Error("No transaction found to be deleted", "hash", hash)
  1920  		return
  1921  	}
  1922  	t.slots -= numSlots(tx)
  1923  	slotsGauge.Update(int64(t.slots))
  1924  
  1925  	delete(t.locals, hash)
  1926  	delete(t.remotes, hash)
  1927  }
  1928  
  1929  // RemoteToLocals migrates the transactions belonging to the given locals into the
  1930  // locals set. It is assumed that the locals set is thread-safe.
  1931  func (t *lookup) RemoteToLocals(locals *accountSet) int {
  1932  	t.lock.Lock()
  1933  	defer t.lock.Unlock()
  1934  
  1935  	var migrated int
  1936  	for hash, tx := range t.remotes {
  1937  		if locals.containsTx(tx) {
  1938  			t.locals[hash] = tx
  1939  			delete(t.remotes, hash)
  1940  			migrated += 1
  1941  		}
  1942  	}
  1943  	return migrated
  1944  }
  1945  
  1946  // RemotesBelowTip finds all remote transactions below the given tip threshold.
  1947  func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
  1948  	found := make(types.Transactions, 0, 128)
  1949  	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
  1950  		if tx.GasTipCapIntCmp(threshold) < 0 {
  1951  			found = append(found, tx)
  1952  		}
  1953  		return true
  1954  	}, false, true) // Only iterate remotes
  1955  	return found
  1956  }
  1957  
  1958  // numSlots calculates the number of slots needed for a single transaction.
  1959  func numSlots(tx *types.Transaction) int {
  1960  	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
  1961  }
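  1962  }

// Illustrative sketch (not part of the upstream file): the same ceiling division on
// raw byte sizes, to show how sizes map to slots (txSlotSize is 32KiB). A 1 KiB
// transaction occupies one slot, a 40,000-byte one two slots, and anything above
// three slots' worth of bytes, up to txMaxSize, occupies the full four slots.
func exampleSlotCounts() {
	for _, size := range []uint64{1024, 40_000, 3*txSlotSize + 1, txMaxSize} {
		slots := int((size + txSlotSize - 1) / txSlotSize)
		log.Info("Slot usage", "bytes", size, "slots", slots)
	}
}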