github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/core/tx_pool.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package core
    18  
    19  import (
    20  	"container/heap"
    21  	"context"
    22  	"errors"
    23  	"fmt"
    24  	"math"
    25  	"math/big"
    26  	"sort"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/holiman/uint256"
    32  	"go.opentelemetry.io/otel/attribute"
    33  	"go.opentelemetry.io/otel/trace"
    34  
    35  	"github.com/ethereum/go-ethereum/common"
    36  	"github.com/ethereum/go-ethereum/common/tracing"
    37  	"github.com/ethereum/go-ethereum/consensus/misc"
    38  	"github.com/ethereum/go-ethereum/core/state"
    39  	"github.com/ethereum/go-ethereum/core/types"
    40  	"github.com/ethereum/go-ethereum/event"
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/metrics"
    43  	"github.com/ethereum/go-ethereum/params"
    44  )
    45  
    46  const (
    47  	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    48  	chainHeadChanSize = 10
    49  
    50  	// txSlotSize is used to calculate how many data slots a single transaction
    51  	// takes up based on its size. The slots are used as DoS protection, ensuring
    52  	// that validating a new transaction remains a constant operation (in reality
    53  	// O(maxslots), where maxslots is currently 4).
    54  	txSlotSize = 32 * 1024
    55  
    56  	// txMaxSize is the maximum size a single transaction can have. This field has
    57  	// non-trivial consequences: larger transactions are significantly harder and
    58  	// more expensive to propagate; larger transactions also take more resources
    59  	// to validate whether they fit into the pool or not.
    60  	txMaxSize = 4 * txSlotSize // 128KB
    61  )
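
        // numSlots (defined elsewhere in this package) derives how many slots a
        // transaction occupies from txSlotSize. A minimal sketch of the rounding,
        // assuming the ceiling division described above (illustrative only, not
        // part of the original file):
        //
        //	func slotsForSize(size uint64) uint64 {
        //		return (size + txSlotSize - 1) / txSlotSize // 1 slot up to 32KB, 4 slots at 128KB
        //	}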
    62  
    63  var (
    64  	// ErrAlreadyKnown is returned if the transaction is already contained
    65  	// within the pool.
    66  	ErrAlreadyKnown = errors.New("already known")
    67  
    68  	// ErrInvalidSender is returned if the transaction contains an invalid signature.
    69  	ErrInvalidSender = errors.New("invalid sender")
    70  
    71  	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
    72  	// configured for the transaction pool.
    73  	ErrUnderpriced = errors.New("transaction underpriced")
    74  
    75  	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
    76  	// another remote transaction.
    77  	ErrTxPoolOverflow = errors.New("txpool is full")
    78  
    79  	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
    80  	// with a different one without the required price bump.
    81  	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
    82  
    83  	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
    84  	// maximum allowance of the current block.
    85  	ErrGasLimit = errors.New("exceeds block gas limit")
    86  
    87  	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
    88  	// transaction with a negative value.
    89  	ErrNegativeValue = errors.New("negative value")
    90  
    91  	// ErrOversizedData is returned if the input data of a transaction is greater
    92  	// than some meaningful limit a user might use. This is not a consensus error
    93  	// making the transaction invalid, rather a DOS protection.
    94  	ErrOversizedData = errors.New("oversized data")
    95  
    96  	// ErrFutureReplacePending is returned if a future transaction replaces a pending
    97  	// transaction. Future transactions should only be able to replace other future transactions.
    98  	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
    99  
   100  	// ErrOverdraft is returned if a transaction would cause the sender's balance to go negative,
   101  	// thus invalidating a potentially large number of transactions.
   102  	ErrOverdraft = errors.New("transaction would cause overdraft")
   103  )
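
        // Callers typically compare against these sentinel errors with errors.Is.
        // A minimal usage sketch (illustrative, not part of the original file),
        // assuming a signed transaction tx is in scope:
        //
        //	if err := pool.AddRemote(tx); err != nil {
        //		switch {
        //		case errors.Is(err, ErrAlreadyKnown):
        //			// duplicate announcement, safe to ignore
        //		case errors.Is(err, ErrUnderpriced), errors.Is(err, ErrReplaceUnderpriced):
        //			// the sender has to bump the gas price and resubmit
        //		default:
        //			log.Warn("Transaction rejected", "hash", tx.Hash(), "err", err)
        //		}
        //	}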
   104  
   105  var (
   106  	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
   107  	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
   108  )
   109  
   110  var (
   111  	// Metrics for the pending pool
   112  	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
   113  	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
   114  	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
   115  	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
   116  
   117  	// Metrics for the queued pool
   118  	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
   119  	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
   120  	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
   121  	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
   122  	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
   123  
   124  	// General tx metrics
   125  	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
   126  	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
   127  	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
   128  	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
   129  	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
   130  	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
   131  	// txpool reorgs.
   132  	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
   133  	// reorgDurationTimer measures how long a txpool reorg takes.
   134  	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
   135  	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
   136  	// that this number is pretty low, since txpool reorgs happen very frequently.
   137  	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
   138  
   139  	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
   140  	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
   141  	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
   142  	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
   143  
   144  	resetCacheGauge  = metrics.NewRegisteredGauge("txpool/resetcache", nil)
   145  	reinitCacheGauge = metrics.NewRegisteredGauge("txpool/reinitcache", nil)
   146  	hitCacheCounter  = metrics.NewRegisteredCounter("txpool/cachehit", nil)
   147  	missCacheCounter = metrics.NewRegisteredCounter("txpool/cachemiss", nil)
   148  
   149  	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
   150  )
   151  
   152  // TxStatus is the current status of a transaction as seen by the pool.
   153  type TxStatus uint
   154  
   155  const (
   156  	TxStatusUnknown TxStatus = iota
   157  	TxStatusQueued
   158  	TxStatusPending
   159  	TxStatusIncluded
   160  )
   161  
   162  // blockChain provides the state of the blockchain and the current gas limit to
   163  // perform some pre-checks in the tx pool and for event subscribers.
   164  type blockChain interface {
   165  	CurrentBlock() *types.Block
   166  	GetBlock(hash common.Hash, number uint64) *types.Block
   167  	StateAt(root common.Hash) (*state.StateDB, error)
   168  
   169  	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
   170  }
   171  
   172  // TxPoolConfig are the configuration parameters of the transaction pool.
   173  type TxPoolConfig struct {
   174  	Locals    []common.Address // Addresses that should be treated by default as local
   175  	NoLocals  bool             // Whether local transaction handling should be disabled
   176  	Journal   string           // Journal of local transactions to survive node restarts
   177  	Rejournal time.Duration    // Time interval to regenerate the local transaction journal
   178  
   179  	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
   180  	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
   181  
   182  	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
   183  	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
   184  	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
   185  	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
   186  
   187  	Lifetime            time.Duration // Maximum amount of time non-executable transactions are queued
   188  	AllowUnprotectedTxs bool          // Allow non-EIP-155 transactions
   189  }
   190  
   191  // DefaultTxPoolConfig contains the default configurations for the transaction
   192  // pool.
   193  var DefaultTxPoolConfig = TxPoolConfig{
   194  	Journal:   "transactions.rlp",
   195  	Rejournal: time.Hour,
   196  
   197  	PriceLimit: 1,
   198  	PriceBump:  10,
   199  
   200  	AccountSlots: 16,
   201  	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
   202  	AccountQueue: 64,
   203  	GlobalQueue:  1024,
   204  
   205  	Lifetime:            3 * time.Hour,
   206  	AllowUnprotectedTxs: false,
   207  }
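
        // A common way to customize the pool is to copy the defaults and override
        // individual fields. A sketch (illustrative, not part of the original file;
        // chainConfig and chain are assumed to be supplied by the node):
        //
        //	cfg := DefaultTxPoolConfig
        //	cfg.PriceLimit = 30_000_000_000 // hypothetical 30 gwei acceptance floor
        //	cfg.NoLocals = true             // treat every transaction as remote
        //	pool := NewTxPool(cfg, chainConfig, chain)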
   208  
   209  // sanitize checks the provided user configurations and changes anything that's
   210  // unreasonable or unworkable.
   211  func (config *TxPoolConfig) sanitize() TxPoolConfig {
   212  	conf := *config
   213  	if conf.Rejournal < time.Second {
   214  		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
   215  		conf.Rejournal = time.Second
   216  	}
   217  	if conf.PriceLimit < 1 {
   218  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
   219  		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
   220  	}
   221  	if conf.PriceBump < 1 {
   222  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
   223  		conf.PriceBump = DefaultTxPoolConfig.PriceBump
   224  	}
   225  	if conf.AccountSlots < 1 {
   226  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
   227  		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
   228  	}
   229  	if conf.GlobalSlots < 1 {
   230  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
   231  		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
   232  	}
   233  	if conf.AccountQueue < 1 {
   234  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
   235  		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
   236  	}
   237  	if conf.GlobalQueue < 1 {
   238  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
   239  		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
   240  	}
   241  	if conf.Lifetime < 1 {
   242  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
   243  		conf.Lifetime = DefaultTxPoolConfig.Lifetime
   244  	}
   245  	return conf
   246  }
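
        // For example, a zero-valued config is clamped back to workable values
        // rather than rejected (illustrative sketch, not part of the original file):
        //
        //	cfg := TxPoolConfig{} // everything zeroed
        //	cfg = cfg.sanitize()  // logs warnings; afterwards cfg.PriceBump == 10, cfg.GlobalSlots == 5120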
   247  
   248  // TxPool contains all currently known transactions. Transactions
   249  // enter the pool when they are received from the network or submitted
   250  // locally. They exit the pool when they are included in the blockchain.
   251  //
   252  // The pool separates processable transactions (which can be applied to the
   253  // current state) and future transactions. Transactions move between those
   254  // two states over time as they are received and processed.
   255  type TxPool struct {
   256  	config       TxPoolConfig
   257  	chainconfig  *params.ChainConfig
   258  	chain        blockChain
   259  	gasPrice     *big.Int
   260  	gasPriceUint *uint256.Int
   261  	gasPriceMu   sync.RWMutex
   262  
   263  	txFeed event.Feed
   264  	scope  event.SubscriptionScope
   265  	signer types.Signer
   266  	mu     sync.RWMutex
   267  
   268  	istanbul bool // Fork indicator whether we are in the istanbul stage.
   269  	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
   270  	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.
   271  
   272  	currentState  *state.StateDB // Current state in the blockchain head
   273  	pendingNonces *txNoncer      // Pending state tracking virtual nonces
   274  	currentMaxGas uint64         // Current gas limit for transaction caps
   275  
   276  	locals  *accountSet // Set of local transactions to exempt from eviction rules
   277  	journal *txJournal  // Journal of local transactions to back up to disk
   278  
   279  	pending      map[common.Address]*txList // All currently processable transactions
   280  	pendingCount int
   281  	pendingMu    sync.RWMutex
   282  	queue        map[common.Address]*txList   // Queued but non-processable transactions
   283  	beats        map[common.Address]time.Time // Last heartbeat from each known account
   284  	all          *txLookup                    // All transactions to allow lookups
   285  	priced       *txPricedList                // All transactions sorted by price
   286  
   287  	chainHeadCh     chan ChainHeadEvent
   288  	chainHeadSub    event.Subscription
   289  	reqResetCh      chan *txpoolResetRequest
   290  	reqPromoteCh    chan *accountSet
   291  	queueTxEventCh  chan *types.Transaction
   292  	reorgDoneCh     chan chan struct{}
   293  	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
   294  	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
   295  	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)
   296  
   297  	changesSinceReorg int // A counter for how many drops we've performed between reorg runs.
   298  
   299  	promoteTxCh chan struct{} // should be used only for tests
   300  }
   301  
   302  type txpoolResetRequest struct {
   303  	oldHead, newHead *types.Header
   304  }
   305  
   306  // NewTxPool creates a new transaction pool to gather, sort and filter inbound
   307  // transactions from the network.
   308  func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain, options ...func(pool *TxPool)) *TxPool {
   309  	// Sanitize the input to ensure no vulnerable gas prices are set
   310  	config = (&config).sanitize()
   311  
   312  	// Create the transaction pool with its initial settings
   313  	pool := &TxPool{
   314  		config:          config,
   315  		chainconfig:     chainconfig,
   316  		chain:           chain,
   317  		signer:          types.LatestSigner(chainconfig),
   318  		pending:         make(map[common.Address]*txList),
   319  		queue:           make(map[common.Address]*txList),
   320  		beats:           make(map[common.Address]time.Time),
   321  		all:             newTxLookup(),
   322  		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
   323  		reqResetCh:      make(chan *txpoolResetRequest),
   324  		reqPromoteCh:    make(chan *accountSet),
   325  		queueTxEventCh:  make(chan *types.Transaction),
   326  		reorgDoneCh:     make(chan chan struct{}),
   327  		reorgShutdownCh: make(chan struct{}),
   328  		initDoneCh:      make(chan struct{}),
   329  		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
   330  		gasPriceUint:    uint256.NewInt(config.PriceLimit),
   331  	}
   332  
   333  	pool.locals = newAccountSet(pool.signer)
   334  	for _, addr := range config.Locals {
   335  		log.Info("Setting new local account", "address", addr)
   336  		pool.locals.add(addr)
   337  	}
   338  	pool.priced = newTxPricedList(pool.all)
   339  	pool.reset(nil, chain.CurrentBlock().Header())
   340  
   341  	// apply options
   342  	for _, fn := range options {
   343  		fn(pool)
   344  	}
   345  
   346  	// Start the reorg loop early so it can handle requests generated during journal loading.
   347  	pool.wg.Add(1)
   348  	go pool.scheduleReorgLoop()
   349  
   350  	// If local transactions and journaling is enabled, load from disk
   351  	if !config.NoLocals && config.Journal != "" {
   352  		pool.journal = newTxJournal(config.Journal)
   353  
   354  		if err := pool.journal.load(pool.AddLocals); err != nil {
   355  			log.Warn("Failed to load transaction journal", "err", err)
   356  		}
   357  		if err := pool.journal.rotate(pool.local()); err != nil {
   358  			log.Warn("Failed to rotate transaction journal", "err", err)
   359  		}
   360  	}
   361  
   362  	// Subscribe events from blockchain and start the main event loop.
   363  	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
   364  	pool.wg.Add(1)
   365  	go pool.loop()
   366  
   367  	return pool
   368  }
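
        // The variadic options run after the initial reset but before journal
        // loading and the event loops, which makes them suitable for test hooks. A
        // hypothetical option (not part of the original file) wiring the test-only
        // promotion channel could look like:
        //
        //	func withPromoteTxCh(ch chan struct{}) func(*TxPool) {
        //		return func(p *TxPool) { p.promoteTxCh = ch }
        //	}
        //
        //	// pool := NewTxPool(cfg, chainConfig, chain, withPromoteTxCh(make(chan struct{}, 1)))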
   369  
   370  // loop is the transaction pool's main event loop, waiting for and reacting to
   371  // outside blockchain events as well as for various reporting and transaction
   372  // eviction events.
   373  func (pool *TxPool) loop() {
   374  	defer pool.wg.Done()
   375  
   376  	var (
   377  		prevPending, prevQueued, prevStales int
   378  		// Start the stats reporting and transaction eviction tickers
   379  		report  = time.NewTicker(statsReportInterval)
   380  		evict   = time.NewTicker(evictionInterval)
   381  		journal = time.NewTicker(pool.config.Rejournal)
   382  		// Track the previous head headers for transaction reorgs
   383  		head = pool.chain.CurrentBlock()
   384  	)
   385  	defer report.Stop()
   386  	defer evict.Stop()
   387  	defer journal.Stop()
   388  
   389  	// Notify tests that the init phase is done
   390  	close(pool.initDoneCh)
   391  	for {
   392  		select {
   393  		// Handle ChainHeadEvent
   394  		case ev := <-pool.chainHeadCh:
   395  			if ev.Block != nil {
   396  				pool.requestReset(head.Header(), ev.Block.Header())
   397  				head = ev.Block
   398  			}
   399  
   400  		// System shutdown.
   401  		case <-pool.chainHeadSub.Err():
   402  			close(pool.reorgShutdownCh)
   403  			return
   404  
   405  		// Handle stats reporting ticks
   406  		case <-report.C:
   407  			pending, queued := pool.stats()
   408  			stales := int(atomic.LoadInt64(&pool.priced.stales))
   409  
   410  			if pending != prevPending || queued != prevQueued || stales != prevStales {
   411  				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
   412  				prevPending, prevQueued, prevStales = pending, queued, stales
   413  			}
   414  
   415  		// Handle inactive account transaction eviction
   416  		case <-evict.C:
   417  			now := time.Now()
   418  
   419  			var (
   420  				list     types.Transactions
   421  				tx       *types.Transaction
   422  				toRemove []common.Hash
   423  			)
   424  
   425  			pool.mu.RLock()
   426  			for addr := range pool.queue {
   427  				// Skip local transactions from the eviction mechanism
   428  				if pool.locals.contains(addr) {
   429  					continue
   430  				}
   431  
   432  				// Any non-locals old enough should be removed
   433  				if now.Sub(pool.beats[addr]) > pool.config.Lifetime {
   434  					list = pool.queue[addr].Flatten()
   435  					for _, tx = range list {
   436  						toRemove = append(toRemove, tx.Hash())
   437  					}
   438  
   439  					queuedEvictionMeter.Mark(int64(len(list)))
   440  				}
   441  			}
   442  
   443  			pool.mu.RUnlock()
   444  
   445  			if len(toRemove) > 0 {
   446  				pool.mu.Lock()
   447  
   448  				var hash common.Hash
   449  
   450  				for _, hash = range toRemove {
   451  					pool.removeTx(hash, true)
   452  				}
   453  
   454  				pool.mu.Unlock()
   455  			}
   456  
   457  		// Handle local transaction journal rotation
   458  		case <-journal.C:
   459  			if pool.journal != nil {
   460  				pool.mu.Lock()
   461  				if err := pool.journal.rotate(pool.local()); err != nil {
   462  					log.Warn("Failed to rotate local tx journal", "err", err)
   463  				}
   464  				pool.mu.Unlock()
   465  			}
   466  		}
   467  	}
   468  }
   469  
   470  // Stop terminates the transaction pool.
   471  func (pool *TxPool) Stop() {
   472  	// Unsubscribe all subscriptions registered from txpool
   473  	pool.scope.Close()
   474  
   475  	// Unsubscribe subscriptions registered from blockchain
   476  	pool.chainHeadSub.Unsubscribe()
   477  	pool.wg.Wait()
   478  
   479  	if pool.journal != nil {
   480  		pool.journal.close()
   481  	}
   482  	log.Info("Transaction pool stopped")
   483  }
   484  
   485  // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
   486  // starts sending events to the given channel.
   487  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
   488  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   489  }
   490  
   491  // GasPrice returns the current gas price enforced by the transaction pool.
   492  func (pool *TxPool) GasPrice() *big.Int {
   493  	pool.gasPriceMu.RLock()
   494  	defer pool.gasPriceMu.RUnlock()
   495  
   496  	return new(big.Int).Set(pool.gasPrice)
   497  }
   498  
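        // GasPriceUint256 returns the current gas price enforced by the transaction
        // pool as a uint256.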
   499  func (pool *TxPool) GasPriceUint256() *uint256.Int {
   500  	pool.gasPriceMu.RLock()
   501  	defer pool.gasPriceMu.RUnlock()
   502  
   503  	return pool.gasPriceUint.Clone()
   504  }
   505  
   506  // SetGasPrice updates the minimum price required by the transaction pool for a
   507  // new transaction, and drops all transactions below this threshold.
   508  func (pool *TxPool) SetGasPrice(price *big.Int) {
   509  	pool.gasPriceMu.Lock()
   510  	defer pool.gasPriceMu.Unlock()
   511  
   512  	old := pool.gasPrice
   513  	pool.gasPrice = price
   514  
   515  	if pool.gasPriceUint == nil {
   516  		pool.gasPriceUint, _ = uint256.FromBig(price)
   517  	} else {
   518  		pool.gasPriceUint.SetFromBig(price)
   519  	}
   520  
   521  	// if the min miner fee increased, remove transactions below the new threshold
   522  	if price.Cmp(old) > 0 {
   523  		pool.mu.Lock()
   524  		defer pool.mu.Unlock()
   525  
   526  		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
   527  		drop := pool.all.RemotesBelowTip(price)
   528  		for _, tx := range drop {
   529  			pool.removeTx(tx.Hash(), false)
   530  		}
   531  
   532  		pool.priced.Removed(len(drop))
   533  	}
   534  
   535  	log.Info("Transaction pool price threshold updated", "price", price)
   536  }
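
        // A minimal usage sketch (illustrative, not part of the original file):
        // raising the floor to a hypothetical 2 gwei drops remote transactions
        // tipping below it, while locals are unaffected.
        //
        //	pool.SetGasPrice(new(big.Int).SetUint64(2 * params.GWei))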
   537  
   538  // Nonce returns the next nonce of an account, with all transactions executable
   539  // by the pool already applied on top.
   540  func (pool *TxPool) Nonce(addr common.Address) uint64 {
   541  	pool.mu.RLock()
   542  	defer pool.mu.RUnlock()
   543  
   544  	return pool.pendingNonces.get(addr)
   545  }
   546  
   547  // Stats retrieves the current pool stats, namely the number of pending and the
   548  // number of queued (non-executable) transactions.
   549  func (pool *TxPool) Stats() (int, int) {
   550  	return pool.stats()
   551  }
   552  
   553  // stats retrieves the current pool stats, namely the number of pending and the
   554  // number of queued (non-executable) transactions.
   555  func (pool *TxPool) stats() (int, int) {
   556  	pending := 0
   557  
   558  	pool.pendingMu.RLock()
   559  	for _, list := range pool.pending {
   560  		pending += list.Len()
   561  	}
   562  	pool.pendingMu.RUnlock()
   563  
   564  	pool.mu.RLock()
   565  
   566  	queued := 0
   567  	for _, list := range pool.queue {
   568  		queued += list.Len()
   569  	}
   570  
   571  	pool.mu.RUnlock()
   572  
   573  	return pending, queued
   574  }
   575  
   576  // Content retrieves the data content of the transaction pool, returning all the
   577  // pending as well as queued transactions, grouped by account and sorted by nonce.
   578  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   579  	pending := make(map[common.Address]types.Transactions)
   580  
   581  	pool.pendingMu.RLock()
   582  	for addr, list := range pool.pending {
   583  		pending[addr] = list.Flatten()
   584  	}
   585  	pool.pendingMu.RUnlock()
   586  
   587  	queued := make(map[common.Address]types.Transactions)
   588  
   589  	pool.mu.RLock()
   590  
   591  	for addr, list := range pool.queue {
   592  		queued[addr] = list.Flatten()
   593  	}
   594  
   595  	pool.mu.RUnlock()
   596  
   597  	return pending, queued
   598  }
   599  
   600  // ContentFrom retrieves the data content of the transaction pool, returning the
   601  // pending as well as queued transactions of this address, grouped by nonce.
   602  func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
   603  	var pending types.Transactions
   604  
   605  	pool.pendingMu.RLock()
   606  	if list, ok := pool.pending[addr]; ok {
   607  		pending = list.Flatten()
   608  	}
   609  	pool.pendingMu.RUnlock()
   610  
   611  	pool.mu.RLock()
   612  
   613  	var queued types.Transactions
   614  	if list, ok := pool.queue[addr]; ok {
   615  		queued = list.Flatten()
   616  	}
   617  
   618  	pool.mu.RUnlock()
   619  
   620  	return pending, queued
   621  }
   622  
   623  // Pending retrieves all currently processable transactions, grouped by origin
   624  // account and sorted by nonce. The returned transaction set is a copy and can be
   625  // freely modified by calling code.
   626  //
   627  // The enforceTips parameter can be used to apply extra filtering to the pending
   628  // transactions, returning only those whose **effective** tip is large enough for
   629  // the next pending execution environment.
   630  //
   631  //nolint:gocognit
   632  func (pool *TxPool) Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions {
   633  	pending := make(map[common.Address]types.Transactions, 10)
   634  
   635  	tracing.Exec(ctx, "TxpoolPending", "txpool.Pending()", func(ctx context.Context, span trace.Span) {
   636  		tracing.ElapsedTime(ctx, span, "txpool.Pending.RLock()", func(ctx context.Context, s trace.Span) {
   637  			pool.pendingMu.RLock()
   638  		})
   639  
   640  		defer pool.pendingMu.RUnlock()
   641  
   642  		pendingAccounts := len(pool.pending)
   643  
   644  		var pendingTxs int
   645  
   646  		tracing.ElapsedTime(ctx, span, "Loop", func(ctx context.Context, s trace.Span) {
   647  			gasPriceUint := uint256.NewInt(0)
   648  			baseFee := uint256.NewInt(0)
   649  
   650  			for addr, list := range pool.pending {
   651  				txs := list.Flatten()
   652  
   653  				// If the miner requests tip enforcement, cap the lists now
   654  				if enforceTips && !pool.locals.contains(addr) {
   655  					for i, tx := range txs {
   656  						pool.pendingMu.RUnlock()
   657  
   658  						pool.gasPriceMu.RLock()
   659  						if pool.gasPriceUint != nil {
   660  							gasPriceUint.Set(pool.gasPriceUint)
   661  						}
   662  
   663  						pool.priced.urgent.baseFeeMu.Lock()
   664  						if pool.priced.urgent.baseFee != nil {
   665  							baseFee.Set(pool.priced.urgent.baseFee)
   666  						}
   667  						pool.priced.urgent.baseFeeMu.Unlock()
   668  
   669  						pool.gasPriceMu.RUnlock()
   670  
   671  						pool.pendingMu.RLock()
   672  
   673  						if tx.EffectiveGasTipUintLt(gasPriceUint, baseFee) {
   674  							txs = txs[:i]
   675  							break
   676  						}
   677  					}
   678  				}
   679  
   680  				if len(txs) > 0 {
   681  					pending[addr] = txs
   682  					pendingTxs += len(txs)
   683  				}
   684  			}
   685  
   686  			tracing.SetAttributes(span,
   687  				attribute.Int("pending-transactions", pendingTxs),
   688  				attribute.Int("pending-accounts", pendingAccounts),
   689  			)
   690  		})
   691  	})
   692  
   693  	return pending
   694  }
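
        // The effective tip compared against the pool's price floor is the smaller
        // of the tip cap and whatever remains of the fee cap once the base fee is
        // paid. A big.Int sketch of that comparison, assuming EIP-1559 semantics
        // (illustrative, not part of the original file):
        //
        //	func effectiveTip(feeCap, tipCap, baseFee *big.Int) *big.Int {
        //		tip := new(big.Int).Sub(feeCap, baseFee) // negative if feeCap < baseFee
        //		if tip.Cmp(tipCap) > 0 {
        //			tip = tipCap
        //		}
        //		return tip
        //	}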
   695  
   696  // Locals retrieves the accounts currently considered local by the pool.
   697  func (pool *TxPool) Locals() []common.Address {
   698  	return pool.locals.flatten()
   699  }
   700  
   701  // local retrieves all currently known local transactions, grouped by origin
   702  // account and sorted by nonce. The returned transaction set is a copy and can be
   703  // freely modified by calling code.
   704  func (pool *TxPool) local() map[common.Address]types.Transactions {
   705  	txs := make(map[common.Address]types.Transactions)
   706  
   707  	pool.locals.m.RLock()
   708  	defer pool.locals.m.RUnlock()
   709  
   710  	for addr := range pool.locals.accounts {
   711  		pool.pendingMu.RLock()
   712  		if pending := pool.pending[addr]; pending != nil {
   713  			txs[addr] = append(txs[addr], pending.Flatten()...)
   714  		}
   715  		pool.pendingMu.RUnlock()
   716  
   717  		if queued := pool.queue[addr]; queued != nil {
   718  			txs[addr] = append(txs[addr], queued.Flatten()...)
   719  		}
   720  	}
   721  
   722  	return txs
   723  }
   724  
   725  // validateTx checks whether a transaction is valid according to the consensus
   726  // rules and adheres to some heuristic limits of the local node (price and size).
   727  func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
   728  	// Accept only legacy transactions until EIP-2718/2930 activates.
   729  	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
   730  		return ErrTxTypeNotSupported
   731  	}
   732  
   733  	// Reject dynamic fee transactions until EIP-1559 activates.
   734  	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
   735  		return ErrTxTypeNotSupported
   736  	}
   737  
   738  	// Reject transactions over defined size to prevent DOS attacks
   739  	if uint64(tx.Size()) > txMaxSize {
   740  		return ErrOversizedData
   741  	}
   742  	// Check whether the init code size has been exceeded.
   743  	// TODO: Add a hardfork check here while pulling upstream changes.
   744  	if tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
   745  		return fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
   746  	}
   747  	// Transactions can't be negative. This may never happen using RLP decoded
   748  	// transactions but may occur if you create a transaction using the RPC.
   749  	if tx.Value().Sign() < 0 {
   750  		return ErrNegativeValue
   751  	}
   752  
   753  	// Ensure the transaction doesn't exceed the current block limit gas.
   754  	if pool.currentMaxGas < tx.Gas() {
   755  		return ErrGasLimit
   756  	}
   757  
   758  	// Sanity check for extremely large numbers
   759  	gasFeeCap := tx.GasFeeCapRef()
   760  	if gasFeeCap.BitLen() > 256 {
   761  		return ErrFeeCapVeryHigh
   762  	}
   763  
   764  	// do NOT use uint256 here. results vs *big.Int are different
   765  	gasTipCap := tx.GasTipCapRef()
   766  	if gasTipCap.BitLen() > 256 {
   767  		return ErrTipVeryHigh
   768  	}
   769  
   770  	// Ensure gasFeeCap is greater than or equal to gasTipCap.
   771  	gasTipCapU, _ := uint256.FromBig(gasTipCap)
   772  	if tx.GasFeeCapUIntLt(gasTipCapU) {
   773  		return ErrTipAboveFeeCap
   774  	}
   775  
   776  	// Make sure the transaction is signed properly.
   777  	from, err := types.Sender(pool.signer, tx)
   778  	if err != nil && !pool.config.AllowUnprotectedTxs {
   779  		return ErrInvalidSender
   780  	}
   781  
   782  	// Drop non-local transactions under our own minimal accepted gas price or tip
   783  	pool.gasPriceMu.RLock()
   784  
   785  	if !local && tx.GasTipCapUIntLt(pool.gasPriceUint) {
   786  		pool.gasPriceMu.RUnlock()
   787  
   788  		return ErrUnderpriced
   789  	}
   790  
   791  	pool.gasPriceMu.RUnlock()
   792  
   793  	// Ensure the transaction adheres to nonce ordering
   794  	if pool.currentState.GetNonce(from) > tx.Nonce() {
   795  		return ErrNonceTooLow
   796  	}
   797  
   798  	// Transactor should have enough funds to cover the costs
   799  	// cost == V + GP * GL
   800  	balance := pool.currentState.GetBalance(from)
   801  	if balance.Cmp(tx.Cost()) < 0 {
   802  		return ErrInsufficientFunds
   803  	}
   804  	// Verify that replacing transactions will not result in overdraft
   805  	list := pool.pending[from]
   806  	if list != nil { // Sender already has pending txs
   807  		sum := new(big.Int).Add(tx.Cost(), list.totalcost)
   808  		if repl := list.txs.Get(tx.Nonce()); repl != nil {
   809  			// Deduct the cost of a transaction replaced by this
   810  			sum.Sub(sum, repl.Cost())
   811  		}
   812  
   813  		if balance.Cmp(sum) < 0 {
   814  			log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum)
   815  			return ErrOverdraft
   816  		}
   817  	}
   818  	// Ensure the transaction has more gas than the basic tx fee.
   819  	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
   820  	if err != nil {
   821  		return err
   822  	}
   823  
   824  	if tx.Gas() < intrGas {
   825  		return ErrIntrinsicGas
   826  	}
   827  
   828  	return nil
   829  }
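
        // The balance check above uses tx.Cost(), the worst-case charge V + GP * GL.
        // A sketch of that arithmetic (illustrative, not part of the original file):
        //
        //	func worstCaseCost(value, gasFeeCap *big.Int, gasLimit uint64) *big.Int {
        //		cost := new(big.Int).Mul(gasFeeCap, new(big.Int).SetUint64(gasLimit))
        //		return cost.Add(cost, value)
        //	}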
   830  
   831  // add validates a transaction and inserts it into the non-executable queue for later
   832  // pending promotion and execution. If the transaction is a replacement for an already
   833  // pending or queued one, it overwrites the previous transaction if its price is higher.
   834  //
   835  // If a newly added transaction is marked as local, its sending account will be
   836  // added to the allowlist, preventing any associated transaction from being dropped
   837  // out of the pool due to pricing constraints.
   838  func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
   839  	// If the transaction is already known, discard it
   840  	hash := tx.Hash()
   841  	if pool.all.Get(hash) != nil {
   842  		log.Trace("Discarding already known transaction", "hash", hash)
   843  		knownTxMeter.Mark(1)
   844  		return false, ErrAlreadyKnown
   845  	}
   846  	// Determine the local flag. If the transaction comes from a local source, or
   847  	// arrives from the network with a sender previously marked as local, treat it as local.
   848  	isLocal := local || pool.locals.containsTx(tx)
   849  
   850  	// If the transaction fails basic validation, discard it
   851  	if err := pool.validateTx(tx, isLocal); err != nil {
   852  		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
   853  		invalidTxMeter.Mark(1)
   854  		return false, err
   855  	}
   856  
   857  	// already validated by this point
   858  	from, _ := types.Sender(pool.signer, tx)
   859  
   860  	// If the transaction pool is full, discard underpriced transactions
   861  	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
   862  		// If the new transaction is underpriced, don't accept it
   863  		if !isLocal && pool.priced.Underpriced(tx) {
   864  			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint())
   865  			underpricedTxMeter.Mark(1)
   866  			return false, ErrUnderpriced
   867  		}
   868  
   869  		// We're about to replace a transaction. The reorg does a more thorough
   870  		// analysis of what to remove and how, but it runs async. We don't want to
   871  		// do too many replacements between reorg-runs, so we cap the number of
   872  		// replacements to 25% of the slots
   873  		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
   874  			throttleTxMeter.Mark(1)
   875  			return false, ErrTxPoolOverflow
   876  		}
   877  
   878  		// New transaction is better than our worst ones, make room for it.
   879  		// If it's a local transaction, forcibly discard all available transactions.
   880  		// Otherwise if we can't make enough room for the new one, abort the operation.
   881  		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
   882  
   883  		// Special case, we still can't make room for the new remote one.
   884  		if !isLocal && !success {
   885  			log.Trace("Discarding overflown transaction", "hash", hash)
   886  			overflowedTxMeter.Mark(1)
   887  			return false, ErrTxPoolOverflow
   888  		}
   889  		// If the new transaction is a future transaction, it should never churn pending transactions
   890  		if pool.isFuture(from, tx) {
   891  			var replacesPending bool
   892  
   893  			for _, dropTx := range drop {
   894  				dropSender, _ := types.Sender(pool.signer, dropTx)
   895  				if list := pool.pending[dropSender]; list != nil && list.Overlaps(dropTx) {
   896  					replacesPending = true
   897  					break
   898  				}
   899  			}
   900  			// Add all transactions back to the priced queue
   901  			if replacesPending {
   902  				for _, dropTx := range drop {
   903  					heap.Push(&pool.priced.urgent, dropTx)
   904  				}
   905  
   906  				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
   907  
   908  				return false, ErrFutureReplacePending
   909  			}
   910  		}
   911  		// Kick out the underpriced remote transactions.
   912  		for _, tx := range drop {
   913  			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint())
   914  			underpricedTxMeter.Mark(1)
   915  
   916  			dropped := pool.removeTx(tx.Hash(), false)
   917  			pool.changesSinceReorg += dropped
   918  		}
   919  	}
   920  
   921  	// Try to replace an existing transaction in the pending pool. list.Add mutates the list, so take the write lock.
   922  	pool.pendingMu.Lock()
   923  
   924  	list := pool.pending[from]
   925  
   926  	if list != nil && list.Overlaps(tx) {
   927  		// Nonce already pending, check if required price bump is met
   928  		inserted, old := list.Add(tx, pool.config.PriceBump)
   929  		pool.pendingCount++
   930  		pool.pendingMu.Unlock()
   931  
   932  		if !inserted {
   933  			pendingDiscardMeter.Mark(1)
   934  			return false, ErrReplaceUnderpriced
   935  		}
   936  
   937  		// New transaction is better, replace old one
   938  		if old != nil {
   939  			pool.all.Remove(old.Hash())
   940  			pool.priced.Removed(1)
   941  			pendingReplaceMeter.Mark(1)
   942  		}
   943  
   944  		pool.all.Add(tx, isLocal)
   945  		pool.priced.Put(tx, isLocal)
   946  		pool.journalTx(from, tx)
   947  		pool.queueTxEvent(tx)
   948  		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
   949  
   950  		// Successful promotion, bump the heartbeat
   951  		pool.beats[from] = time.Now()
   952  
   953  		return old != nil, nil
   954  	}
   955  
   956  	// Not a double unlock: the branch above returns after unlocking.
   957  	pool.pendingMu.Unlock()
   958  
   959  	// New transaction isn't replacing a pending one, push into queue
   960  	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
   961  	if err != nil {
   962  		return false, err
   963  	}
   964  	// Mark local addresses and journal local transactions
   965  	if local && !pool.locals.contains(from) {
   966  		log.Info("Setting new local account", "address", from)
   967  		pool.locals.add(from)
   968  		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
   969  	}
   970  	if isLocal {
   971  		localGauge.Inc(1)
   972  	}
   973  	pool.journalTx(from, tx)
   974  
   975  	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
   976  	return replaced, nil
   977  }
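
        // Replacing a same-nonce transaction requires beating the old one's price
        // caps by at least config.PriceBump percent. A sketch of the threshold rule
        // the list applies (illustrative, not part of the original file):
        //
        //	func meetsPriceBump(oldPrice, newPrice *big.Int, priceBump uint64) bool {
        //		threshold := new(big.Int).Mul(oldPrice, big.NewInt(int64(100+priceBump)))
        //		threshold.Div(threshold, big.NewInt(100))
        //		return newPrice.Cmp(threshold) >= 0
        //	}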
   978  
   979  // isFuture reports whether the given transaction is not immediately executable.
   980  func (pool *TxPool) isFuture(from common.Address, tx *types.Transaction) bool {
   981  	list := pool.pending[from]
   982  	if list == nil {
   983  		return pool.pendingNonces.get(from) != tx.Nonce()
   984  	}
   985  	// Sender has pending transactions.
   986  	if old := list.txs.Get(tx.Nonce()); old != nil {
   987  		return false // It replaces a pending transaction.
   988  	}
   989  	// Not replacing, check if parent nonce exists in pending.
   990  	return list.txs.Get(tx.Nonce()-1) == nil
   991  }
   992  
   993  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   994  //
   995  // Note, this method assumes the pool lock is held!
   996  func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
   997  	// Try to insert the transaction into the future queue
   998  	from, _ := types.Sender(pool.signer, tx) // already validated
   999  	if pool.queue[from] == nil {
  1000  		pool.queue[from] = newTxList(false)
  1001  	}
  1002  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
  1003  	if !inserted {
  1004  		// An older transaction was better, discard this
  1005  		queuedDiscardMeter.Mark(1)
  1006  		return false, ErrReplaceUnderpriced
  1007  	}
  1008  	// Discard any previous transaction and mark this
  1009  	if old != nil {
  1010  		pool.all.Remove(old.Hash())
  1011  		pool.priced.Removed(1)
  1012  		queuedReplaceMeter.Mark(1)
  1013  	} else {
  1014  		// Nothing was replaced, bump the queued counter
  1015  		queuedGauge.Inc(1)
  1016  	}
  1017  	// If the transaction isn't in the lookup set but is expected to be there,
  1018  	// log an error.
  1019  	if pool.all.Get(hash) == nil && !addAll {
  1020  		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
  1021  	}
  1022  	if addAll {
  1023  		pool.all.Add(tx, local)
  1024  		pool.priced.Put(tx, local)
  1025  	}
  1026  	// If we never record the heartbeat, do it right now.
  1027  	if _, exist := pool.beats[from]; !exist {
  1028  		pool.beats[from] = time.Now()
  1029  	}
  1030  	return old != nil, nil
  1031  }
  1032  
  1033  // journalTx adds the specified transaction to the local disk journal if it is
  1034  // deemed to have been sent from a local account.
  1035  func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
  1036  	// Only journal if it's enabled and the transaction is local
  1037  	if pool.journal == nil || !pool.locals.contains(from) {
  1038  		return
  1039  	}
  1040  	if err := pool.journal.insert(tx); err != nil {
  1041  		log.Warn("Failed to journal local transaction", "err", err)
  1042  	}
  1043  }
  1044  
  1045  // promoteTx adds a transaction to the pending (processable) list of transactions
  1046  // and returns whether it was inserted or an older one was better.
  1047  //
  1048  // Note, this method assumes the pool lock is held!
  1049  func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
  1050  	defer func() {
  1051  		if pool.promoteTxCh == nil {
  1052  			return
  1053  		}
  1054  
  1055  		select {
  1056  		case pool.promoteTxCh <- struct{}{}:
  1057  		default:
  1058  		}
  1059  	}()
  1060  
  1061  	// Try to insert the transaction into the pending queue
  1062  	pool.pendingMu.Lock()
  1063  	if pool.pending[addr] == nil {
  1064  		pool.pending[addr] = newTxList(true)
  1065  	}
  1066  	list := pool.pending[addr]
  1067  
  1068  	inserted, old := list.Add(tx, pool.config.PriceBump)
  1069  	pool.pendingCount++
  1070  	pool.pendingMu.Unlock()
  1071  
  1072  	if !inserted {
  1073  		// An older transaction was better, discard this
  1074  		pool.all.Remove(hash)
  1075  		pool.priced.Removed(1)
  1076  		pendingDiscardMeter.Mark(1)
  1077  
  1078  		return false
  1079  	}
  1080  
  1081  	// Otherwise discard any previous transaction and mark this
  1082  	if old != nil {
  1083  		pool.all.Remove(old.Hash())
  1084  		pool.priced.Removed(1)
  1085  		pendingReplaceMeter.Mark(1)
  1086  	} else {
  1087  		// Nothing was replaced, bump the pending counter
  1088  		pendingGauge.Inc(1)
  1089  	}
  1090  
  1091  	// Set the potentially new pending nonce and notify any subsystems of the new tx
  1092  	pool.pendingNonces.set(addr, tx.Nonce()+1)
  1093  
  1094  	// Successful promotion, bump the heartbeat
  1095  	pool.beats[addr] = time.Now()
  1096  
  1097  	return true
  1098  }
  1099  
  1100  // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
  1101  // senders as local ones, ensuring they bypass the local pricing constraints.
  1102  //
  1103  // This method is used to add transactions from the RPC API and performs synchronous pool
  1104  // reorganization and event propagation.
  1105  func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
  1106  	return pool.addTxs(txs, !pool.config.NoLocals, true)
  1107  }
  1108  
  1109  // AddLocal enqueues a single local transaction into the pool if it is valid. This is
  1110  // a convenience wrapper around AddLocals.
  1111  func (pool *TxPool) AddLocal(tx *types.Transaction) error {
  1112  	return pool.addTx(tx, !pool.config.NoLocals, true)
  1113  }
  1114  
  1115  // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
  1116  // senders are not among the locally tracked ones, full pricing constraints will apply.
  1117  //
  1118  // This method is used to add transactions from the p2p network and does not wait for pool
  1119  // reorganization and internal event propagation.
  1120  func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
  1121  	return pool.addTxs(txs, false, false)
  1122  }
  1123  
  1124  // AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
  1125  func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
  1126  	return pool.addTxs(txs, false, true)
  1127  }
  1128  
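        // AddRemoteSync is like AddRemote, but waits for pool reorganization.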
  1129  func (pool *TxPool) AddRemoteSync(tx *types.Transaction) error {
  1130  	return pool.addTx(tx, false, true)
  1131  }
  1132  
  1133  // addRemoteSync is like AddRemote, but waits for pool reorganization. Tests use this method.
  1134  func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
  1135  	return pool.AddRemoteSync(tx)
  1136  }
  1137  
  1138  // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
  1139  // wrapper around AddRemotes.
  1140  func (pool *TxPool) AddRemote(tx *types.Transaction) error {
  1141  	return pool.addTx(tx, false, false)
  1142  }
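
        // A minimal sketch of the two submission paths (illustrative, not part of
        // the original file; signedTx and batch are assumed to be in scope):
        //
        //	// RPC path: journals the tx, bypasses the price floor, reorgs synchronously.
        //	if err := pool.AddLocal(signedTx); err != nil {
        //		log.Warn("Local transaction rejected", "err", err)
        //	}
        //
        //	// p2p path: full pricing constraints, no waiting on the reorg.
        //	for _, err := range pool.AddRemotes(batch) {
        //		if err != nil && !errors.Is(err, ErrAlreadyKnown) {
        //			log.Debug("Remote transaction rejected", "err", err)
        //		}
        //	}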
  1143  
  1144  // addTxs attempts to queue a batch of transactions if they are valid.
  1145  func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
  1146  	// Filter out known ones without obtaining the pool lock or recovering signatures
  1147  	var (
  1148  		errs []error
  1149  		news = make([]*types.Transaction, 0, len(txs))
  1150  		err  error
  1151  
  1152  		hash common.Hash
  1153  	)
  1154  
  1155  	for _, tx := range txs {
  1156  		// If the transaction is known, record the error and skip it
  1157  		hash = tx.Hash()
  1158  
  1159  		if pool.all.Get(hash) != nil {
  1160  			errs = append(errs, ErrAlreadyKnown)
  1161  			knownTxMeter.Mark(1)
  1162  
  1163  			continue
  1164  		}
  1165  
  1166  		if pool.config.AllowUnprotectedTxs {
  1167  			pool.signer = types.NewFakeSigner(tx.ChainId())
  1168  		}
  1169  
  1170  		// Exclude transactions with invalid signatures as soon as
  1171  		// possible and cache senders in transactions before
  1172  		// obtaining lock
  1173  		_, err = types.Sender(pool.signer, tx)
  1174  		if err != nil {
  1175  			errs = append(errs, ErrInvalidSender)
  1176  			invalidTxMeter.Mark(1)
  1177  
  1178  			continue
  1179  		}
  1180  
  1181  		// Accumulate all unknown transactions for deeper processing
  1182  		news = append(news, tx)
  1183  	}
  1184  
  1185  	if len(news) == 0 {
  1186  		return errs
  1187  	}
  1188  
  1189  	// Process all the new transactions and merge any errors into the original slice
  1190  	pool.mu.Lock()
  1191  	errs, dirtyAddrs := pool.addTxsLocked(news, local)
  1192  	pool.mu.Unlock()
  1193  
  1194  	// Reorg the pool internals if needed and return
  1195  	done := pool.requestPromoteExecutables(dirtyAddrs)
  1196  	if sync {
  1197  		<-done
  1198  	}
  1199  
  1200  	return errs
  1201  }
  1202  
  1203  // addTx attempts to queue a single transaction if it is valid.
  1204  func (pool *TxPool) addTx(tx *types.Transaction, local, sync bool) error {
  1205  	// Filter out known ones without obtaining the pool lock or recovering signatures
  1206  	var (
  1207  		err  error
  1208  		hash common.Hash
  1209  	)
  1210  
  1211  	func() {
  1212  		// If the transaction is known, bail out with ErrAlreadyKnown
  1213  		hash = tx.Hash()
  1214  
  1215  		if pool.all.Get(hash) != nil {
  1216  			err = ErrAlreadyKnown
  1217  
  1218  			knownTxMeter.Mark(1)
  1219  
  1220  			return
  1221  		}
  1222  
  1223  		// Exclude transactions with invalid signatures as soon as
  1224  		// possible and cache senders in transactions before
  1225  		// obtaining lock
  1226  		if pool.config.AllowUnprotectedTxs {
  1227  			pool.signer = types.NewFakeSigner(tx.ChainId())
  1228  		}
  1229  
  1230  		_, err = types.Sender(pool.signer, tx)
  1231  		if err != nil {
  1232  			invalidTxMeter.Mark(1)
  1233  
  1234  			return
  1235  		}
  1236  	}()
  1237  
  1238  	if err != nil {
  1239  		return err
  1240  	}
  1241  
  1242  	var dirtyAddrs *accountSet
  1243  
  1244  	// Process the new transaction and capture any error
  1245  	pool.mu.Lock()
  1246  	err, dirtyAddrs = pool.addTxLocked(tx, local)
  1247  	pool.mu.Unlock()
  1248  
  1249  	// Reorg the pool internals if needed and return
  1250  	done := pool.requestPromoteExecutables(dirtyAddrs)
  1251  	if sync {
  1252  		<-done
  1253  	}
  1254  
  1255  	return err
  1256  }
  1257  
  1258  // addTxsLocked attempts to queue a batch of transactions if they are valid.
  1259  // The transaction pool lock must be held.
  1260  func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
  1261  	dirty := newAccountSet(pool.signer)
  1262  
  1263  	var (
  1264  		replaced bool
  1265  		errs     []error
  1266  	)
  1267  
  1268  	for _, tx := range txs {
  1269  		var err error
  1270  
  1271  		replaced, err = pool.add(tx, local)
  1272  		if err == nil && !replaced {
  1273  			dirty.addTx(tx)
  1274  		}
  1275  
  1276  		if err != nil {
  1277  			errs = append(errs, err)
  1278  		}
  1279  	}
  1280  
  1281  	validTxMeter.Mark(int64(len(dirty.accounts)))
  1282  
  1283  	return errs, dirty
  1284  }
  1285  
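        // addTxLocked is the single-transaction counterpart of addTxsLocked.
        // The transaction pool lock must be held.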
  1286  func (pool *TxPool) addTxLocked(tx *types.Transaction, local bool) (error, *accountSet) {
  1287  	dirty := newAccountSet(pool.signer)
  1288  
  1289  	var (
  1290  		replaced bool
  1291  		err      error
  1292  	)
  1293  
  1294  	replaced, err = pool.add(tx, local)
  1295  	if err == nil && !replaced {
  1296  		dirty.addTx(tx)
  1297  	}
  1298  
  1299  	validTxMeter.Mark(int64(len(dirty.accounts)))
  1300  
  1301  	return err, dirty
  1302  }
  1303  
  1304  // Status returns the status (unknown/pending/queued) of a batch of transactions
  1305  // identified by their hashes.
  1306  func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
  1307  	status := make([]TxStatus, len(hashes))
  1308  
  1309  	var (
  1310  		txList    *txList
  1311  		isPending bool
  1312  	)
  1313  
  1314  	for i, hash := range hashes {
  1315  		tx := pool.Get(hash)
  1316  		if tx == nil {
  1317  			continue
  1318  		}
  1319  
  1320  		from, _ := types.Sender(pool.signer, tx) // already validated
  1321  
  1322  		pool.pendingMu.RLock()
  1323  
  1324  		if txList = pool.pending[from]; txList != nil && txList.txs.Has(tx.Nonce()) {
  1325  			status[i] = TxStatusPending
  1326  			isPending = true
  1327  		} else {
  1328  			isPending = false
  1329  		}
  1330  
  1331  		pool.pendingMu.RUnlock()
  1332  
  1333  		if !isPending {
  1334  			pool.mu.RLock()
  1335  
  1336  			if txList := pool.queue[from]; txList != nil && txList.txs.Has(tx.Nonce()) {
  1337  				status[i] = TxStatusQueued
  1338  			}
  1339  
  1340  			pool.mu.RUnlock()
  1341  		}
  1342  
  1343  		// implicit else: the tx may have been included in a block between
  1344  		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct.
  1345  	}
  1346  
  1347  	return status
  1348  }
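
        // A minimal usage sketch (illustrative, not part of the original file;
        // txHash is assumed to be in scope):
        //
        //	switch pool.Status([]common.Hash{txHash})[0] {
        //	case TxStatusPending:
        //		// executable against the current pending state
        //	case TxStatusQueued:
        //		// waiting on a nonce gap or on funds
        //	case TxStatusUnknown:
        //		// never seen, dropped, or already included in a block
        //	}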
  1349  
  1350  // Get returns a transaction if it is contained in the pool and nil otherwise.
  1351  func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
  1352  	return pool.all.Get(hash)
  1353  }
  1354  
  1355  // Has reports whether the pool has a transaction cached with the
  1356  // given hash.
  1357  func (pool *TxPool) Has(hash common.Hash) bool {
  1358  	return pool.all.Get(hash) != nil
  1359  }
  1360  
  1361  // removeTx removes a single transaction from the queue, moving all subsequent
  1362  // transactions back to the future queue.
  1363  // Returns the number of transactions removed from the pending queue.
  1364  func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int {
  1365  	// Fetch the transaction we wish to delete
  1366  	tx := pool.all.Get(hash)
  1367  	if tx == nil {
  1368  		return 0
  1369  	}
  1370  
  1371  	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
  1372  
  1373  	// Remove it from the list of known transactions
  1374  	pool.all.Remove(hash)
  1375  	if outofbound {
  1376  		pool.priced.Removed(1)
  1377  	}
  1378  
  1379  	if pool.locals.contains(addr) {
  1380  		localGauge.Dec(1)
  1381  	}
  1382  
  1383  	// Remove the transaction from the pending lists and reset the account nonce
  1384  	pool.pendingMu.Lock()
  1385  
  1386  	if pending := pool.pending[addr]; pending != nil {
  1387  		if removed, invalids := pending.Remove(tx); removed {
  1388  			pool.pendingCount--
  1389  
  1390  			// If no more pending transactions are left, remove the list
  1391  			if pending.Empty() {
  1392  				delete(pool.pending, addr)
  1393  			}
  1394  
  1395  			pool.pendingMu.Unlock()
  1396  
  1397  			// Postpone any invalidated transactions
  1398  			for _, tx := range invalids {
  1399  				// Internal shuffle shouldn't touch the lookup set.
  1400  				pool.enqueueTx(tx.Hash(), tx, false, false)
  1401  			}
  1402  
  1403  			// Update the account nonce if needed
  1404  			pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1405  
  1406  			// Reduce the pending counter
  1407  			pendingGauge.Dec(int64(1 + len(invalids)))
  1408  
  1409  			return 1 + len(invalids)
  1410  		}
  1413  	}
  1414  
  1415  	pool.pendingMu.Unlock()
  1416  
  1417  	// Transaction is in the future queue
  1418  	if future := pool.queue[addr]; future != nil {
  1419  		if removed, _ := future.Remove(tx); removed {
  1420  			// Reduce the queued counter
  1421  			queuedGauge.Dec(1)
  1422  		}
  1423  
  1424  		if future.Empty() {
  1425  			delete(pool.queue, addr)
  1426  			delete(pool.beats, addr)
  1427  		}
  1428  	}
  1429  
  1430  	return 0
  1431  }
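
// Editor's note, with a hypothetical helper: removeTx returns 1+len(invalids)
// when the hash was pending (the transaction itself plus any higher-nonce
// transactions demoted back to the future queue), and 0 when the transaction
// was only queued or not in the pool at all. The pool's own call sites invoke
// it with pool.mu held.
func removeBatchSketch(pool *TxPool, hashes []common.Hash) (demoted int) {
	for _, h := range hashes {
		demoted += pool.removeTx(h, true) // true: also account for it in the priced list
	}
	return demoted
}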
  1432  
  1433  // requestReset requests a pool reset to the new head block.
  1434  // The returned channel is closed when the reset has occurred.
  1435  func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
  1436  	select {
  1437  	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
  1438  		return <-pool.reorgDoneCh
  1439  	case <-pool.reorgShutdownCh:
  1440  		return pool.reorgShutdownCh
  1441  	}
  1442  }
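
// Editor's sketch of the handshake above (assumed usage, not original code):
// the returned channel is closed by runReorg once the reset has been applied,
// and on shutdown the closed reorgShutdownCh is returned instead, so a plain
// receive never blocks forever:
//
//	done := pool.requestReset(oldHead, newHead)
//	<-done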
  1443  
  1444  // requestPromoteExecutables requests transaction promotion checks for the given addresses.
  1445  // The returned channel is closed when the promotion checks have occurred.
  1446  func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
  1447  	select {
  1448  	case pool.reqPromoteCh <- set:
  1449  		return <-pool.reorgDoneCh
  1450  	case <-pool.reorgShutdownCh:
  1451  		return pool.reorgShutdownCh
  1452  	}
  1453  }
  1454  
  1455  // queueTxEvent enqueues a transaction event to be sent in the next reorg run.
  1456  func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
  1457  	select {
  1458  	case pool.queueTxEventCh <- tx:
  1459  	case <-pool.reorgShutdownCh:
  1460  	}
  1461  }
  1462  
  1463  // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
  1464  // call those methods directly; instead it should request them to be run via requestReset
  1465  // and requestPromoteExecutables.
  1466  func (pool *TxPool) scheduleReorgLoop() {
  1467  	defer pool.wg.Done()
  1468  
  1469  	var (
  1470  		curDone       chan struct{} // non-nil while runReorg is active
  1471  		nextDone      = make(chan struct{})
  1472  		launchNextRun bool
  1473  		reset         *txpoolResetRequest
  1474  		dirtyAccounts *accountSet
  1475  		queuedEvents  = make(map[common.Address]*txSortedMap)
  1476  	)
  1477  
  1478  	for {
  1479  		// Launch next background reorg if needed
  1480  		if curDone == nil && launchNextRun {
  1481  			ctx := context.Background()
  1482  
  1483  			// Run the background reorg and announcements
  1484  			go pool.runReorg(ctx, nextDone, reset, dirtyAccounts, queuedEvents)
  1485  
  1486  			// Prepare everything for the next round of reorg
  1487  			curDone, nextDone = nextDone, make(chan struct{})
  1488  			launchNextRun = false
  1489  
  1490  			reset, dirtyAccounts = nil, nil
  1491  			queuedEvents = make(map[common.Address]*txSortedMap)
  1492  		}
  1493  
  1494  		select {
  1495  		case req := <-pool.reqResetCh:
  1496  			// Reset request: update head if request is already pending.
  1497  			if reset == nil {
  1498  				reset = req
  1499  			} else {
  1500  				reset.newHead = req.newHead
  1501  			}
  1502  			launchNextRun = true
  1503  			pool.reorgDoneCh <- nextDone
  1504  
  1505  		case req := <-pool.reqPromoteCh:
  1506  			// Promote request: update address set if request is already pending.
  1507  			if dirtyAccounts == nil {
  1508  				dirtyAccounts = req
  1509  			} else {
  1510  				dirtyAccounts.merge(req)
  1511  			}
  1512  			launchNextRun = true
  1513  			pool.reorgDoneCh <- nextDone
  1514  
  1515  		case tx := <-pool.queueTxEventCh:
  1516  			// Queue up the event, but don't schedule a reorg. It's up to the caller to
  1517  			// request one later if they want the events sent.
  1518  			addr, _ := types.Sender(pool.signer, tx)
  1519  			if _, ok := queuedEvents[addr]; !ok {
  1520  				queuedEvents[addr] = newTxSortedMap()
  1521  			}
  1522  			queuedEvents[addr].Put(tx)
  1523  
  1524  		case <-curDone:
  1525  			curDone = nil
  1526  
  1527  		case <-pool.reorgShutdownCh:
  1528  			// Wait for current run to finish.
  1529  			if curDone != nil {
  1530  				<-curDone
  1531  			}
  1532  			close(nextDone)
  1533  			return
  1534  		}
  1535  	}
  1536  }
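
// Editor's illustration (hypothetical and simplified, not original code): the
// loop above is a "coalescing worker": at most one runReorg executes at a
// time, and requests arriving meanwhile are merged into the state for the
// next run. The same pattern in miniature:
func coalescingWorkerSketch(reqCh <-chan int, stop <-chan struct{}, work func(int)) {
	var (
		curDone  chan struct{} // non-nil while work is running
		nextDone = make(chan struct{})
		batch    int
		haveWork bool
	)
	for {
		if curDone == nil && haveWork {
			b, done := batch, nextDone
			go func() { work(b); close(done) }()
			curDone, nextDone = nextDone, make(chan struct{})
			batch, haveWork = 0, false
		}
		select {
		case n := <-reqCh:
			batch += n // merge into the not-yet-started run
			haveWork = true
		case <-curDone:
			curDone = nil
		case <-stop:
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}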
  1537  
  1538  // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
  1539  //
  1540  //nolint:gocognit
  1541  func (pool *TxPool) runReorg(ctx context.Context, done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
  1542  	tracing.Exec(ctx, "TxPoolReorg", "txpool-reorg", func(ctx context.Context, span trace.Span) {
  1543  		defer func(t0 time.Time) {
  1544  			reorgDurationTimer.Update(time.Since(t0))
  1545  		}(time.Now())
  1546  
  1547  		defer close(done)
  1548  
  1549  		var promoteAddrs []common.Address
  1550  
  1551  		tracing.ElapsedTime(ctx, span, "01 dirty accounts flattening", func(_ context.Context, innerSpan trace.Span) {
  1552  			if dirtyAccounts != nil && reset == nil {
  1553  				// Only dirty accounts need to be promoted, unless we're resetting.
  1554  				// For resets, all addresses in the tx queue will be promoted and
  1555  				// the flatten operation can be avoided.
  1556  				promoteAddrs = dirtyAccounts.flatten()
  1557  			}
  1558  
  1559  			tracing.SetAttributes(
  1560  				innerSpan,
  1561  				attribute.Int("promoteAddresses-flatten", len(promoteAddrs)),
  1562  			)
  1563  		})
  1564  
  1565  		tracing.ElapsedTime(ctx, span, "02 obtaining pool.WMutex", func(_ context.Context, _ trace.Span) {
  1566  			pool.mu.Lock()
  1567  		})
  1568  
  1569  		if reset != nil {
  1570  			tracing.ElapsedTime(ctx, span, "03 reset-head reorg", func(_ context.Context, innerSpan trace.Span) {
  1571  
  1572  				// Reset from the old head to the new, rescheduling any reorged transactions
  1573  				tracing.ElapsedTime(ctx, innerSpan, "04 reset-head-itself reorg", func(_ context.Context, innerSpan trace.Span) {
  1574  					pool.reset(reset.oldHead, reset.newHead)
  1575  				})
  1576  
  1577  				tracing.SetAttributes(
  1578  					innerSpan,
  1579  					attribute.Int("events-reset-head", len(events)),
  1580  				)
  1581  
  1582  				// Nonces were reset, discard any events that became stale
  1583  				for addr := range events {
  1584  					events[addr].Forward(pool.pendingNonces.get(addr))
  1585  
  1586  					if events[addr].Len() == 0 {
  1587  						delete(events, addr)
  1588  					}
  1589  				}
  1590  
  1591  				// A reset needs to promote all addresses in the queue
  1592  				promoteAddrs = make([]common.Address, 0, len(pool.queue))
  1593  				for addr := range pool.queue {
  1594  					promoteAddrs = append(promoteAddrs, addr)
  1595  				}
  1596  
  1597  				tracing.SetAttributes(
  1598  					innerSpan,
  1599  					attribute.Int("promoteAddresses-reset-head", len(promoteAddrs)),
  1600  				)
  1601  			})
  1602  		}
  1603  
  1604  		// Check for pending transactions for every account that sent new ones
  1605  		var promoted []*types.Transaction
  1606  
  1607  		tracing.ElapsedTime(ctx, span, "05 promoteExecutables", func(_ context.Context, _ trace.Span) {
  1608  			promoted = pool.promoteExecutables(promoteAddrs)
  1609  		})
  1610  
  1611  		tracing.SetAttributes(
  1612  			span,
  1613  			attribute.Int("count.promoteAddresses-reset-head", len(promoteAddrs)),
  1614  			attribute.Int("count.all", pool.all.Count()),
  1615  			attribute.Int("count.pending", len(pool.pending)),
  1616  			attribute.Int("count.queue", len(pool.queue)),
  1617  		)
  1618  
  1619  		// If a new block appeared, validate the pool of pending transactions. This will
  1620  		// remove any transaction that has been included in the block or was invalidated
  1621  		// because of another transaction (e.g. higher gas price).
  1622  
  1623  		//nolint:nestif
  1624  		if reset != nil {
  1625  			tracing.ElapsedTime(ctx, span, "new block", func(_ context.Context, innerSpan trace.Span) {
  1626  
  1627  				tracing.ElapsedTime(ctx, innerSpan, "06 demoteUnexecutables", func(_ context.Context, _ trace.Span) {
  1628  					pool.demoteUnexecutables()
  1629  				})
  1630  
  1631  				var nonces map[common.Address]uint64
  1632  
  1633  				tracing.ElapsedTime(ctx, innerSpan, "07 set_base_fee", func(_ context.Context, _ trace.Span) {
  1634  					if reset.newHead != nil {
  1635  						if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
  1636  							// london fork enabled, reset given the base fee
  1637  							pendingBaseFee := misc.CalcBaseFeeUint(pool.chainconfig, reset.newHead)
  1638  							pool.priced.SetBaseFee(pendingBaseFee)
  1639  						} else {
  1640  							// london fork not enabled, reheap to "reset" the priced list
  1641  							pool.priced.Reheap()
  1642  						}
  1643  					}
  1644  
  1645  					// Update all accounts to the latest known pending nonce
  1646  					nonces = make(map[common.Address]uint64, len(pool.pending))
  1647  				})
  1648  
  1649  				tracing.ElapsedTime(ctx, innerSpan, "08 obtaining pendingMu.RMutex", func(_ context.Context, _ trace.Span) {
  1650  					pool.pendingMu.RLock()
  1651  				})
  1652  
  1653  				var highestPending *types.Transaction
  1654  
  1655  				tracing.ElapsedTime(ctx, innerSpan, "09 fill nonces", func(_ context.Context, innerSpan trace.Span) {
  1656  					for addr, list := range pool.pending {
  1657  						highestPending = list.LastElement()
  1658  						if highestPending != nil {
  1659  							nonces[addr] = highestPending.Nonce() + 1
  1660  						}
  1661  					}
  1662  				})
  1663  
  1664  				pool.pendingMu.RUnlock()
  1665  
  1666  				tracing.ElapsedTime(ctx, innerSpan, "10 reset nonces", func(_ context.Context, _ trace.Span) {
  1667  					pool.pendingNonces.setAll(nonces)
  1668  				})
  1669  			})
  1670  		}
  1671  
  1672  		// Ensure pool.queue and pool.pending sizes stay within the configured limits.
  1673  		tracing.ElapsedTime(ctx, span, "11 truncatePending", func(_ context.Context, _ trace.Span) {
  1674  			pool.truncatePending()
  1675  		})
  1676  
  1677  		tracing.ElapsedTime(ctx, span, "12 truncateQueue", func(_ context.Context, _ trace.Span) {
  1678  			pool.truncateQueue()
  1679  		})
  1680  
  1681  		dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
  1682  		pool.changesSinceReorg = 0 // Reset change counter
  1683  
  1684  		pool.mu.Unlock()
  1685  
  1686  		// Notify subsystems for newly added transactions
  1687  		tracing.ElapsedTime(ctx, span, "13 notify about new transactions", func(_ context.Context, _ trace.Span) {
  1688  			for _, tx := range promoted {
  1689  				addr, _ := types.Sender(pool.signer, tx)
  1690  
  1691  				if _, ok := events[addr]; !ok {
  1692  					events[addr] = newTxSortedMap()
  1693  				}
  1694  
  1695  				events[addr].Put(tx)
  1696  			}
  1697  		})
  1698  
  1699  		if len(events) > 0 {
  1700  			tracing.ElapsedTime(ctx, span, "14 txFeed", func(_ context.Context, _ trace.Span) {
  1701  				var txs []*types.Transaction
  1702  
  1703  				for _, set := range events {
  1704  					txs = append(txs, set.Flatten()...)
  1705  				}
  1706  
  1707  				pool.txFeed.Send(NewTxsEvent{txs})
  1708  			})
  1709  		}
  1710  	})
  1711  }
  1712  
  1713  // reset retrieves the current state of the blockchain and ensures the content
  1714  // of the transaction pool is valid with regard to the chain state.
  1715  func (pool *TxPool) reset(oldHead, newHead *types.Header) {
  1716  	// If we're reorging an old state, reinject all dropped transactions
  1717  	var reinject types.Transactions
  1718  
  1719  	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
  1720  		// If the reorg is too deep, avoid doing it (will happen during fast sync)
  1721  		oldNum := oldHead.Number.Uint64()
  1722  		newNum := newHead.Number.Uint64()
  1723  
  1724  		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
  1725  			log.Debug("Skipping deep transaction reorg", "depth", depth)
  1726  		} else {
  1727  			// Reorg seems shallow enough to pull in all transactions into memory
  1728  			var discarded, included types.Transactions
  1729  			var (
  1730  				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  1731  				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
  1732  			)
  1733  			if rem == nil {
  1734  				// This can happen if a setHead is performed, where we simply discard the old
  1735  				// head from the chain.
  1736  				// If that is the case, we don't have the lost transactions any more, and
  1737  				// there's nothing to add
  1738  				if newNum >= oldNum {
  1739  					// If we reorged to the same or a higher number, then it's not a case of setHead
  1740  					log.Warn("Transaction pool reset with missing oldhead",
  1741  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1742  					return
  1743  				}
  1744  				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
  1745  				log.Debug("Skipping transaction reset caused by setHead",
  1746  					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1747  				// We still need to update the current state so that the lost transactions can be re-added by the user
  1748  			} else {
  1749  				for rem.NumberU64() > add.NumberU64() {
  1750  					discarded = append(discarded, rem.Transactions()...)
  1751  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1752  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1753  						return
  1754  					}
  1755  				}
  1756  				for add.NumberU64() > rem.NumberU64() {
  1757  					included = append(included, add.Transactions()...)
  1758  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1759  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1760  						return
  1761  					}
  1762  				}
  1763  				for rem.Hash() != add.Hash() {
  1764  					discarded = append(discarded, rem.Transactions()...)
  1765  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1766  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1767  						return
  1768  					}
  1769  					included = append(included, add.Transactions()...)
  1770  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1771  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1772  						return
  1773  					}
  1774  				}
  1775  				reinject = types.TxDifference(discarded, included)
  1776  			}
  1777  		}
  1778  	}
  1779  	// Initialize the internal state to the current head
  1780  	if newHead == nil {
  1781  		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
  1782  	}
  1783  	statedb, err := pool.chain.StateAt(newHead.Root)
  1784  	if err != nil {
  1785  		log.Error("Failed to reset txpool state", "err", err)
  1786  		return
  1787  	}
  1788  	pool.currentState = statedb
  1789  	pool.pendingNonces = newTxNoncer(statedb)
  1790  	pool.currentMaxGas = newHead.GasLimit
  1791  
  1792  	// Inject any transactions discarded due to reorgs
  1793  	log.Debug("Reinjecting stale transactions", "count", len(reinject))
  1794  	senderCacher.recover(pool.signer, reinject)
  1795  	pool.addTxsLocked(reinject, false)
  1796  
  1797  	// Update all fork indicators by the next pending block number.
  1798  	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
  1799  	pool.istanbul = pool.chainconfig.IsIstanbul(next)
  1800  	pool.eip2718 = pool.chainconfig.IsBerlin(next)
  1801  	pool.eip1559 = pool.chainconfig.IsLondon(next)
  1802  }
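
// Editor's worked example for the reorg arithmetic above (hypothetical txs):
// if the abandoned side of the fork carried {A, B, C} and the new canonical
// side carries {B}, then reinject = types.TxDifference({A, B, C}, {B}) =
// {A, C}; only transactions that did not make it back into the canonical
// chain are re-added to the pool.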
  1803  
  1804  // promoteExecutables moves transactions that have become processable from the
  1805  // future queue to the set of pending transactions. During this process, all
  1806  // invalidated transactions (low nonce, low balance) are deleted.
  1807  func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
  1808  	// Track the promoted transactions to broadcast them at once
  1809  	var (
  1810  		promoted    []*types.Transaction
  1812  		forwards    types.Transactions
  1813  		forwardsLen int
  1814  		caps        types.Transactions
  1815  		capsLen     int
  1816  		drops       types.Transactions
  1817  		dropsLen    int
  1818  		list        *txList
  1819  		hash        common.Hash
  1820  		readies     types.Transactions
  1821  		readiesLen  int
  1822  	)
  1823  
  1824  	balance := uint256.NewInt(0)
  1825  
  1826  	// Iterate over all accounts and promote any executable transactions
  1827  	for _, addr := range accounts {
  1828  		list = pool.queue[addr]
  1829  		if list == nil {
  1830  			continue // Just in case someone calls with a non-existing account
  1831  		}
  1832  
  1833  		// Drop all transactions that are deemed too old (low nonce)
  1834  		forwards = list.Forward(pool.currentState.GetNonce(addr))
  1835  		forwardsLen = len(forwards)
  1836  
  1837  		for _, tx := range forwards {
  1838  			hash = tx.Hash()
  1839  			pool.all.Remove(hash)
  1840  		}
  1841  
  1842  		log.Trace("Removed old queued transactions", "count", forwardsLen)
  1843  
  1844  		// Drop all transactions that are too costly (low balance or out of gas)
  1845  		balance.SetFromBig(pool.currentState.GetBalance(addr))
  1846  
  1847  		drops, _ = list.Filter(balance, pool.currentMaxGas)
  1848  		dropsLen = len(drops)
  1849  
  1850  		for _, tx := range drops {
  1851  			hash = tx.Hash()
  1852  			pool.all.Remove(hash)
  1853  		}
  1854  
  1855  		log.Trace("Removed unpayable queued transactions", "count", dropsLen)
  1856  		queuedNofundsMeter.Mark(int64(dropsLen))
  1857  
  1858  		// Gather all executable transactions and promote them
  1859  		readies = list.Ready(pool.pendingNonces.get(addr))
  1860  		readiesLen = len(readies)
  1861  
  1862  		for _, tx := range readies {
  1863  			hash = tx.Hash()
  1864  			if pool.promoteTx(addr, hash, tx) {
  1865  				promoted = append(promoted, tx)
  1866  			}
  1867  		}
  1868  
  1869  		log.Trace("Promoted queued transactions", "count", len(promoted))
  1870  		queuedGauge.Dec(int64(readiesLen))
  1871  
  1872  		// Drop all transactions over the allowed limit
  1873  		if !pool.locals.contains(addr) {
  1874  			caps = list.Cap(int(pool.config.AccountQueue))
  1875  			capsLen = len(caps)
  1876  
  1877  			for _, tx := range caps {
  1878  				hash = tx.Hash()
  1879  				pool.all.Remove(hash)
  1880  
  1881  				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
  1882  			}
  1883  
  1884  			queuedRateLimitMeter.Mark(int64(capsLen))
  1885  		}
  1886  
  1887  		// Mark all the items dropped as removed
  1888  		pool.priced.Removed(forwardsLen + dropsLen + capsLen)
  1889  
  1890  		queuedGauge.Dec(int64(forwardsLen + dropsLen + capsLen))
  1891  
  1892  		if pool.locals.contains(addr) {
  1893  			localGauge.Dec(int64(forwardsLen + dropsLen + capsLen))
  1894  		}
  1895  
  1896  		// Delete the entire queue entry if it became empty.
  1897  		if list.Empty() {
  1898  			delete(pool.queue, addr)
  1899  			delete(pool.beats, addr)
  1900  		}
  1901  	}
  1902  
  1903  	return promoted
  1904  }
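
// Editor's walkthrough of the per-account promotion pipeline above, using
// hypothetical numbers: for an account with state nonce 5, pending nonce 7
// and queued nonces {3, 4, 7, 8, 12}:
//
//	Forward(5)           drops {3, 4}     (below the state nonce, stale)
//	Filter(balance, gas) drops unpayable or over-gas-limit transactions
//	Ready(7)             promotes {7, 8}  (strictly consecutive from 7)
//	                     {12} stays queued behind the nonce gap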
  1905  
  1906  // truncatePending removes transactions from the pending queue if the pool is above the
  1907  // pending limit. The algorithm tries to reduce transaction counts by an approximately
  1908  // equal number for all accounts with many pending transactions.
  1909  func (pool *TxPool) truncatePending() {
  1910  	pending := uint64(pool.pendingCount)
  1911  	if pending <= pool.config.GlobalSlots {
  1912  		return
  1913  	}
  1914  
  1915  	pendingBeforeCap := pending
  1916  
  1917  	var listLen int
  1918  
  1919  	type pair struct {
  1920  		address common.Address
  1921  		value   int64
  1922  	}
  1923  
  1924  	// Assemble a spam order to penalize large transactors first
  1925  	spammers := make([]pair, 0, 8)
  1926  	count := 0
  1927  
  1928  	var ok bool
  1929  
  1930  	pool.pendingMu.RLock()
  1931  	for addr, list := range pool.pending {
  1932  		// Only evict transactions from high rollers
  1933  		listLen = len(list.txs.items)
  1934  
  1935  		pool.pendingMu.RUnlock()
  1936  
  1937  		pool.locals.m.RLock()
  1938  
  1939  		if uint64(listLen) > pool.config.AccountSlots {
  1940  			if _, ok = pool.locals.accounts[addr]; ok {
  1941  				pool.locals.m.RUnlock()
  1942  
  1943  				pool.pendingMu.RLock()
  1944  
  1945  				continue
  1946  			}
  1947  
  1948  			count++
  1949  
  1950  			spammers = append(spammers, pair{addr, int64(listLen)})
  1951  		}
  1952  
  1953  		pool.locals.m.RUnlock()
  1954  
  1955  		pool.pendingMu.RLock()
  1956  	}
  1957  
  1958  	pool.pendingMu.RUnlock()
  1959  
  1960  	// Gradually drop transactions from offenders
  1961  	offenders := make([]common.Address, 0, len(spammers))
  1962  	sort.Slice(spammers, func(i, j int) bool {
  1963  		return spammers[i].value < spammers[j].value
  1964  	})
  1965  
  1966  	var (
  1967  		offender common.Address
  1968  		caps     types.Transactions
  1969  		capsLen  int
  1970  		list     *txList
  1971  		hash     common.Hash
  1972  	)
  1973  
  1974  	// todo: metrics: spammers, offenders, total loops
  1975  	for len(spammers) != 0 && pending > pool.config.GlobalSlots {
  1976  		// Retrieve the next offender if not local address
  1977  		offender, spammers = spammers[len(spammers)-1].address, spammers[:len(spammers)-1]
  1978  		offenders = append(offenders, offender)
  1979  
  1980  		var threshold int
  1981  
  1982  		// Equalize pending counts until all are the same or below the threshold
  1983  		if len(offenders) > 1 {
  1984  			// Calculate the equalization threshold for all current offenders
  1985  			pool.pendingMu.RLock()
  1986  			threshold = len(pool.pending[offender].txs.items)
  1987  
  1988  			// Iteratively reduce all offenders until below limit or threshold reached
  1989  			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
  1990  				for i := 0; i < len(offenders)-1; i++ {
  1991  					list = pool.pending[offenders[i]]
  1992  
  1993  					caps = list.Cap(len(list.txs.items) - 1)
  1994  					capsLen = len(caps)
  1995  
  1996  					pool.pendingMu.RUnlock()
  1997  
  1998  					for _, tx := range caps {
  1999  						// Drop the transaction from the global pools too
  2000  						hash = tx.Hash()
  2001  						pool.all.Remove(hash)
  2002  
  2003  						// Update the account nonce to the dropped transaction
  2004  						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
  2005  						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  2006  					}
  2007  
  2008  					pool.priced.Removed(capsLen)
  2009  
  2010  					pendingGauge.Dec(int64(capsLen))
  2011  					if pool.locals.contains(offenders[i]) {
  2012  						localGauge.Dec(int64(capsLen))
  2013  					}
  2014  
  2015  					pending--
  2016  
  2017  					pool.pendingMu.RLock()
  2018  				}
  2019  			}
  2020  
  2021  			pool.pendingMu.RUnlock()
  2022  		}
  2023  	}
  2024  
  2025  	// If still above threshold, reduce to limit or min allowance
  2026  	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
  2027  
  2028  		pool.pendingMu.RLock()
  2029  
  2030  		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
  2031  			for _, addr := range offenders {
  2032  				list = pool.pending[addr]
  2033  
  2034  				caps = list.Cap(len(list.txs.items) - 1)
  2035  				capsLen = len(caps)
  2036  
  2037  				pool.pendingMu.RUnlock()
  2038  
  2039  				for _, tx := range caps {
  2040  					// Drop the transaction from the global pools too
  2041  					hash = tx.Hash()
  2042  					pool.all.Remove(hash)
  2043  
  2044  					// Update the account nonce to the dropped transaction
  2045  					pool.pendingNonces.setIfLower(addr, tx.Nonce())
  2046  					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  2047  				}
  2048  
  2049  				pool.priced.Removed(capsLen)
  2050  
  2051  				pendingGauge.Dec(int64(capsLen))
  2052  
  2053  				if _, ok = pool.locals.accounts[addr]; ok {
  2054  					localGauge.Dec(int64(capsLen))
  2055  				}
  2056  
  2057  				pending--
  2058  
  2059  				pool.pendingMu.RLock()
  2060  			}
  2061  		}
  2062  
  2063  		pool.pendingMu.RUnlock()
  2064  	}
  2065  
  2066  	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
  2067  }
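
// Editor's illustration of the equalization strategy above (hypothetical
// counts): given non-local offenders with {120, 90, 60} pending transactions,
// the largest account is first capped down toward 90, then both toward 60,
// one transaction per account per pass, until the pool-wide total falls below
// GlobalSlots; if it is still over the limit, a final pass trims every
// offender toward AccountSlots.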
  2068  
  2069  // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
  2070  func (pool *TxPool) truncateQueue() {
  2071  	queued := uint64(0)
  2072  	for _, list := range pool.queue {
  2073  		queued += uint64(list.Len())
  2074  	}
  2075  	if queued <= pool.config.GlobalQueue {
  2076  		return
  2077  	}
  2078  
  2079  	// Sort all accounts with queued transactions by heartbeat
  2080  	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
  2081  	for addr := range pool.queue {
  2082  		if !pool.locals.contains(addr) { // don't drop locals
  2083  			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
  2084  		}
  2085  	}
  2086  	sort.Sort(addresses)
  2087  
  2088  	var (
  2089  		tx   *types.Transaction
  2090  		txs  types.Transactions
  2091  		list *txList
  2092  		addr addressByHeartbeat
  2093  		size uint64
  2094  	)
  2095  
  2096  	// Drop transactions until the total is below the limit or only locals remain
  2097  	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
  2098  		addr = addresses[len(addresses)-1]
  2099  		list = pool.queue[addr.address]
  2100  
  2101  		addresses = addresses[:len(addresses)-1]
  2102  
  2103  		var listFlatten types.Transactions
  2107  
  2108  		// Drop the account's transactions wholesale if the list fits within the remaining overflow
  2109  		if size = uint64(list.Len()); size <= drop {
  2110  			listFlatten = list.Flatten()
  2112  
  2113  			for _, tx = range listFlatten {
  2114  				pool.removeTx(tx.Hash(), true)
  2115  			}
  2116  
  2117  			drop -= size
  2118  			queuedRateLimitMeter.Mark(int64(size))
  2119  
  2120  			continue
  2121  		}
  2122  
  2123  		// Otherwise drop only last few transactions
  2124  		listFlatten = list.Flatten()
  2127  
  2128  		txs = listFlatten
  2129  		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
  2130  			pool.removeTx(txs[i].Hash(), true)
  2131  
  2132  			drop--
  2133  
  2134  			queuedRateLimitMeter.Mark(1)
  2135  		}
  2136  	}
  2137  }
  2138  
  2139  // demoteUnexecutables removes invalid and processed transactions from the pool's
  2140  // executable/pending queue and any subsequent transactions that become unexecutable
  2141  // are moved back into the future queue.
  2142  //
  2143  // Note: transactions are not marked as removed in the priced list because re-heaping
  2144  // is always explicitly triggered by SetBaseFee, and it would be unnecessary and wasteful
  2145  // to trigger a re-heap in this function.
  2146  func (pool *TxPool) demoteUnexecutables() {
  2147  	balance := uint256.NewInt(0)
  2148  
  2149  	var (
  2150  		olds        types.Transactions
  2151  		oldsLen     int
  2152  		hash        common.Hash
  2153  		drops       types.Transactions
  2154  		dropsLen    int
  2155  		invalids    types.Transactions
  2156  		invalidsLen int
  2157  		gapped      types.Transactions
  2158  		gappedLen   int
  2159  	)
  2160  
  2161  	// Iterate over all accounts and demote any non-executable transactions
  2162  	pool.pendingMu.RLock()
  2163  
  2164  	for addr, list := range pool.pending {
  2165  		nonce := pool.currentState.GetNonce(addr)
  2166  
  2167  		// Drop all transactions that are deemed too old (low nonce)
  2168  		olds = list.Forward(nonce)
  2169  		oldsLen = len(olds)
  2170  
  2171  		for _, tx := range olds {
  2172  			hash = tx.Hash()
  2173  			pool.all.Remove(hash)
  2174  			log.Trace("Removed old pending transaction", "hash", hash)
  2175  		}
  2176  
  2177  		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
  2178  		balance.SetFromBig(pool.currentState.GetBalance(addr))
  2179  		drops, invalids = list.Filter(balance, pool.currentMaxGas)
  2180  		dropsLen = len(drops)
  2181  		invalidsLen = len(invalids)
  2182  
  2183  		for _, tx := range drops {
  2184  			hash = tx.Hash()
  2185  
  2186  			log.Trace("Removed unpayable pending transaction", "hash", hash)
  2187  
  2188  			pool.all.Remove(hash)
  2189  		}
  2190  
  2191  		pendingNofundsMeter.Mark(int64(dropsLen))
  2192  
  2193  		for _, tx := range invalids {
  2194  			hash = tx.Hash()
  2195  
  2196  			log.Trace("Demoting pending transaction", "hash", hash)
  2197  
  2198  			// Internal shuffle shouldn't touch the lookup set.
  2199  			pool.enqueueTx(hash, tx, false, false)
  2200  		}
  2201  
  2202  		pendingGauge.Dec(int64(oldsLen + dropsLen + invalidsLen))
  2203  
  2204  		if pool.locals.contains(addr) {
  2205  			localGauge.Dec(int64(oldsLen + dropsLen + invalidsLen))
  2206  		}
  2207  		// If there's a gap in front, alert (should never happen) and postpone all transactions
  2208  		if list.Len() > 0 && list.txs.Get(nonce) == nil {
  2209  			gapped = list.Cap(0)
  2210  			gappedLen = len(gapped)
  2211  
  2212  			for _, tx := range gapped {
  2213  				hash = tx.Hash()
  2214  				log.Error("Demoting invalidated transaction", "hash", hash)
  2215  
  2216  				// Internal shuffle shouldn't touch the lookup set.
  2217  				pool.enqueueTx(hash, tx, false, false)
  2218  			}
  2219  
  2220  			pendingGauge.Dec(int64(gappedLen))
  2221  			// This might happen in a reorg, so log it to the metering
  2222  			blockReorgInvalidatedTx.Mark(int64(gappedLen))
  2223  		}
  2224  
  2225  		// Delete the entire pending entry if it became empty.
  2226  		if list.Empty() {
  2227  			pool.pendingMu.RUnlock()
  2228  			pool.pendingMu.Lock()
  2229  
  2230  			pool.pendingCount -= pool.pending[addr].Len()
  2231  			delete(pool.pending, addr)
  2232  
  2233  			pool.pendingMu.Unlock()
  2234  			pool.pendingMu.RLock()
  2235  		}
  2236  	}
  2237  
  2238  	pool.pendingMu.RUnlock()
  2239  }
  2240  
  2241  // addressByHeartbeat is an account address tagged with its last activity timestamp.
  2242  type addressByHeartbeat struct {
  2243  	address   common.Address
  2244  	heartbeat time.Time
  2245  }
  2246  
  2247  type addressesByHeartbeat []addressByHeartbeat
  2248  
  2249  func (a addressesByHeartbeat) Len() int           { return len(a) }
  2250  func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  2251  func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
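
// Editor's note: the sort above is ascending, and truncateQueue pops
// candidates from the tail of the sorted slice, so queued transactions are
// evicted starting with the accounts whose heartbeat is most recent; local
// accounts are never added to the slice in the first place.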
  2252  
  2253  // accountSet is simply a set of addresses to check for existence, and a signer
  2254  // capable of deriving addresses from transactions.
  2255  type accountSet struct {
  2256  	accounts        map[common.Address]struct{}
  2257  	accountsFlatted []common.Address
  2258  	signer          types.Signer
  2259  	m               sync.RWMutex
  2260  }
  2261  
  2262  // newAccountSet creates a new address set with an associated signer for sender
  2263  // derivations.
  2264  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  2265  	as := &accountSet{
  2266  		accounts: make(map[common.Address]struct{}),
  2267  		signer:   signer,
  2268  	}
  2269  	for _, addr := range addrs {
  2270  		as.add(addr)
  2271  	}
  2272  	return as
  2273  }
  2274  
  2275  // contains checks if a given address is contained within the set.
  2276  func (as *accountSet) contains(addr common.Address) bool {
  2277  	as.m.RLock()
  2278  	defer as.m.RUnlock()
  2279  
  2280  	_, exist := as.accounts[addr]
  2281  	return exist
  2282  }
  2283  
  2284  func (as *accountSet) empty() bool {
  2285  	as.m.RLock()
  2286  	defer as.m.RUnlock()
  2287  
  2288  	return len(as.accounts) == 0
  2289  }
  2290  
  2291  // containsTx checks if the sender of a given tx is within the set. If the sender
  2292  // cannot be derived, this method returns false.
  2293  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  2294  	// Note: contains acquires the read lock itself; taking it here as well
  2295  	// would risk deadlock against a writer blocked in add.
  2297  	if addr, err := types.Sender(as.signer, tx); err == nil {
  2298  		return as.contains(addr)
  2299  	}
  2300  	return false
  2301  }
  2302  
  2303  // add inserts a new address into the set to track.
  2304  func (as *accountSet) add(addr common.Address) {
  2305  	as.m.Lock()
  2306  	defer as.m.Unlock()
  2307  
  2308  	if _, ok := as.accounts[addr]; !ok {
  2309  		as.accountsFlatted = append(as.accountsFlatted, addr)
  2310  	}
  2311  
  2312  	as.accounts[addr] = struct{}{}
  2313  }
  2314  
  2315  // addTx adds the sender of tx into the set.
  2316  func (as *accountSet) addTx(tx *types.Transaction) {
  2317  	if addr, err := types.Sender(as.signer, tx); err == nil {
  2318  		as.add(addr)
  2319  	}
  2320  }
  2321  
  2322  // flatten returns the list of addresses within this set, also caching it for later
  2323  // reuse. The returned slice should not be changed!
  2324  func (as *accountSet) flatten() []common.Address {
  2325  	as.m.RLock()
  2326  	defer as.m.RUnlock()
  2327  
  2328  	return as.accountsFlatted
  2329  }
  2330  
  2331  // merge adds all addresses from the 'other' set into 'as'.
  2332  func (as *accountSet) merge(other *accountSet) {
  2333  	var ok bool
  2334  
  2335  	as.m.Lock()
  2336  	defer as.m.Unlock()
  2337  
  2338  	for addr := range other.accounts {
  2339  		if _, ok = as.accounts[addr]; !ok {
  2340  			as.accountsFlatted = append(as.accountsFlatted, addr)
  2341  		}
  2342  		as.accounts[addr] = struct{}{}
  2343  	}
  2344  }
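
// Editor's sketch (hypothetical helper): typical accountSet usage is to
// collect the senders of a batch of transactions and then read the cached
// flat view.
func senderSetSketch(signer types.Signer, txs types.Transactions) []common.Address {
	set := newAccountSet(signer)
	for _, tx := range txs {
		set.addTx(tx) // transactions whose sender cannot be derived are skipped
	}
	return set.flatten() // cached slice; callers must not mutate it
}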
  2345  
  2346  // txLookup is used internally by TxPool to track transactions while allowing
  2347  // lookup without mutex contention.
  2348  //
  2349  // Note, although this type is properly protected against concurrent access, it
  2350  // is **not** a type that should ever be mutated or even exposed outside of the
  2351  // transaction pool, since its internal state is tightly coupled with the pool's
  2352  // internal mechanisms. The sole purpose of the type is to permit out-of-bound
  2353  // peeking into the pool in TxPool.Get without having to acquire the widely scoped
  2354  // TxPool.mu mutex.
  2355  //
  2356  // This lookup set also tracks the notion of "local transactions", which is useful
  2357  // for building upper-level structures.
  2358  type txLookup struct {
  2359  	slots   int
  2360  	lock    sync.RWMutex
  2361  	locals  map[common.Hash]*types.Transaction
  2362  	remotes map[common.Hash]*types.Transaction
  2363  }
  2364  
  2365  // newTxLookup returns a new txLookup structure.
  2366  func newTxLookup() *txLookup {
  2367  	return &txLookup{
  2368  		locals:  make(map[common.Hash]*types.Transaction),
  2369  		remotes: make(map[common.Hash]*types.Transaction),
  2370  	}
  2371  }
  2372  
  2373  // Range calls f on each key and value present in the map. The callback should
  2374  // return whether the iteration is to continue.
  2375  // Callers need to specify which set (or both) is to be iterated.
  2376  func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
  2377  	t.lock.RLock()
  2378  	defer t.lock.RUnlock()
  2379  
  2380  	if local {
  2381  		for key, value := range t.locals {
  2382  			if !f(key, value, true) {
  2383  				return
  2384  			}
  2385  		}
  2386  	}
  2387  	if remote {
  2388  		for key, value := range t.remotes {
  2389  			if !f(key, value, false) {
  2390  				return
  2391  			}
  2392  		}
  2393  	}
  2394  }
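
// Editor's sketch (hypothetical helper): counting remote transactions below a
// tip threshold via Range. RemotesBelowTip further down does the same walk
// but collects the transactions themselves.
func countRemotesBelowTipSketch(t *txLookup, threshold *big.Int) int {
	count := 0
	t.Range(func(_ common.Hash, tx *types.Transaction, _ bool) bool {
		if tx.GasTipCapIntCmp(threshold) < 0 {
			count++
		}
		return true // keep iterating
	}, false, true) // remotes only
	return count
}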
  2395  
  2396  // Get returns a transaction if it exists in the lookup, or nil if not found.
  2397  func (t *txLookup) Get(hash common.Hash) *types.Transaction {
  2398  	t.lock.RLock()
  2399  	defer t.lock.RUnlock()
  2400  
  2401  	if tx := t.locals[hash]; tx != nil {
  2402  		return tx
  2403  	}
  2404  	return t.remotes[hash]
  2405  }
  2406  
  2407  // GetLocal returns a transaction if it exists in the lookup, or nil if not found.
  2408  func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction {
  2409  	t.lock.RLock()
  2410  	defer t.lock.RUnlock()
  2411  
  2412  	return t.locals[hash]
  2413  }
  2414  
  2415  // GetRemote returns a transaction if it exists in the lookup, or nil if not found.
  2416  func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction {
  2417  	t.lock.RLock()
  2418  	defer t.lock.RUnlock()
  2419  
  2420  	return t.remotes[hash]
  2421  }
  2422  
  2423  // Count returns the current number of transactions in the lookup.
  2424  func (t *txLookup) Count() int {
  2425  	t.lock.RLock()
  2426  	defer t.lock.RUnlock()
  2427  
  2428  	return len(t.locals) + len(t.remotes)
  2429  }
  2430  
  2431  // LocalCount returns the current number of local transactions in the lookup.
  2432  func (t *txLookup) LocalCount() int {
  2433  	t.lock.RLock()
  2434  	defer t.lock.RUnlock()
  2435  
  2436  	return len(t.locals)
  2437  }
  2438  
  2439  // RemoteCount returns the current number of remote transactions in the lookup.
  2440  func (t *txLookup) RemoteCount() int {
  2441  	t.lock.RLock()
  2442  	defer t.lock.RUnlock()
  2443  
  2444  	return len(t.remotes)
  2445  }
  2446  
  2447  // Slots returns the current number of slots used in the lookup.
  2448  func (t *txLookup) Slots() int {
  2449  	t.lock.RLock()
  2450  	defer t.lock.RUnlock()
  2451  
  2452  	return t.slots
  2453  }
  2454  
  2455  // Add adds a transaction to the lookup.
  2456  func (t *txLookup) Add(tx *types.Transaction, local bool) {
  2457  	t.lock.Lock()
  2458  	defer t.lock.Unlock()
  2459  
  2460  	t.slots += numSlots(tx)
  2461  	slotsGauge.Update(int64(t.slots))
  2462  
  2463  	if local {
  2464  		t.locals[tx.Hash()] = tx
  2465  	} else {
  2466  		t.remotes[tx.Hash()] = tx
  2467  	}
  2468  }
  2469  
  2470  // Remove removes a transaction from the lookup.
  2471  func (t *txLookup) Remove(hash common.Hash) {
  2472  	t.lock.Lock()
  2473  	defer t.lock.Unlock()
  2474  
  2475  	tx, ok := t.locals[hash]
  2476  	if !ok {
  2477  		tx, ok = t.remotes[hash]
  2478  	}
  2479  	if !ok {
  2480  		log.Error("No transaction found to be deleted", "hash", hash)
  2481  		return
  2482  	}
  2483  	t.slots -= numSlots(tx)
  2484  	slotsGauge.Update(int64(t.slots))
  2485  
  2486  	delete(t.locals, hash)
  2487  	delete(t.remotes, hash)
  2488  }
  2489  
  2490  // RemoteToLocals migrates the transactions belonging to the given locals to the
  2491  // locals set. It assumes the locals set is safe for concurrent use.
  2492  func (t *txLookup) RemoteToLocals(locals *accountSet) int {
  2493  	t.lock.Lock()
  2494  	defer t.lock.Unlock()
  2495  
  2496  	var migrated int
  2497  	for hash, tx := range t.remotes {
  2498  		if locals.containsTx(tx) {
  2499  			locals.m.Lock()
  2500  			t.locals[hash] = tx
  2501  			locals.m.Unlock()
  2502  
  2503  			delete(t.remotes, hash)
  2504  			migrated++
  2505  		}
  2506  	}
  2507  	return migrated
  2508  }
  2509  
  2510  // RemotesBelowTip finds all remote transactions below the given tip threshold.
  2511  func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
  2512  	found := make(types.Transactions, 0, 128)
  2513  	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
  2514  		if tx.GasTipCapIntCmp(threshold) < 0 {
  2515  			found = append(found, tx)
  2516  		}
  2517  		return true
  2518  	}, false, true) // Only iterate remotes
  2519  	return found
  2520  }
  2521  
  2522  // numSlots calculates the number of slots needed for a single transaction.
  2523  func numSlots(tx *types.Transaction) int {
  2524  	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
  2525  }
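
// Editor's worked example: with txSlotSize = 32KiB, a 100-byte transaction
// occupies (100+32767)/32768 = 1 slot, a 40,000-byte one occupies 2 slots,
// and a maximum-size (txMaxSize = 128KiB) transaction occupies exactly 4.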