gitlab.com/flarenetwork/coreth@v0.1.1/core/tx_pool.go

     1  // (c) 2019-2020, Ava Labs, Inc.
     2  //
     3  // This file is a derived work, based on the go-ethereum library whose original
     4  // notices appear below.
     5  //
     6  // It is distributed under a license compatible with the licensing terms of the
     7  // original code from which it is derived.
     8  //
     9  // Much love to the original authors for their work.
    10  // **********
    11  // Copyright 2014 The go-ethereum Authors
    12  // This file is part of the go-ethereum library.
    13  //
    14  // The go-ethereum library is free software: you can redistribute it and/or modify
    15  // it under the terms of the GNU Lesser General Public License as published by
    16  // the Free Software Foundation, either version 3 of the License, or
    17  // (at your option) any later version.
    18  //
    19  // The go-ethereum library is distributed in the hope that it will be useful,
    20  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    21  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    22  // GNU Lesser General Public License for more details.
    23  //
    24  // You should have received a copy of the GNU Lesser General Public License
    25  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    26  
    27  package core
    28  
    29  import (
    30  	"errors"
    31  	"fmt"
    32  	"math"
    33  	"math/big"
    34  	"sort"
    35  	"sync"
    36  	"time"
    37  
    38  	"github.com/ethereum/go-ethereum/common"
    39  	"github.com/ethereum/go-ethereum/common/prque"
    40  	"github.com/ethereum/go-ethereum/event"
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/metrics"
    43  	"gitlab.com/flarenetwork/coreth/consensus/dummy"
    44  	"gitlab.com/flarenetwork/coreth/core/state"
    45  	"gitlab.com/flarenetwork/coreth/core/types"
    46  	"gitlab.com/flarenetwork/coreth/params"
    47  )
    48  
    49  const (
     50  	// chainHeadChanSize is the size of the channel listening for ChainHeadEvent.
    51  	chainHeadChanSize = 10
    52  
     53  	// txSlotSize is used to calculate how many data slots a single transaction
     54  	// takes up based on its size. The slots are used as DoS protection, ensuring
     55  	// that validating a new transaction remains an almost constant operation (in
     56  	// reality O(maxslots), where maxslots is currently 4).
    57  	txSlotSize = 32 * 1024
    58  
    59  	// txMaxSize is the maximum size a single transaction can have. This field has
    60  	// non-trivial consequences: larger transactions are significantly harder and
    61  	// more expensive to propagate; larger transactions also take more resources
    62  	// to validate whether they fit into the pool or not.
    63  	txMaxSize = 4 * txSlotSize // 128KB
    64  )
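
         // For illustration, the slot accounting these constants drive can be
         // sketched as follows (a hypothetical helper, presumably equivalent to
         // the numSlots function used later in this file):
         //
         //	// slotsFor reports how many 32KB slots a payload of the given size
         //	// occupies, rounding up.
         //	func slotsFor(size uint64) uint64 {
         //		return (size + txSlotSize - 1) / txSlotSize
         //	}
         //
         // A 40KB transaction thus occupies 2 slots; a maximum-size 128KB one
         // occupies all 4.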
    65  
    66  var (
     67  	// ErrAlreadyKnown is returned if the transaction is already contained
    68  	// within the pool.
    69  	ErrAlreadyKnown = errors.New("already known")
    70  
    71  	// ErrInvalidSender is returned if the transaction contains an invalid signature.
    72  	ErrInvalidSender = errors.New("invalid sender")
    73  
    74  	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
    75  	// configured for the transaction pool.
    76  	ErrUnderpriced = errors.New("transaction underpriced")
    77  
     78  	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
    79  	// another remote transaction.
    80  	ErrTxPoolOverflow = errors.New("txpool is full")
    81  
    82  	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
    83  	// with a different one without the required price bump.
    84  	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
    85  
    86  	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
    87  	// maximum allowance of the current block.
    88  	ErrGasLimit = errors.New("exceeds block gas limit")
    89  
    90  	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
    91  	// transaction with a negative value.
    92  	ErrNegativeValue = errors.New("negative value")
    93  
    94  	// ErrOversizedData is returned if the input data of a transaction is greater
    95  	// than some meaningful limit a user might use. This is not a consensus error
     96  	// making the transaction invalid, but rather a DoS protection.
    97  	ErrOversizedData = errors.New("oversized data")
    98  )
    99  
   100  var (
   101  	evictionInterval      = time.Minute      // Time interval to check for evictable transactions
   102  	statsReportInterval   = 8 * time.Second  // Time interval to report transaction pool stats
   103  	baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after Apricot Phase 3 is enabled
   104  )
   105  
   106  var (
   107  	// Metrics for the pending pool
   108  	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
   109  	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
   110  	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
   111  	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
   112  
   113  	// Metrics for the queued pool
   114  	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
   115  	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
   116  	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
   117  	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
   118  	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
   119  
   120  	// General tx metrics
   121  	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
   122  	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
   123  	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
   124  	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
   125  	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
   126  
   127  	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
   128  	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
   129  	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
   130  	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
   131  
   132  	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
   133  )
   134  
   135  // TxStatus is the current status of a transaction as seen by the pool.
   136  type TxStatus uint
   137  
   138  const (
   139  	TxStatusUnknown TxStatus = iota
   140  	TxStatusQueued
   141  	TxStatusPending
   142  	TxStatusIncluded
   143  )
   144  
    145  // blockChain provides the state of the blockchain and the current gas limit to
    146  // perform some pre-checks in the tx pool and for event subscribers.
   147  type blockChain interface {
   148  	CurrentBlock() *types.Block
   149  	GetBlock(hash common.Hash, number uint64) *types.Block
   150  	StateAt(root common.Hash) (*state.StateDB, error)
   151  
   152  	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
   153  }
   154  
   155  // TxPoolConfig are the configuration parameters of the transaction pool.
   156  type TxPoolConfig struct {
   157  	Locals    []common.Address // Addresses that should be treated by default as local
   158  	NoLocals  bool             // Whether local transaction handling should be disabled
   159  	Journal   string           // Journal of local transactions to survive node restarts
   160  	Rejournal time.Duration    // Time interval to regenerate the local transaction journal
   161  
   162  	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
   163  	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
   164  
   165  	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
   166  	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
   167  	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
   168  	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
   169  
    170  	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
   171  }
   172  
   173  // DefaultTxPoolConfig contains the default configurations for the transaction
   174  // pool.
   175  var DefaultTxPoolConfig = TxPoolConfig{
   176  	Journal:   "transactions.rlp",
   177  	Rejournal: time.Hour,
   178  
   179  	PriceLimit: 1,
   180  	PriceBump:  10,
   181  
   182  	AccountSlots: 16,
   183  	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
   184  	AccountQueue: 64,
   185  	GlobalQueue:  1024,
   186  
   187  	Lifetime: 3 * time.Hour,
   188  }
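
         // A usage sketch: callers typically copy DefaultTxPoolConfig and override
         // individual fields before handing the result to NewTxPool (the values
         // below are illustrative only):
         //
         //	cfg := DefaultTxPoolConfig
         //	cfg.NoLocals = true
         //	cfg.PriceLimit = 1_000_000_000 // e.g. a 1 gwei floor
         //	// NewTxPool calls sanitize() internally, clamping unworkable values.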
   189  
   190  // sanitize checks the provided user configurations and changes anything that's
   191  // unreasonable or unworkable.
   192  func (config *TxPoolConfig) sanitize() TxPoolConfig {
   193  	conf := *config
   194  	if conf.Rejournal < time.Second {
   195  		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
   196  		conf.Rejournal = time.Second
   197  	}
   198  	if conf.PriceLimit < 1 {
   199  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
   200  		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
   201  	}
   202  	if conf.PriceBump < 1 {
   203  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
   204  		conf.PriceBump = DefaultTxPoolConfig.PriceBump
   205  	}
   206  	if conf.AccountSlots < 1 {
   207  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
   208  		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
   209  	}
   210  	if conf.GlobalSlots < 1 {
   211  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
   212  		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
   213  	}
   214  	if conf.AccountQueue < 1 {
   215  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
   216  		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
   217  	}
   218  	if conf.GlobalQueue < 1 {
   219  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
   220  		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
   221  	}
   222  	if conf.Lifetime < 1 {
   223  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
   224  		conf.Lifetime = DefaultTxPoolConfig.Lifetime
   225  	}
   226  	return conf
   227  }
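
         // For instance, a zero-valued config does not survive sanitize intact;
         // the numeric fields are clamped back to workable values:
         //
         //	cfg := (&TxPoolConfig{}).sanitize()
         //	// cfg.PriceLimit == DefaultTxPoolConfig.PriceLimit
         //	// cfg.Rejournal  == time.Second (the minimum, not the default)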
   228  
   229  // TxPool contains all currently known transactions. Transactions
   230  // enter the pool when they are received from the network or submitted
   231  // locally. They exit the pool when they are included in the blockchain.
   232  //
   233  // The pool separates processable transactions (which can be applied to the
   234  // current state) and future transactions. Transactions move between those
   235  // two states over time as they are received and processed.
   236  type TxPool struct {
   237  	config      TxPoolConfig
   238  	chainconfig *params.ChainConfig
   239  	chain       blockChain
   240  	gasPrice    *big.Int
   241  	minimumFee  *big.Int
   242  	txFeed      event.Feed
   243  	headFeed    event.Feed
   244  	reorgFeed   event.Feed
   245  	scope       event.SubscriptionScope
   246  	signer      types.Signer
   247  	mu          sync.RWMutex
   248  
   249  	istanbul bool // Fork indicator whether we are in the istanbul stage.
   250  	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
   251  	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.
   252  
   253  	currentHead   *types.Header
   254  	currentState  *state.StateDB // Current state in the blockchain head
   255  	pendingNonces *txNoncer      // Pending state tracking virtual nonces
   256  	currentMaxGas uint64         // Current gas limit for transaction caps
   257  
    258  	locals  *accountSet // Set of local transactions to exempt from eviction rules
    259  	journal *txJournal  // Journal of local transactions to back up to disk
   260  
   261  	pending map[common.Address]*txList   // All currently processable transactions
   262  	queue   map[common.Address]*txList   // Queued but non-processable transactions
   263  	beats   map[common.Address]time.Time // Last heartbeat from each known account
   264  	all     *txLookup                    // All transactions to allow lookups
   265  	priced  *txPricedList                // All transactions sorted by price
   266  
   267  	chainHeadCh         chan ChainHeadEvent
   268  	chainHeadSub        event.Subscription
   269  	reqResetCh          chan *txpoolResetRequest
   270  	reqPromoteCh        chan *accountSet
   271  	queueTxEventCh      chan *types.Transaction
   272  	reorgDoneCh         chan chan struct{}
   273  	reorgShutdownCh     chan struct{} // requests shutdown of scheduleReorgLoop
   274  	generalShutdownChan chan struct{} // closed when the transaction pool is stopped. Any goroutine can listen
   275  	// to this to be notified if it should shut down.
   276  	wg sync.WaitGroup // tracks loop, scheduleReorgLoop
   277  }
   278  
   279  type txpoolResetRequest struct {
   280  	oldHead, newHead *types.Header
   281  }
   282  
   283  // NewTxPool creates a new transaction pool to gather, sort and filter inbound
   284  // transactions from the network.
   285  func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
   286  	// Sanitize the input to ensure no vulnerable gas prices are set
   287  	config = (&config).sanitize()
   288  
   289  	// Create the transaction pool with its initial settings
   290  	pool := &TxPool{
   291  		config:              config,
   292  		chainconfig:         chainconfig,
   293  		chain:               chain,
   294  		signer:              types.LatestSigner(chainconfig),
   295  		pending:             make(map[common.Address]*txList),
   296  		queue:               make(map[common.Address]*txList),
   297  		beats:               make(map[common.Address]time.Time),
   298  		all:                 newTxLookup(),
   299  		chainHeadCh:         make(chan ChainHeadEvent, chainHeadChanSize),
   300  		reqResetCh:          make(chan *txpoolResetRequest),
   301  		reqPromoteCh:        make(chan *accountSet),
   302  		queueTxEventCh:      make(chan *types.Transaction),
   303  		reorgDoneCh:         make(chan chan struct{}),
   304  		reorgShutdownCh:     make(chan struct{}),
   305  		generalShutdownChan: make(chan struct{}),
   306  		gasPrice:            new(big.Int).SetUint64(config.PriceLimit),
   307  	}
   308  	pool.locals = newAccountSet(pool.signer)
   309  	for _, addr := range config.Locals {
   310  		log.Info("Setting new local account", "address", addr)
   311  		pool.locals.add(addr)
   312  	}
   313  	pool.priced = newTxPricedList(pool.all)
   314  	pool.reset(nil, chain.CurrentBlock().Header())
   315  
   316  	// Start the reorg loop early so it can handle requests generated during journal loading.
   317  	pool.wg.Add(1)
   318  	go pool.scheduleReorgLoop()
   319  
    320  	// If local transactions and journaling are enabled, load the journal from disk
   321  	if !config.NoLocals && config.Journal != "" {
   322  		pool.journal = newTxJournal(config.Journal)
   323  
   324  		if err := pool.journal.load(pool.AddLocals); err != nil {
   325  			log.Warn("Failed to load transaction journal", "err", err)
   326  		}
   327  		if err := pool.journal.rotate(pool.local()); err != nil {
   328  			log.Warn("Failed to rotate transaction journal", "err", err)
   329  		}
   330  	}
   331  
    332  	// Subscribe to events from the blockchain and start the main event loop.
   333  	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
   334  	pool.wg.Add(1)
   335  	go pool.loop()
   336  
   337  	pool.startPeriodicFeeUpdate()
   338  
   339  	return pool
   340  }
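
         // A minimal wiring sketch, assuming a concrete blockChain implementation
         // named chain and a chain configuration named chainCfg exist:
         //
         //	pool := NewTxPool(DefaultTxPoolConfig, chainCfg, chain)
         //	defer pool.Stop() // stops the loops and closes the journal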
   341  
   342  // loop is the transaction pool's main event loop, waiting for and reacting to
   343  // outside blockchain events as well as for various reporting and transaction
   344  // eviction events.
   345  func (pool *TxPool) loop() {
   346  	defer pool.wg.Done()
   347  
   348  	var (
   349  		prevPending, prevQueued, prevStales int
   350  		// Start the stats reporting and transaction eviction tickers
   351  		report  = time.NewTicker(statsReportInterval)
   352  		evict   = time.NewTicker(evictionInterval)
   353  		journal = time.NewTicker(pool.config.Rejournal)
   354  		// Track the previous head headers for transaction reorgs
   355  		head = pool.chain.CurrentBlock()
   356  	)
   357  	defer report.Stop()
   358  	defer evict.Stop()
   359  	defer journal.Stop()
   360  
   361  	for {
   362  		select {
   363  		// Handle ChainHeadEvent
   364  		case ev := <-pool.chainHeadCh:
   365  			if ev.Block != nil {
   366  				pool.requestReset(head.Header(), ev.Block.Header())
   367  				head = ev.Block
   368  				pool.headFeed.Send(NewTxPoolHeadEvent{Block: head})
   369  			}
   370  
   371  		// System shutdown.
   372  		case <-pool.chainHeadSub.Err():
   373  			close(pool.reorgShutdownCh)
   374  			return
   375  
   376  		// Handle stats reporting ticks
   377  		case <-report.C:
   378  			pool.mu.RLock()
   379  			pending, queued := pool.stats()
   380  			stales := pool.priced.stales
   381  			pool.mu.RUnlock()
   382  
   383  			if pending != prevPending || queued != prevQueued || stales != prevStales {
   384  				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
   385  				prevPending, prevQueued, prevStales = pending, queued, stales
   386  			}
   387  
   388  		// Handle inactive account transaction eviction
   389  		case <-evict.C:
   390  			pool.mu.Lock()
   391  			for addr := range pool.queue {
   392  				// Skip local transactions from the eviction mechanism
   393  				if pool.locals.contains(addr) {
   394  					continue
   395  				}
   396  				// Any non-locals old enough should be removed
   397  				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
   398  					list := pool.queue[addr].Flatten()
   399  					for _, tx := range list {
   400  						pool.removeTx(tx.Hash(), true)
   401  					}
   402  					queuedEvictionMeter.Mark(int64(len(list)))
   403  				}
   404  			}
   405  			pool.mu.Unlock()
   406  
   407  		// Handle local transaction journal rotation
   408  		case <-journal.C:
   409  			if pool.journal != nil {
   410  				pool.mu.Lock()
   411  				if err := pool.journal.rotate(pool.local()); err != nil {
   412  					log.Warn("Failed to rotate local tx journal", "err", err)
   413  				}
   414  				pool.mu.Unlock()
   415  			}
   416  		}
   417  	}
   418  }
   419  
   420  // Stop terminates the transaction pool.
   421  func (pool *TxPool) Stop() {
   422  	// Unsubscribe all subscriptions registered from txpool
   423  	pool.scope.Close()
   424  
   425  	close(pool.generalShutdownChan)
   426  	// Unsubscribe subscriptions registered from blockchain
   427  	pool.chainHeadSub.Unsubscribe()
   428  	pool.wg.Wait()
   429  
   430  	if pool.journal != nil {
   431  		pool.journal.close()
   432  	}
   433  	log.Info("Transaction pool stopped")
   434  }
   435  
   436  // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
    437  // starts sending events to the given channel.
   438  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
   439  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   440  }
   441  
   442  // SubscribeNewHeadEvent registers a subscription of NewHeadEvent and
    443  // starts sending events to the given channel.
   444  func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- NewTxPoolHeadEvent) event.Subscription {
   445  	return pool.scope.Track(pool.headFeed.Subscribe(ch))
   446  }
   447  
   448  // SubscribeNewReorgEvent registers a subscription of NewReorgEvent and
    449  // starts sending events to the given channel.
   450  func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- NewTxPoolReorgEvent) event.Subscription {
   451  	return pool.scope.Track(pool.reorgFeed.Subscribe(ch))
   452  }
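
         // A subscription sketch: consume events until the subscription errors out
         // (the channel capacity is the caller's choice):
         //
         //	ch := make(chan NewTxsEvent, 16)
         //	sub := pool.SubscribeNewTxsEvent(ch)
         //	defer sub.Unsubscribe()
         //	for {
         //		select {
         //		case ev := <-ch:
         //			_ = ev.Txs // the newly promoted transactions
         //		case <-sub.Err():
         //			return
         //		}
         //	}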
   453  
   454  // GasPrice returns the current gas price enforced by the transaction pool.
   455  func (pool *TxPool) GasPrice() *big.Int {
   456  	pool.mu.RLock()
   457  	defer pool.mu.RUnlock()
   458  
   459  	return new(big.Int).Set(pool.gasPrice)
   460  }
   461  
   462  // SetGasPrice updates the minimum price required by the transaction pool for a
   463  // new transaction, and drops all transactions below this threshold.
   464  func (pool *TxPool) SetGasPrice(price *big.Int) {
   465  	pool.mu.Lock()
   466  	defer pool.mu.Unlock()
   467  
   468  	old := pool.gasPrice
   469  	pool.gasPrice = price
   470  	// if the min miner fee increased, remove transactions below the new threshold
   471  	if price.Cmp(old) > 0 {
   472  		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
   473  		drop := pool.all.RemotesBelowTip(price)
   474  		for _, tx := range drop {
   475  			pool.removeTx(tx.Hash(), false)
   476  		}
   477  		pool.priced.Removed(len(drop))
   478  	}
   479  
   480  	log.Info("Transaction pool price threshold updated", "price", price)
   481  }
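
         // For example, raising the floor evicts remote transactions whose tip now
         // falls below it (the value is illustrative):
         //
         //	pool.SetGasPrice(big.NewInt(2_000_000_000)) // 2 gwei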
   482  
   483  func (pool *TxPool) SetMinFee(minFee *big.Int) {
   484  	pool.mu.Lock()
   485  	defer pool.mu.Unlock()
   486  
   487  	pool.minimumFee = minFee
   488  }
   489  
   490  // Nonce returns the next nonce of an account, with all transactions executable
   491  // by the pool already applied on top.
   492  func (pool *TxPool) Nonce(addr common.Address) uint64 {
   493  	pool.mu.RLock()
   494  	defer pool.mu.RUnlock()
   495  
   496  	return pool.pendingNonces.get(addr)
   497  }
   498  
   499  // Stats retrieves the current pool stats, namely the number of pending and the
   500  // number of queued (non-executable) transactions.
   501  func (pool *TxPool) Stats() (int, int) {
   502  	pool.mu.RLock()
   503  	defer pool.mu.RUnlock()
   504  
   505  	return pool.stats()
   506  }
   507  
   508  // stats retrieves the current pool stats, namely the number of pending and the
   509  // number of queued (non-executable) transactions.
   510  func (pool *TxPool) stats() (int, int) {
   511  	pending := 0
   512  	for _, list := range pool.pending {
   513  		pending += list.Len()
   514  	}
   515  	queued := 0
   516  	for _, list := range pool.queue {
   517  		queued += list.Len()
   518  	}
   519  	return pending, queued
   520  }
   521  
   522  // Content retrieves the data content of the transaction pool, returning all the
   523  // pending as well as queued transactions, grouped by account and sorted by nonce.
   524  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   525  	pool.mu.Lock()
   526  	defer pool.mu.Unlock()
   527  
   528  	pending := make(map[common.Address]types.Transactions)
   529  	for addr, list := range pool.pending {
   530  		pending[addr] = list.Flatten()
   531  	}
   532  	queued := make(map[common.Address]types.Transactions)
   533  	for addr, list := range pool.queue {
   534  		queued[addr] = list.Flatten()
   535  	}
   536  	return pending, queued
   537  }
   538  
   539  // ContentFrom retrieves the data content of the transaction pool, returning the
   540  // pending as well as queued transactions of this address, grouped by nonce.
   541  func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
   542  	pool.mu.RLock()
   543  	defer pool.mu.RUnlock()
   544  
   545  	var pending types.Transactions
   546  	if list, ok := pool.pending[addr]; ok {
   547  		pending = list.Flatten()
   548  	}
   549  	var queued types.Transactions
   550  	if list, ok := pool.queue[addr]; ok {
   551  		queued = list.Flatten()
   552  	}
   553  	return pending, queued
   554  }
   555  
   556  // Pending retrieves all currently processable transactions, grouped by origin
   557  // account and sorted by nonce. The returned transaction set is a copy and can be
   558  // freely modified by calling code.
   559  //
    560  // The enforceTips parameter can be used to apply extra filtering to the pending
    561  // transactions, returning only those whose **effective** tip is large enough in
    562  // the next pending execution environment.
   563  func (pool *TxPool) Pending(enforceTips bool) (map[common.Address]types.Transactions, error) {
   564  	pool.mu.Lock()
   565  	defer pool.mu.Unlock()
   566  
   567  	pending := make(map[common.Address]types.Transactions)
   568  	for addr, list := range pool.pending {
   569  		txs := list.Flatten()
   570  
   571  		// If the miner requests tip enforcement, cap the lists now
   572  		if enforceTips && !pool.locals.contains(addr) {
   573  			for i, tx := range txs {
   574  				if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
   575  					txs = txs[:i]
   576  					break
   577  				}
   578  			}
   579  		}
   580  		if len(txs) > 0 {
   581  			pending[addr] = txs
   582  		}
   583  	}
   584  	return pending, nil
   585  }
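
         // A consumer-side sketch (e.g. a block producer) over the returned copy:
         //
         //	pending, _ := pool.Pending(true) // enforce effective tips for remotes
         //	for _, txs := range pending {
         //		for _, tx := range txs {
         //			_ = tx // per-account lists are nonce-ordered
         //		}
         //	}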
   586  
   587  // Locals retrieves the accounts currently considered local by the pool.
   588  func (pool *TxPool) Locals() []common.Address {
   589  	pool.mu.Lock()
   590  	defer pool.mu.Unlock()
   591  
   592  	return pool.locals.flatten()
   593  }
   594  
   595  // local retrieves all currently known local transactions, grouped by origin
   596  // account and sorted by nonce. The returned transaction set is a copy and can be
   597  // freely modified by calling code.
   598  func (pool *TxPool) local() map[common.Address]types.Transactions {
   599  	txs := make(map[common.Address]types.Transactions)
   600  	for addr := range pool.locals.accounts {
   601  		if pending := pool.pending[addr]; pending != nil {
   602  			txs[addr] = append(txs[addr], pending.Flatten()...)
   603  		}
   604  		if queued := pool.queue[addr]; queued != nil {
   605  			txs[addr] = append(txs[addr], queued.Flatten()...)
   606  		}
   607  	}
   608  	return txs
   609  }
   610  
   611  // validateTx checks whether a transaction is valid according to the consensus
   612  // rules and adheres to some heuristic limits of the local node (price and size).
   613  func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
   614  	// Accept only legacy transactions until EIP-2718/2930 activates.
   615  	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
   616  		return ErrTxTypeNotSupported
   617  	}
   618  	// Reject dynamic fee transactions until EIP-1559 activates.
   619  	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
   620  		return ErrTxTypeNotSupported
   621  	}
    622  	// Reject transactions over the defined size to prevent DoS attacks
   623  	if uint64(tx.Size()) > txMaxSize {
   624  		return ErrOversizedData
   625  	}
    626  	// Transactions can't be negative. This may never happen with RLP-decoded
    627  	// transactions, but it may occur if a transaction is created using the RPC.
   628  	if tx.Value().Sign() < 0 {
   629  		return ErrNegativeValue
   630  	}
   631  	// Ensure the transaction doesn't exceed the current block limit gas.
   632  	if txGas := tx.Gas(); pool.currentMaxGas < txGas {
   633  		return fmt.Errorf("%w: tx gas (%d) > current max gas (%d)", ErrGasLimit, txGas, pool.currentMaxGas)
   634  	}
   635  	// Sanity check for extremely large numbers
   636  	if tx.GasFeeCap().BitLen() > 256 {
   637  		return ErrFeeCapVeryHigh
   638  	}
   639  	if tx.GasTipCap().BitLen() > 256 {
   640  		return ErrTipVeryHigh
   641  	}
   642  	// Ensure gasFeeCap is greater than or equal to gasTipCap.
   643  	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
   644  		return ErrTipAboveFeeCap
   645  	}
   646  	// Make sure the transaction is signed properly.
   647  	from, err := types.Sender(pool.signer, tx)
   648  	if err != nil {
   649  		return ErrInvalidSender
   650  	}
   651  	// Drop non-local transactions under our own minimal accepted gas price or tip
   652  	if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
   653  		return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice)
   654  	}
   655  	// Drop the transaction if the gas fee cap is below the pool's minimum fee
   656  	if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 {
   657  		return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee)
   658  	}
   659  	// Ensure the transaction adheres to nonce ordering
   660  	if currentNonce, txNonce := pool.currentState.GetNonce(from), tx.Nonce(); currentNonce > txNonce {
   661  		return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)", ErrNonceTooLow, from.Hex(), currentNonce, txNonce)
   662  	}
   663  	// Transactor should have enough funds to cover the costs
   664  	// cost == V + GP * GL
   665  	if balance, cost := pool.currentState.GetBalance(from), tx.Cost(); balance.Cmp(cost) < 0 {
   666  		return fmt.Errorf("%w: address %s have (%d) want (%d)", ErrInsufficientFunds, from.Hex(), balance, cost)
   667  	}
   668  	// Ensure the transaction has more gas than the basic tx fee.
   669  	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
   670  	if err != nil {
   671  		return err
   672  	}
   673  	if txGas := tx.Gas(); txGas < intrGas {
   674  		return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas)
   675  	}
   676  	return nil
   677  }
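
         // A worked example for the balance check above (cost == V + GP * GL): a
         // transfer of 1,000 wei with a 21,000 gas limit and a 100 wei fee cap
         // must be backed by at least 1,000 + 100*21,000 = 2,101,000 wei.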
   678  
   679  // add validates a transaction and inserts it into the non-executable queue for later
   680  // pending promotion and execution. If the transaction is a replacement for an already
   681  // pending or queued one, it overwrites the previous transaction if its price is higher.
   682  //
    683  // If a newly added transaction is marked as local, its sending account will be
    684  // added to the allowlist, preventing any associated transactions from being dropped
   685  // out of the pool due to pricing constraints.
   686  func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
   687  	// If the transaction is already known, discard it
   688  	hash := tx.Hash()
   689  	if pool.all.Get(hash) != nil {
   690  		log.Trace("Discarding already known transaction", "hash", hash)
   691  		knownTxMeter.Mark(1)
   692  		return false, ErrAlreadyKnown
   693  	}
    694  	// Determine the local flag. If the transaction comes from a local source, or
    695  	// from the network with a sender previously marked as local, treat it as local.
   696  	isLocal := local || pool.locals.containsTx(tx)
   697  
   698  	// If the transaction fails basic validation, discard it
   699  	if err := pool.validateTx(tx, isLocal); err != nil {
   700  		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
   701  		invalidTxMeter.Mark(1)
   702  		return false, err
   703  	}
   704  	// If the transaction pool is full, discard underpriced transactions
   705  	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
   706  		// If the new transaction is underpriced, don't accept it
   707  		if !isLocal && pool.priced.Underpriced(tx) {
   708  			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   709  			underpricedTxMeter.Mark(1)
   710  			return false, ErrUnderpriced
   711  		}
    712  		// The new transaction is better than our worst ones, so make room for it.
    713  		// If it's a local transaction, forcibly discard all available transactions.
    714  		// Otherwise, if we can't make enough room for the new one, abort the operation.
   715  		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
   716  
    717  		// Special case: we still can't make room for the new remote one.
   718  		if !isLocal && !success {
   719  			log.Trace("Discarding overflown transaction", "hash", hash)
   720  			overflowedTxMeter.Mark(1)
   721  			return false, ErrTxPoolOverflow
   722  		}
   723  		// Kick out the underpriced remote transactions.
   724  		for _, tx := range drop {
   725  			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
   726  			underpricedTxMeter.Mark(1)
   727  			pool.removeTx(tx.Hash(), false)
   728  		}
   729  	}
   730  	// Try to replace an existing transaction in the pending pool
   731  	from, _ := types.Sender(pool.signer, tx) // already validated
   732  	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
   733  		// Nonce already pending, check if required price bump is met
   734  		inserted, old := list.Add(tx, pool.config.PriceBump)
   735  		if !inserted {
   736  			pendingDiscardMeter.Mark(1)
   737  			return false, ErrReplaceUnderpriced
   738  		}
   739  		// New transaction is better, replace old one
   740  		if old != nil {
   741  			pool.all.Remove(old.Hash())
   742  			pool.priced.Removed(1)
   743  			pendingReplaceMeter.Mark(1)
   744  		}
   745  		pool.all.Add(tx, isLocal)
   746  		pool.priced.Put(tx, isLocal)
   747  		pool.journalTx(from, tx)
   748  		pool.queueTxEvent(tx)
   749  		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
   750  
   751  		// Successful promotion, bump the heartbeat
   752  		pool.beats[from] = time.Now()
   753  		return old != nil, nil
   754  	}
   755  	// New transaction isn't replacing a pending one, push into queue
   756  	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
   757  	if err != nil {
   758  		return false, err
   759  	}
   760  	// Mark local addresses and journal local transactions
   761  	if local && !pool.locals.contains(from) {
   762  		log.Info("Setting new local account", "address", from)
   763  		pool.locals.add(from)
    764  		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local for the first time.
   765  	}
   766  	if isLocal {
   767  		localGauge.Inc(1)
   768  	}
   769  	pool.journalTx(from, tx)
   770  
   771  	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
   772  	return replaced, nil
   773  }
   774  
   775  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   776  //
   777  // Note, this method assumes the pool lock is held!
   778  func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
   779  	// Try to insert the transaction into the future queue
   780  	from, _ := types.Sender(pool.signer, tx) // already validated
   781  	if pool.queue[from] == nil {
   782  		pool.queue[from] = newTxList(false)
   783  	}
   784  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
   785  	if !inserted {
   786  		// An older transaction was better, discard this
   787  		queuedDiscardMeter.Mark(1)
   788  		return false, ErrReplaceUnderpriced
   789  	}
   790  	// Discard any previous transaction and mark this
   791  	if old != nil {
   792  		pool.all.Remove(old.Hash())
   793  		pool.priced.Removed(1)
   794  		queuedReplaceMeter.Mark(1)
   795  	} else {
   796  		// Nothing was replaced, bump the queued counter
   797  		queuedGauge.Inc(1)
   798  	}
    799  	// If the transaction isn't in the lookup set but is expected to be there,
    800  	// log an error.
   801  	if pool.all.Get(hash) == nil && !addAll {
   802  		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
   803  	}
   804  	if addAll {
   805  		pool.all.Add(tx, local)
   806  		pool.priced.Put(tx, local)
   807  	}
    808  	// If we have never recorded a heartbeat for this account, do it now.
   809  	if _, exist := pool.beats[from]; !exist {
   810  		pool.beats[from] = time.Now()
   811  	}
   812  	return old != nil, nil
   813  }
   814  
   815  // journalTx adds the specified transaction to the local disk journal if it is
   816  // deemed to have been sent from a local account.
   817  func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
   818  	// Only journal if it's enabled and the transaction is local
   819  	if pool.journal == nil || !pool.locals.contains(from) {
   820  		return
   821  	}
   822  	if err := pool.journal.insert(tx); err != nil {
   823  		log.Warn("Failed to journal local transaction", "err", err)
   824  	}
   825  }
   826  
   827  // promoteTx adds a transaction to the pending (processable) list of transactions
    828  // and returns whether it was inserted or an older transaction was better.
   829  //
   830  // Note, this method assumes the pool lock is held!
   831  func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
   832  	// Try to insert the transaction into the pending queue
   833  	if pool.pending[addr] == nil {
   834  		pool.pending[addr] = newTxList(true)
   835  	}
   836  	list := pool.pending[addr]
   837  
   838  	inserted, old := list.Add(tx, pool.config.PriceBump)
   839  	if !inserted {
   840  		// An older transaction was better, discard this
   841  		pool.all.Remove(hash)
   842  		pool.priced.Removed(1)
   843  		pendingDiscardMeter.Mark(1)
   844  		return false
   845  	}
   846  	// Otherwise discard any previous transaction and mark this
   847  	if old != nil {
   848  		pool.all.Remove(old.Hash())
   849  		pool.priced.Removed(1)
   850  		pendingReplaceMeter.Mark(1)
   851  	} else {
   852  		// Nothing was replaced, bump the pending counter
   853  		pendingGauge.Inc(1)
   854  	}
   855  	// Set the potentially new pending nonce and notify any subsystems of the new tx
   856  	pool.pendingNonces.set(addr, tx.Nonce()+1)
   857  
   858  	// Successful promotion, bump the heartbeat
   859  	pool.beats[addr] = time.Now()
   860  	return true
   861  }
   862  
    863  // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
    864  // senders as local ones and ensuring that they bypass the local pricing constraints.
   865  //
   866  // This method is used to add transactions from the RPC API and performs synchronous pool
   867  // reorganization and event propagation.
   868  func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
   869  	return pool.addTxs(txs, !pool.config.NoLocals, true)
   870  }
   871  
   872  // AddLocal enqueues a single local transaction into the pool if it is valid. This is
    873  // a convenience wrapper around AddLocals.
   874  func (pool *TxPool) AddLocal(tx *types.Transaction) error {
   875  	errs := pool.AddLocals([]*types.Transaction{tx})
   876  	return errs[0]
   877  }
   878  
   879  // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
   880  // senders are not among the locally tracked ones, full pricing constraints will apply.
   881  //
   882  // This method is used to add transactions from the p2p network and does not wait for pool
   883  // reorganization and internal event propagation.
   884  func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
   885  	return pool.addTxs(txs, false, false)
   886  }
   887  
    888  // AddRemotesSync is like AddRemotes, but waits for pool reorganization. It is primarily used in tests.
   889  func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
   890  	return pool.addTxs(txs, false, true)
   891  }
   892  
    893  // addRemoteSync is like AddRemotes with a single transaction, but waits for pool reorganization. It is primarily used in tests.
   894  func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
   895  	errs := pool.AddRemotesSync([]*types.Transaction{tx})
   896  	return errs[0]
   897  }
   898  
   899  // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
   900  // wrapper around AddRemotes.
   901  //
   902  // Deprecated: use AddRemotes
   903  func (pool *TxPool) AddRemote(tx *types.Transaction) error {
   904  	errs := pool.AddRemotes([]*types.Transaction{tx})
   905  	return errs[0]
   906  }
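
         // A submission sketch: local transactions are journaled and exempt from
         // the pricing floors, remote ones are not (signedTx and batch are assumed
         // to be built elsewhere):
         //
         //	if err := pool.AddLocal(signedTx); err != nil {
         //		log.Warn("Local transaction rejected", "err", err)
         //	}
         //	errs := pool.AddRemotes(batch) // one error slot per input transaction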
   907  
   908  // addTxs attempts to queue a batch of transactions if they are valid.
   909  func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
   910  	// Filter out known ones without obtaining the pool lock or recovering signatures
   911  	var (
   912  		errs = make([]error, len(txs))
   913  		news = make([]*types.Transaction, 0, len(txs))
   914  	)
   915  	for i, tx := range txs {
   916  		// If the transaction is known, pre-set the error slot
   917  		if pool.all.Get(tx.Hash()) != nil {
   918  			errs[i] = ErrAlreadyKnown
   919  			knownTxMeter.Mark(1)
   920  			continue
   921  		}
   922  		// Exclude transactions with invalid signatures as soon as
   923  		// possible and cache senders in transactions before
   924  		// obtaining lock
   925  		_, err := types.Sender(pool.signer, tx)
   926  		if err != nil {
   927  			errs[i] = ErrInvalidSender
   928  			invalidTxMeter.Mark(1)
   929  			continue
   930  		}
   931  		// Accumulate all unknown transactions for deeper processing
   932  		news = append(news, tx)
   933  	}
   934  	if len(news) == 0 {
   935  		return errs
   936  	}
   937  
    938  	// Process all the new transactions and merge any errors into the original slice
   939  	pool.mu.Lock()
   940  	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
   941  	pool.mu.Unlock()
   942  
   943  	var nilSlot = 0
   944  	for _, err := range newErrs {
   945  		for errs[nilSlot] != nil {
   946  			nilSlot++
   947  		}
   948  		errs[nilSlot] = err
   949  		nilSlot++
   950  	}
   951  	// Reorg the pool internals if needed and return
   952  	done := pool.requestPromoteExecutables(dirtyAddrs)
   953  	if sync {
   954  		<-done
   955  	}
   956  	return errs
   957  }
   958  
   959  // addTxsLocked attempts to queue a batch of transactions if they are valid.
   960  // The transaction pool lock must be held.
   961  func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
   962  	dirty := newAccountSet(pool.signer)
   963  	errs := make([]error, len(txs))
   964  	for i, tx := range txs {
   965  		replaced, err := pool.add(tx, local)
   966  		errs[i] = err
   967  		if err == nil && !replaced {
   968  			dirty.addTx(tx)
   969  		}
   970  	}
   971  	validTxMeter.Mark(int64(len(dirty.accounts)))
   972  	return errs, dirty
   973  }
   974  
   975  // Status returns the status (unknown/pending/queued) of a batch of transactions
   976  // identified by their hashes.
   977  func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
   978  	status := make([]TxStatus, len(hashes))
   979  	for i, hash := range hashes {
   980  		tx := pool.Get(hash)
   981  		if tx == nil {
   982  			continue
   983  		}
   984  		from, _ := types.Sender(pool.signer, tx) // already validated
   985  		pool.mu.RLock()
   986  		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
   987  			status[i] = TxStatusPending
   988  		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
   989  			status[i] = TxStatusQueued
   990  		}
    991  		// implicit else: the tx may have been included in a block between
   992  		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
   993  		pool.mu.RUnlock()
   994  	}
   995  	return status
   996  }
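
         // A status-check sketch (txHash is assumed to be known to the caller):
         //
         //	switch pool.Status([]common.Hash{txHash})[0] {
         //	case TxStatusPending:
         //		// executable now
         //	case TxStatusQueued:
         //		// gapped nonce or otherwise not yet executable
         //	default:
         //		// unknown to the pool, possibly already included in a block
         //	}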
   997  
   998  // Get returns a transaction if it is contained in the pool and nil otherwise.
   999  func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
  1000  	return pool.all.Get(hash)
  1001  }
  1002  
   1003  // Has reports whether the txpool has a transaction cached with the given
   1004  // hash.
  1005  func (pool *TxPool) Has(hash common.Hash) bool {
  1006  	return pool.all.Get(hash) != nil
  1007  }
  1008  
  1009  // removeTx removes a single transaction from the queue, moving all subsequent
  1010  // transactions back to the future queue.
  1011  func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
  1012  	// Fetch the transaction we wish to delete
  1013  	tx := pool.all.Get(hash)
  1014  	if tx == nil {
  1015  		return
  1016  	}
  1017  	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
  1018  
  1019  	// Remove it from the list of known transactions
  1020  	pool.all.Remove(hash)
  1021  	if outofbound {
  1022  		pool.priced.Removed(1)
  1023  	}
  1024  	if pool.locals.contains(addr) {
  1025  		localGauge.Dec(1)
  1026  	}
  1027  	// Remove the transaction from the pending lists and reset the account nonce
  1028  	if pending := pool.pending[addr]; pending != nil {
  1029  		if removed, invalids := pending.Remove(tx); removed {
  1030  			// If no more pending transactions are left, remove the list
  1031  			if pending.Empty() {
  1032  				delete(pool.pending, addr)
  1033  			}
  1034  			// Postpone any invalidated transactions
  1035  			for _, tx := range invalids {
  1036  				// Internal shuffle shouldn't touch the lookup set.
  1037  				pool.enqueueTx(tx.Hash(), tx, false, false)
  1038  			}
  1039  			// Update the account nonce if needed
  1040  			pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1041  			// Reduce the pending counter
  1042  			pendingGauge.Dec(int64(1 + len(invalids)))
  1043  			return
  1044  		}
  1045  	}
  1046  	// Transaction is in the future queue
  1047  	if future := pool.queue[addr]; future != nil {
  1048  		if removed, _ := future.Remove(tx); removed {
  1049  			// Reduce the queued counter
  1050  			queuedGauge.Dec(1)
  1051  		}
  1052  		if future.Empty() {
  1053  			delete(pool.queue, addr)
  1054  			delete(pool.beats, addr)
  1055  		}
  1056  	}
  1057  }
  1058  
  1059  // requestReset requests a pool reset to the new head block.
  1060  // The returned channel is closed when the reset has occurred.
  1061  func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
  1062  	select {
  1063  	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
  1064  		return <-pool.reorgDoneCh
  1065  	case <-pool.reorgShutdownCh:
  1066  		return pool.reorgShutdownCh
  1067  	}
  1068  }
  1069  
  1070  // requestPromoteExecutables requests transaction promotion checks for the given addresses.
  1071  // The returned channel is closed when the promotion checks have occurred.
  1072  func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
  1073  	select {
  1074  	case pool.reqPromoteCh <- set:
  1075  		return <-pool.reorgDoneCh
  1076  	case <-pool.reorgShutdownCh:
  1077  		return pool.reorgShutdownCh
  1078  	}
  1079  }
  1080  
  1081  // queueTxEvent enqueues a transaction event to be sent in the next reorg run.
  1082  func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
  1083  	select {
  1084  	case pool.queueTxEventCh <- tx:
  1085  	case <-pool.reorgShutdownCh:
  1086  	}
  1087  }
  1088  
   1089  // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
   1090  // call those methods directly; instead, request that they be run using requestReset and
   1091  // requestPromoteExecutables.
  1092  func (pool *TxPool) scheduleReorgLoop() {
  1093  	defer pool.wg.Done()
  1094  
  1095  	var (
  1096  		curDone       chan struct{} // non-nil while runReorg is active
  1097  		nextDone      = make(chan struct{})
  1098  		launchNextRun bool
  1099  		reset         *txpoolResetRequest
  1100  		dirtyAccounts *accountSet
  1101  		queuedEvents  = make(map[common.Address]*txSortedMap)
  1102  	)
  1103  	for {
  1104  		// Launch next background reorg if needed
  1105  		if curDone == nil && launchNextRun {
  1106  			// Run the background reorg and announcements
  1107  			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
  1108  
  1109  			// Prepare everything for the next round of reorg
  1110  			curDone, nextDone = nextDone, make(chan struct{})
  1111  			launchNextRun = false
  1112  
  1113  			reset, dirtyAccounts = nil, nil
  1114  			queuedEvents = make(map[common.Address]*txSortedMap)
  1115  		}
  1116  
  1117  		select {
  1118  		case req := <-pool.reqResetCh:
  1119  			// Reset request: update head if request is already pending.
  1120  			if reset == nil {
  1121  				reset = req
  1122  			} else {
  1123  				reset.newHead = req.newHead
  1124  			}
  1125  			launchNextRun = true
  1126  			pool.reorgDoneCh <- nextDone
  1127  
  1128  		case req := <-pool.reqPromoteCh:
  1129  			// Promote request: update address set if request is already pending.
  1130  			if dirtyAccounts == nil {
  1131  				dirtyAccounts = req
  1132  			} else {
  1133  				dirtyAccounts.merge(req)
  1134  			}
  1135  			launchNextRun = true
  1136  			pool.reorgDoneCh <- nextDone
  1137  
  1138  		case tx := <-pool.queueTxEventCh:
  1139  			// Queue up the event, but don't schedule a reorg. It's up to the caller to
  1140  			// request one later if they want the events sent.
  1141  			addr, _ := types.Sender(pool.signer, tx)
  1142  			if _, ok := queuedEvents[addr]; !ok {
  1143  				queuedEvents[addr] = newTxSortedMap()
  1144  			}
  1145  			queuedEvents[addr].Put(tx)
  1146  
  1147  		case <-curDone:
  1148  			curDone = nil
  1149  
  1150  		case <-pool.reorgShutdownCh:
  1151  			// Wait for current run to finish.
  1152  			if curDone != nil {
  1153  				<-curDone
  1154  			}
  1155  			close(nextDone)
  1156  			return
  1157  		}
  1158  	}
  1159  }
  1160  
  1161  // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
  1162  func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
  1163  	defer close(done)
  1164  
  1165  	var promoteAddrs []common.Address
  1166  	if dirtyAccounts != nil && reset == nil {
  1167  		// Only dirty accounts need to be promoted, unless we're resetting.
  1168  		// For resets, all addresses in the tx queue will be promoted and
  1169  		// the flatten operation can be avoided.
  1170  		promoteAddrs = dirtyAccounts.flatten()
  1171  	}
  1172  	pool.mu.Lock()
  1173  	if reset != nil {
  1174  		// Reset from the old head to the new, rescheduling any reorged transactions
  1175  		pool.reset(reset.oldHead, reset.newHead)
  1176  
  1177  		// Nonces were reset, discard any events that became stale
  1178  		for addr := range events {
  1179  			events[addr].Forward(pool.pendingNonces.get(addr))
  1180  			if events[addr].Len() == 0 {
  1181  				delete(events, addr)
  1182  			}
  1183  		}
  1184  		// Reset needs promote for all addresses
  1185  		promoteAddrs = make([]common.Address, 0, len(pool.queue))
  1186  		for addr := range pool.queue {
  1187  			promoteAddrs = append(promoteAddrs, addr)
  1188  		}
  1189  	}
  1190  	// Check for pending transactions for every account that sent new ones
  1191  	promoted := pool.promoteExecutables(promoteAddrs)
  1192  
  1193  	// If a new block appeared, validate the pool of pending transactions. This will
  1194  	// remove any transaction that has been included in the block or was invalidated
  1195  	// because of another transaction (e.g. higher gas price).
  1196  	if reset != nil {
  1197  		pool.demoteUnexecutables()
  1198  		if reset.newHead != nil && pool.chainconfig.IsApricotPhase3(new(big.Int).SetUint64(reset.newHead.Time)) {
  1199  			_, baseFeeEstimate, err := dummy.CalcBaseFee(pool.chainconfig, reset.newHead, uint64(time.Now().Unix()))
  1200  			if err == nil {
  1201  				pool.priced.SetBaseFee(baseFeeEstimate)
  1202  			}
  1203  		}
  1204  	}
  1205  	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
  1206  	pool.truncatePending()
  1207  	pool.truncateQueue()
  1208  
  1209  	// Update all accounts to the latest known pending nonce
  1210  	for addr, list := range pool.pending {
  1211  		highestPending := list.LastElement()
  1212  		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
  1213  	}
  1214  	pool.mu.Unlock()
  1215  
  1216  	if reset != nil && reset.newHead != nil {
  1217  		pool.reorgFeed.Send(NewTxPoolReorgEvent{reset.newHead})
  1218  	}
  1219  
  1220  	// Notify subsystems for newly added transactions
  1221  	for _, tx := range promoted {
  1222  		addr, _ := types.Sender(pool.signer, tx)
  1223  		if _, ok := events[addr]; !ok {
  1224  			events[addr] = newTxSortedMap()
  1225  		}
  1226  		events[addr].Put(tx)
  1227  	}
  1228  	if len(events) > 0 {
  1229  		var txs []*types.Transaction
  1230  		for _, set := range events {
  1231  			txs = append(txs, set.Flatten()...)
  1232  		}
  1233  		pool.txFeed.Send(NewTxsEvent{txs})
  1234  	}
  1235  }
  1236  
  1237  // reset retrieves the current state of the blockchain and ensures the content
  1238  // of the transaction pool is valid with regard to the chain state.
  1239  func (pool *TxPool) reset(oldHead, newHead *types.Header) {
  1240  	// If we're reorging an old state, reinject all dropped transactions
  1241  	var reinject types.Transactions
  1242  
  1243  	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
  1244  		// If the reorg is too deep, avoid doing it (will happen during fast sync)
  1245  		oldNum := oldHead.Number.Uint64()
  1246  		newNum := newHead.Number.Uint64()
  1247  
  1248  		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
  1249  			log.Debug("Skipping deep transaction reorg", "depth", depth)
  1250  		} else {
  1251  			// Reorg seems shallow enough to pull in all transactions into memory
  1252  			var discarded, included types.Transactions
  1253  			var (
  1254  				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  1255  				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
  1256  			)
  1257  			if rem == nil {
  1258  				// This can happen if a setHead is performed, where we simply discard the old
  1259  				// head from the chain.
  1260  				// If that is the case, we don't have the lost transactions any more, and
  1261  				// there's nothing to add
   1262  				// If we reorged to the same or a higher number, then it's not a case of setHead
  1263  					// If we reorged to a same or higher number, then it's not a case of setHead
  1264  					log.Warn("Transaction pool reset with missing oldhead",
  1265  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1266  					return
  1267  				}
  1268  				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
  1269  				log.Debug("Skipping transaction reset caused by setHead",
  1270  					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
   1271  				// We still need to update the current state so that the lost transactions can be re-added by the user
  1272  			} else {
  1273  				for rem.NumberU64() > add.NumberU64() {
  1274  					discarded = append(discarded, rem.Transactions()...)
  1275  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1276  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1277  						return
  1278  					}
  1279  				}
  1280  				for add.NumberU64() > rem.NumberU64() {
  1281  					included = append(included, add.Transactions()...)
  1282  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1283  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1284  						return
  1285  					}
  1286  				}
  1287  				for rem.Hash() != add.Hash() {
  1288  					discarded = append(discarded, rem.Transactions()...)
  1289  					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1290  						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1291  						return
  1292  					}
  1293  					included = append(included, add.Transactions()...)
  1294  					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1295  						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1296  						return
  1297  					}
  1298  				}
  1299  				reinject = types.TxDifference(discarded, included)
  1300  			}
  1301  		}
  1302  	}
  1303  	// Initialize the internal state to the current head
  1304  	if newHead == nil {
  1305  		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
  1306  	}
  1307  	statedb, err := pool.chain.StateAt(newHead.Root)
  1308  	if err != nil {
  1309  		log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
  1310  		return
  1311  	}
  1312  	pool.currentHead = newHead
  1313  	pool.currentState = statedb
  1314  	pool.pendingNonces = newTxNoncer(statedb)
  1315  	pool.currentMaxGas = newHead.GasLimit
  1316  
  1317  	// Inject any transactions discarded due to reorgs
  1318  	log.Debug("Reinjecting stale transactions", "count", len(reinject))
  1319  	senderCacher.recover(pool.signer, reinject)
  1320  	pool.addTxsLocked(reinject, false)
  1321  
  1322  	// Update all fork indicators by the next pending block number.
  1323  	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
  1324  	pool.istanbul = pool.chainconfig.IsIstanbul(next)
  1325  
  1326  	timestamp := new(big.Int).SetUint64(newHead.Time)
  1327  	pool.eip2718 = pool.chainconfig.IsApricotPhase2(timestamp)
  1328  	pool.eip1559 = pool.chainconfig.IsApricotPhase3(timestamp)
  1329  }
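
// The reinject slice computed above is the set difference between the
// transactions dropped from the old branch and those already present on the
// new one. A hypothetical sketch of that semantics (txA, txB and txC stand in
// for illustrative *types.Transaction values):
//
//	discarded := types.Transactions{txA, txB, txC} // mined on the old branch
//	included := types.Transactions{txB}            // re-mined on the new branch
//	reinject := types.TxDifference(discarded, included)
//	// reinject == {txA, txC}: only transactions missing from the new branch
//	// are put back into the pool.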
  1330  
  1331  // promoteExecutables moves transactions that have become processable from the
  1332  // future queue to the set of pending transactions. During this process, all
  1333  // invalidated transactions (low nonce, low balance) are deleted.
  1334  func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
  1335  	// Track the promoted transactions to broadcast them at once
  1336  	var promoted []*types.Transaction
  1337  
  1338  	// Iterate over all accounts and promote any executable transactions
  1339  	for _, addr := range accounts {
  1340  		list := pool.queue[addr]
  1341  		if list == nil {
  1342  			continue // Just in case someone calls with a non-existent account
  1343  		}
  1344  		// Drop all transactions that are deemed too old (low nonce)
  1345  		forwards := list.Forward(pool.currentState.GetNonce(addr))
  1346  		for _, tx := range forwards {
  1347  			hash := tx.Hash()
  1348  			pool.all.Remove(hash)
  1349  		}
  1350  		log.Trace("Removed old queued transactions", "count", len(forwards))
  1351  		// Drop all transactions that are too costly (low balance or out of gas)
  1352  		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
  1353  		for _, tx := range drops {
  1354  			hash := tx.Hash()
  1355  			pool.all.Remove(hash)
  1356  		}
  1357  		log.Trace("Removed unpayable queued transactions", "count", len(drops))
  1358  		queuedNofundsMeter.Mark(int64(len(drops)))
  1359  
  1360  		// Gather all executable transactions and promote them
  1361  		readies := list.Ready(pool.pendingNonces.get(addr))
  1362  		for _, tx := range readies {
  1363  			hash := tx.Hash()
  1364  			if pool.promoteTx(addr, hash, tx) {
  1365  				promoted = append(promoted, tx)
  1366  			}
  1367  		}
  1368  		log.Trace("Promoted queued transactions", "count", len(promoted))
  1369  		queuedGauge.Dec(int64(len(readies)))
  1370  
  1371  		// Drop all transactions over the allowed limit
  1372  		var caps types.Transactions
  1373  		if !pool.locals.contains(addr) {
  1374  			caps = list.Cap(int(pool.config.AccountQueue))
  1375  			for _, tx := range caps {
  1376  				hash := tx.Hash()
  1377  				pool.all.Remove(hash)
  1378  				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
  1379  			}
  1380  			queuedRateLimitMeter.Mark(int64(len(caps)))
  1381  		}
  1382  		// Mark all the items dropped as removed
  1383  		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
  1384  		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1385  		if pool.locals.contains(addr) {
  1386  			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1387  		}
  1388  		// Delete the entire queue entry if it became empty.
  1389  		if list.Empty() {
  1390  			delete(pool.queue, addr)
  1391  			delete(pool.beats, addr)
  1392  		}
  1393  	}
  1394  	return promoted
  1395  }
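
// Per account, promotion is three passes over the queued txList. A
// hypothetical, simplified sketch (assuming the newTxList helper from
// tx_list.go; all nonce, balance and gas values are illustrative):
//
//	stateNonce, pendingNonce := uint64(5), uint64(5)
//	balance, maxGas := big.NewInt(1e18), uint64(8_000_000)
//	list := newTxList(false)                 // queued txs of one account
//	olds := list.Forward(stateNonce)         // 1. drop already-mined nonces
//	drops, _ := list.Filter(balance, maxGas) // 2. drop txs the account cannot pay for
//	ready := list.Ready(pendingNonce)        // 3. take the gapless executable run
//	// olds and drops leave the pool entirely; ready is fed to promoteTx and,
//	// on success, broadcast to peers.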
  1396  
  1397  // truncatePending removes transactions from the pending queue if the pool is above the
  1398  // pending limit. The algorithm tries to reduce the transaction count by an approximately
  1399  // equal number for all accounts with many pending transactions.
  1400  func (pool *TxPool) truncatePending() {
  1401  	pending := uint64(0)
  1402  	for _, list := range pool.pending {
  1403  		pending += uint64(list.Len())
  1404  	}
  1405  	if pending <= pool.config.GlobalSlots {
  1406  		return
  1407  	}
  1408  
  1409  	pendingBeforeCap := pending
  1410  	// Assemble a spam order to penalize large transactors first
  1411  	spammers := prque.New(nil)
  1412  	for addr, list := range pool.pending {
  1413  		// Only evict transactions from high rollers
  1414  		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
  1415  			spammers.Push(addr, int64(list.Len()))
  1416  		}
  1417  	}
  1418  	// Gradually drop transactions from offenders
  1419  	offenders := []common.Address{}
  1420  	for pending > pool.config.GlobalSlots && !spammers.Empty() {
  1421  		// Retrieve the next offender (local addresses were never added to the heap)
  1422  		offender, _ := spammers.Pop()
  1423  		offenders = append(offenders, offender.(common.Address))
  1424  
  1425  		// Equalize transaction counts until all are equal or below the threshold
  1426  		if len(offenders) > 1 {
  1427  			// Calculate the equalization threshold for all current offenders
  1428  			threshold := pool.pending[offender.(common.Address)].Len()
  1429  
  1430  			// Iteratively reduce all offenders until below limit or threshold reached
  1431  			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
  1432  				for i := 0; i < len(offenders)-1; i++ {
  1433  					list := pool.pending[offenders[i]]
  1434  
  1435  					caps := list.Cap(list.Len() - 1)
  1436  					for _, tx := range caps {
  1437  						// Drop the transaction from the global pools too
  1438  						hash := tx.Hash()
  1439  						pool.all.Remove(hash)
  1440  
  1441  						// Update the account nonce to the dropped transaction
  1442  						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
  1443  						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1444  					}
  1445  					pool.priced.Removed(len(caps))
  1446  					pendingGauge.Dec(int64(len(caps)))
  1447  					if pool.locals.contains(offenders[i]) {
  1448  						localGauge.Dec(int64(len(caps)))
  1449  					}
  1450  					pending--
  1451  				}
  1452  			}
  1453  		}
  1454  	}
  1455  
  1456  	// If still above threshold, reduce to limit or min allowance
  1457  	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
  1458  		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
  1459  			for _, addr := range offenders {
  1460  				list := pool.pending[addr]
  1461  
  1462  				caps := list.Cap(list.Len() - 1)
  1463  				for _, tx := range caps {
  1464  					// Drop the transaction from the global pools too
  1465  					hash := tx.Hash()
  1466  					pool.all.Remove(hash)
  1467  
  1468  					// Update the account nonce to the dropped transaction
  1469  					pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1470  					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1471  				}
  1472  				pool.priced.Removed(len(caps))
  1473  				pendingGauge.Dec(int64(len(caps)))
  1474  				if pool.locals.contains(addr) {
  1475  					localGauge.Dec(int64(len(caps)))
  1476  				}
  1477  				pending--
  1478  			}
  1479  		}
  1480  	}
  1481  	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
  1482  }
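
// The loops above never remove more than one transaction per offender per
// round, so the heaviest senders are walked down toward each other before any
// single account is drained. A toy model of that fairness property on plain
// counters (all numbers are illustrative; limit plays the role of GlobalSlots):
//
//	counts := []int{7, 5, 3} // pending txs per non-local offender
//	total, limit := 15, 10
//	for total > limit {
//		max := 0 // always shave the currently largest sender
//		for i, c := range counts {
//			if c > counts[max] {
//				max = i
//			}
//		}
//		counts[max]--
//		total--
//	}
//	// counts ends as {3, 4, 3}: the reductions were spread across the big
//	// senders instead of emptying one account outright.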
  1483  
  1484  // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
  1485  func (pool *TxPool) truncateQueue() {
  1486  	queued := uint64(0)
  1487  	for _, list := range pool.queue {
  1488  		queued += uint64(list.Len())
  1489  	}
  1490  	if queued <= pool.config.GlobalQueue {
  1491  		return
  1492  	}
  1493  
  1494  	// Sort all accounts with queued transactions by heartbeat
  1495  	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
  1496  	for addr := range pool.queue {
  1497  		if !pool.locals.contains(addr) { // don't drop locals
  1498  			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
  1499  		}
  1500  	}
  1501  	sort.Sort(addresses)
  1502  
  1503  	// Drop transactions until the total is below the limit or only locals remain
  1504  	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
  1505  		addr := addresses[len(addresses)-1]
  1506  		list := pool.queue[addr.address]
  1507  
  1508  		addresses = addresses[:len(addresses)-1]
  1509  
  1510  		// Drop all transactions if they are fewer than the overflow
  1511  		if size := uint64(list.Len()); size <= drop {
  1512  			for _, tx := range list.Flatten() {
  1513  				pool.removeTx(tx.Hash(), true)
  1514  			}
  1515  			drop -= size
  1516  			queuedRateLimitMeter.Mark(int64(size))
  1517  			continue
  1518  		}
  1519  		// Otherwise drop only the last few transactions
  1520  		txs := list.Flatten()
  1521  		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
  1522  			pool.removeTx(txs[i].Hash(), true)
  1523  			drop--
  1524  			queuedRateLimitMeter.Mark(1)
  1525  		}
  1526  	}
  1527  }
  1528  
  1529  // demoteUnexecutables removes invalid and processed transactions from the pool's
  1530  // executable/pending queue; any subsequent transactions that become unexecutable
  1531  // are moved back into the future queue.
  1532  //
  1533  // Note: transactions are not marked as removed in the priced list because re-heaping
  1534  // is always explicitly triggered by SetBaseFee, and it would be unnecessary and wasteful
  1535  // to trigger a re-heap in this function.
  1536  func (pool *TxPool) demoteUnexecutables() {
  1537  	// Iterate over all accounts and demote any non-executable transactions
  1538  	for addr, list := range pool.pending {
  1539  		nonce := pool.currentState.GetNonce(addr)
  1540  
  1541  		// Drop all transactions that are deemed too old (low nonce)
  1542  		olds := list.Forward(nonce)
  1543  		for _, tx := range olds {
  1544  			hash := tx.Hash()
  1545  			pool.all.Remove(hash)
  1546  			log.Trace("Removed old pending transaction", "hash", hash)
  1547  		}
  1548  		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
  1549  		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
  1550  		for _, tx := range drops {
  1551  			hash := tx.Hash()
  1552  			log.Trace("Removed unpayable pending transaction", "hash", hash)
  1553  			pool.all.Remove(hash)
  1554  		}
  1555  		pendingNofundsMeter.Mark(int64(len(drops)))
  1556  
  1557  		for _, tx := range invalids {
  1558  			hash := tx.Hash()
  1559  			log.Trace("Demoting pending transaction", "hash", hash)
  1560  
  1561  			// Internal shuffle shouldn't touch the lookup set.
  1562  			pool.enqueueTx(hash, tx, false, false)
  1563  		}
  1564  		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1565  		if pool.locals.contains(addr) {
  1566  			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1567  		}
  1568  		// If there's a gap in front, alert (should never happen) and postpone all transactions
  1569  		if list.Len() > 0 && list.txs.Get(nonce) == nil {
  1570  			gapped := list.Cap(0)
  1571  			for _, tx := range gapped {
  1572  				hash := tx.Hash()
  1573  				log.Error("Demoting invalidated transaction", "hash", hash)
  1574  
  1575  				// Internal shuffle shouldn't touch the lookup set.
  1576  				pool.enqueueTx(hash, tx, false, false)
  1577  			}
  1578  			// This might happen in a reorg, so log it to the metering
  1579  			pendingGauge.Dec(int64(len(gapped)))
  1580  		}
  1581  		// Delete the entire pending entry if it became empty.
  1582  		if list.Empty() {
  1583  			delete(pool.pending, addr)
  1584  		}
  1585  	}
  1586  }
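
// Demotion mirrors promotion in reverse. A hypothetical sketch of the txList
// calls involved for one account (assuming the newTxList helper from
// tx_list.go; strict mode is what the pending lists use, and all values are
// illustrative):
//
//	stateNonce := uint64(5)
//	balance, maxGas := big.NewInt(1e18), uint64(8_000_000)
//	list := newTxList(true)
//	olds := list.Forward(stateNonce)                // mined txs vanish for good
//	drops, invalids := list.Filter(balance, maxGas) // drops are unpayable
//	// In strict mode, every transaction with a nonce above the lowest dropped
//	// one lands in invalids; those are re-queued via enqueueTx rather than
//	// discarded, since they may become executable again later.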
  1587  
  1588  func (pool *TxPool) startPeriodicFeeUpdate() {
  1589  	if pool.chainconfig.ApricotPhase3BlockTimestamp == nil {
  1590  		return
  1591  	}
  1592  
  1593  	// Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
  1594  	// when starting up in ApricotPhase3 before the base fee is updated.
  1595  	if time.Now().After(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0)) {
  1596  		pool.updateBaseFee()
  1597  	}
  1598  
  1599  	pool.wg.Add(1)
  1600  	go pool.periodicBaseFeeUpdate()
  1601  }
  1602  
  1603  func (pool *TxPool) periodicBaseFeeUpdate() {
  1604  	defer pool.wg.Done()
  1605  
  1606  	// Sleep until it's time to start the periodic base fee update, or until the tx pool is shutting down
  1607  	select {
  1608  	case <-time.After(time.Until(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0))):
  1609  	case <-pool.generalShutdownChan:
  1610  		return // Return early if shutting down
  1611  	}
  1612  
  1613  	// Update the base fee every [baseFeeUpdateInterval]
  1614  	// and shutdown when [generalShutdownChan] is closed by Stop()
  1615  	for {
  1616  		select {
  1617  		case <-time.After(baseFeeUpdateInterval):
  1618  			pool.updateBaseFee()
  1619  		case <-pool.generalShutdownChan:
  1620  			return
  1621  		}
  1622  	}
  1623  }
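
// The loop above is the usual Go pattern for a periodic job with a clean
// shutdown path: one select case fires the work on a timer, the other exits
// when a shutdown channel is closed. A minimal self-contained sketch (all
// names hypothetical):
//
//	func periodic(interval time.Duration, work func(), shutdown <-chan struct{}) {
//		for {
//			select {
//			case <-time.After(interval):
//				work()
//			case <-shutdown:
//				return // closing the channel releases every waiter at once
//			}
//		}
//	}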
  1624  
  1625  func (pool *TxPool) updateBaseFee() {
  1626  	pool.mu.Lock()
  1627  	defer pool.mu.Unlock()
  1628  
  1629  	_, baseFeeEstimate, err := dummy.CalcBaseFee(pool.chainconfig, pool.currentHead, uint64(time.Now().Unix()))
  1630  	if err == nil {
  1631  		pool.priced.SetBaseFee(baseFeeEstimate)
  1632  	} else {
  1633  		log.Error("failed to update base fee", "currentHead", pool.currentHead.Hash(), "err", err)
  1634  	}
  1635  }
  1636  
  1637  // addressByHeartbeat is an account address tagged with its last activity timestamp.
  1638  type addressByHeartbeat struct {
  1639  	address   common.Address
  1640  	heartbeat time.Time
  1641  }
  1642  
  1643  type addressesByHeartbeat []addressByHeartbeat
  1644  
  1645  func (a addressesByHeartbeat) Len() int           { return len(a) }
  1646  func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  1647  func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
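
// addressesByHeartbeat implements sort.Interface, ordering accounts by their
// recorded heartbeat, earliest first. A hypothetical sketch (addresses and
// timestamps are illustrative):
//
//	addrs := addressesByHeartbeat{
//		{common.HexToAddress("0x01"), time.Unix(2000, 0)},
//		{common.HexToAddress("0x02"), time.Unix(1000, 0)},
//	}
//	sort.Sort(addrs)
//	// addrs[0] is now the 0x02 entry (earliest heartbeat); truncateQueue
//	// consumes the sorted slice from its end.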
  1648  
  1649  // accountSet is simply a set of addresses to check for existence, and a signer
  1650  // capable of deriving addresses from transactions.
  1651  type accountSet struct {
  1652  	accounts map[common.Address]struct{}
  1653  	signer   types.Signer
  1654  	cache    *[]common.Address
  1655  }
  1656  
  1657  // newAccountSet creates a new address set with an associated signer for sender
  1658  // derivations.
  1659  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  1660  	as := &accountSet{
  1661  		accounts: make(map[common.Address]struct{}),
  1662  		signer:   signer,
  1663  	}
  1664  	for _, addr := range addrs {
  1665  		as.add(addr)
  1666  	}
  1667  	return as
  1668  }
  1669  
  1670  // contains checks if a given address is contained within the set.
  1671  func (as *accountSet) contains(addr common.Address) bool {
  1672  	_, exist := as.accounts[addr]
  1673  	return exist
  1674  }
  1675  
  1676  func (as *accountSet) empty() bool {
  1677  	return len(as.accounts) == 0
  1678  }
  1679  
  1680  // containsTx checks if the sender of a given tx is within the set. If the sender
  1681  // cannot be derived, this method returns false.
  1682  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  1683  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1684  		return as.contains(addr)
  1685  	}
  1686  	return false
  1687  }
  1688  
  1689  // add inserts a new address into the set to track.
  1690  func (as *accountSet) add(addr common.Address) {
  1691  	as.accounts[addr] = struct{}{}
  1692  	as.cache = nil
  1693  }
  1694  
  1695  // addTx adds the sender of tx into the set.
  1696  func (as *accountSet) addTx(tx *types.Transaction) {
  1697  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1698  		as.add(addr)
  1699  	}
  1700  }
  1701  
  1702  // flatten returns the list of addresses within this set, also caching it for later
  1703  // reuse. The returned slice should not be changed!
  1704  func (as *accountSet) flatten() []common.Address {
  1705  	if as.cache == nil {
  1706  		accounts := make([]common.Address, 0, len(as.accounts))
  1707  		for account := range as.accounts {
  1708  			accounts = append(accounts, account)
  1709  		}
  1710  		as.cache = &accounts
  1711  	}
  1712  	return *as.cache
  1713  }
  1714  
  1715  // merge adds all addresses from the 'other' set into 'as'.
  1716  func (as *accountSet) merge(other *accountSet) {
  1717  	for addr := range other.accounts {
  1718  		as.accounts[addr] = struct{}{}
  1719  	}
  1720  	as.cache = nil
  1721  }
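
// A hypothetical usage sketch for accountSet (the chain ID and address are
// illustrative, and tx stands in for any *types.Transaction):
//
//	signer := types.NewEIP155Signer(big.NewInt(1))
//	as := newAccountSet(signer, common.HexToAddress("0xab"))
//	as.contains(common.HexToAddress("0xab")) // true
//	as.addTx(tx)                             // adds tx's sender, if derivable
//	addrs := as.flatten()                    // cached slice; must not be mutated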
  1722  
  1723  // txLookup is used internally by TxPool to track transactions while allowing
  1724  // lookup without mutex contention.
  1725  //
  1726  // Note, although this type is properly protected against concurrent access, it
  1727  // is **not** a type that should ever be mutated or even exposed outside of the
  1728  // transaction pool, since its internal state is tightly coupled with the pool's
  1729  // internal mechanisms. The sole purpose of the type is to permit out-of-bound
  1730  // peeking into the pool in TxPool.Get without having to acquire the widely scoped
  1731  // TxPool.mu mutex.
  1732  //
  1733  // This lookup set also tracks the notion of "local transactions", which is
  1734  // useful for building upper-level structures.
  1735  type txLookup struct {
  1736  	slots   int
  1737  	lock    sync.RWMutex
  1738  	locals  map[common.Hash]*types.Transaction
  1739  	remotes map[common.Hash]*types.Transaction
  1740  }
  1741  
  1742  // newTxLookup returns a new txLookup structure.
  1743  func newTxLookup() *txLookup {
  1744  	return &txLookup{
  1745  		locals:  make(map[common.Hash]*types.Transaction),
  1746  		remotes: make(map[common.Hash]*types.Transaction),
  1747  	}
  1748  }
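
// A hypothetical lifecycle sketch for txLookup (tx stands in for any
// *types.Transaction):
//
//	t := newTxLookup()
//	t.Add(tx, false)    // tracked as remote; its slots are accounted for
//	t.Get(tx.Hash())    // returns tx, checking locals before remotes
//	t.Remove(tx.Hash()) // releases the slots again
//	t.Count()           // back to 0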
  1749  
  1750  // Range calls f on each key and value present in the map. The callback should
  1751  // return whether the iteration should continue. Callers need to specify which
  1752  // set (or both) to iterate.
  1753  func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
  1754  	t.lock.RLock()
  1755  	defer t.lock.RUnlock()
  1756  
  1757  	if local {
  1758  		for key, value := range t.locals {
  1759  			if !f(key, value, true) {
  1760  				return
  1761  			}
  1762  		}
  1763  	}
  1764  	if remote {
  1765  		for key, value := range t.remotes {
  1766  			if !f(key, value, false) {
  1767  				return
  1768  			}
  1769  		}
  1770  	}
  1771  }
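
// A hypothetical sketch of using Range to collect every remote hash; returning
// true from the callback keeps the iteration going, false stops it early:
//
//	var hashes []common.Hash
//	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
//		hashes = append(hashes, hash)
//		return true
//	}, false, true) // locals=false, remotes=true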
  1772  
  1773  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1774  func (t *txLookup) Get(hash common.Hash) *types.Transaction {
  1775  	t.lock.RLock()
  1776  	defer t.lock.RUnlock()
  1777  
  1778  	if tx := t.locals[hash]; tx != nil {
  1779  		return tx
  1780  	}
  1781  	return t.remotes[hash]
  1782  }
  1783  
  1784  // GetLocal returns a transaction if it exists in the lookup, or nil if not found.
  1785  func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction {
  1786  	t.lock.RLock()
  1787  	defer t.lock.RUnlock()
  1788  
  1789  	return t.locals[hash]
  1790  }
  1791  
  1792  // GetRemote returns a transaction if it exists in the lookup, or nil if not found.
  1793  func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction {
  1794  	t.lock.RLock()
  1795  	defer t.lock.RUnlock()
  1796  
  1797  	return t.remotes[hash]
  1798  }
  1799  
  1800  // Count returns the current number of transactions in the lookup.
  1801  func (t *txLookup) Count() int {
  1802  	t.lock.RLock()
  1803  	defer t.lock.RUnlock()
  1804  
  1805  	return len(t.locals) + len(t.remotes)
  1806  }
  1807  
  1808  // LocalCount returns the current number of local transactions in the lookup.
  1809  func (t *txLookup) LocalCount() int {
  1810  	t.lock.RLock()
  1811  	defer t.lock.RUnlock()
  1812  
  1813  	return len(t.locals)
  1814  }
  1815  
  1816  // RemoteCount returns the current number of remote transactions in the lookup.
  1817  func (t *txLookup) RemoteCount() int {
  1818  	t.lock.RLock()
  1819  	defer t.lock.RUnlock()
  1820  
  1821  	return len(t.remotes)
  1822  }
  1823  
  1824  // Slots returns the current number of slots used in the lookup.
  1825  func (t *txLookup) Slots() int {
  1826  	t.lock.RLock()
  1827  	defer t.lock.RUnlock()
  1828  
  1829  	return t.slots
  1830  }
  1831  
  1832  // Add adds a transaction to the lookup.
  1833  func (t *txLookup) Add(tx *types.Transaction, local bool) {
  1834  	t.lock.Lock()
  1835  	defer t.lock.Unlock()
  1836  
  1837  	t.slots += numSlots(tx)
  1838  	slotsGauge.Update(int64(t.slots))
  1839  
  1840  	if local {
  1841  		t.locals[tx.Hash()] = tx
  1842  	} else {
  1843  		t.remotes[tx.Hash()] = tx
  1844  	}
  1845  }
  1846  
  1847  // Remove removes a transaction from the lookup.
  1848  func (t *txLookup) Remove(hash common.Hash) {
  1849  	t.lock.Lock()
  1850  	defer t.lock.Unlock()
  1851  
  1852  	tx, ok := t.locals[hash]
  1853  	if !ok {
  1854  		tx, ok = t.remotes[hash]
  1855  	}
  1856  	if !ok {
  1857  		log.Error("No transaction found to be deleted", "hash", hash)
  1858  		return
  1859  	}
  1860  	t.slots -= numSlots(tx)
  1861  	slotsGauge.Update(int64(t.slots))
  1862  
  1863  	delete(t.locals, hash)
  1864  	delete(t.remotes, hash)
  1865  }
  1866  
  1867  // RemoteToLocals migrates the transactions that belong to the given locals into
  1868  // the locals set. It is assumed that the locals set is thread-safe.
  1869  func (t *txLookup) RemoteToLocals(locals *accountSet) int {
  1870  	t.lock.Lock()
  1871  	defer t.lock.Unlock()
  1872  
  1873  	var migrated int
  1874  	for hash, tx := range t.remotes {
  1875  		if locals.containsTx(tx) {
  1876  			t.locals[hash] = tx
  1877  			delete(t.remotes, hash)
  1878  			migrated += 1
  1879  		}
  1880  	}
  1881  	return migrated
  1882  }
  1883  
  1884  // RemotesBelowTip finds all remote transactions below the given tip threshold.
  1885  func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
  1886  	found := make(types.Transactions, 0, 128)
  1887  	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
  1888  		if tx.GasTipCapIntCmp(threshold) < 0 {
  1889  			found = append(found, tx)
  1890  		}
  1891  		return true
  1892  	}, false, true) // Only iterate remotes
  1893  	return found
  1894  }
  1895  
  1896  // numSlots calculates the number of slots needed for a single transaction.
  1897  func numSlots(tx *types.Transaction) int {
  1898  	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
  1899  }
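
// numSlots is a ceiling division: sizes are rounded up to whole 32KiB slots.
// For example (sizes illustrative):
//
//	(10*1024 + txSlotSize - 1) / txSlotSize   // 10KiB  -> 1 slot
//	(40*1024 + txSlotSize - 1) / txSlotSize   // 40KiB  -> 2 slots
//	(txMaxSize + txSlotSize - 1) / txSlotSize // 128KiB -> 4 slots (the max)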