github.com/ethereum/go-ethereum@v1.14.4-0.20240516095835-473ee8fc07a3/core/txpool/txpool.go

// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package txpool

import (
	"errors"
	"fmt"
	"math/big"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

var (
	// reservationsGaugeName is the prefix of a per-subpool address reservation
	// metric.
	//
	// This is mostly a sanity metric to ensure there's no bug that would make
	// some subpool hog all the reservations due to mis-accounting.
	reservationsGaugeName = "txpool/reservations"
)
// BlockChain defines the minimal set of methods needed to back a tx pool with
// a chain. It exists so that tests can mock out the live chain.
type BlockChain interface {
	// CurrentBlock returns the current head of the chain.
	CurrentBlock() *types.Header

	// SubscribeChainHeadEvent subscribes to new blocks being added to the chain.
	SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
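
// As an illustrative, test-oriented sketch (not part of the upstream file),
// the interface above can be satisfied by a tiny mock backed by an event.Feed,
// mirroring how core.BlockChain implements the subscription. The type and
// field names here are assumptions made for the example only.
type mockBlockChain struct {
	head *types.Header // Fixed header returned as the current chain head
	feed event.Feed    // Feed used to deliver synthetic chain head events
}

// CurrentBlock returns the mock's fixed head header.
func (c *mockBlockChain) CurrentBlock() *types.Header { return c.head }

// SubscribeChainHeadEvent subscribes the given channel to the mock's feed.
func (c *mockBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
	return c.feed.Subscribe(ch)
}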

// TxPool is an aggregator for various transaction specific pools, collectively
// tracking all the transactions deemed interesting by the node. Transactions
// enter the pool when they are received from the network or submitted locally.
// They exit the pool when they are included in the blockchain or evicted due to
// resource constraints.
type TxPool struct {
	subpools []SubPool // List of subpools for specialized transaction handling

	reservations map[common.Address]SubPool // Map with the account to pool reservations
	reserveLock  sync.Mutex                 // Lock protecting the account reservations

	subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
	quit chan chan error         // Quit channel to tear down the head updater
	term chan struct{}           // Termination channel to detect a closed pool

	sync chan chan error // Testing / simulator channel to block until internal reset is done
}

// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
	// Retrieve the current head so that all subpools and this main coordinator
	// pool will have the same starting state, even if the chain moves forward
	// during initialization.
	head := chain.CurrentBlock()

	pool := &TxPool{
		subpools:     subpools,
		reservations: make(map[common.Address]SubPool),
		quit:         make(chan chan error),
		term:         make(chan struct{}),
		sync:         make(chan chan error),
	}
	for i, subpool := range subpools {
		if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil {
			for j := i - 1; j >= 0; j-- {
				subpools[j].Close()
			}
			return nil, err
		}
	}
	go pool.loop(head, chain)
	return pool, nil
}
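
// A minimal wiring sketch, assuming the legacypool and blobpool packages
// provide the two standard subpools and that chain is the node's live
// blockchain; the exact constructor signatures may differ between releases,
// so treat this as an outline rather than copy-paste code:
//
//	legacy := legacypool.New(legacypool.DefaultConfig, chain)
//	blobs := blobpool.New(blobpool.DefaultConfig, chain)
//
//	pool, err := txpool.New(gasTip, chain, []txpool.SubPool{legacy, blobs})
//	if err != nil {
//		return err
//	}
//	defer pool.Close()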

// reserver is a method to create an address reservation callback to exclusively
// assign/deassign addresses to/from subpools. This can ensure that at any point
// in time, only a single subpool is able to manage an account, avoiding cross
// subpool eviction issues and nonce conflicts.
func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
	return func(addr common.Address, reserve bool) error {
		p.reserveLock.Lock()
		defer p.reserveLock.Unlock()

		owner, exists := p.reservations[addr]
		if reserve {
			// Double reservations are forbidden even from the same pool to
			// avoid subtle bugs in the long term.
			if exists {
				if owner == subpool {
					log.Error("pool attempted to reserve already-owned address", "address", addr)
					return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed
				}
				return ErrAlreadyReserved
			}
			p.reservations[addr] = subpool
			if metrics.Enabled {
				m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
				metrics.GetOrRegisterGauge(m, nil).Inc(1)
			}
			return nil
		}
		// Ensure subpools only attempt to unreserve their own owned addresses,
		// otherwise flag as a programming error.
		if !exists {
			log.Error("pool attempted to unreserve non-reserved address", "address", addr)
			return errors.New("address not reserved")
		}
		if subpool != owner {
			log.Error("pool attempted to unreserve non-owned address", "address", addr)
			return errors.New("address not owned")
		}
		delete(p.reservations, addr)
		if metrics.Enabled {
			m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
			metrics.GetOrRegisterGauge(m, nil).Dec(1)
		}
		return nil
	}
}
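
// The hypothetical helper below (purely illustrative, not used by the real
// subpools) shows the intended call pattern from a subpool's point of view:
// claim an address before tracking its first transaction, release it once the
// last one leaves.
func exampleReservation(reserve AddressReserver, addr common.Address) error {
	// Claim the address before accepting its first transaction.
	if err := reserve(addr, true); err != nil {
		return err // Another subpool already owns this account
	}
	// ... track the account's transactions while it has any pooled ...

	// Release the address once the last transaction is dropped or included.
	return reserve(addr, false)
}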

// Close terminates the transaction pool and all its subpools.
func (p *TxPool) Close() error {
	var errs []error

	// Terminate the reset loop and wait for it to finish
	errc := make(chan error)
	p.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}
	// Terminate each subpool
	for _, subpool := range p.subpools {
		if err := subpool.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Unsubscribe anyone still listening for tx events
	p.subs.Close()

	if len(errs) > 0 {
		return fmt.Errorf("subpool close errors: %v", errs)
	}
	return nil
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (p *TxPool) loop(head *types.Header, chain BlockChain) {
	// Close the termination marker when the pool stops
	defer close(p.term)

	// Subscribe to chain head events to trigger subpool resets
	var (
		newHeadCh  = make(chan core.ChainHeadEvent)
		newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh)
	)
	defer newHeadSub.Unsubscribe()

	// Track the previous and current head to feed to an idle reset
	var (
		oldHead = head
		newHead = oldHead
	)
	// Consume chain head events and start resets when none is running
	var (
		resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently
		resetDone = make(chan *types.Header)

		resetForced bool       // Whether a forced reset was requested, only used in simulator mode
		resetWaiter chan error // Channel waiting on a forced reset, only used in simulator mode
	)
	// Notify the live reset waiter to not block if the txpool is closed.
	defer func() {
		if resetWaiter != nil {
			resetWaiter <- errors.New("pool already terminated")
			resetWaiter = nil
		}
	}()
	var errc chan error
	for errc == nil {
		// Something interesting might have happened, run a reset if there is
		// one needed but none is running. The resetter will run on its own
		// goroutine to allow chain head events to be consumed contiguously.
		if newHead != oldHead || resetForced {
			// Try to inject a busy marker and start a reset if successful
			select {
			case resetBusy <- struct{}{}:
				// Busy marker injected, start a new subpool reset
				go func(oldHead, newHead *types.Header) {
					for _, subpool := range p.subpools {
						subpool.Reset(oldHead, newHead)
					}
					resetDone <- newHead
				}(oldHead, newHead)

				// If the reset operation was explicitly requested, consider it
				// being fulfilled and drop the request marker. If it was not,
				// this is a noop.
				resetForced = false

			default:
				// Reset already running, wait until it finishes.
				//
				// Note, this will not drop any forced reset request. If a forced
				// reset was requested, but we were busy, then when the currently
				// running reset finishes, a new one will be spun up.
			}
		}
		// Wait for the next chain head event or a previous reset finish
		select {
		case event := <-newHeadCh:
			// Chain moved forward, store the head for later consumption
			newHead = event.Block.Header()

		case head := <-resetDone:
			// Previous reset finished, update the old head and allow a new reset
			oldHead = head
			<-resetBusy

			// If someone is waiting for a reset to finish, notify them, unless
			// the forced op is still pending. In that case, wait another round
			// of resets.
			if resetWaiter != nil && !resetForced {
				resetWaiter <- nil
				resetWaiter = nil
			}

		case errc = <-p.quit:
			// Termination requested, break out on the next loop round

		case syncc := <-p.sync:
			// Transaction pool is running inside a simulator, and we are about
			// to create a new block. Request a forced sync operation to ensure
			// that any running reset operation finishes to make block imports
			// deterministic. On top of that, run a new reset operation to make
			// transaction insertions deterministic instead of being stuck in a
			// queue waiting for a reset.
			resetForced = true
			resetWaiter = syncc
		}
	}
	// Notify the closer of termination (no error possible for now)
	errc <- nil
}

// SetGasTip updates the minimum gas tip required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (p *TxPool) SetGasTip(tip *big.Int) {
	for _, subpool := range p.subpools {
		subpool.SetGasTip(tip)
	}
}

// Has reports whether the pool has a transaction cached with the given hash.
func (p *TxPool) Has(hash common.Hash) bool {
	for _, subpool := range p.subpools {
		if subpool.Has(hash) {
			return true
		}
	}
	return false
}

// Get returns a transaction if it is contained in the pool, or nil otherwise.
func (p *TxPool) Get(hash common.Hash) *types.Transaction {
	for _, subpool := range p.subpools {
		if tx := subpool.Get(hash); tx != nil {
			return tx
		}
	}
	return nil
}

// Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, Add may postpone fully integrating a
// transaction until a later point, so that multiple ones can be batched together.
func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
	// Split the input transactions between the subpools. It shouldn't really
	// happen that we receive merged batches, but it is better to handle them
	// gracefully than to fail with confusing errors.
	//
	// We also need to track how the transactions were split across the subpools,
	// so we can piece back the returned errors into the original order.
	txsets := make([][]*types.Transaction, len(p.subpools))
	splits := make([]int, len(txs))

	for i, tx := range txs {
		// Mark this transaction as belonging to no subpool
		splits[i] = -1

		// Try to find a subpool that accepts the transaction
		for j, subpool := range p.subpools {
			if subpool.Filter(tx) {
				txsets[j] = append(txsets[j], tx)
				splits[i] = j
				break
			}
		}
	}
	// Add the split transactions to the individual subpools and piece the
	// errors back together in the original sort order.
	errsets := make([][]error, len(p.subpools))
	for i := 0; i < len(p.subpools); i++ {
		errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
	}
	errs := make([]error, len(txs))
	for i, split := range splits {
		// If the transaction was rejected by all subpools, mark it unsupported
		if split == -1 {
			errs[i] = core.ErrTxTypeNotSupported
			continue
		}
		// Find which subpool handled it and pull in the corresponding error
		errs[i] = errsets[split][0]
		errsets[split] = errsets[split][1:]
	}
	return errs
}
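
// A small usage sketch (hypothetical helper): submit one locally signed
// transaction and surface its individual error from the returned slice,
// relying on the positional correspondence established above.
func exampleAdd(pool *TxPool, tx *types.Transaction) error {
	errs := pool.Add([]*types.Transaction{tx}, true, false)
	return errs[0]
}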

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
//
// The transactions can also be pre-filtered by the dynamic fee components to
// reduce allocations and load on downstream subsystems.
func (p *TxPool) Pending(filter PendingFilter) map[common.Address][]*LazyTransaction {
	txs := make(map[common.Address][]*LazyTransaction)
	for _, subpool := range p.subpools {
		for addr, set := range subpool.Pending(filter) {
			txs[addr] = set
		}
	}
	return txs
}
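
// A filtering sketch for block building, assuming the PendingFilter fields
// defined in this package and the github.com/holiman/uint256 integers used
// elsewhere in the pool; head and pool are placeholders supplied by the
// caller:
//
//	pending := pool.Pending(PendingFilter{
//		MinTip:       uint256.NewInt(params.GWei),
//		BaseFee:      uint256.MustFromBig(head.BaseFee),
//		OnlyPlainTxs: true,
//	})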

// SubscribeTransactions registers a subscription for new transaction events,
// supporting feeding only newly seen or also resurrected transactions.
func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
	subs := make([]event.Subscription, len(p.subpools))
	for i, subpool := range p.subpools {
		subs[i] = subpool.SubscribeTransactions(ch, reorgs)
	}
	return p.subs.Track(event.JoinSubscriptions(subs...))
}
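
// A consumption sketch (hypothetical helper): subscribe to newly promoted
// transactions, excluding reorg re-injections, and drain the channel until
// the subscription is torn down.
func exampleSubscribe(pool *TxPool) {
	txCh := make(chan core.NewTxsEvent, 128)
	sub := pool.SubscribeTransactions(txCh, false)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-txCh:
			log.Info("New pool transactions", "count", len(ev.Txs))
		case <-sub.Err():
			return
		}
	}
}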

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (p *TxPool) Nonce(addr common.Address) uint64 {
	// Since (for now) accounts are unique to subpools, only one pool will have
	// (at max) a non-state nonce. To avoid stateful lookups, just return the
	// highest nonce for now.
	var nonce uint64
	for _, subpool := range p.subpools {
		if next := subpool.Nonce(addr); nonce < next {
			nonce = next
		}
	}
	return nonce
}

// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (p *TxPool) Stats() (int, int) {
	var runnable, blocked int
	for _, subpool := range p.subpools {
		run, block := subpool.Stats()

		runnable += run
		blocked += block
	}
	return runnable, blocked
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (p *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
	var (
		runnable = make(map[common.Address][]*types.Transaction)
		blocked  = make(map[common.Address][]*types.Transaction)
	)
	for _, subpool := range p.subpools {
		run, block := subpool.Content()

		for addr, txs := range run {
			runnable[addr] = txs
		}
		for addr, txs := range block {
			blocked[addr] = txs
		}
	}
	return runnable, blocked
}

// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
	for _, subpool := range p.subpools {
		run, block := subpool.ContentFrom(addr)
		if len(run) != 0 || len(block) != 0 {
			return run, block
		}
	}
	return []*types.Transaction{}, []*types.Transaction{}
}

// Locals retrieves the accounts currently considered local by the pool.
func (p *TxPool) Locals() []common.Address {
	// Retrieve the locals from each subpool and deduplicate them
	locals := make(map[common.Address]struct{})
	for _, subpool := range p.subpools {
		for _, local := range subpool.Locals() {
			locals[local] = struct{}{}
		}
	}
	// Flatten and return the deduplicated local set
	flat := make([]common.Address, 0, len(locals))
	for local := range locals {
		flat = append(flat, local)
	}
	return flat
}

// Status returns the known status (unknown/pending/queued) of a transaction
// identified by its hash.
func (p *TxPool) Status(hash common.Hash) TxStatus {
	for _, subpool := range p.subpools {
		if status := subpool.Status(hash); status != TxStatusUnknown {
			return status
		}
	}
	return TxStatusUnknown
}
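
// A lookup sketch (hypothetical helper): report whether a transaction is
// still waiting in the pool in any form, queued or pending.
func examplePooled(pool *TxPool, hash common.Hash) bool {
	status := pool.Status(hash)
	return status == TxStatusQueued || status == TxStatusPending
}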

// Sync is a helper method for unit tests or simulator runs where the chain events
// are arriving in quick succession, without any time in between them to run the
// internal background reset operations. This method will run an explicit reset
// operation to ensure the pool stabilises, thus avoiding flaky behavior.
//
// Note, do not use this in production / live code. In live code, the pool is
// meant to reset on a separate thread to avoid DoS vectors.
func (p *TxPool) Sync() error {
	sync := make(chan error)
	select {
	case p.sync <- sync:
		return <-sync
	case <-p.term:
		return errors.New("pool already terminated")
	}
}
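
// A test-only sketch of the intended usage, assuming a testing.T named t in
// scope; production code should never call Sync:
//
//	if err := pool.Sync(); err != nil {
//		t.Fatalf("failed to stabilise the pool: %v", err)
//	}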