github.com/ethereum/go-ethereum@v1.16.1/core/txpool/txpool.go

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package txpool
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  
    25  	"github.com/ethereum/go-ethereum/common"
    26  	"github.com/ethereum/go-ethereum/core"
    27  	"github.com/ethereum/go-ethereum/core/state"
    28  	"github.com/ethereum/go-ethereum/core/types"
    29  	"github.com/ethereum/go-ethereum/event"
    30  	"github.com/ethereum/go-ethereum/log"
    31  	"github.com/ethereum/go-ethereum/params"
    32  )
    33  
    34  // TxStatus is the current status of a transaction as seen by the pool.
    35  type TxStatus uint
    36  
    37  const (
    38  	TxStatusUnknown TxStatus = iota
    39  	TxStatusQueued
    40  	TxStatusPending
    41  	TxStatusIncluded
    42  )
    43  
    44  // BlockChain defines the minimal set of methods needed to back a tx pool with
    45  // a chain. It exists so that tests can swap the live chain out for a mock.
    46  type BlockChain interface {
    47  	// Config retrieves the chain's fork configuration.
    48  	Config() *params.ChainConfig
    49  
    50  	// CurrentBlock returns the current head of the chain.
    51  	CurrentBlock() *types.Header
    52  
    53  	// SubscribeChainHeadEvent subscribes to new blocks being added to the chain.
    54  	SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
    55  
    56  	// StateAt returns a state database for a given root hash (generally the head).
    57  	StateAt(root common.Hash) (*state.StateDB, error)
    58  }
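
// What follows is an illustrative sketch, not part of the original file: a minimal
// test double satisfying BlockChain with a canned head and state. The mockChain
// name and its fields are hypothetical and exist only to show the interface shape.
type mockChain struct {
	config  *params.ChainConfig
	head    *types.Header
	statedb *state.StateDB
	feed    event.Feed
}

func (c *mockChain) Config() *params.ChainConfig { return c.config }
func (c *mockChain) CurrentBlock() *types.Header { return c.head }

func (c *mockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return c.statedb, nil // same state regardless of root, good enough for a mock
}

func (c *mockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
	return c.feed.Subscribe(ch) // tests can push new heads through the feed
}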
    59  
    60  // TxPool is an aggregator for various transaction specific pools, collectively
    61  // tracking all the transactions deemed interesting by the node. Transactions
    62  // enter the pool when they are received from the network or submitted locally.
    63  // They exit the pool when they are included in the blockchain or evicted due to
    64  // resource constraints.
    65  type TxPool struct {
    66  	subpools []SubPool // List of subpools for specialized transaction handling
    67  	chain    BlockChain
    68  	signer   types.Signer
    69  
    70  	stateLock sync.RWMutex   // The lock for protecting state instance
    71  	state     *state.StateDB // Current state at the blockchain head
    72  
    73  	subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
    74  	quit chan chan error         // Quit channel to tear down the head updater
    75  	term chan struct{}           // Termination channel to detect a closed pool
    76  
    77  	sync chan chan error // Testing / simulator channel to block until internal reset is done
    78  }
    79  
    80  // New creates a new transaction pool to gather, sort and filter inbound
    81  // transactions from the network.
    82  func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
    83  	// Retrieve the current head so that all subpools and this main coordinator
    84  	// pool will have the same starting state, even if the chain moves forward
    85  	// during initialization.
    86  	head := chain.CurrentBlock()
    87  
    88  	// Initialize the state with the head block, or fall back to an empty one
    89  	// in case the head state is not available (might occur when the node is
    90  	// not fully synced).
    91  	statedb, err := chain.StateAt(head.Root)
    92  	if err != nil {
    93  		statedb, err = chain.StateAt(types.EmptyRootHash)
    94  	}
    95  	if err != nil {
    96  		return nil, err
    97  	}
    98  	pool := &TxPool{
    99  		subpools: subpools,
   100  		chain:    chain,
   101  		signer:   types.LatestSigner(chain.Config()),
   102  		state:    statedb,
   103  		quit:     make(chan chan error),
   104  		term:     make(chan struct{}),
   105  		sync:     make(chan chan error),
   106  	}
   107  	reserver := NewReservationTracker()
   108  	for i, subpool := range subpools {
   109  		if err := subpool.Init(gasTip, head, reserver.NewHandle(i)); err != nil {
   110  			for j := i - 1; j >= 0; j-- {
   111  				subpools[j].Close()
   112  			}
   113  			return nil, err
   114  		}
   115  	}
   116  	go pool.loop(head)
   117  	return pool, nil
   118  }
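
// An illustrative sketch, not part of the original file, of wiring New together.
// The chain backend and the subpools (e.g. the legacy and blob pools) are assumed
// to be constructed elsewhere; the 1 gwei minimum tip is an arbitrary example value.
func exampleNewPool(chain BlockChain, subpools []SubPool) (*TxPool, error) {
	pool, err := New(1_000_000_000, chain, subpools)
	if err != nil {
		return nil, err
	}
	// The caller is responsible for invoking pool.Close() on shutdown so that the
	// head-update loop and all subpools are torn down cleanly.
	return pool, nil
}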
   119  
   120  // Close terminates the transaction pool and all its subpools.
   121  func (p *TxPool) Close() error {
   122  	var errs []error
   123  
   124  	// Terminate the reset loop and wait for it to finish
   125  	errc := make(chan error)
   126  	p.quit <- errc
   127  	if err := <-errc; err != nil {
   128  		errs = append(errs, err)
   129  	}
   130  	// Terminate each subpool
   131  	for _, subpool := range p.subpools {
   132  		if err := subpool.Close(); err != nil {
   133  			errs = append(errs, err)
   134  		}
   135  	}
   136  	// Unsubscribe anyone still listening for tx events
   137  	p.subs.Close()
   138  
   139  	if len(errs) > 0 {
   140  		return fmt.Errorf("subpool close errors: %v", errs)
   141  	}
   142  	return nil
   143  }
   144  
   145  // loop is the transaction pool's main event loop, waiting for and reacting to
   146  // outside blockchain events as well as for various reporting and transaction
   147  // eviction events.
   148  func (p *TxPool) loop(head *types.Header) {
   149  	// Close the termination marker when the pool stops
   150  	defer close(p.term)
   151  
   152  	// Subscribe to chain head events to trigger subpool resets
   153  	var (
   154  		newHeadCh  = make(chan core.ChainHeadEvent)
   155  		newHeadSub = p.chain.SubscribeChainHeadEvent(newHeadCh)
   156  	)
   157  	defer newHeadSub.Unsubscribe()
   158  
   159  	// Track the previous and current head to feed to an idle reset
   160  	var (
   161  		oldHead = head
   162  		newHead = oldHead
   163  	)
   164  	// Consume chain head events and start resets when none is running
   165  	var (
   166  		resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently
   167  		resetDone = make(chan *types.Header)
   168  
   169  		resetForced bool       // Whether a forced reset was requested, only used in simulator mode
   170  		resetWaiter chan error // Channel waiting on a forced reset, only used in simulator mode
   171  	)
    172  	// Notify any live reset waiter so it does not block if the txpool is closed.
   173  	defer func() {
   174  		if resetWaiter != nil {
   175  			resetWaiter <- errors.New("pool already terminated")
   176  			resetWaiter = nil
   177  		}
   178  	}()
   179  	var errc chan error
   180  	for errc == nil {
    181  		// Something interesting might have happened, so run a reset if one is
    182  		// needed but none is currently running. The resetter will run on its own
    183  		// goroutine to allow chain head events to keep being consumed.
   184  		if newHead != oldHead || resetForced {
   185  			// Try to inject a busy marker and start a reset if successful
   186  			select {
   187  			case resetBusy <- struct{}{}:
    188  				// Update the statedb to the new chain head. The head state may be
    189  				// unavailable if the initial state sync has not yet completed.
   190  				if statedb, err := p.chain.StateAt(newHead.Root); err != nil {
   191  					log.Error("Failed to reset txpool state", "err", err)
   192  				} else {
   193  					p.stateLock.Lock()
   194  					p.state = statedb
   195  					p.stateLock.Unlock()
   196  				}
   197  
   198  				// Busy marker injected, start a new subpool reset
   199  				go func(oldHead, newHead *types.Header) {
   200  					for _, subpool := range p.subpools {
   201  						subpool.Reset(oldHead, newHead)
   202  					}
   203  					select {
   204  					case resetDone <- newHead:
   205  					case <-p.term:
   206  					}
   207  				}(oldHead, newHead)
   208  
    209  				// If the reset operation was explicitly requested, consider it
    210  				// fulfilled and drop the request marker. If it was not, this is
    211  				// a no-op.
   212  				resetForced = false
   213  
   214  			default:
   215  				// Reset already running, wait until it finishes.
   216  				//
   217  				// Note, this will not drop any forced reset request. If a forced
   218  				// reset was requested, but we were busy, then when the currently
   219  				// running reset finishes, a new one will be spun up.
   220  			}
   221  		}
   222  		// Wait for the next chain head event or a previous reset finish
   223  		select {
   224  		case event := <-newHeadCh:
   225  			// Chain moved forward, store the head for later consumption
   226  			newHead = event.Header
   227  
   228  		case head := <-resetDone:
   229  			// Previous reset finished, update the old head and allow a new reset
   230  			oldHead = head
   231  			<-resetBusy
   232  
   233  			// If someone is waiting for a reset to finish, notify them, unless
   234  			// the forced op is still pending. In that case, wait another round
   235  			// of resets.
   236  			if resetWaiter != nil && !resetForced {
   237  				resetWaiter <- nil
   238  				resetWaiter = nil
   239  			}
   240  
   241  		case errc = <-p.quit:
   242  			// Termination requested, break out on the next loop round
   243  
   244  		case syncc := <-p.sync:
   245  			// Transaction pool is running inside a simulator, and we are about
   246  			// to create a new block. Request a forced sync operation to ensure
   247  			// that any running reset operation finishes to make block imports
   248  			// deterministic. On top of that, run a new reset operation to make
   249  			// transaction insertions deterministic instead of being stuck in a
   250  			// queue waiting for a reset.
   251  			resetForced = true
   252  			resetWaiter = syncc
   253  		}
   254  	}
   255  	// Notify the closer of termination (no error possible for now)
   256  	errc <- nil
   257  }
   258  
   259  // SetGasTip updates the minimum gas tip required by the transaction pool for a
   260  // new transaction, and drops all transactions below this threshold.
   261  func (p *TxPool) SetGasTip(tip *big.Int) {
   262  	for _, subpool := range p.subpools {
   263  		subpool.SetGasTip(tip)
   264  	}
   265  }
   266  
    267  // Has reports whether the pool has a transaction cached with the
    268  // given hash.
   269  func (p *TxPool) Has(hash common.Hash) bool {
   270  	for _, subpool := range p.subpools {
   271  		if subpool.Has(hash) {
   272  			return true
   273  		}
   274  	}
   275  	return false
   276  }
   277  
   278  // Get returns a transaction if it is contained in the pool, or nil otherwise.
   279  func (p *TxPool) Get(hash common.Hash) *types.Transaction {
   280  	for _, subpool := range p.subpools {
   281  		if tx := subpool.Get(hash); tx != nil {
   282  			return tx
   283  		}
   284  	}
   285  	return nil
   286  }
   287  
    288  // GetRLP returns an RLP-encoded transaction if it is contained in the pool.
   289  func (p *TxPool) GetRLP(hash common.Hash) []byte {
   290  	for _, subpool := range p.subpools {
   291  		encoded := subpool.GetRLP(hash)
   292  		if len(encoded) != 0 {
   293  			return encoded
   294  		}
   295  	}
   296  	return nil
   297  }
   298  
    299  // GetMetadata returns the type and size of the transaction with the given
    300  // hash.
   301  func (p *TxPool) GetMetadata(hash common.Hash) *TxMetadata {
   302  	for _, subpool := range p.subpools {
   303  		if meta := subpool.GetMetadata(hash); meta != nil {
   304  			return meta
   305  		}
   306  	}
   307  	return nil
   308  }
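
// A hedged sketch, not part of the original file, showing how the lookup helpers
// compose: Has for a cheap existence check, Get for the full transaction and
// GetMetadata when only the type and size are of interest. exampleLookup is a
// hypothetical helper.
func exampleLookup(pool *TxPool, hash common.Hash) {
	if !pool.Has(hash) {
		log.Info("Transaction not tracked by any subpool", "hash", hash)
		return
	}
	if tx := pool.Get(hash); tx != nil {
		log.Info("Transaction found", "hash", hash, "type", tx.Type())
	}
	if meta := pool.GetMetadata(hash); meta != nil {
		log.Info("Transaction metadata available", "hash", hash)
	}
}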
   309  
    310  // Add enqueues a batch of transactions into the pool if they are valid. Due
    311  // to the large transaction churn, Add may postpone fully integrating a tx
    312  // until a later point, batching multiple ones together.
   313  //
   314  // Note, if sync is set the method will block until all internal maintenance
   315  // related to the add is finished. Only use this during tests for determinism.
   316  func (p *TxPool) Add(txs []*types.Transaction, sync bool) []error {
    317  	// Split the input transactions between the subpools. It shouldn't really
    318  	// happen that we receive merged batches, but better to handle them
    319  	// gracefully than to return strange errors.
   320  	//
   321  	// We also need to track how the transactions were split across the subpools,
   322  	// so we can piece back the returned errors into the original order.
   323  	txsets := make([][]*types.Transaction, len(p.subpools))
   324  	splits := make([]int, len(txs))
   325  
   326  	for i, tx := range txs {
    327  		// Mark this transaction as belonging to no subpool
   328  		splits[i] = -1
   329  
   330  		// Try to find a subpool that accepts the transaction
   331  		for j, subpool := range p.subpools {
   332  			if subpool.Filter(tx) {
   333  				txsets[j] = append(txsets[j], tx)
   334  				splits[i] = j
   335  				break
   336  			}
   337  		}
   338  	}
   339  	// Add the transactions split apart to the individual subpools and piece
   340  	// back the errors into the original sort order.
   341  	errsets := make([][]error, len(p.subpools))
   342  	for i := 0; i < len(p.subpools); i++ {
   343  		errsets[i] = p.subpools[i].Add(txsets[i], sync)
   344  	}
   345  	errs := make([]error, len(txs))
   346  	for i, split := range splits {
    347  		// If no subpool accepted the transaction, mark it as unsupported
   348  		if split == -1 {
   349  			errs[i] = fmt.Errorf("%w: received type %d", core.ErrTxTypeNotSupported, txs[i].Type())
   350  			continue
   351  		}
   352  		// Find which subpool handled it and pull in the corresponding error
   353  		errs[i] = errsets[split][0]
   354  		errsets[split] = errsets[split][1:]
   355  	}
   356  	return errs
   357  }
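
// An illustrative sketch, not part of the original file, of submitting a batch and
// interpreting the per-transaction errors, which Add reports in the same order as
// the input regardless of how the batch was split across subpools. exampleAdd is a
// hypothetical helper.
func exampleAdd(pool *TxPool, txs []*types.Transaction) {
	for i, err := range pool.Add(txs, false) {
		switch {
		case errors.Is(err, core.ErrTxTypeNotSupported):
			log.Warn("No subpool accepts this transaction type", "hash", txs[i].Hash())
		case err != nil:
			log.Warn("Transaction rejected by pool", "hash", txs[i].Hash(), "err", err)
		}
	}
}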
   358  
   359  // Pending retrieves all currently processable transactions, grouped by origin
   360  // account and sorted by nonce.
   361  //
   362  // The transactions can also be pre-filtered by the dynamic fee components to
   363  // reduce allocations and load on downstream subsystems.
   364  func (p *TxPool) Pending(filter PendingFilter) map[common.Address][]*LazyTransaction {
   365  	txs := make(map[common.Address][]*LazyTransaction)
   366  	for _, subpool := range p.subpools {
   367  		for addr, set := range subpool.Pending(filter) {
   368  			txs[addr] = set
   369  		}
   370  	}
   371  	return txs
   372  }
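
// A hedged sketch, not part of the original file: draining the pending set with the
// zero PendingFilter, which requests everything. The filter's fields can instead
// pre-filter by fee components before a block builder touches the transactions.
// examplePendingCount is a hypothetical helper.
func examplePendingCount(pool *TxPool) int {
	var count int
	for addr, lazies := range pool.Pending(PendingFilter{}) {
		log.Debug("Pending transactions", "account", addr, "count", len(lazies))
		count += len(lazies)
	}
	return count
}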
   373  
   374  // SubscribeTransactions registers a subscription for new transaction events,
   375  // supporting feeding only newly seen or also resurrected transactions.
   376  func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
   377  	subs := make([]event.Subscription, len(p.subpools))
   378  	for i, subpool := range p.subpools {
   379  		subs[i] = subpool.SubscribeTransactions(ch, reorgs)
   380  	}
   381  	return p.subs.Track(event.JoinSubscriptions(subs...))
   382  }
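
// An illustrative sketch, not part of the original file, of consuming the joined
// transaction feed; with reorgs=false only transactions seen for the first time are
// delivered. exampleSubscribe is a hypothetical helper.
func exampleSubscribe(pool *TxPool) {
	ch := make(chan core.NewTxsEvent, 128)
	sub := pool.SubscribeTransactions(ch, false)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			log.Debug("New pool transactions", "count", len(ev.Txs))
		case err := <-sub.Err():
			log.Debug("Transaction subscription ended", "err", err)
			return
		}
	}
}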
   383  
   384  // PoolNonce returns the next nonce of an account, with all transactions executable
   385  // by the pool already applied on top.
   386  func (p *TxPool) PoolNonce(addr common.Address) uint64 {
    387  	// Since (for now) accounts are unique to subpools, only one pool will have
    388  	// (at most) a non-state nonce. To avoid stateful lookups, just return the
    389  	// highest nonce for now.
   390  	var nonce uint64
   391  	for _, subpool := range p.subpools {
   392  		if next := subpool.Nonce(addr); nonce < next {
   393  			nonce = next
   394  		}
   395  	}
   396  	return nonce
   397  }
   398  
   399  // Nonce returns the next nonce of an account at the current chain head. Unlike
   400  // PoolNonce, this function does not account for pending executable transactions.
   401  func (p *TxPool) Nonce(addr common.Address) uint64 {
   402  	p.stateLock.RLock()
   403  	defer p.stateLock.RUnlock()
   404  
   405  	return p.state.GetNonce(addr)
   406  }
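
// A hedged sketch, not part of the original file, contrasting the two nonce views:
// Nonce reflects only the state at the chain head, whereas PoolNonce also counts
// the executable transactions the pool already holds for the account.
// exampleNonces is a hypothetical helper.
func exampleNonces(pool *TxPool, addr common.Address) {
	stateNonce := pool.Nonce(addr)    // nonce at the current chain head
	poolNonce := pool.PoolNonce(addr) // head nonce plus executable pool transactions
	log.Debug("Account nonces", "addr", addr, "state", stateNonce, "pool", poolNonce)
}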
   407  
   408  // Stats retrieves the current pool stats, namely the number of pending and the
   409  // number of queued (non-executable) transactions.
   410  func (p *TxPool) Stats() (int, int) {
   411  	var runnable, blocked int
   412  	for _, subpool := range p.subpools {
   413  		run, block := subpool.Stats()
   414  
   415  		runnable += run
   416  		blocked += block
   417  	}
   418  	return runnable, blocked
   419  }
   420  
   421  // Content retrieves the data content of the transaction pool, returning all the
   422  // pending as well as queued transactions, grouped by account and sorted by nonce.
   423  func (p *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
   424  	var (
   425  		runnable = make(map[common.Address][]*types.Transaction)
   426  		blocked  = make(map[common.Address][]*types.Transaction)
   427  	)
   428  	for _, subpool := range p.subpools {
   429  		run, block := subpool.Content()
   430  
   431  		for addr, txs := range run {
   432  			runnable[addr] = txs
   433  		}
   434  		for addr, txs := range block {
   435  			blocked[addr] = txs
   436  		}
   437  	}
   438  	return runnable, blocked
   439  }
   440  
   441  // ContentFrom retrieves the data content of the transaction pool, returning the
   442  // pending as well as queued transactions of this address, grouped by nonce.
   443  func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
   444  	for _, subpool := range p.subpools {
   445  		run, block := subpool.ContentFrom(addr)
   446  		if len(run) != 0 || len(block) != 0 {
   447  			return run, block
   448  		}
   449  	}
   450  	return []*types.Transaction{}, []*types.Transaction{}
   451  }
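
// An illustrative sketch, not part of the original file, of the inspection helpers
// as they might be used by an RPC handler reporting what the pool currently holds.
// exampleInspect is a hypothetical helper.
func exampleInspect(pool *TxPool, addr common.Address) {
	pending, queued := pool.Stats()
	log.Info("Pool status", "pending", pending, "queued", queued)

	run, block := pool.ContentFrom(addr)
	log.Info("Account content", "addr", addr, "executable", len(run), "queued", len(block))
}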
   452  
   453  // Status returns the known status (unknown/pending/queued) of a transaction
   454  // identified by its hash.
   455  func (p *TxPool) Status(hash common.Hash) TxStatus {
   456  	for _, subpool := range p.subpools {
   457  		if status := subpool.Status(hash); status != TxStatusUnknown {
   458  			return status
   459  		}
   460  	}
   461  	return TxStatusUnknown
   462  }
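
// A hedged sketch, not part of the original file, mapping a pool status onto a
// human readable string as an RPC layer might. exampleStatusString is a
// hypothetical helper.
func exampleStatusString(pool *TxPool, hash common.Hash) string {
	switch pool.Status(hash) {
	case TxStatusPending:
		return "pending"
	case TxStatusQueued:
		return "queued"
	case TxStatusIncluded:
		return "included"
	default:
		return "unknown"
	}
}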
   463  
    464  // Sync is a helper method for unit tests or simulator runs where the chain events
    465  // arrive in quick succession, without any time in between them to run the
    466  // internal background reset operations. This method will run an explicit reset
    467  // operation to ensure the pool stabilises, thus avoiding flaky behavior.
   468  //
   469  // Note, this method is only used for testing and is susceptible to DoS vectors.
   470  // In production code, the pool is meant to reset on a separate thread.
   471  func (p *TxPool) Sync() error {
   472  	sync := make(chan error)
   473  	select {
   474  	case p.sync <- sync:
   475  		return <-sync
   476  	case <-p.term:
   477  		return errors.New("pool already terminated")
   478  	}
   479  }
   480  
   481  // Clear removes all tracked txs from the subpools.
   482  //
   483  // Note, this method invokes Sync() and is only used for testing, because it is
   484  // susceptible to DoS vectors. In production code, the pool is meant to reset on
   485  // a separate thread.
   486  func (p *TxPool) Clear() {
    487  	// Invoke Sync to ensure that txs pending addition don't get added to the
    488  	// pool after the subpools have been cleared.
   489  	p.Sync()
   490  	for _, subpool := range p.subpools {
   491  		subpool.Clear()
   492  	}
   493  }
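
// A hedged sketch, not part of the original file, of how a test or simulator might
// use the synchronisation helpers between scenarios to keep results deterministic.
// exampleTestReset is a hypothetical helper.
func exampleTestReset(pool *TxPool, txs []*types.Transaction) error {
	pool.Add(txs, true) // block until the additions are fully integrated
	if err := pool.Sync(); err != nil {
		return err // pool already terminated
	}
	pool.Clear() // drop everything before the next scenario
	return nil
}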