github.com/theQRL/go-zond@v0.2.1/core/txpool/txpool.go

// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package txpool

import (
	"errors"
	"fmt"
	"math/big"
	"sync"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/event"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/metrics"
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

var (
	// reservationsGaugeName is the prefix of a per-subpool address reservation
	// metric.
	//
	// This is mostly a sanity metric to ensure there's no bug that would make
	// some subpool hog all the reservations due to mis-accounting.
	reservationsGaugeName = "txpool/reservations"
)

// BlockChain defines the minimal set of methods needed to back a tx pool with
// a chain. It exists to allow mocking out the live chain in tests.
type BlockChain interface {
	// CurrentBlock returns the current head of the chain.
	CurrentBlock() *types.Header

	// SubscribeChainHeadEvent subscribes to new blocks being added to the chain.
	SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
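
// As the interface suggests, a test can back the pool with a tiny stub chain.
// A minimal sketch (the mockChain type and its fields are illustrative, not
// part of this package):
//
//	type mockChain struct {
//		head *types.Header
//		feed event.Feed
//	}
//
//	func (c *mockChain) CurrentBlock() *types.Header { return c.head }
//
//	func (c *mockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
//		return c.feed.Subscribe(ch)
//	}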

// TxPool is an aggregator for various transaction specific pools, collectively
// tracking all the transactions deemed interesting by the node. Transactions
// enter the pool when they are received from the network or submitted locally.
// They exit the pool when they are included in the blockchain or evicted due to
// resource constraints.
type TxPool struct {
	subpools []SubPool // List of subpools for specialized transaction handling

	reservations map[common.Address]SubPool // Map with the account to pool reservations
	reserveLock  sync.Mutex                 // Lock protecting the account reservations

	subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
	quit chan chan error         // Quit channel to tear down the head updater
	term chan struct{}           // Termination channel to detect a closed pool

	sync chan chan error // Testing / simulator channel to block until internal reset is done
}

// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) {
	// Retrieve the current head so that all subpools and this main coordinator
	// pool will have the same starting state, even if the chain moves forward
	// during initialization.
	head := chain.CurrentBlock()

	pool := &TxPool{
		subpools:     subpools,
		reservations: make(map[common.Address]SubPool),
		quit:         make(chan chan error),
		term:         make(chan struct{}),
		sync:         make(chan chan error),
	}
	for i, subpool := range subpools {
		if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil {
			for j := i - 1; j >= 0; j-- {
				subpools[j].Close()
			}
			return nil, err
		}
	}
	go pool.loop(head, chain)
	return pool, nil
}
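
// A caller wires the pool up with whatever subpools the node runs. A sketch of
// typical usage, assuming a legacypool-style SubPool implementation elsewhere
// in this module (the constructor and config names below are illustrative):
//
//	legacy := legacypool.New(legacypool.DefaultConfig, chain) // hypothetical constructor
//	pool, err := txpool.New(big.NewInt(1), chain, []txpool.SubPool{legacy})
//	if err != nil {
//		return err
//	}
//	defer pool.Close()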

// reserver is a method to create an address reservation callback to exclusively
// assign/deassign addresses to/from subpools. This can ensure that at any point
// in time, only a single subpool is able to manage an account, avoiding cross
// subpool eviction issues and nonce conflicts.
func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
	return func(addr common.Address, reserve bool) error {
		p.reserveLock.Lock()
		defer p.reserveLock.Unlock()

		owner, exists := p.reservations[addr]
		if reserve {
			// Double reservations are forbidden even from the same pool to
			// avoid subtle bugs in the long term.
			if exists {
				if owner == subpool {
					log.Error("pool attempted to reserve already-owned address", "address", addr)
					return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed
				}
				return ErrAlreadyReserved
			}
			p.reservations[addr] = subpool
			if metrics.Enabled {
				m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
				metrics.GetOrRegisterGauge(m, nil).Inc(1)
			}
			return nil
		}
		// Ensure subpools only attempt to unreserve their own owned addresses,
		// otherwise flag as a programming error.
		if !exists {
			log.Error("pool attempted to unreserve non-reserved address", "address", addr)
			return errors.New("address not reserved")
		}
		if subpool != owner {
			log.Error("pool attempted to unreserve non-owned address", "address", addr)
			return errors.New("address not owned")
		}
		delete(p.reservations, addr)
		if metrics.Enabled {
			m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
			metrics.GetOrRegisterGauge(m, nil).Dec(1)
		}
		return nil
	}
}
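
// From a subpool's perspective the callback is symmetric: reserve an account
// before tracking its first transaction, release it once the last one is
// dropped. A rough sketch of how an implementation might call it:
//
//	if err := reserve(addr, true); err != nil {
//		return err // some other subpool already owns this account
//	}
//	// ... track transactions from addr ...
//	_ = reserve(addr, false) // release once addr has no transactions left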

// Close terminates the transaction pool and all its subpools.
func (p *TxPool) Close() error {
	var errs []error

	// Terminate the reset loop and wait for it to finish
	errc := make(chan error)
	p.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}

	// Terminate each subpool
	for _, subpool := range p.subpools {
		if err := subpool.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Unsubscribe anyone still listening for tx events
	p.subs.Close()

	if len(errs) > 0 {
		return fmt.Errorf("subpool close errors: %v", errs)
	}
	return nil
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (p *TxPool) loop(head *types.Header, chain BlockChain) {
	// Close the termination marker when the pool stops
	defer close(p.term)

	// Subscribe to chain head events to trigger subpool resets
	var (
		newHeadCh  = make(chan core.ChainHeadEvent)
		newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh)
	)
	defer newHeadSub.Unsubscribe()

	// Track the previous and current head to feed to an idle reset
	var (
		oldHead = head
		newHead = oldHead
	)
	// Consume chain head events and start resets when none is running
	var (
		resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently
		resetDone = make(chan *types.Header)

		resetForced bool       // Whether a forced reset was requested, only used in simulator mode
		resetWaiter chan error // Channel waiting on a forced reset, only used in simulator mode
	)
	// Notify the live reset waiter to not block if the txpool is closed.
	defer func() {
		if resetWaiter != nil {
			resetWaiter <- errors.New("pool already terminated")
			resetWaiter = nil
		}
	}()
	var errc chan error
	for errc == nil {
		// Something interesting might have happened, run a reset if there is
		// one needed but none is running. The resetter will run on its own
		// goroutine to allow chain head events to be consumed contiguously.
		if newHead != oldHead || resetForced {
			// Try to inject a busy marker and start a reset if successful
			select {
			case resetBusy <- struct{}{}:
				// Busy marker injected, start a new subpool reset
				go func(oldHead, newHead *types.Header) {
					for _, subpool := range p.subpools {
						subpool.Reset(oldHead, newHead)
					}
					resetDone <- newHead
				}(oldHead, newHead)

				// If the reset operation was explicitly requested, consider it
				// being fulfilled and drop the request marker. If it was not,
				// this is a noop.
				resetForced = false
			default:
				// Reset already running, wait until it finishes.
				//
				// Note, this will not drop any forced reset request. If a forced
				// reset was requested, but we were busy, then when the currently
				// running reset finishes, a new one will be spun up.
			}
		}
		// Wait for the next chain head event or a previous reset finish
		select {
		case event := <-newHeadCh:
			// Chain moved forward, store the head for later consumption
			newHead = event.Block.Header()

		case head := <-resetDone:
			// Previous reset finished, update the old head and allow a new reset
			oldHead = head
			<-resetBusy

			// If someone is waiting for a reset to finish, notify them, unless
			// the forced op is still pending. In that case, wait another round
			// of resets.
			if resetWaiter != nil && !resetForced {
				resetWaiter <- nil
				resetWaiter = nil
			}
		case errc = <-p.quit:
			// Termination requested, break out on the next loop round
		case syncc := <-p.sync:
			// Transaction pool is running inside a simulator, and we are about
			// to create a new block. Request a forced sync operation to ensure
			// that any running reset operation finishes to make block imports
			// deterministic. On top of that, run a new reset operation to make
			// transaction insertions deterministic instead of being stuck in a
			// queue waiting for a reset.
			resetForced = true
			resetWaiter = syncc
		}
	}
	// Notify the closer of termination (no error possible for now)
	errc <- nil
}
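
// The resetBusy marker above is the common Go "try-lock via a buffered
// channel" idiom; in isolation the pattern looks roughly like this:
//
//	busy := make(chan struct{}, 1)
//	select {
//	case busy <- struct{}{}:
//		go func() {
//			defer func() { <-busy }()
//			// ... perform the reset ...
//		}()
//	default:
//		// a reset is already in flight; the request is retried next round
//	}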

// SetGasTip updates the minimum gas tip required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (p *TxPool) SetGasTip(tip *big.Int) {
	for _, subpool := range p.subpools {
		subpool.SetGasTip(tip)
	}
}

// Has reports whether the pool has a transaction cached with the given hash.
func (p *TxPool) Has(hash common.Hash) bool {
	for _, subpool := range p.subpools {
		if subpool.Has(hash) {
			return true
		}
	}
	return false
}

// Get returns a transaction if it is contained in the pool, or nil otherwise.
func (p *TxPool) Get(hash common.Hash) *types.Transaction {
	for _, subpool := range p.subpools {
		if tx := subpool.Get(hash); tx != nil {
			return tx
		}
	}
	return nil
}

// Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, add may postpone fully integrating the tx
// to a later point to batch multiple ones together.
func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
	// Split the input transactions between the subpools. It shouldn't really
	// happen that we receive merged batches, but it's better to handle them
	// gracefully than to return strange errors.
	//
	// We also need to track how the transactions were split across the subpools,
	// so we can piece back the returned errors into the original order.
	txsets := make([][]*types.Transaction, len(p.subpools))
	splits := make([]int, len(txs))

	for i, tx := range txs {
		// Mark this transaction as belonging to no subpool
		splits[i] = -1

		// Try to find a subpool that accepts the transaction
		for j, subpool := range p.subpools {
			if subpool.Filter(tx) {
				txsets[j] = append(txsets[j], tx)
				splits[i] = j
				break
			}
		}
	}
	// Add the transactions split apart to the individual subpools and piece
	// back the errors into the original sort order.
	errsets := make([][]error, len(p.subpools))
	for i := 0; i < len(p.subpools); i++ {
		errsets[i] = p.subpools[i].Add(txsets[i], local, sync)
	}
	errs := make([]error, len(txs))
	for i, split := range splits {
		// If the transaction was rejected by all subpools, mark it unsupported
		if split == -1 {
			errs[i] = core.ErrTxTypeNotSupported
			continue
		}
		// Find which subpool handled it and pull in the corresponding error
		errs[i] = errsets[split][0]
		errsets[split] = errsets[split][1:]
	}
	return errs
}
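
// Callers get back one error per input transaction, in the original order, so
// a batch submission can be checked along these lines (sketch):
//
//	errs := pool.Add(txs, false, false)
//	for i, err := range errs {
//		if err != nil {
//			log.Warn("Transaction rejected", "hash", txs[i].Hash(), "err", err)
//		}
//	}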

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
//
// The transactions can also be pre-filtered by the dynamic fee components to
// reduce allocations and load on downstream subsystems.
func (p *TxPool) Pending(filter PendingFilter) map[common.Address][]*LazyTransaction {
	txs := make(map[common.Address][]*LazyTransaction)
	for _, subpool := range p.subpools {
		for addr, set := range subpool.Pending(filter) {
			txs[addr] = set
		}
	}
	return txs
}
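
// A block producer would typically drain the result account by account in
// nonce order. A minimal sketch with a zero-value filter, assuming the
// LazyTransaction type exposes a Resolve helper as in upstream go-ethereum:
//
//	pending := pool.Pending(txpool.PendingFilter{})
//	for addr, lazies := range pending {
//		for _, lazy := range lazies {
//			if tx := lazy.Resolve(); tx != nil {
//				_ = addr // hand tx over to block building
//			}
//		}
//	}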

// SubscribeTransactions registers a subscription for new transaction events,
// joining the feeds of all the subpools into a single subscription.
func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
	subs := make([]event.Subscription, len(p.subpools))
	for i, subpool := range p.subpools {
		subs[i] = subpool.SubscribeTransactions(ch)
	}
	return p.subs.Track(event.JoinSubscriptions(subs...))
}
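
// A minimal sketch of consuming the joined feed (the Txs field mirrors the
// upstream go-ethereum event type):
//
//	ch := make(chan core.NewTxsEvent, 16)
//	sub := pool.SubscribeTransactions(ch)
//	defer sub.Unsubscribe()
//
//	for {
//		select {
//		case ev := <-ch:
//			_ = ev.Txs // newly added transactions
//		case err := <-sub.Err():
//			return err
//		}
//	}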

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (p *TxPool) Nonce(addr common.Address) uint64 {
	// Since (for now) accounts are unique to subpools, only one pool will have
	// (at max) a non-state nonce. To avoid stateful lookups, just return the
	// highest nonce for now.
	var nonce uint64
	for _, subpool := range p.subpools {
		if next := subpool.Nonce(addr); nonce < next {
			nonce = next
		}
	}
	return nonce
}

// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (p *TxPool) Stats() (int, int) {
	var runnable, blocked int
	for _, subpool := range p.subpools {
		run, block := subpool.Stats()

		runnable += run
		blocked += block
	}
	return runnable, blocked
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (p *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
	var (
		runnable = make(map[common.Address][]*types.Transaction)
		blocked  = make(map[common.Address][]*types.Transaction)
	)
	for _, subpool := range p.subpools {
		run, block := subpool.Content()

		for addr, txs := range run {
			runnable[addr] = txs
		}
		for addr, txs := range block {
			blocked[addr] = txs
		}
	}
	return runnable, blocked
}

// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
	for _, subpool := range p.subpools {
		run, block := subpool.ContentFrom(addr)
		if len(run) != 0 || len(block) != 0 {
			return run, block
		}
	}
	return []*types.Transaction{}, []*types.Transaction{}
}

// Locals retrieves the accounts currently considered local by the pool.
func (p *TxPool) Locals() []common.Address {
	// Retrieve the locals from each subpool and deduplicate them
	locals := make(map[common.Address]struct{})
	for _, subpool := range p.subpools {
		for _, local := range subpool.Locals() {
			locals[local] = struct{}{}
		}
	}
	// Flatten and return the deduplicated local set
	flat := make([]common.Address, 0, len(locals))
	for local := range locals {
		flat = append(flat, local)
	}
	return flat
}

// Status returns the known status (unknown/pending/queued) of a transaction
// identified by its hash.
func (p *TxPool) Status(hash common.Hash) TxStatus {
	for _, subpool := range p.subpools {
		if status := subpool.Status(hash); status != TxStatusUnknown {
			return status
		}
	}
	return TxStatusUnknown
}
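
// Together with Has and Get this answers lookup-style queries cheaply; a
// sketch:
//
//	switch pool.Status(hash) {
//	case txpool.TxStatusPending, txpool.TxStatusQueued:
//		tx := pool.Get(hash) // still in the pool, full body available
//		_ = tx
//	default:
//		// unknown to the pool: never seen, already included, or evicted
//	}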

// Sync is a helper method for unit tests or simulator runs where the chain events
// are arriving in quick succession, without any time in between them to run the
// internal background reset operations. This method will run an explicit reset
// operation to ensure the pool stabilises, thus avoiding flaky behavior.
//
// Note, do not use this in production / live code. In live code, the pool is
// meant to reset on a separate thread to avoid DoS vectors.
func (p *TxPool) Sync() error {
	sync := make(chan error)
	select {
	case p.sync <- sync:
		return <-sync
	case <-p.term:
		return errors.New("pool already terminated")
	}
}
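
// A test or simulator would typically interleave Sync with head events, along
// these lines (InsertBlock is a hypothetical helper on the test chain):
//
//	chain.InsertBlock(block)
//	if err := pool.Sync(); err != nil {
//		t.Fatal(err)
//	}
//	// the pool has now fully reset against the new head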