github.com/ethereum/go-ethereum@v1.16.1/core/txpool/blobpool/blobpool.go

     1  // Copyright 2022 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package blobpool implements the EIP-4844 blob transaction pool.
    18  package blobpool
    19  
    20  import (
    21  	"container/heap"
    22  	"errors"
    23  	"fmt"
    24  	"math"
    25  	"math/big"
    26  	"os"
    27  	"path/filepath"
    28  	"sort"
    29  	"sync"
    30  	"time"
    31  
    32  	"github.com/ethereum/go-ethereum/common"
    33  	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
    34  	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
    35  	"github.com/ethereum/go-ethereum/core"
    36  	"github.com/ethereum/go-ethereum/core/state"
    37  	"github.com/ethereum/go-ethereum/core/txpool"
    38  	"github.com/ethereum/go-ethereum/core/types"
    39  	"github.com/ethereum/go-ethereum/event"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/metrics"
    42  	"github.com/ethereum/go-ethereum/params"
    43  	"github.com/ethereum/go-ethereum/rlp"
    44  	"github.com/holiman/billy"
    45  	"github.com/holiman/uint256"
    46  )
    47  
    48  const (
    49  	// blobSize is the protocol constrained byte size of a single blob in a
    50  	// transaction. There can be multiple of these embedded into a single tx.
    51  	blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement
    52  
    53  	// txAvgSize is the approximate byte size of transaction metadata, used to
    54  	// avoid tiny overflows causing all txs to move a shelf higher and waste disk space.
    55  	txAvgSize = 4 * 1024
    56  
    57  	// txMaxSize is the maximum size a single transaction can have, outside
    58  	// the included blobs. Since blob transactions are pulled instead of pushed,
    59  	// and only a small amount of metadata is kept in RAM whilst the rest lives on
    60  	// disk, there is no critical limit that should be enforced. Still, capping it
    61  	// to some sane limit can never hurt.
    62  	txMaxSize = 1024 * 1024
    63  
    64  	// maxBlobsPerTx is the maximum number of blobs that a single transaction can
    65  	// carry. We choose a smaller limit than the protocol-permitted MaxBlobsPerBlock
    66  	// in order to ensure network and txpool stability.
    67  	// Note: if you increase this, validation will fail on txMaxSize.
    68  	maxBlobsPerTx = 7
    69  
    70  	// maxTxsPerAccount is the maximum number of blob transactions admitted from
    71  	// a single account. The limit is enforced to minimize the DoS potential of
    72  	// a private tx cancelling publicly propagated blobs.
    73  	//
    74  	// Note, transactions resurrected by a reorg are also subject to this limit,
    75  	// so pushing it down too aggressively might make resurrections non-functional.
    76  	maxTxsPerAccount = 16
    77  
    78  	// pendingTransactionStore is the subfolder containing the currently queued
    79  	// blob transactions.
    80  	pendingTransactionStore = "queue"
    81  
    82  	// limboedTransactionStore is the subfolder containing the currently included
    83  	// but not yet finalized transaction blobs.
    84  	limboedTransactionStore = "limbo"
    85  )
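
// Illustrative sizing only (assuming the Cancun parameters of 4096 field
// elements of 32 bytes each): a single blob weighs in at
//
//	blobSize = 4096 * 32 = 131072 bytes (128 KiB)
//
// so a transaction carrying the maximum of maxBlobsPerTx (7) blobs stores
// roughly 896 KiB of blob data on disk, plus up to txMaxSize (1 MiB) of
// transaction envelope, with txAvgSize (4 KiB) of metadata headroom folded
// into the shelf sizing so that typical txs don't spill into the next shelf.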
    86  
    87  // blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
    88  // schedule the blob transactions into the following blocks. Only ever add the
    89  // bare minimum of needed fields to keep the per-entry size down (and thus the
    90  // number of entries that fit in the same amount of memory up).
    91  type blobTxMeta struct {
    92  	hash    common.Hash   // Transaction hash to maintain the lookup table
    93  	vhashes []common.Hash // Blob versioned hashes to maintain the lookup table
    94  
    95  	id          uint64 // Storage ID in the pool's persistent store
    96  	storageSize uint32 // Byte size in the pool's persistent store
    97  	size        uint64 // RLP-encoded size of transaction including the attached blob
    98  
    99  	nonce      uint64       // Needed to prioritize inclusion order within an account
   100  	costCap    *uint256.Int // Needed to validate cumulative balance sufficiency
   101  	execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
   102  	execFeeCap *uint256.Int // Needed to validate replacement price bump
   103  	blobFeeCap *uint256.Int // Needed to validate replacement price bump
   104  	execGas    uint64       // Needed to check inclusion validity before reading the blob
   105  	blobGas    uint64       // Needed to check inclusion validity before reading the blob
   106  
   107  	basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
   108  	blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
   109  
   110  	evictionExecTip      *uint256.Int // Worst gas tip across all previous nonces
   111  	evictionExecFeeJumps float64      // Worst base fee (converted to fee jumps) across all previous nonces
   112  	evictionBlobFeeJumps float64      // Worst blob fee (converted to fee jumps) across all previous nonces
   113  }
   114  
   115  // newBlobTxMeta retrieves the indexed metadata fields from a blob transaction
   116  // and assembles a helper struct to track in memory.
   117  func newBlobTxMeta(id uint64, size uint64, storageSize uint32, tx *types.Transaction) *blobTxMeta {
   118  	meta := &blobTxMeta{
   119  		hash:        tx.Hash(),
   120  		vhashes:     tx.BlobHashes(),
   121  		id:          id,
   122  		storageSize: storageSize,
   123  		size:        size,
   124  		nonce:       tx.Nonce(),
   125  		costCap:     uint256.MustFromBig(tx.Cost()),
   126  		execTipCap:  uint256.MustFromBig(tx.GasTipCap()),
   127  		execFeeCap:  uint256.MustFromBig(tx.GasFeeCap()),
   128  		blobFeeCap:  uint256.MustFromBig(tx.BlobGasFeeCap()),
   129  		execGas:     tx.Gas(),
   130  		blobGas:     tx.BlobGas(),
   131  	}
   132  	meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
   133  	meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
   134  
   135  	return meta
   136  }
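
// illustrativeFeeJumps is an editorial sketch, not part of the pool's API: it
// shows the log1.125 "jump" metric that dynamicFeeJumps assigns above, i.e. the
// number of maximal 1559-style fee adjustments (x1.125 per block) separating
// 1 wei from the given fee. It operates on a plain float64 for readability,
// whereas the pool's own helper works on *uint256.Int values. For example,
// illustrativeFeeJumps(2e9)-illustrativeFeeJumps(1e9) ≈ 5.9, i.e. roughly six
// maximal base fee increases are needed to double a 1 gwei fee.
func illustrativeFeeJumps(fee float64) float64 {
	return math.Log(fee) / math.Log(1.125)
}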
   137  
   138  // BlobPool is the transaction pool dedicated to EIP-4844 blob transactions.
   139  //
   140  // Blob transactions are special snowflakes that are designed for a very specific
   141  // purpose (rollups) and are expected to adhere to that specific use case. These
   142  // behavioural expectations allow us to design a transaction pool that is more robust
   143  // (e.g. against tx resend issues) and more resilient to DoS attacks (e.g. replace-flush
   144  // attacks) than the generic tx pool. These improvements will also mean, however,
   145  // that we enforce a significantly more aggressive strategy on entering and exiting
   146  // the pool:
   147  //
   148  //   - Blob transactions are large. With the initial design aiming for 128KB blobs,
   149  //     we must ensure that these only traverse the network the absolute minimum
   150  //     number of times. Broadcasting to sqrt(peers) is out of the question, rather
   151  //     number of times. Broadcasting to sqrt(peers) is out of the question; rather,
   152  //     these should only ever be announced and the remote side should request them if
   153  //
   154  //   - Block blob-space is limited. With blocks being capped to a few blob txs, we
   155  //     can make use of the very low expected churn rate within the pool. Notably,
   156  //     we should be able to use a persistent disk backend for the pool, solving
   157  //     the tx resend issue that plagues the generic tx pool, as long as there's no
   158  //     artificial churn (i.e. pool wars).
   159  //
   160  //   - The purpose of blobs is layer-2s. Layer-2s are meant to use blob transactions to
   161  //     commit to their own current state, which is independent of Ethereum mainnet
   162  //     (state, txs). This means that there's no reason for blob tx cancellation or
   163  //     replacement, apart from a potential basefee / miner tip adjustment.
   164  //
   165  //   - Replacements are expensive. Given their size, propagating a replacement
   166  //     blob transaction to an existing one should be aggressively discouraged.
   167  //     Whilst generic transactions can start at 1 Wei gas cost and require a 10%
   168  //     fee bump to replace, we suggest requiring a higher min cost (e.g. 1 gwei)
   169  //     and a more aggressive bump (100%).
   170  //
   171  //   - Cancellation is prohibitive. Evicting an already propagated blob tx is a huge
   172  //     DoS vector. As such, a) replacement (higher-fee) blob txs mustn't invalidate
   173  //     already propagated (future) blob txs (cumulative fee); b) nonce-gapped blob
   174  //     txs are disallowed; c) the presence of blob transactions excludes non-blob
   175  //     transactions.
   176  //
   177  //   - Malicious cancellations are possible. Although the pool might prevent txs
   178  //     that cancel blobs, blocks might contain such transactions (malicious miner
   179  //     or flashbotter). The pool should cap the total number of blob transactions
   180  //     per account so as to prevent propagating too much data before cancelling it
   181  //     via a normal transaction. It should nonetheless be high enough to support
   182  //     resurrecting reorged transactions. Perhaps 4-16.
   183  //
   184  //   - Local txs are meaningless. Mining pools historically used local transactions
   185  //     for payouts or for backdoor deals. With 1559 in place, the basefee usually
   186  //     dominates the final price, so 0 or non-0 tip doesn't change much. Blob txs
   187  //     retain the 1559 2D gas pricing (and introduce on top a dynamic blob gas fee),
   188  //     so locality is moot. With a disk backed blob pool avoiding the resend issue,
   189  //     there's also no need to save own transactions for later.
   190  //
   191  //   - No-blob blob-txs are bad. Theoretically there's no strong reason to disallow
   192  //     blob txs containing 0 blobs. In practice, admitting such txs into the pool
   193  //     breaks the low-churn invariant as blob constraints don't apply anymore. Even
   194  //     though we could accept blocks containing such txs, a reorg would require moving
   195  //     them back into the blob pool, which can break invariants.
   196  //
   197  //   - Dropping blobs needs delay. When normal transactions are included, they
   198  //     are immediately evicted from the pool since they are contained in the
   199  //     including block. Blobs however are not included in the execution chain,
   200  //     so a mini reorg cannot re-pool "lost" blob transactions. To support reorgs,
   201  //     blobs are retained on disk until they are finalised.
   202  //
   203  //   - Blobs can arrive via flashbots. Blocks might contain blob transactions we
   204  //     have never seen on the network. Since we cannot recover them from blocks
   205  //     either, the engine_newPayload needs to give them to us, and we cache them
   206  //     until finality to support reorgs without tx losses.
   207  //
   208  // Whilst some constraints above might sound overly aggressive, the general idea is
   209  // that the blob pool should work robustly for its intended use case and whilst
   210  // anyone is free to use blob transactions for arbitrary non-rollup use cases,
   211  // they should not be allowed to run amok on the network.
   212  //
   213  // Implementation wise there are a few interesting design choices:
   214  //
   215  //   - Adding a transaction to the pool blocks until persisted to disk. This is
   216  //     viable because TPS is low (2-4 blobs per block initially, maybe 8-16 at
   217  //     peak), so natural churn is a couple MB per block. Replacements doing O(n)
   218  //     updates are forbidden and transaction propagation is pull based (i.e. no
   219  //     pileup of pending data).
   220  //
   221  //   - When transactions are chosen for inclusion, the primary criterion is the
   222  //     signer tip (and having a basefee/data fee high enough of course). However,
   223  //     same-tip transactions will be split by their basefee/datafee, preferring
   224  //     those that are closer to the current network limits. The idea being that
   225  //     loosely capped ones can still be included even if the fees go up, whereas
   226  //     those closer to the limits could already be invalid.
   227  //
   228  //   - Because the maximum number of blobs allowed in a block can change per
   229  //     fork, the pool is designed to handle the maximum number of blobs allowed
   230  //     in the chain's latest defined fork -- even if it isn't active. This
   231  //     avoids needing to upgrade the database around the fork boundary.
   232  //
   233  // When the pool eventually reaches saturation, some old transactions - that may
   234  // never execute - will need to be evicted in favor of newer ones. The eviction
   235  // strategy is quite complex:
   236  //
   237  //   - Exceeding capacity evicts the highest-nonce of the account with the lowest
   238  //     paying blob transaction anywhere in the pooled nonce-sequence, as that tx
   239  //     would be executed the furthest in the future and is thus blocking anything
   240  //     after it. The smallest is deliberately not evicted to avoid a nonce-gap.
   241  //
   242  //   - Analogously, if the pool is full, the consideration price of a new tx for
   243  //     evicting an old one is the smallest price in the entire nonce-sequence of
   244  //     the account. This avoids malicious users DoSing the pool with seemingly
   245  //     high paying transactions hidden behind a low-paying blocked one.
   246  //
   247  //   - Since blob transactions have 3 price parameters: execution tip, execution
   248  //     fee cap and data fee cap, there's no singular parameter to create a total
   249  //     price ordering on. What's more, since the base fee and blob fee can move
   250  //     independently of one another, there's no pre-defined way to combine them
   251  //     into a stable order either. This leads to a multi-dimensional problem to
   252  //     solve after every block.
   253  //
   254  //   - The first observation is that comparing 1559 base fees or 4844 blob fees
   255  //     needs to happen in the context of their dynamism. Since these fees jump
   256  //     up or down in ~1.125 multipliers (at max) across blocks, comparing fees
   257  //     in two transactions should be based on log1.125(fee) to eliminate noise.
   258  //
   259  //   - The second observation is that the basefee and blobfee move independently,
   260  //     so there's no way to split mixed txs on their own (A has higher base fee,
   261  //     B has higher blob fee). Rather than look at the absolute fees, the useful
   262  //     metric is the max time it can take to exceed the transaction's fee caps.
   263  //     Specifically, we're interested in the number of jumps needed to go from
   264  //     the current fee to the transaction's cap:
   265  //
   266  //     jumps = log1.125(txfee) - log1.125(basefee)
   267  //
   268  //   - The third observation is that the base fee tends to hover around rather
   269  //     than swing wildly. The number of jumps needed from the current fee starts
   270  //     to get less relevant the higher it is. To remove the noise here too, the
   271  //     pool will use log(jumps) as the delta for comparing transactions.
   272  //
   273  //     delta = sign(jumps) * log(abs(jumps))
   274  //
   275  //   - To establish a total order, we need to reduce the dimensionality of the
   276  //     two base fees (log jumps) to a single value. The interesting aspect from
   277  //     the pool's perspective is how fast will a tx get executable (fees going
   278  //     down, crossing the smaller negative jump counter) or non-executable (fees
   279  //     going up, crossing the smaller positive jump counter). As such, the pool
   280  //     cares only about the min of the two delta values for eviction priority.
   281  //
   282  //     priority = min(deltaBasefee, deltaBlobfee)
   283  //
   284  //   - The above very aggressive dimensionality and noise reduction should result
   285  //     in transactions being grouped into a small number of buckets, the further
   286  //     the fees the larger the buckets. This is good because it allows us to use
   287  //     the miner tip meaningfully as a splitter.
   288  //
   289  //   - For the scenario where the pool does not contain non-executable blob txs
   290  //     anymore, it does not make sense to grant a later eviction priority to txs
   291  //     with high fee caps since it could enable pool wars. As such, any positive
   292  //     priority will be grouped together.
   293  //
   294  //     priority = min(deltaBasefee, deltaBlobfee, 0)
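//
//     As an illustrative example: if a tx's exec fee cap sits 6 jumps above
//     the current base fee and its blob fee cap 12 jumps above the current
//     blob fee, both deltas are positive and the clamped priority is 0 (the
//     "executable" bucket). If instead the blob fee had already risen past the
//     tx's blob fee cap by a few jumps, deltaBlobfee would turn negative and
//     dominate the min, marking the tx as an earlier eviction candidate.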
   295  //
   296  // Optimisation tradeoffs:
   297  //
   298  //   - Eviction relies on 3 fee minimums per account (exec tip, exec cap and blob
   299  //     cap). Maintaining these values across all transactions from the account is
   300  //     problematic as each transaction replacement or inclusion would require a
   301  //     rescan of all other transactions to recalculate the minimum. Instead, the
   302  //     pool maintains a rolling minimum across the nonce range. Updating all the
   303  //     minimums will need to be done only starting at the swapped in/out nonce
   304  //     and leading up to the first no-change.
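//
//     As an illustrative example of the rolling minimum: an account with pooled
//     nonces 5, 6 and 7 carrying exec tips of 3, 1 and 2 gwei has evictionExecTip
//     values of 3, 1 and 1 gwei. Replacing the nonce-6 tx with a 4 gwei tip only
//     requires re-walking from nonce 6 onwards (yielding 3, 3 and 2), stopping at
//     the first entry whose value would not change.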
   305  type BlobPool struct {
   306  	config         Config                    // Pool configuration
   307  	reserver       txpool.Reserver           // Address reserver to ensure exclusivity across subpools
   308  	hasPendingAuth func(common.Address) bool // Determine whether the specified address has a pending 7702-auth
   309  
   310  	store  billy.Database // Persistent data store for the tx metadata and blobs
   311  	stored uint64         // Useful data size of all transactions on disk
   312  	limbo  *limbo         // Persistent data store for the non-finalized blobs
   313  
   314  	signer types.Signer // Transaction signer to use for sender recovery
   315  	chain  BlockChain   // Chain object to access the state through
   316  
   317  	head   *types.Header  // Current head of the chain
   318  	state  *state.StateDB // Current state at the head of the chain
   319  	gasTip *uint256.Int   // Currently accepted minimum gas tip
   320  
   321  	lookup *lookup                          // Lookup table mapping blobs to txs and txs to billy entries
   322  	index  map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
   323  	spent  map[common.Address]*uint256.Int  // Expenditure tracking for individual accounts
   324  	evict  *evictHeap                       // Heap of cheapest accounts for eviction when full
   325  
   326  	discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
   327  	insertFeed   event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
   328  
   329  	// txValidationFn defaults to txpool.ValidateTransaction, but can be
   330  	// overridden for testing purposes.
   331  	txValidationFn txpool.ValidationFunction
   332  
   333  	lock sync.RWMutex // Mutex protecting the pool during reorg handling
   334  }
   335  
   336  // New creates a new blob transaction pool to gather, sort and filter inbound
   337  // blob transactions from the network.
   338  func New(config Config, chain BlockChain, hasPendingAuth func(common.Address) bool) *BlobPool {
   339  	// Sanitize the input to ensure no vulnerable gas prices are set
   340  	config = (&config).sanitize()
   341  
   342  	// Create the transaction pool with its initial settings
   343  	return &BlobPool{
   344  		config:         config,
   345  		hasPendingAuth: hasPendingAuth,
   346  		signer:         types.LatestSigner(chain.Config()),
   347  		chain:          chain,
   348  		lookup:         newLookup(),
   349  		index:          make(map[common.Address][]*blobTxMeta),
   350  		spent:          make(map[common.Address]*uint256.Int),
   351  		txValidationFn: txpool.ValidateTransaction,
   352  	}
   353  }
   354  
   355  // Filter returns whether the given transaction can be consumed by the blob pool.
   356  func (p *BlobPool) Filter(tx *types.Transaction) bool {
   357  	return tx.Type() == types.BlobTxType
   358  }
   359  
   360  // Init sets the gas price needed to keep a transaction in the pool and the chain
   361  // head to allow balance / nonce checks. The transaction journal will be loaded
   362  // from disk and filtered based on the provided starting settings.
   363  func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reserver) error {
   364  	p.reserver = reserver
   365  
   366  	var (
   367  		queuedir string
   368  		limbodir string
   369  	)
   370  	if p.config.Datadir != "" {
   371  		queuedir = filepath.Join(p.config.Datadir, pendingTransactionStore)
   372  		if err := os.MkdirAll(queuedir, 0700); err != nil {
   373  			return err
   374  		}
   375  		limbodir = filepath.Join(p.config.Datadir, limboedTransactionStore)
   376  		if err := os.MkdirAll(limbodir, 0700); err != nil {
   377  			return err
   378  		}
   379  	}
   380  	// Initialize the state with the head block, or fall back to an empty one in
   381  	// case the head state is not available (might occur when the node is not
   382  	// fully synced).
   383  	state, err := p.chain.StateAt(head.Root)
   384  	if err != nil {
   385  		state, err = p.chain.StateAt(types.EmptyRootHash)
   386  	}
   387  	if err != nil {
   388  		return err
   389  	}
   390  	p.head, p.state = head, state
   391  
   392  	// Index all transactions on disk and delete anything unprocessable
   393  	var fails []uint64
   394  	index := func(id uint64, size uint32, blob []byte) {
   395  		if p.parseTransaction(id, size, blob) != nil {
   396  			fails = append(fails, id)
   397  		}
   398  	}
   399  	slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
   400  	store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, slotter, index)
   401  	if err != nil {
   402  		return err
   403  	}
   404  	p.store = store
   405  
   406  	if len(fails) > 0 {
   407  		log.Warn("Dropping invalidated blob transactions", "ids", fails)
   408  		dropInvalidMeter.Mark(int64(len(fails)))
   409  
   410  		for _, id := range fails {
   411  			if err := p.store.Delete(id); err != nil {
   412  				p.Close()
   413  				return err
   414  			}
   415  		}
   416  	}
   417  	// Sort the indexed transactions by nonce and delete anything gapped, then
   418  	// create the eviction heap of anyone still standing
   419  	for addr := range p.index {
   420  		p.recheck(addr, nil)
   421  	}
   422  	var (
   423  		basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
   424  		blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
   425  	)
   426  	if p.head.ExcessBlobGas != nil {
   427  		blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), p.head))
   428  	}
   429  	p.evict = newPriceHeap(basefee, blobfee, p.index)
   430  
   431  	// Pool initialized, attach the blob limbo to it to track blobs included
   432  	// recently but not yet finalized
   433  	p.limbo, err = newLimbo(limbodir, eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
   434  	if err != nil {
   435  		p.Close()
   436  		return err
   437  	}
   438  	// Set the configured gas tip, triggering a filtering of anything just loaded
   439  	basefeeGauge.Update(int64(basefee.Uint64()))
   440  	blobfeeGauge.Update(int64(blobfee.Uint64()))
   441  
   442  	p.SetGasTip(new(big.Int).SetUint64(gasTip))
   443  
   444  	// Since the user might have modified their pool's capacity, evict anything
   445  	// above the current allowance
   446  	for p.stored > p.config.Datacap {
   447  		p.drop()
   448  	}
   449  	// Update the metrics and return the constructed pool
   450  	datacapGauge.Update(int64(p.config.Datacap))
   451  	p.updateStorageMetrics()
   452  	return nil
   453  }
   454  
   455  // Close closes down the underlying persistent store.
   456  func (p *BlobPool) Close() error {
   457  	var errs []error
   458  	if p.limbo != nil { // Close might be invoked due to error in constructor, before p.limbo is set
   459  		if err := p.limbo.Close(); err != nil {
   460  			errs = append(errs, err)
   461  		}
   462  	}
   463  	if err := p.store.Close(); err != nil {
   464  		errs = append(errs, err)
   465  	}
   466  	switch {
   467  	case errs == nil:
   468  		return nil
   469  	case len(errs) == 1:
   470  		return errs[0]
   471  	default:
   472  		return fmt.Errorf("%v", errs)
   473  	}
   474  }
   475  
   476  // parseTransaction is a callback method on pool creation that gets called for
   477  // each transaction on disk to create the in-memory metadata index.
   478  func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
   479  	tx := new(types.Transaction)
   480  	if err := rlp.DecodeBytes(blob, tx); err != nil {
   481  		// This path is impossible unless the disk data representation changes
   482  		// across restarts. For that ever improbable case, recover gracefully
   483  		// by ignoring this data entry.
   484  		log.Error("Failed to decode blob pool entry", "id", id, "err", err)
   485  		return err
   486  	}
   487  	if tx.BlobTxSidecar() == nil {
   488  		log.Error("Missing sidecar in blob pool entry", "id", id, "hash", tx.Hash())
   489  		return errors.New("missing blob sidecar")
   490  	}
   491  
   492  	meta := newBlobTxMeta(id, tx.Size(), size, tx)
   493  	if p.lookup.exists(meta.hash) {
   494  		// This path is only possible after a crash, where deleted items are not
   495  		// removed via the normal shutdown-startup procedure and thus may get
   496  		// partially resurrected.
   497  		log.Error("Rejecting duplicate blob pool entry", "id", id, "hash", tx.Hash())
   498  		return errors.New("duplicate blob entry")
   499  	}
   500  	sender, err := types.Sender(p.signer, tx)
   501  	if err != nil {
   502  		// This path is impossible unless the signature validity changes across
   503  		// restarts. For that ever improbable case, recover gracefully by ignoring
   504  		// this data entry.
   505  		log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
   506  		return err
   507  	}
   508  	if _, ok := p.index[sender]; !ok {
   509  		if err := p.reserver.Hold(sender); err != nil {
   510  			return err
   511  		}
   512  		p.index[sender] = []*blobTxMeta{}
   513  		p.spent[sender] = new(uint256.Int)
   514  	}
   515  	p.index[sender] = append(p.index[sender], meta)
   516  	p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
   517  
   518  	p.lookup.track(meta)
   519  	p.stored += uint64(meta.storageSize)
   520  	return nil
   521  }
   522  
   523  // recheck verifies the pool's content for a specific account and drops anything
   524  // that does not fit anymore (dangling or filled nonce, overdraft).
   525  func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint64) {
   526  	// Sort the transactions belonging to the account so reinjects can be simpler
   527  	txs := p.index[addr]
   528  	if inclusions != nil && txs == nil { // during reorgs, we might find new accounts
   529  		return
   530  	}
   531  	sort.Slice(txs, func(i, j int) bool {
   532  		return txs[i].nonce < txs[j].nonce
   533  	})
   534  	// If there is a gap between the chain state and the blob pool, drop
   535  	// all the transactions as they are non-executable. Similarly, if the
   536  	// entire tx range was included, drop all.
   537  	var (
   538  		next   = p.state.GetNonce(addr)
   539  		gapped = txs[0].nonce > next
   540  		filled = txs[len(txs)-1].nonce < next
   541  	)
   542  	if gapped || filled {
   543  		var (
   544  			ids    []uint64
   545  			nonces []uint64
   546  		)
   547  		for i := 0; i < len(txs); i++ {
   548  			ids = append(ids, txs[i].id)
   549  			nonces = append(nonces, txs[i].nonce)
   550  
   551  			p.stored -= uint64(txs[i].storageSize)
   552  			p.lookup.untrack(txs[i])
   553  
   554  			// Included transactions' blobs need to be moved to the limbo
   555  			if filled && inclusions != nil {
   556  				p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
   557  			}
   558  		}
   559  		delete(p.index, addr)
   560  		delete(p.spent, addr)
   561  		if inclusions != nil { // only during reorgs will the heap be initialized
   562  			heap.Remove(p.evict, p.evict.index[addr])
   563  		}
   564  		p.reserver.Release(addr)
   565  
   566  		if gapped {
   567  			log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
   568  			dropDanglingMeter.Mark(int64(len(ids)))
   569  		} else {
   570  			log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
   571  			dropFilledMeter.Mark(int64(len(ids)))
   572  		}
   573  		for _, id := range ids {
   574  			if err := p.store.Delete(id); err != nil {
   575  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   576  			}
   577  		}
   578  		return
   579  	}
   580  	// If there is overlap between the chain state and the blob pool, drop
   581  	// anything below the current state
   582  	if txs[0].nonce < next {
   583  		var (
   584  			ids    []uint64
   585  			nonces []uint64
   586  		)
   587  		for len(txs) > 0 && txs[0].nonce < next {
   588  			ids = append(ids, txs[0].id)
   589  			nonces = append(nonces, txs[0].nonce)
   590  
   591  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
   592  			p.stored -= uint64(txs[0].storageSize)
   593  			p.lookup.untrack(txs[0])
   594  
   595  			// Included transactions' blobs need to be moved to the limbo
   596  			if inclusions != nil {
   597  				p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
   598  			}
   599  			txs = txs[1:]
   600  		}
   601  		log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
   602  		dropOverlappedMeter.Mark(int64(len(ids)))
   603  
   604  		for _, id := range ids {
   605  			if err := p.store.Delete(id); err != nil {
   606  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   607  			}
   608  		}
   609  		p.index[addr] = txs
   610  	}
   611  	// Iterate over the transactions to initialize their eviction thresholds
   612  	// and to detect any nonce gaps
   613  	txs[0].evictionExecTip = txs[0].execTipCap
   614  	txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
   615  	txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
   616  
   617  	for i := 1; i < len(txs); i++ {
   618  		// If there's no nonce gap, initialize the eviction thresholds as the
   619  		// minimum between the cumulative thresholds and the current tx fees
   620  		if txs[i].nonce == txs[i-1].nonce+1 {
   621  			txs[i].evictionExecTip = txs[i-1].evictionExecTip
   622  			if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
   623  				txs[i].evictionExecTip = txs[i].execTipCap
   624  			}
   625  			txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
   626  			if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
   627  				txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
   628  			}
   629  			txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
   630  			if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
   631  				txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
   632  			}
   633  			continue
   634  		}
   635  		// Sanity check that there's no double nonce. This case would generally
   636  		// be a coding error, so better know about it.
   637  		//
   638  		// Also, Billy behind the blobpool does not journal deletes. A process
   639  		// crash would result in previously deleted entities being resurrected.
   640  		// That could potentially cause a duplicate nonce to appear.
   641  		if txs[i].nonce == txs[i-1].nonce {
   642  			id, _ := p.lookup.storeidOfTx(txs[i].hash)
   643  
   644  			log.Error("Dropping repeat nonce blob transaction", "from", addr, "nonce", txs[i].nonce, "id", id)
   645  			dropRepeatedMeter.Mark(1)
   646  
   647  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
   648  			p.stored -= uint64(txs[i].storageSize)
   649  			p.lookup.untrack(txs[i])
   650  
   651  			if err := p.store.Delete(id); err != nil {
   652  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   653  			}
   654  			txs = append(txs[:i], txs[i+1:]...)
   655  			p.index[addr] = txs
   656  
   657  			i--
   658  			continue
   659  		}
   660  		// Otherwise, if there's a nonce gap, evict all later transactions
   661  		var (
   662  			ids    []uint64
   663  			nonces []uint64
   664  		)
   665  		for j := i; j < len(txs); j++ {
   666  			ids = append(ids, txs[j].id)
   667  			nonces = append(nonces, txs[j].nonce)
   668  
   669  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
   670  			p.stored -= uint64(txs[j].storageSize)
   671  			p.lookup.untrack(txs[j])
   672  		}
   673  		txs = txs[:i]
   674  
   675  		log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
   676  		dropGappedMeter.Mark(int64(len(ids)))
   677  
   678  		for _, id := range ids {
   679  			if err := p.store.Delete(id); err != nil {
   680  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   681  			}
   682  		}
   683  		p.index[addr] = txs
   684  		break
   685  	}
   686  	// Ensure that there's no over-draft; this is expected to happen when some
   687  	// transactions get included without being published on the network
   688  	var (
   689  		balance = p.state.GetBalance(addr)
   690  		spent   = p.spent[addr]
   691  	)
   692  	if spent.Cmp(balance) > 0 {
   693  		// Evict the highest nonce transactions until the pending set falls under
   694  		// the account's available balance
   695  		var (
   696  			ids    []uint64
   697  			nonces []uint64
   698  		)
   699  		for p.spent[addr].Cmp(balance) > 0 {
   700  			last := txs[len(txs)-1]
   701  			txs[len(txs)-1] = nil
   702  			txs = txs[:len(txs)-1]
   703  
   704  			ids = append(ids, last.id)
   705  			nonces = append(nonces, last.nonce)
   706  
   707  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
   708  			p.stored -= uint64(last.storageSize)
   709  			p.lookup.untrack(last)
   710  		}
   711  		if len(txs) == 0 {
   712  			delete(p.index, addr)
   713  			delete(p.spent, addr)
   714  			if inclusions != nil { // only during reorgs will the heap be initialized
   715  				heap.Remove(p.evict, p.evict.index[addr])
   716  			}
   717  			p.reserver.Release(addr)
   718  		} else {
   719  			p.index[addr] = txs
   720  		}
   721  		log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
   722  		dropOverdraftedMeter.Mark(int64(len(ids)))
   723  
   724  		for _, id := range ids {
   725  			if err := p.store.Delete(id); err != nil {
   726  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   727  			}
   728  		}
   729  	}
   730  	// Sanity check that no account can have more queued transactions than the
   731  	// DoS protection threshold.
   732  	if len(txs) > maxTxsPerAccount {
   733  		// Evict the highest nonce transactions until the pending set falls under
   734  		// the account's transaction cap
   735  		var (
   736  			ids    []uint64
   737  			nonces []uint64
   738  		)
   739  		for len(txs) > maxTxsPerAccount {
   740  			last := txs[len(txs)-1]
   741  			txs[len(txs)-1] = nil
   742  			txs = txs[:len(txs)-1]
   743  
   744  			ids = append(ids, last.id)
   745  			nonces = append(nonces, last.nonce)
   746  
   747  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
   748  			p.stored -= uint64(last.storageSize)
   749  			p.lookup.untrack(last)
   750  		}
   751  		p.index[addr] = txs
   752  
   753  		log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
   754  		dropOvercappedMeter.Mark(int64(len(ids)))
   755  
   756  		for _, id := range ids {
   757  			if err := p.store.Delete(id); err != nil {
   758  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   759  			}
   760  		}
   761  	}
   762  	// Included cheap transactions might have left the remaining ones better off from
   763  	// an eviction point of view; fix any potential issues in the heap.
   764  	if _, ok := p.index[addr]; ok && inclusions != nil {
   765  		heap.Fix(p.evict, p.evict.index[addr])
   766  	}
   767  }
   768  
   769  // offload removes a tracked blob transaction from the pool and moves it into the
   770  // limbo for tracking until finality.
   771  //
   772  // The method may log errors for various unexpected scenarios but will not return
   773  // any of them since there's no clear error case. Some errors may be due to coding
   774  // issues, others caused by signers mining MEV stuff or swapping transactions. In
   775  // all cases, the pool needs to continue operating.
   776  func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusions map[common.Hash]uint64) {
   777  	data, err := p.store.Get(id)
   778  	if err != nil {
   779  		log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
   780  		return
   781  	}
   782  	var tx types.Transaction
   783  	if err = rlp.DecodeBytes(data, &tx); err != nil {
   784  		log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
   785  		return
   786  	}
   787  	block, ok := inclusions[tx.Hash()]
   788  	if !ok {
   789  		log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
   790  		return
   791  	}
   792  	if err := p.limbo.push(&tx, block); err != nil {
   793  		log.Warn("Failed to offload blob tx into limbo", "err", err)
   794  		return
   795  	}
   796  }
   797  
   798  // Reset implements txpool.SubPool, allowing the blob pool's internal state to be
   799  // kept in sync with the main transaction pool's internal state.
   800  func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
   801  	waitStart := time.Now()
   802  	p.lock.Lock()
   803  	resetwaitHist.Update(time.Since(waitStart).Nanoseconds())
   804  	defer p.lock.Unlock()
   805  
   806  	defer func(start time.Time) {
   807  		resettimeHist.Update(time.Since(start).Nanoseconds())
   808  	}(time.Now())
   809  
   810  	statedb, err := p.chain.StateAt(newHead.Root)
   811  	if err != nil {
   812  		log.Error("Failed to reset blobpool state", "err", err)
   813  		return
   814  	}
   815  	p.head = newHead
   816  	p.state = statedb
   817  
   818  	// Run the reorg between the old and new head and figure out which accounts
   819  	// need to be rechecked and which transactions need to be readded
   820  	if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
   821  		var adds []*types.Transaction
   822  		for addr, txs := range reinject {
   823  			// Blindly push all the lost transactions back into the pool
   824  			for _, tx := range txs {
   825  				if err := p.reinject(addr, tx.Hash()); err == nil {
   826  					adds = append(adds, tx.WithoutBlobTxSidecar())
   827  				}
   828  			}
   829  			// Recheck the account's pooled transactions to drop included and
   830  			// invalidated ones
   831  			p.recheck(addr, inclusions)
   832  		}
   833  		if len(adds) > 0 {
   834  			p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
   835  		}
   836  	}
   837  	// Flush out any blobs from limbo that are older than the latest finality
   838  	if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
   839  		p.limbo.finalize(p.chain.CurrentFinalBlock())
   840  	}
   841  	// Reset the price heap for the new set of basefee/blobfee pairs
   842  	var (
   843  		basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), newHead))
   844  		blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
   845  	)
   846  	if newHead.ExcessBlobGas != nil {
   847  		blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), newHead))
   848  	}
   849  	p.evict.reinit(basefee, blobfee, false)
   850  
   851  	basefeeGauge.Update(int64(basefee.Uint64()))
   852  	blobfeeGauge.Update(int64(blobfee.Uint64()))
   853  	p.updateStorageMetrics()
   854  }
   855  
   856  // reorg assembles all the transactors and missing transactions between an old
   857  // and new head to figure out which accounts' tx sets need to be rechecked and
   858  // which transactions need to be requeued.
   859  //
   860  // The per-transaction block inclusion infos are also returned to allow tracking
   861  // any just-included transactions by block number in the limbo.
   862  func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*types.Transaction, map[common.Hash]uint64) {
   863  	// If the pool was not yet initialized, don't do anything
   864  	if oldHead == nil {
   865  		return nil, nil
   866  	}
   867  	// If the reorg is too deep, avoid doing it (will happen during snap sync)
   868  	oldNum := oldHead.Number.Uint64()
   869  	newNum := newHead.Number.Uint64()
   870  
   871  	if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
   872  		return nil, nil
   873  	}
   874  	// Reorg seems shallow enough to pull all transactions into memory
   875  	var (
   876  		transactors = make(map[common.Address]struct{})
   877  		discarded   = make(map[common.Address][]*types.Transaction)
   878  		included    = make(map[common.Address][]*types.Transaction)
   879  		inclusions  = make(map[common.Hash]uint64)
   880  
   881  		rem = p.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
   882  		add = p.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
   883  	)
   884  	if add == nil {
   885  		// if the new head block is unavailable, it means that something happened
   886  		// between the firing of the newhead-event and _now_: most likely a
   887  		// reorg caused by sync-reversion or explicit sethead back to an
   888  		// earlier block.
   889  		log.Warn("Blobpool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
   890  		return nil, nil
   891  	}
   892  	if rem == nil {
   893  		// This can happen if a setHead is performed, where we simply discard
   894  		// the old head from the chain. If that is the case, we don't have the
   895  		// lost transactions anymore, and there's nothing to add.
   896  		if newNum >= oldNum {
   897  			// If we reorged to a same or higher number, then it's not a case
   898  			// of setHead
   899  			log.Warn("Blobpool reset with missing old head",
   900  				"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
   901  			return nil, nil
   902  		}
   903  		// If the reorg ended up on a lower number, it's indicative of setHead
   904  		// being the cause
   905  		log.Debug("Skipping blobpool reset caused by setHead",
   906  			"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
   907  		return nil, nil
   908  	}
   909  	// Both old and new blocks exist, traverse through the progression chain
   910  	// and accumulate the transactors and transactions
   911  	for rem.NumberU64() > add.NumberU64() {
   912  		for _, tx := range rem.Transactions() {
   913  			from, _ := types.Sender(p.signer, tx)
   914  
   915  			discarded[from] = append(discarded[from], tx)
   916  			transactors[from] = struct{}{}
   917  		}
   918  		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
   919  			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
   920  			return nil, nil
   921  		}
   922  	}
   923  	for add.NumberU64() > rem.NumberU64() {
   924  		for _, tx := range add.Transactions() {
   925  			from, _ := types.Sender(p.signer, tx)
   926  
   927  			included[from] = append(included[from], tx)
   928  			inclusions[tx.Hash()] = add.NumberU64()
   929  			transactors[from] = struct{}{}
   930  		}
   931  		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
   932  			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
   933  			return nil, nil
   934  		}
   935  	}
   936  	for rem.Hash() != add.Hash() {
   937  		for _, tx := range rem.Transactions() {
   938  			from, _ := types.Sender(p.signer, tx)
   939  
   940  			discarded[from] = append(discarded[from], tx)
   941  			transactors[from] = struct{}{}
   942  		}
   943  		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
   944  			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
   945  			return nil, nil
   946  		}
   947  		for _, tx := range add.Transactions() {
   948  			from, _ := types.Sender(p.signer, tx)
   949  
   950  			included[from] = append(included[from], tx)
   951  			inclusions[tx.Hash()] = add.NumberU64()
   952  			transactors[from] = struct{}{}
   953  		}
   954  		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
   955  			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
   956  			return nil, nil
   957  		}
   958  	}
   959  	// Generate the set of transactions per address to pull back into the pool,
   960  	// also updating the rest along the way
   961  	reinject := make(map[common.Address][]*types.Transaction, len(transactors))
   962  	for addr := range transactors {
   963  		// Generate the set that was lost to reinject into the pool
   964  		lost := make([]*types.Transaction, 0, len(discarded[addr]))
   965  		for _, tx := range types.TxDifference(discarded[addr], included[addr]) {
   966  			if p.Filter(tx) {
   967  				lost = append(lost, tx)
   968  			}
   969  		}
   970  		reinject[addr] = lost
   971  
   972  		// Update the set that was already reincluded to track the blocks in limbo
   973  		for _, tx := range types.TxDifference(included[addr], discarded[addr]) {
   974  			if p.Filter(tx) {
   975  				p.limbo.update(tx.Hash(), inclusions[tx.Hash()])
   976  			}
   977  		}
   978  	}
   979  	return reinject, inclusions
   980  }
   981  
   982  // reinject blindly pushes a transaction previously included in the chain - and
   983  // just reorged out - into the pool. The transaction is assumed valid (having
   984  // been in the chain), thus the only validation needed is nonce sorting and over-
   985  // draft checks after injection.
   986  //
   987  // Note, the method will not initialize the eviction cache values as those are
   988  // initialized once for all transactions belonging to an account after all the
   989  // individual transactions are injected back into the pool.
   990  func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
   991  	// Retrieve the associated blob from the limbo. Without the blobs, we cannot
   992  	// add the transaction back into the pool as it is not mineable.
   993  	tx, err := p.limbo.pull(txhash)
   994  	if err != nil {
   995  		log.Error("Blobs unavailable, dropping reorged tx", "err", err)
   996  		return err
   997  	}
   998  	// TODO: seems like an easy optimization here would be getting the serialized tx
   999  	// from limbo instead of re-serializing it here.
  1000  
  1001  	// Serialize the transaction back into the primary datastore.
  1002  	blob, err := rlp.EncodeToBytes(tx)
  1003  	if err != nil {
  1004  		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
  1005  		return err
  1006  	}
  1007  	id, err := p.store.Put(blob)
  1008  	if err != nil {
  1009  		log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
  1010  		return err
  1011  	}
  1012  
  1013  	// Update the indices and metrics
  1014  	meta := newBlobTxMeta(id, tx.Size(), p.store.Size(id), tx)
  1015  	if _, ok := p.index[addr]; !ok {
  1016  		if err := p.reserver.Hold(addr); err != nil {
  1017  			log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
  1018  			return err
  1019  		}
  1020  		p.index[addr] = []*blobTxMeta{meta}
  1021  		p.spent[addr] = meta.costCap
  1022  		p.evict.Push(addr)
  1023  	} else {
  1024  		p.index[addr] = append(p.index[addr], meta)
  1025  		p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
  1026  	}
  1027  	p.lookup.track(meta)
  1028  	p.stored += uint64(meta.storageSize)
  1029  	return nil
  1030  }
  1031  
  1032  // SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
  1033  // to be kept in sync with the main transaction pool's gas requirements.
  1034  func (p *BlobPool) SetGasTip(tip *big.Int) {
  1035  	p.lock.Lock()
  1036  	defer p.lock.Unlock()
  1037  
  1038  	// Store the new minimum gas tip
  1039  	old := p.gasTip
  1040  	p.gasTip = uint256.MustFromBig(tip)
  1041  
  1042  	// If the min miner fee increased, remove transactions below the new threshold
  1043  	if old == nil || p.gasTip.Cmp(old) > 0 {
  1044  		for addr, txs := range p.index {
  1045  			for i, tx := range txs {
  1046  				if tx.execTipCap.Cmp(p.gasTip) < 0 {
  1047  					// Drop the offending transaction
  1048  					var (
  1049  						ids    = []uint64{tx.id}
  1050  						nonces = []uint64{tx.nonce}
  1051  					)
  1052  					p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
  1053  					p.stored -= uint64(tx.storageSize)
  1054  					p.lookup.untrack(tx)
  1055  					txs[i] = nil
  1056  
  1057  					// Drop everything afterwards, no gaps allowed
  1058  					for j, tx := range txs[i+1:] {
  1059  						ids = append(ids, tx.id)
  1060  						nonces = append(nonces, tx.nonce)
  1061  
  1062  						p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
  1063  						p.stored -= uint64(tx.storageSize)
  1064  						p.lookup.untrack(tx)
  1065  						txs[i+1+j] = nil
  1066  					}
  1067  					// Clear out the dropped transactions from the index
  1068  					if i > 0 {
  1069  						p.index[addr] = txs[:i]
  1070  						heap.Fix(p.evict, p.evict.index[addr])
  1071  					} else {
  1072  						delete(p.index, addr)
  1073  						delete(p.spent, addr)
  1074  
  1075  						heap.Remove(p.evict, p.evict.index[addr])
  1076  						p.reserver.Release(addr)
  1077  					}
  1078  					// Clear out the transactions from the data store
  1079  					log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
  1080  					dropUnderpricedMeter.Mark(int64(len(ids)))
  1081  
  1082  					for _, id := range ids {
  1083  						if err := p.store.Delete(id); err != nil {
  1084  							log.Error("Failed to delete dropped transaction", "id", id, "err", err)
  1085  						}
  1086  					}
  1087  					break
  1088  				}
  1089  			}
  1090  		}
  1091  	}
  1092  	log.Debug("Blobpool tip threshold updated", "tip", tip)
  1093  	pooltipGauge.Update(tip.Int64())
  1094  	p.updateStorageMetrics()
  1095  }
  1096  
  1097  // ValidateTxBasics checks whether a transaction is valid according to the consensus
  1098  // rules, but does not perform state-dependent validation such as sufficient balance.
  1099  // This check is meant as an early check which only needs to be performed once,
  1100  // and does not require the pool mutex to be held.
  1101  func (p *BlobPool) ValidateTxBasics(tx *types.Transaction) error {
  1102  	opts := &txpool.ValidationOptions{
  1103  		Config:       p.chain.Config(),
  1104  		Accept:       1 << types.BlobTxType,
  1105  		MaxSize:      txMaxSize,
  1106  		MinTip:       p.gasTip.ToBig(),
  1107  		MaxBlobCount: maxBlobsPerTx,
  1108  	}
  1109  	return txpool.ValidateTransaction(tx, p.head, p.signer, opts)
  1110  }
  1111  
  1112  // checkDelegationLimit determines if the tx sender is delegated or has a
  1113  // pending delegation, and if so, ensures they have at most one in-flight
  1114  // **executable** transaction, i.e. it disallows stacked and gapped transactions
  1115  // from the account.
  1116  func (p *BlobPool) checkDelegationLimit(tx *types.Transaction) error {
  1117  	from, _ := types.Sender(p.signer, tx) // validated
  1118  
  1119  	// Short circuit if the sender has neither delegation nor pending delegation.
  1120  	if p.state.GetCodeHash(from) == types.EmptyCodeHash {
  1121  		// Because there is no exclusive lock held between different subpools
  1122  		// when processing transactions, a blob transaction may be accepted
  1123  		// while other SetCode transactions with pending authorities from the
  1124  		// same address are also accepted simultaneously.
  1125  		//
  1126  		// This scenario is considered acceptable, as the rule primarily ensures
  1127  		// that attackers cannot easily and endlessly stack blob transactions
  1128  		// with a delegated or pending delegated sender.
  1129  		if p.hasPendingAuth == nil || !p.hasPendingAuth(from) {
  1130  			return nil
  1131  		}
  1132  	}
  1133  	// Allow a single in-flight pending transaction.
  1134  	pending := p.index[from]
  1135  	if len(pending) == 0 {
  1136  		return nil
  1137  	}
  1138  	// If account already has a pending transaction, allow replacement only.
  1139  	if len(pending) == 1 && pending[0].nonce == tx.Nonce() {
  1140  		return nil
  1141  	}
  1142  	return txpool.ErrInflightTxLimitReached
  1143  }
  1144  
  1145  // validateTx checks whether a transaction is valid according to the consensus
  1146  // rules and adheres to some heuristic limits of the local node (price and size).
  1147  func (p *BlobPool) validateTx(tx *types.Transaction) error {
  1148  	if err := p.ValidateTxBasics(tx); err != nil {
  1149  		return err
  1150  	}
  1151  	// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
  1152  	stateOpts := &txpool.ValidationOptionsWithState{
  1153  		State: p.state,
  1154  
  1155  		FirstNonceGap: func(addr common.Address) uint64 {
  1156  			// Nonce gaps are not permitted in the blob pool; the first gap will
  1157  			// be the next nonce shifted by however many transactions we already
  1158  			// have pooled.
  1159  			return p.state.GetNonce(addr) + uint64(len(p.index[addr]))
  1160  		},
  1161  		UsedAndLeftSlots: func(addr common.Address) (int, int) {
  1162  			have := len(p.index[addr])
  1163  			if have >= maxTxsPerAccount {
  1164  				return have, 0
  1165  			}
  1166  			return have, maxTxsPerAccount - have
  1167  		},
  1168  		ExistingExpenditure: func(addr common.Address) *big.Int {
  1169  			if spent := p.spent[addr]; spent != nil {
  1170  				return spent.ToBig()
  1171  			}
  1172  			return new(big.Int)
  1173  		},
  1174  		ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
  1175  			next := p.state.GetNonce(addr)
  1176  			if uint64(len(p.index[addr])) > nonce-next {
  1177  				return p.index[addr][int(nonce-next)].costCap.ToBig()
  1178  			}
  1179  			return nil
  1180  		},
  1181  	}
  1182  	if err := txpool.ValidateTransactionWithState(tx, p.signer, stateOpts); err != nil {
  1183  		return err
  1184  	}
  1185  	if err := p.checkDelegationLimit(tx); err != nil {
  1186  		return err
  1187  	}
  1188  	// If the transaction replaces an existing one, ensure that price bumps are
  1189  	// adhered to.
  1190  	var (
  1191  		from, _ = types.Sender(p.signer, tx) // already validated above
  1192  		next    = p.state.GetNonce(from)
  1193  	)
  1194  	if uint64(len(p.index[from])) > tx.Nonce()-next {
  1195  		prev := p.index[from][int(tx.Nonce()-next)]
  1196  		// Ensure the transaction is different from the one already tracked locally
  1197  		if prev.hash == tx.Hash() {
  1198  			return txpool.ErrAlreadyKnown
  1199  		}
  1200  		// Account can support the replacement, but the price bump must also be met
  1201  		switch {
  1202  		case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
  1203  			return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
  1204  		case tx.GasTipCapIntCmp(prev.execTipCap.ToBig()) <= 0:
  1205  			return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap)
  1206  		case tx.BlobGasFeeCapIntCmp(prev.blobFeeCap.ToBig()) <= 0:
  1207  			return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap)
  1208  		}
  1209  		var (
  1210  			multiplier = uint256.NewInt(100 + p.config.PriceBump)
  1211  			onehundred = uint256.NewInt(100)
  1212  
  1213  			minGasFeeCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execFeeCap), onehundred)
  1214  			minGasTipCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execTipCap), onehundred)
  1215  			minBlobGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.blobFeeCap), onehundred)
  1216  		)
  1217  		switch {
  1218  		case tx.GasFeeCapIntCmp(minGasFeeCap.ToBig()) < 0:
  1219  			return fmt.Errorf("%w: new tx gas fee cap %v < %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap, p.config.PriceBump)
  1220  		case tx.GasTipCapIntCmp(minGasTipCap.ToBig()) < 0:
  1221  			return fmt.Errorf("%w: new tx gas tip cap %v < %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap, p.config.PriceBump)
  1222  		case tx.BlobGasFeeCapIntCmp(minBlobGasFeeCap.ToBig()) < 0:
  1223  			return fmt.Errorf("%w: new tx blob gas fee cap %v < %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap, p.config.PriceBump)
  1224  		}
  1225  	}
  1226  	return nil
  1227  }
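
// minReplacementCap is a standalone sketch (added for this edit, not part of
// the upstream pool) of the integer math used above for replacement pricing:
// each fee cap of a replacement must reach at least prev*(100+PriceBump)/100,
// with the division truncating exactly as in the checks above.
func minReplacementCap(prev *uint256.Int, priceBump uint64) *uint256.Int {
	multiplier := uint256.NewInt(100 + priceBump)
	return new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev), uint256.NewInt(100))
}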
  1228  
  1229  // Has returns an indicator whether the subpool has a transaction cached with
  1230  // the given hash.
  1231  func (p *BlobPool) Has(hash common.Hash) bool {
  1232  	p.lock.RLock()
  1233  	defer p.lock.RUnlock()
  1234  
  1235  	return p.lookup.exists(hash)
  1236  }
  1237  
  1238  func (p *BlobPool) getRLP(hash common.Hash) []byte {
  1239  	// Track the amount of time waiting to retrieve a fully resolved blob tx from
  1240  	// the pool and the amount of time actually spent on pulling the data from disk.
  1241  	getStart := time.Now()
  1242  	p.lock.RLock()
  1243  	getwaitHist.Update(time.Since(getStart).Nanoseconds())
  1244  	defer p.lock.RUnlock()
  1245  
  1246  	defer func(start time.Time) {
  1247  		gettimeHist.Update(time.Since(start).Nanoseconds())
  1248  	}(time.Now())
  1249  
  1250  	// Pull the blob from disk and return an assembled response
  1251  	id, ok := p.lookup.storeidOfTx(hash)
  1252  	if !ok {
  1253  		return nil
  1254  	}
  1255  	data, err := p.store.Get(id)
  1256  	if err != nil {
  1257  		log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
  1258  		return nil
  1259  	}
  1260  	return data
  1261  }
  1262  
  1263  // Get returns a transaction if it is contained in the pool, or nil otherwise.
  1264  func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
  1265  	data := p.getRLP(hash)
  1266  	if len(data) == 0 {
  1267  		return nil
  1268  	}
  1269  	item := new(types.Transaction)
  1270  	if err := rlp.DecodeBytes(data, item); err != nil {
  1271  		id, _ := p.lookup.storeidOfTx(hash)
  1272  
  1273  		log.Error("Blobs corrupted for tracked transaction",
  1274  			"hash", hash, "id", id, "err", err)
  1275  		return nil
  1276  	}
  1277  	return item
  1278  }
  1279  
  1280  // GetRLP returns an RLP-encoded transaction if it is contained in the pool.
  1281  func (p *BlobPool) GetRLP(hash common.Hash) []byte {
  1282  	return p.getRLP(hash)
  1283  }
  1284  
  1285  // GetMetadata returns the transaction type and transaction size for the
  1286  // given transaction hash.
  1287  //
  1288  // The size refers to the length of the RLP encoding of the blob transaction,
  1289  // including the attached blobs.
  1290  func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
  1291  	p.lock.RLock()
  1292  	defer p.lock.RUnlock()
  1293  
  1294  	size, ok := p.lookup.sizeOfTx(hash)
  1295  	if !ok {
  1296  		return nil
  1297  	}
  1298  	return &txpool.TxMetadata{
  1299  		Type: types.BlobTxType,
  1300  		Size: size,
  1301  	}
  1302  }
  1303  
  1304  // GetBlobs returns a number of blobs and proofs for the given versioned hashes.
  1305  // This is a utility method for the engine API, enabling consensus clients to
  1306  // retrieve blobs from the pool directly instead of over the network.
  1307  func (p *BlobPool) GetBlobs(vhashes []common.Hash) []*types.BlobTxSidecar {
  1308  	sidecars := make([]*types.BlobTxSidecar, len(vhashes))
  1309  	for idx, vhash := range vhashes {
  1310  		// Retrieve the datastore item (in a short lock)
  1311  		p.lock.RLock()
  1312  		id, exists := p.lookup.storeidOfBlob(vhash)
  1313  		if !exists {
  1314  			p.lock.RUnlock()
  1315  			continue
  1316  		}
  1317  		data, err := p.store.Get(id)
  1318  		p.lock.RUnlock()
  1319  
  1320  		// After releasing the lock, try to fill any blobs requested
  1321  		if err != nil {
  1322  			log.Error("Tracked blob transaction missing from store", "id", id, "err", err)
  1323  			continue
  1324  		}
  1325  		item := new(types.Transaction)
  1326  		if err = rlp.DecodeBytes(data, item); err != nil {
  1327  			log.Error("Blobs corrupted for tracked transaction", "id", id, "err", err)
  1328  			continue
  1329  		}
  1330  		sidecars[idx] = item.BlobTxSidecar()
  1331  	}
  1332  	return sidecars
  1333  }
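
// The usage sketch below was added for illustration only (pool and vhashes are
// hypothetical caller-side names): GetBlobs returns a slice positionally
// aligned with the requested versioned hashes, holding nil for every hash the
// pool could not serve.
//
//	sidecars := pool.GetBlobs(vhashes)
//	for i, sc := range sidecars {
//		if sc == nil {
//			continue // vhashes[i] is unknown to the pool, fetch it elsewhere
//		}
//		// sc is the full sidecar of the tx carrying the blob for vhashes[i]
//	}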
  1334  
  1335  // AvailableBlobs returns the number of blobs that are available in the subpool.
  1336  func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int {
  1337  	available := 0
  1338  	for _, vhash := range vhashes {
  1339  		// Check whether the pool tracks the blob (in a short lock)
  1340  		p.lock.RLock()
  1341  		_, exists := p.lookup.storeidOfBlob(vhash)
  1342  		p.lock.RUnlock()
  1343  		if exists {
  1344  			available++
  1345  		}
  1346  	}
  1347  	return available
  1348  }
  1349  
  1350  // Add inserts a set of blob transactions into the pool if they pass validation (both
  1351  // consensus validity and pool restrictions).
  1352  //
  1353  // Note, if sync is set the method will block until all internal maintenance
  1354  // related to the add is finished. Only use this during tests for determinism.
  1355  func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error {
  1356  	var (
  1357  		adds = make([]*types.Transaction, 0, len(txs))
  1358  		errs = make([]error, len(txs))
  1359  	)
  1360  	for i, tx := range txs {
  1361  		errs[i] = p.add(tx)
  1362  		if errs[i] == nil {
  1363  			adds = append(adds, tx.WithoutBlobTxSidecar())
  1364  		}
  1365  	}
  1366  	if len(adds) > 0 {
  1367  		p.discoverFeed.Send(core.NewTxsEvent{Txs: adds})
  1368  		p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
  1369  	}
  1370  	return errs
  1371  }
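
// A hedged usage sketch (added for this edit; the caller-side names pool and
// txs are hypothetical): the returned errors are positionally aligned with the
// input slice, so errs[i] reports the fate of txs[i].
//
//	errs := pool.Add(txs, false)
//	for i, err := range errs {
//		if err != nil {
//			log.Debug("Blob transaction rejected", "hash", txs[i].Hash(), "err", err)
//		}
//	}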
  1372  
  1373  // add inserts a new blob transaction into the pool if it passes validation (both
  1374  // consensus validity and pool restrictions).
  1375  func (p *BlobPool) add(tx *types.Transaction) (err error) {
  1376  	// The blob pool blocks on adding a transaction. This is because blob txs are
  1377  	// only ever pulled from the network, so this method acts as the overload
  1378  	// protection for fetches.
  1379  	waitStart := time.Now()
  1380  	p.lock.Lock()
  1381  	addwaitHist.Update(time.Since(waitStart).Nanoseconds())
  1382  	defer p.lock.Unlock()
  1383  
  1384  	defer func(start time.Time) {
  1385  		addtimeHist.Update(time.Since(start).Nanoseconds())
  1386  	}(time.Now())
  1387  
  1388  	// Ensure the transaction is valid from all perspectives
  1389  	if err := p.validateTx(tx); err != nil {
  1390  		log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
  1391  		switch {
  1392  		case errors.Is(err, txpool.ErrUnderpriced):
  1393  			addUnderpricedMeter.Mark(1)
  1394  		case errors.Is(err, txpool.ErrTxGasPriceTooLow):
  1395  			addUnderpricedMeter.Mark(1)
  1396  		case errors.Is(err, core.ErrNonceTooLow):
  1397  			addStaleMeter.Mark(1)
  1398  		case errors.Is(err, core.ErrNonceTooHigh):
  1399  			addGappedMeter.Mark(1)
  1400  		case errors.Is(err, core.ErrInsufficientFunds):
  1401  			addOverdraftedMeter.Mark(1)
  1402  		case errors.Is(err, txpool.ErrAccountLimitExceeded):
  1403  			addOvercappedMeter.Mark(1)
  1404  		case errors.Is(err, txpool.ErrReplaceUnderpriced):
  1405  			addNoreplaceMeter.Mark(1)
  1406  		default:
  1407  			addInvalidMeter.Mark(1)
  1408  		}
  1409  		return err
  1410  	}
  1411  	// If the address is not yet known, request exclusivity to track the account
  1412  	// only by this subpool until all transactions are evicted
  1413  	from, _ := types.Sender(p.signer, tx) // already validated above
  1414  	if _, ok := p.index[from]; !ok {
  1415  		if err := p.reserver.Hold(from); err != nil {
  1416  			addNonExclusiveMeter.Mark(1)
  1417  			return err
  1418  		}
  1419  		defer func() {
  1420  			// If the transaction is rejected by some post-validation check, remove
  1421  			// the lock on the reservation set.
  1422  			//
  1423  			// Note, `err` here is the named error return, which will be initialized
  1424  			// by a return statement before running deferred methods. Take care with
  1425  			// removing or subscoping err as it will break this clause.
  1426  			if err != nil {
  1427  				p.reserver.Release(from)
  1428  			}
  1429  		}()
  1430  	}
  1431  	// Transaction permitted into the pool from a nonce and cost perspective;
  1432  	// insert it into the database and update the indices
  1433  	blob, err := rlp.EncodeToBytes(tx)
  1434  	if err != nil {
  1435  		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
  1436  		return err
  1437  	}
  1438  	id, err := p.store.Put(blob)
  1439  	if err != nil {
  1440  		return err
  1441  	}
  1442  	meta := newBlobTxMeta(id, tx.Size(), p.store.Size(id), tx)
  1443  
  1444  	var (
  1445  		next   = p.state.GetNonce(from)
  1446  		offset = int(tx.Nonce() - next)
  1447  		newacc = false
  1448  	)
  1449  	var oldEvictionExecFeeJumps, oldEvictionBlobFeeJumps float64
  1450  	if txs, ok := p.index[from]; ok {
  1451  		oldEvictionExecFeeJumps = txs[len(txs)-1].evictionExecFeeJumps
  1452  		oldEvictionBlobFeeJumps = txs[len(txs)-1].evictionBlobFeeJumps
  1453  	}
  1454  	if len(p.index[from]) > offset {
  1455  		// Transaction replaces a previously queued one
  1456  		dropReplacedMeter.Mark(1)
  1457  
  1458  		prev := p.index[from][offset]
  1459  		if err := p.store.Delete(prev.id); err != nil {
  1460  			// Unfortunate situation, but try to recover gracefully instead of crashing
  1461  			log.Error("Failed to delete replaced transaction", "id", prev.id, "err", err)
  1462  		}
  1463  		// Update the transaction index
  1464  		p.index[from][offset] = meta
  1465  		p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap)
  1466  		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
  1467  
  1468  		p.lookup.untrack(prev)
  1469  		p.lookup.track(meta)
  1470  		p.stored += uint64(meta.storageSize) - uint64(prev.storageSize)
  1471  	} else {
  1472  		// Transaction extends previously scheduled ones
  1473  		p.index[from] = append(p.index[from], meta)
  1474  		if _, ok := p.spent[from]; !ok {
  1475  			p.spent[from] = new(uint256.Int)
  1476  			newacc = true
  1477  		}
  1478  		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
  1479  		p.lookup.track(meta)
  1480  		p.stored += uint64(meta.storageSize)
  1481  	}
  1482  	// Recompute the rolling eviction fields. In case of a replacement, this
  1483  	// recomputes all subsequent entries too. In case of an append, only the
  1484  	// freshly added entry needs calculating.
  1485  	txs := p.index[from]
  1486  
  1487  	for i := offset; i < len(txs); i++ {
  1488  		// The first transaction will always use itself
  1489  		if i == 0 {
  1490  			txs[0].evictionExecTip = txs[0].execTipCap
  1491  			txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
  1492  			txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
  1493  
  1494  			continue
  1495  		}
  1496  		// Subsequent transactions will use a rolling calculation
  1497  		txs[i].evictionExecTip = txs[i-1].evictionExecTip
  1498  		if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
  1499  			txs[i].evictionExecTip = txs[i].execTipCap
  1500  		}
  1501  		txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
  1502  		if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
  1503  			txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
  1504  		}
  1505  		txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
  1506  		if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
  1507  			txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
  1508  		}
  1509  	}
  1510  	// Update the eviction heap with the new information:
  1511  	//   - If the transaction is from a new account, add it to the heap
  1512  	//   - If the account had a singleton tx replaced, update the heap (new price caps)
  1513  	//   - If the account has a transaction replaced or appended, update the heap if significantly changed
  1514  	switch {
  1515  	case newacc:
  1516  		heap.Push(p.evict, from)
  1517  
  1518  	case len(txs) == 1: // 1 tx and not a new acc, must be replacement
  1519  		heap.Fix(p.evict, p.evict.index[from])
  1520  
  1521  	default: // replacement or new append
  1522  		evictionExecFeeDiff := oldEvictionExecFeeJumps - txs[len(txs)-1].evictionExecFeeJumps
  1523  		evictionBlobFeeDiff := oldEvictionBlobFeeJumps - txs[len(txs)-1].evictionBlobFeeJumps
  1524  
  1525  		if math.Abs(evictionExecFeeDiff) > 0.001 || math.Abs(evictionBlobFeeDiff) > 0.001 { // need math.Abs, can go up and down
  1526  			heap.Fix(p.evict, p.evict.index[from])
  1527  		}
  1528  	}
  1529  	// If the pool went over the allowed data limit, evict transactions until
  1530  	// we're again below the threshold
  1531  	for p.stored > p.config.Datacap {
  1532  		p.drop()
  1533  	}
  1534  	p.updateStorageMetrics()
  1535  
  1536  	addValidMeter.Mark(1)
  1537  	return nil
  1538  }
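
// runningMinimum is a small illustrative helper (added for this edit, not part
// of the upstream pool) capturing the invariant maintained by the rolling
// eviction fields above: each entry's eviction value is the minimum of its own
// value and those of all lower-nonce entries, so the account's tail always
// reflects its lowest caps and is the first candidate for drop.
func runningMinimum(values []float64) []float64 {
	mins := make([]float64, len(values))
	for i, v := range values {
		if i == 0 || v < mins[i-1] {
			mins[i] = v
		} else {
			mins[i] = mins[i-1]
		}
	}
	return mins
}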
  1539  
  1540  // drop removes the worst transaction from the pool. It is primarily used when a
  1541  // freshly added transaction overflows the pool and needs to evict something. The
  1542  // method is also called on startup if the user resizes their storage; that might
  1543  // be an expensive run, but it should be fine.
  1544  func (p *BlobPool) drop() {
  1545  	// Peek at the account with the worst transaction set to evict from (Go's heap
  1546  	// stores the minimum at index zero of the heap slice) and retrieve its last
  1547  	// transaction.
  1548  	var (
  1549  		from = p.evict.addrs[0] // cannot call drop on empty pool
  1550  
  1551  		txs  = p.index[from]
  1552  		drop = txs[len(txs)-1]
  1553  		last = len(txs) == 1
  1554  	)
  1555  	// Remove the transaction from the pool's index
  1556  	if last {
  1557  		delete(p.index, from)
  1558  		delete(p.spent, from)
  1559  		p.reserver.Release(from)
  1560  	} else {
  1561  		txs[len(txs)-1] = nil
  1562  		txs = txs[:len(txs)-1]
  1563  
  1564  		p.index[from] = txs
  1565  		p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
  1566  	}
  1567  	p.stored -= uint64(drop.storageSize)
  1568  	p.lookup.untrack(drop)
  1569  
  1570  	// Remove the transaction from the pool's eviction heap:
  1571  	//   - If the entire account was dropped, pop off the address
  1572  	//   - Otherwise, if the new tail has better eviction caps, fix the heap
  1573  	if last {
  1574  		heap.Pop(p.evict)
  1575  	} else {
  1576  		tail := txs[len(txs)-1] // new tail, surely exists
  1577  
  1578  		evictionExecFeeDiff := tail.evictionExecFeeJumps - drop.evictionExecFeeJumps
  1579  		evictionBlobFeeDiff := tail.evictionBlobFeeJumps - drop.evictionBlobFeeJumps
  1580  
  1581  		if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
  1582  			heap.Fix(p.evict, 0)
  1583  		}
  1584  	}
  1585  	// Remove the transaction from the data store
  1586  	log.Debug("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
  1587  	dropOverflownMeter.Mark(1)
  1588  
  1589  	if err := p.store.Delete(drop.id); err != nil {
  1590  		log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
  1591  	}
  1592  }
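
// needsHeapFix is a tiny sketch (added for this edit, not part of the upstream
// pool) of the threshold rule used above: after dropping the tail, the heap is
// only re-fixed when the new tail's eviction fee jumps differ from the dropped
// one's by more than 0.001 jumps, avoiding churn on negligible changes. Since
// the eviction fields are running minima, the new tail's values can only be
// higher, so plain subtraction suffices here (unlike in add, which needs
// math.Abs).
func needsHeapFix(droppedJumps, newTailJumps float64) bool {
	return newTailJumps-droppedJumps > 0.001
}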
  1593  
  1594  // Pending retrieves all currently processable transactions, grouped by origin
  1595  // account and sorted by nonce.
  1596  //
  1597  // The transactions can also be pre-filtered by the dynamic fee components to
  1598  // reduce allocations and load on downstream subsystems.
  1599  func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
  1600  	// If only plain transactions are requested, this pool is unsuitable as it
  1601  	// contains none; don't even bother.
  1602  	if filter.OnlyPlainTxs {
  1603  		return nil
  1604  	}
  1605  	// Track the amount of time waiting to retrieve the list of pending blob txs
  1606  	// from the pool and the amount of time actually spent on assembling the data.
  1607  	// The latter will be pretty much moot, but we've kept it for symmetry
  1608  	// across all user operations.
  1609  	pendStart := time.Now()
  1610  	p.lock.RLock()
  1611  	pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
  1612  	defer p.lock.RUnlock()
  1613  
  1614  	execStart := time.Now()
  1615  	defer func() {
  1616  		pendtimeHist.Update(time.Since(execStart).Nanoseconds())
  1617  	}()
  1618  
  1619  	pending := make(map[common.Address][]*txpool.LazyTransaction, len(p.index))
  1620  	for addr, txs := range p.index {
  1621  		lazies := make([]*txpool.LazyTransaction, 0, len(txs))
  1622  		for _, tx := range txs {
  1623  			// If transaction filtering was requested, discard badly priced ones
  1624  			if filter.MinTip != nil && filter.BaseFee != nil {
  1625  				if tx.execFeeCap.Lt(filter.BaseFee) {
  1626  					break // basefee too low, cannot be included, discard rest of txs from the account
  1627  				}
  1628  				tip := new(uint256.Int).Sub(tx.execFeeCap, filter.BaseFee)
  1629  				if tip.Gt(tx.execTipCap) {
  1630  					tip = tx.execTipCap
  1631  				}
  1632  				if tip.Lt(filter.MinTip) {
  1633  					break // allowed or remaining tip too low, cannot be included, discard rest of txs from the account
  1634  				}
  1635  			}
  1636  			if filter.BlobFee != nil {
  1637  				if tx.blobFeeCap.Lt(filter.BlobFee) {
  1638  					break // blobfee too low, cannot be included, discard rest of txs from the account
  1639  				}
  1640  			}
  1641  			// Transaction was accepted according to the filter, append to the pending list
  1642  			lazies = append(lazies, &txpool.LazyTransaction{
  1643  				Pool:      p,
  1644  				Hash:      tx.hash,
  1645  				Time:      execStart, // TODO(karalabe): Maybe save these and use that?
  1646  				GasFeeCap: tx.execFeeCap,
  1647  				GasTipCap: tx.execTipCap,
  1648  				Gas:       tx.execGas,
  1649  				BlobGas:   tx.blobGas,
  1650  			})
  1651  		}
  1652  		if len(lazies) > 0 {
  1653  			pending[addr] = lazies
  1654  		}
  1655  	}
  1656  	return pending
  1657  }
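
// effectiveTip is a minimal standalone sketch (added for this edit, not part
// of the upstream pool) of the miner-tip computation used by the filter above:
// the tip a block producer would actually receive is capped at
// min(execFeeCap-baseFee, execTipCap), and the account's remaining
// transactions are skipped once that falls below the requested minimum.
func effectiveTip(execFeeCap, execTipCap, baseFee *uint256.Int) *uint256.Int {
	if execFeeCap.Lt(baseFee) {
		return nil // cannot even cover the base fee, unincludable
	}
	tip := new(uint256.Int).Sub(execFeeCap, baseFee)
	if tip.Gt(execTipCap) {
		tip = execTipCap
	}
	return tip
}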
  1658  
  1659  // updateStorageMetrics retrieves a bunch of stats from the data store and pushes
  1660  // them out as metrics.
  1661  func (p *BlobPool) updateStorageMetrics() {
  1662  	stats := p.store.Infos()
  1663  
  1664  	var (
  1665  		dataused uint64
  1666  		datareal uint64
  1667  		slotused uint64
  1668  
  1669  		oversizedDataused uint64
  1670  		oversizedDatagaps uint64
  1671  		oversizedSlotused uint64
  1672  		oversizedSlotgaps uint64
  1673  	)
  1674  	for _, shelf := range stats.Shelves {
  1675  		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
  1676  		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
  1677  
  1678  		dataused += slotDataused
  1679  		datareal += slotDataused + slotDatagaps
  1680  		slotused += shelf.FilledSlots
  1681  
  1682  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
  1683  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
  1684  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
  1685  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
  1686  
  1687  		maxBlobs := eip4844.LatestMaxBlobsPerBlock(p.chain.Config())
  1688  		if shelf.SlotSize/blobSize > uint32(maxBlobs) {
  1689  			oversizedDataused += slotDataused
  1690  			oversizedDatagaps += slotDatagaps
  1691  			oversizedSlotused += shelf.FilledSlots
  1692  			oversizedSlotgaps += shelf.GappedSlots
  1693  		}
  1694  	}
  1695  	datausedGauge.Update(int64(dataused))
  1696  	datarealGauge.Update(int64(datareal))
  1697  	slotusedGauge.Update(int64(slotused))
  1698  
  1699  	oversizedDatausedGauge.Update(int64(oversizedDataused))
  1700  	oversizedDatagapsGauge.Update(int64(oversizedDatagaps))
  1701  	oversizedSlotusedGauge.Update(int64(oversizedSlotused))
  1702  	oversizedSlotgapsGauge.Update(int64(oversizedSlotgaps))
  1703  
  1704  	p.updateLimboMetrics()
  1705  }
  1706  
  1707  // updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
  1708  // them out as metrics.
  1709  func (p *BlobPool) updateLimboMetrics() {
  1710  	stats := p.limbo.store.Infos()
  1711  
  1712  	var (
  1713  		dataused uint64
  1714  		datareal uint64
  1715  		slotused uint64
  1716  	)
  1717  	for _, shelf := range stats.Shelves {
  1718  		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
  1719  		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
  1720  
  1721  		dataused += slotDataused
  1722  		datareal += slotDataused + slotDatagaps
  1723  		slotused += shelf.FilledSlots
  1724  
  1725  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
  1726  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
  1727  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
  1728  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
  1729  	}
  1730  	limboDatausedGauge.Update(int64(dataused))
  1731  	limboDatarealGauge.Update(int64(datareal))
  1732  	limboSlotusedGauge.Update(int64(slotused))
  1733  }
  1734  
  1735  // SubscribeTransactions registers a subscription for new transaction events,
  1736  // supporting feeding only newly seen or also resurrected transactions.
  1737  func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
  1738  	if reorgs {
  1739  		return p.insertFeed.Subscribe(ch)
  1740  	} else {
  1741  		return p.discoverFeed.Subscribe(ch)
  1742  	}
  1743  }
  1744  
  1745  // Nonce returns the next nonce of an account, with all transactions executable
  1746  // by the pool already applied on top.
  1747  func (p *BlobPool) Nonce(addr common.Address) uint64 {
  1748  	// We need a write lock here, since state.GetNonce might write the cache.
  1749  	p.lock.Lock()
  1750  	defer p.lock.Unlock()
  1751  
  1752  	if txs, ok := p.index[addr]; ok {
  1753  		return txs[len(txs)-1].nonce + 1
  1754  	}
  1755  	return p.state.GetNonce(addr)
  1756  }
  1757  
  1758  // Stats retrieves the current pool stats, namely the number of pending and the
  1759  // number of queued (non-executable) transactions.
  1760  func (p *BlobPool) Stats() (int, int) {
  1761  	p.lock.RLock()
  1762  	defer p.lock.RUnlock()
  1763  
  1764  	var pending int
  1765  	for _, txs := range p.index {
  1766  		pending += len(txs)
  1767  	}
  1768  	return pending, 0 // No non-executable txs in the blob pool
  1769  }
  1770  
  1771  // Content retrieves the data content of the transaction pool, returning all the
  1772  // pending as well as queued transactions, grouped by account and sorted by nonce.
  1773  //
  1774  // For the blob pool, this method will return nothing for now.
  1775  // TODO(karalabe): Abstract out the returned metadata.
  1776  func (p *BlobPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
  1777  	return make(map[common.Address][]*types.Transaction), make(map[common.Address][]*types.Transaction)
  1778  }
  1779  
  1780  // ContentFrom retrieves the data content of the transaction pool, returning the
  1781  // pending as well as queued transactions of this address, grouped by nonce.
  1782  //
  1783  // For the blob pool, this method will return nothing for now.
  1784  // TODO(karalabe): Abstract out the returned metadata.
  1785  func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
  1786  	return []*types.Transaction{}, []*types.Transaction{}
  1787  }
  1788  
  1789  // Status returns the known status (unknown/pending/queued) of a transaction
  1790  // identified by its hash.
  1791  func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
  1792  	if p.Has(hash) {
  1793  		return txpool.TxStatusPending
  1794  	}
  1795  	return txpool.TxStatusUnknown
  1796  }
  1797  
  1798  // Clear implements txpool.SubPool, removing all tracked transactions
  1799  // from the blob pool and persistent store.
  1800  //
  1801  // Note, do not use this in production / live code. In live code, the pool is
  1802  // meant to reset on a separate thread to avoid DoS vectors.
  1803  func (p *BlobPool) Clear() {
  1804  	p.lock.Lock()
  1805  	defer p.lock.Unlock()
  1806  
  1807  	// Manually iterating and deleting every entry is highly suboptimal.
  1808  	// However, Clear is not currently used in production, so
  1809  	// performance is not critical at the moment.
  1810  	for hash := range p.lookup.txIndex {
  1811  		id, _ := p.lookup.storeidOfTx(hash)
  1812  		if err := p.store.Delete(id); err != nil {
  1813  			log.Warn("failed to delete blob tx from backing store", "err", err)
  1814  		}
  1815  	}
  1816  	for hash := range p.lookup.blobIndex {
  1817  		id, _ := p.lookup.storeidOfBlob(hash)
  1818  		if err := p.store.Delete(id); err != nil {
  1819  			log.Warn("failed to delete blob from backing store", "err", err)
  1820  		}
  1821  	}
  1822  
  1823  	// Unreserve each tracked account. Ideally, we could just clear the
  1824  	// reservation map in the parent txpool context. However, clearing in the
  1825  	// parent context without exposing the subpool lock requires locking the
  1826  	// reservations first and then locking each subpool.
  1827  	//
  1828  	// This creates the potential for a deadlock situation:
  1829  	//
  1830  	// * TxPool.Clear locks the reservations
  1831  	// * a new transaction is received which locks the subpool mutex
  1832  	// * TxPool.Clear attempts to lock subpool mutex
  1833  	//
  1834  	// The transaction addition may attempt to reserve the sender addr, which
  1835  	// can't happen until Clear releases the reservation lock. Clear cannot
  1836  	// acquire the subpool lock until the transaction addition is completed.
  1837  	for acct := range p.index {
  1838  		p.reserver.Release(acct)
  1839  	}
  1840  	p.lookup = newLookup()
  1841  	p.index = make(map[common.Address][]*blobTxMeta)
  1842  	p.spent = make(map[common.Address]*uint256.Int)
  1843  
  1844  	var (
  1845  		basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
  1846  		blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
  1847  	)
  1848  	p.evict = newPriceHeap(basefee, blobfee, p.index)
  1849  }