github.com/ethereum/go-ethereum@v1.14.4-0.20240516095835-473ee8fc07a3/core/txpool/blobpool/blobpool.go

     1  // Copyright 2022 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package blobpool implements the EIP-4844 blob transaction pool.
    18  package blobpool
    19  
    20  import (
    21  	"container/heap"
    22  	"errors"
    23  	"fmt"
    24  	"math"
    25  	"math/big"
    26  	"os"
    27  	"path/filepath"
    28  	"sort"
    29  	"sync"
    30  	"time"
    31  
    32  	"github.com/ethereum/go-ethereum/common"
    33  	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
    34  	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
    35  	"github.com/ethereum/go-ethereum/core"
    36  	"github.com/ethereum/go-ethereum/core/state"
    37  	"github.com/ethereum/go-ethereum/core/txpool"
    38  	"github.com/ethereum/go-ethereum/core/types"
    39  	"github.com/ethereum/go-ethereum/event"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/metrics"
    42  	"github.com/ethereum/go-ethereum/params"
    43  	"github.com/ethereum/go-ethereum/rlp"
    44  	"github.com/holiman/billy"
    45  	"github.com/holiman/uint256"
    46  )
    47  
    48  const (
    49  	// blobSize is the protocol constrained byte size of a single blob in a
    50  	// transaction. There can be multiple of these embedded into a single tx.
    51  	blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement
    52  
    53  	// maxBlobsPerTransaction is the maximum number of blobs a single transaction
    54  	// is allowed to contain. Whilst the spec states it's unlimited, the block
     55  	// data slots are protocol bound, which implicitly also limits this (see the sketch below).
    56  	maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
    57  
     58  	// txAvgSize is the approximate byte size of a transaction's metadata, to avoid
    59  	// tiny overflows causing all txs to move a shelf higher, wasting disk space.
    60  	txAvgSize = 4 * 1024
    61  
    62  	// txMaxSize is the maximum size a single transaction can have, outside
    63  	// the included blobs. Since blob transactions are pulled instead of pushed,
     64  	// and only a small amount of metadata is kept in RAM while the rest lives on
     65  	// disk, there is no critical limit that must be enforced. Still, capping it
     66  	// to some sane limit can never hurt.
    67  	txMaxSize = 1024 * 1024
    68  
    69  	// maxTxsPerAccount is the maximum number of blob transactions admitted from
    70  	// a single account. The limit is enforced to minimize the DoS potential of
    71  	// a private tx cancelling publicly propagated blobs.
    72  	//
    73  	// Note, transactions resurrected by a reorg are also subject to this limit,
    74  	// so pushing it down too aggressively might make resurrections non-functional.
    75  	maxTxsPerAccount = 16
    76  
    77  	// pendingTransactionStore is the subfolder containing the currently queued
    78  	// blob transactions.
    79  	pendingTransactionStore = "queue"
    80  
    81  	// limboedTransactionStore is the subfolder containing the currently included
    82  	// but not yet finalized transaction blobs.
    83  	limboedTransactionStore = "limbo"
    84  )
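
// An illustrative sketch, not part of the original file: with the Cancun-era
// protocol parameters (4096 field elements of 32 bytes each per blob, 786432
// max blob gas per block, 131072 blob gas per blob - all assumed values
// mirroring the params package at the time of writing), the constants above
// work out to a 128KiB blob and a ceiling of 6 blobs per transaction.
func exampleBlobConstants() {
	const (
		fieldElementsPerBlob = 4096   // assumed params.BlobTxFieldElementsPerBlob
		bytesPerFieldElement = 32     // assumed params.BlobTxBytesPerFieldElement
		maxBlobGasPerBlock   = 786432 // assumed params.MaxBlobGasPerBlock
		blobGasPerBlob       = 131072 // assumed params.BlobTxBlobGasPerBlob
	)
	fmt.Println(fieldElementsPerBlob * bytesPerFieldElement) // 131072 bytes, i.e. one 128KiB blob
	fmt.Println(maxBlobGasPerBlock / blobGasPerBlob)         // at most 6 blobs per transaction
}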
    85  
    86  // blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
    87  // schedule the blob transactions into the following blocks. Only ever add the
     88  // bare minimum of needed fields to keep the size down (and thus the number of
     89  // entries higher for the same memory consumption).
    90  type blobTxMeta struct {
    91  	hash common.Hash // Transaction hash to maintain the lookup table
    92  	id   uint64      // Storage ID in the pool's persistent store
    93  	size uint32      // Byte size in the pool's persistent store
    94  
    95  	nonce      uint64       // Needed to prioritize inclusion order within an account
    96  	costCap    *uint256.Int // Needed to validate cumulative balance sufficiency
    97  	execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
    98  	execFeeCap *uint256.Int // Needed to validate replacement price bump
    99  	blobFeeCap *uint256.Int // Needed to validate replacement price bump
   100  	execGas    uint64       // Needed to check inclusion validity before reading the blob
   101  	blobGas    uint64       // Needed to check inclusion validity before reading the blob
   102  
   103  	basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
   104  	blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
   105  
   106  	evictionExecTip      *uint256.Int // Worst gas tip across all previous nonces
   107  	evictionExecFeeJumps float64      // Worst base fee (converted to fee jumps) across all previous nonces
    108  	evictionBlobFeeJumps float64      // Worst blob fee (converted to fee jumps) across all previous nonces
   109  }
   110  
   111  // newBlobTxMeta retrieves the indexed metadata fields from a blob transaction
   112  // and assembles a helper struct to track in memory.
   113  func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
   114  	meta := &blobTxMeta{
   115  		hash:       tx.Hash(),
   116  		id:         id,
   117  		size:       size,
   118  		nonce:      tx.Nonce(),
   119  		costCap:    uint256.MustFromBig(tx.Cost()),
   120  		execTipCap: uint256.MustFromBig(tx.GasTipCap()),
   121  		execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
   122  		blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
   123  		execGas:    tx.Gas(),
   124  		blobGas:    tx.BlobGas(),
   125  	}
   126  	meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
   127  	meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
   128  
   129  	return meta
   130  }
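
// A minimal sketch of the log1.125 conversion used above. The package's real
// dynamicFeeJumps helper lives elsewhere in this package; exampleFeeJumps is
// a hypothetical stand-in. The number of ~1.125x EIP-1559/4844 fee
// adjustments needed to reach a given fee is log1.125(fee), computed via the
// change-of-base rule log2(fee) / log2(1.125).
func exampleFeeJumps(fee *uint256.Int) float64 {
	if fee.IsZero() {
		return 0 // the log of zero is undefined, treat it as the floor
	}
	return math.Log2(fee.Float64()) / math.Log2(1.125)
}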
   131  
   132  // BlobPool is the transaction pool dedicated to EIP-4844 blob transactions.
   133  //
   134  // Blob transactions are special snowflakes that are designed for a very specific
   135  // purpose (rollups) and are expected to adhere to that specific use case. These
   136  // behavioural expectations allow us to design a transaction pool that is more robust
    137  // (i.e. no transaction resending issues) and more resilient to DoS (e.g. replace-flush
   138  // attacks) than the generic tx pool. These improvements will also mean, however,
   139  // that we enforce a significantly more aggressive strategy on entering and exiting
   140  // the pool:
   141  //
   142  //   - Blob transactions are large. With the initial design aiming for 128KB blobs,
   143  //     we must ensure that these only traverse the network the absolute minimum
   144  //     number of times. Broadcasting to sqrt(peers) is out of the question, rather
   145  //     these should only ever be announced and the remote side should request it if
   146  //     it wants to.
   147  //
   148  //   - Block blob-space is limited. With blocks being capped to a few blob txs, we
   149  //     can make use of the very low expected churn rate within the pool. Notably,
   150  //     we should be able to use a persistent disk backend for the pool, solving
   151  //     the tx resend issue that plagues the generic tx pool, as long as there's no
   152  //     artificial churn (i.e. pool wars).
   153  //
    154  //   - The purpose of blobs is layer-2s. Layer-2s are meant to use blob transactions to
   155  //     commit to their own current state, which is independent of Ethereum mainnet
   156  //     (state, txs). This means that there's no reason for blob tx cancellation or
   157  //     replacement, apart from a potential basefee / miner tip adjustment.
   158  //
   159  //   - Replacements are expensive. Given their size, propagating a replacement
   160  //     blob transaction to an existing one should be aggressively discouraged.
   161  //     Whilst generic transactions can start at 1 Wei gas cost and require a 10%
   162  //     fee bump to replace, we suggest requiring a higher min cost (e.g. 1 gwei)
   163  //     and a more aggressive bump (100%).
   164  //
   165  //   - Cancellation is prohibitive. Evicting an already propagated blob tx is a huge
   166  //     DoS vector. As such, a) replacement (higher-fee) blob txs mustn't invalidate
   167  //     already propagated (future) blob txs (cumulative fee); b) nonce-gapped blob
    168  //     txs are disallowed; c) the presence of blob transactions excludes non-blob
   169  //     transactions.
   170  //
   171  //   - Malicious cancellations are possible. Although the pool might prevent txs
    172  //     that cancel blobs, blocks might contain such transactions (malicious miner
    173  //     or flashbotter). The pool should cap the total number of blob transactions
    174  //     per account so as to prevent propagating too much data before cancelling it
   175  //     via a normal transaction. It should nonetheless be high enough to support
   176  //     resurrecting reorged transactions. Perhaps 4-16.
   177  //
   178  //   - Local txs are meaningless. Mining pools historically used local transactions
   179  //     for payouts or for backdoor deals. With 1559 in place, the basefee usually
   180  //     dominates the final price, so 0 or non-0 tip doesn't change much. Blob txs
   181  //     retain the 1559 2D gas pricing (and introduce on top a dynamic blob gas fee),
   182  //     so locality is moot. With a disk backed blob pool avoiding the resend issue,
   183  //     there's also no need to save own transactions for later.
   184  //
   185  //   - No-blob blob-txs are bad. Theoretically there's no strong reason to disallow
   186  //     blob txs containing 0 blobs. In practice, admitting such txs into the pool
   187  //     breaks the low-churn invariant as blob constraints don't apply anymore. Even
   188  //     though we could accept blocks containing such txs, a reorg would require moving
   189  //     them back into the blob pool, which can break invariants.
   190  //
   191  //   - Dropping blobs needs delay. When normal transactions are included, they
   192  //     are immediately evicted from the pool since they are contained in the
   193  //     including block. Blobs however are not included in the execution chain,
   194  //     so a mini reorg cannot re-pool "lost" blob transactions. To support reorgs,
   195  //     blobs are retained on disk until they are finalised.
   196  //
   197  //   - Blobs can arrive via flashbots. Blocks might contain blob transactions we
   198  //     have never seen on the network. Since we cannot recover them from blocks
   199  //     either, the engine_newPayload needs to give them to us, and we cache them
   200  //     until finality to support reorgs without tx losses.
   201  //
   202  // Whilst some constraints above might sound overly aggressive, the general idea is
    203  // that the blob pool should work robustly for its intended use case and, whilst
    204  // anyone is free to use blob transactions for arbitrary non-rollup use cases,
    205  // they should not be allowed to run amok on the network.
   206  //
    207  // Implementation-wise, there are a few interesting design choices:
   208  //
   209  //   - Adding a transaction to the pool blocks until persisted to disk. This is
   210  //     viable because TPS is low (2-4 blobs per block initially, maybe 8-16 at
   211  //     peak), so natural churn is a couple MB per block. Replacements doing O(n)
   212  //     updates are forbidden and transaction propagation is pull based (i.e. no
   213  //     pileup of pending data).
   214  //
    215  //   - When transactions are chosen for inclusion, the primary criterion is the
   216  //     signer tip (and having a basefee/data fee high enough of course). However,
   217  //     same-tip transactions will be split by their basefee/datafee, preferring
   218  //     those that are closer to the current network limits. The idea being that
    219  //     very relaxed ones can be included even if the fees go up, whereas the closer
   220  //     ones could already be invalid.
   221  //
   222  // When the pool eventually reaches saturation, some old transactions - that may
   223  // never execute - will need to be evicted in favor of newer ones. The eviction
   224  // strategy is quite complex:
   225  //
    226  //   - Exceeding capacity evicts the highest-nonce transaction of the account with
    227  //     the lowest-paying blob transaction anywhere in its pooled nonce-sequence, as
    228  //     that tx would execute the furthest in the future and is thus blocking anything
    229  //     after it. The lowest nonce is deliberately not evicted to avoid a nonce-gap.
   230  //
   231  //   - Analogously, if the pool is full, the consideration price of a new tx for
   232  //     evicting an old one is the smallest price in the entire nonce-sequence of
   233  //     the account. This avoids malicious users DoSing the pool with seemingly
   234  //     high paying transactions hidden behind a low-paying blocked one.
   235  //
   236  //   - Since blob transactions have 3 price parameters: execution tip, execution
   237  //     fee cap and data fee cap, there's no singular parameter to create a total
   238  //     price ordering on. What's more, since the base fee and blob fee can move
   239  //     independently of one another, there's no pre-defined way to combine them
   240  //     into a stable order either. This leads to a multi-dimensional problem to
   241  //     solve after every block.
   242  //
   243  //   - The first observation is that comparing 1559 base fees or 4844 blob fees
   244  //     needs to happen in the context of their dynamism. Since these fees jump
   245  //     up or down in ~1.125 multipliers (at max) across blocks, comparing fees
   246  //     in two transactions should be based on log1.125(fee) to eliminate noise.
   247  //
   248  //   - The second observation is that the basefee and blobfee move independently,
   249  //     so there's no way to split mixed txs on their own (A has higher base fee,
   250  //     B has higher blob fee). Rather than look at the absolute fees, the useful
   251  //     metric is the max time it can take to exceed the transaction's fee caps.
   252  //     Specifically, we're interested in the number of jumps needed to go from
   253  //     the current fee to the transaction's cap:
   254  //
   255  //     jumps = log1.125(txfee) - log1.125(basefee)
   256  //
   257  //   - The third observation is that the base fee tends to hover around rather
   258  //     than swing wildly. The number of jumps needed from the current fee starts
   259  //     to get less relevant the higher it is. To remove the noise here too, the
   260  //     pool will use log(jumps) as the delta for comparing transactions.
   261  //
   262  //     delta = sign(jumps) * log(abs(jumps))
   263  //
   264  //   - To establish a total order, we need to reduce the dimensionality of the
   265  //     two base fees (log jumps) to a single value. The interesting aspect from
    266  //     the pool's perspective is how fast a tx will become executable (fees going
   267  //     down, crossing the smaller negative jump counter) or non-executable (fees
   268  //     going up, crossing the smaller positive jump counter). As such, the pool
   269  //     cares only about the min of the two delta values for eviction priority.
   270  //
   271  //     priority = min(deltaBasefee, deltaBlobfee)
   272  //
   273  //   - The above very aggressive dimensionality and noise reduction should result
    274  //     in transactions being grouped into a small number of buckets, the further
   275  //     the fees the larger the buckets. This is good because it allows us to use
   276  //     the miner tip meaningfully as a splitter.
   277  //
   278  //   - For the scenario where the pool does not contain non-executable blob txs
   279  //     anymore, it does not make sense to grant a later eviction priority to txs
   280  //     with high fee caps since it could enable pool wars. As such, any positive
   281  //     priority will be grouped together.
   282  //
   283  //     priority = min(deltaBasefee, deltaBlobfee, 0)
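//     (An illustrative sketch of this priority calculation follows the
//     BlobPool type declaration below.)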
   284  //
   285  // Optimisation tradeoffs:
   286  //
   287  //   - Eviction relies on 3 fee minimums per account (exec tip, exec cap and blob
   288  //     cap). Maintaining these values across all transactions from the account is
   289  //     problematic as each transaction replacement or inclusion would require a
   290  //     rescan of all other transactions to recalculate the minimum. Instead, the
    291  //     pool maintains a rolling minimum across the nonce range. Updating the
    292  //     minimums then only needs to start at the swapped in/out nonce and proceed
    293  //     up to the first value that remains unchanged.
   294  type BlobPool struct {
   295  	config  Config                 // Pool configuration
   296  	reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools
   297  
   298  	store  billy.Database // Persistent data store for the tx metadata and blobs
   299  	stored uint64         // Useful data size of all transactions on disk
   300  	limbo  *limbo         // Persistent data store for the non-finalized blobs
   301  
   302  	signer types.Signer // Transaction signer to use for sender recovery
   303  	chain  BlockChain   // Chain object to access the state through
   304  
   305  	head   *types.Header  // Current head of the chain
   306  	state  *state.StateDB // Current state at the head of the chain
   307  	gasTip *uint256.Int   // Currently accepted minimum gas tip
   308  
   309  	lookup map[common.Hash]uint64           // Lookup table mapping hashes to tx billy entries
   310  	index  map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
   311  	spent  map[common.Address]*uint256.Int  // Expenditure tracking for individual accounts
   312  	evict  *evictHeap                       // Heap of cheapest accounts for eviction when full
   313  
   314  	discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
   315  	insertFeed   event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
   316  
   317  	lock sync.RWMutex // Mutex protecting the pool during reorg handling
   318  }
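
// An illustrative sketch, not part of the original file, of the eviction
// priority described in the type documentation above. exampleEvictionPriority
// and its inputs are hypothetical: the pool's real helpers live elsewhere in
// this package and return integer priorities, whereas this sketch keeps the
// continuous form for readability. Fee caps are first converted to log1.125
// jump counts (see exampleFeeJumps above), the distance to the current fee
// level is noise-reduced via sign(jumps) * log2(abs(jumps)), and the two fee
// dimensions are collapsed by taking the minimum, capped at 0 so that high
// fee caps cannot buy a later eviction slot.
func exampleEvictionPriority(baseJumps, txBaseJumps, blobJumps, txBlobJumps float64) float64 {
	delta := func(current, txCap float64) float64 {
		jumps := txCap - current // fee adjustments until the cap is crossed
		if int(jumps) == 0 {
			return 0 // within one fee adjustment, no meaningful distance
		}
		if jumps < 0 {
			return -math.Log2(-math.Floor(jumps)) // cap already below the fee: negative priority
		}
		return math.Log2(math.Ceil(jumps)) // cap above the fee: positive priority
	}
	prio := math.Min(delta(baseJumps, txBaseJumps), delta(blobJumps, txBlobJumps))
	return math.Min(prio, 0) // group all positive (executable) priorities together
}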
   319  
   320  // New creates a new blob transaction pool to gather, sort and filter inbound
   321  // blob transactions from the network.
   322  func New(config Config, chain BlockChain) *BlobPool {
   323  	// Sanitize the input to ensure no vulnerable gas prices are set
   324  	config = (&config).sanitize()
   325  
   326  	// Create the transaction pool with its initial settings
   327  	return &BlobPool{
   328  		config: config,
   329  		signer: types.LatestSigner(chain.Config()),
   330  		chain:  chain,
   331  		lookup: make(map[common.Hash]uint64),
   332  		index:  make(map[common.Address][]*blobTxMeta),
   333  		spent:  make(map[common.Address]*uint256.Int),
   334  	}
   335  }
   336  
   337  // Filter returns whether the given transaction can be consumed by the blob pool.
   338  func (p *BlobPool) Filter(tx *types.Transaction) bool {
   339  	return tx.Type() == types.BlobTxType
   340  }
   341  
   342  // Init sets the gas price needed to keep a transaction in the pool and the chain
   343  // head to allow balance / nonce checks. The transaction journal will be loaded
   344  // from disk and filtered based on the provided starting settings.
   345  func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
   346  	p.reserve = reserve
   347  
   348  	var (
   349  		queuedir string
   350  		limbodir string
   351  	)
   352  	if p.config.Datadir != "" {
   353  		queuedir = filepath.Join(p.config.Datadir, pendingTransactionStore)
   354  		if err := os.MkdirAll(queuedir, 0700); err != nil {
   355  			return err
   356  		}
   357  		limbodir = filepath.Join(p.config.Datadir, limboedTransactionStore)
   358  		if err := os.MkdirAll(limbodir, 0700); err != nil {
   359  			return err
   360  		}
   361  	}
    362  	// Initialize the state with the head block, or fall back to an empty one
    363  	// in case the head state is not available (might occur when the node is
    364  	// not fully synced).
   365  	state, err := p.chain.StateAt(head.Root)
   366  	if err != nil {
   367  		state, err = p.chain.StateAt(types.EmptyRootHash)
   368  	}
   369  	if err != nil {
   370  		return err
   371  	}
   372  	p.head, p.state = head, state
   373  
   374  	// Index all transactions on disk and delete anything unprocessable
   375  	var fails []uint64
   376  	index := func(id uint64, size uint32, blob []byte) {
   377  		if p.parseTransaction(id, size, blob) != nil {
   378  			fails = append(fails, id)
   379  		}
   380  	}
   381  	store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, newSlotter(), index)
   382  	if err != nil {
   383  		return err
   384  	}
   385  	p.store = store
   386  
   387  	if len(fails) > 0 {
   388  		log.Warn("Dropping invalidated blob transactions", "ids", fails)
   389  		dropInvalidMeter.Mark(int64(len(fails)))
   390  
   391  		for _, id := range fails {
   392  			if err := p.store.Delete(id); err != nil {
   393  				p.Close()
   394  				return err
   395  			}
   396  		}
   397  	}
    398  	// Sort the indexed transactions by nonce and delete anything gapped, then
    399  	// create the eviction heap of anyone still standing
   400  	for addr := range p.index {
   401  		p.recheck(addr, nil)
   402  	}
   403  	var (
   404  		basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
   405  		blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
   406  	)
   407  	if p.head.ExcessBlobGas != nil {
   408  		blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas))
   409  	}
   410  	p.evict = newPriceHeap(basefee, blobfee, &p.index)
   411  
   412  	// Pool initialized, attach the blob limbo to it to track blobs included
   413  	// recently but not yet finalized
   414  	p.limbo, err = newLimbo(limbodir)
   415  	if err != nil {
   416  		p.Close()
   417  		return err
   418  	}
   419  	// Set the configured gas tip, triggering a filtering of anything just loaded
   420  	basefeeGauge.Update(int64(basefee.Uint64()))
   421  	blobfeeGauge.Update(int64(blobfee.Uint64()))
   422  
   423  	p.SetGasTip(new(big.Int).SetUint64(gasTip))
   424  
   425  	// Since the user might have modified their pool's capacity, evict anything
   426  	// above the current allowance
   427  	for p.stored > p.config.Datacap {
   428  		p.drop()
   429  	}
   430  	// Update the metrics and return the constructed pool
   431  	datacapGauge.Update(int64(p.config.Datacap))
   432  	p.updateStorageMetrics()
   433  	return nil
   434  }
   435  
   436  // Close closes down the underlying persistent store.
   437  func (p *BlobPool) Close() error {
   438  	var errs []error
    439  	if p.limbo != nil { // Close might be invoked due to an error in the constructor, before p.limbo is set
   440  		if err := p.limbo.Close(); err != nil {
   441  			errs = append(errs, err)
   442  		}
   443  	}
   444  	if err := p.store.Close(); err != nil {
   445  		errs = append(errs, err)
   446  	}
   447  	switch {
   448  	case errs == nil:
   449  		return nil
   450  	case len(errs) == 1:
   451  		return errs[0]
   452  	default:
   453  		return fmt.Errorf("%v", errs)
   454  	}
   455  }
   456  
   457  // parseTransaction is a callback method on pool creation that gets called for
   458  // each transaction on disk to create the in-memory metadata index.
   459  func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
   460  	tx := new(types.Transaction)
   461  	if err := rlp.DecodeBytes(blob, tx); err != nil {
   462  		// This path is impossible unless the disk data representation changes
    463  		// across restarts. For that ever-improbable case, recover gracefully
   464  		// by ignoring this data entry.
   465  		log.Error("Failed to decode blob pool entry", "id", id, "err", err)
   466  		return err
   467  	}
   468  	if tx.BlobTxSidecar() == nil {
   469  		log.Error("Missing sidecar in blob pool entry", "id", id, "hash", tx.Hash())
   470  		return errors.New("missing blob sidecar")
   471  	}
   472  
   473  	meta := newBlobTxMeta(id, size, tx)
   474  	if _, exists := p.lookup[meta.hash]; exists {
   475  		// This path is only possible after a crash, where deleted items are not
   476  		// removed via the normal shutdown-startup procedure and thus may get
   477  		// partially resurrected.
   478  		log.Error("Rejecting duplicate blob pool entry", "id", id, "hash", tx.Hash())
   479  		return errors.New("duplicate blob entry")
   480  	}
   481  	sender, err := p.signer.Sender(tx)
   482  	if err != nil {
   483  		// This path is impossible unless the signature validity changes across
    484  		// restarts. For that ever-improbable case, recover gracefully by ignoring
   485  		// this data entry.
   486  		log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
   487  		return err
   488  	}
   489  	if _, ok := p.index[sender]; !ok {
   490  		if err := p.reserve(sender, true); err != nil {
   491  			return err
   492  		}
   493  		p.index[sender] = []*blobTxMeta{}
   494  		p.spent[sender] = new(uint256.Int)
   495  	}
   496  	p.index[sender] = append(p.index[sender], meta)
   497  	p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
   498  
   499  	p.lookup[meta.hash] = meta.id
   500  	p.stored += uint64(meta.size)
   501  
   502  	return nil
   503  }
   504  
   505  // recheck verifies the pool's content for a specific account and drops anything
   506  // that does not fit anymore (dangling or filled nonce, overdraft).
   507  func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint64) {
   508  	// Sort the transactions belonging to the account so reinjects can be simpler
   509  	txs := p.index[addr]
   510  	if inclusions != nil && txs == nil { // during reorgs, we might find new accounts
   511  		return
   512  	}
   513  	sort.Slice(txs, func(i, j int) bool {
   514  		return txs[i].nonce < txs[j].nonce
   515  	})
   516  	// If there is a gap between the chain state and the blob pool, drop
   517  	// all the transactions as they are non-executable. Similarly, if the
   518  	// entire tx range was included, drop all.
   519  	var (
   520  		next   = p.state.GetNonce(addr)
   521  		gapped = txs[0].nonce > next
   522  		filled = txs[len(txs)-1].nonce < next
   523  	)
   524  	if gapped || filled {
   525  		var (
   526  			ids    []uint64
   527  			nonces []uint64
   528  		)
   529  		for i := 0; i < len(txs); i++ {
   530  			ids = append(ids, txs[i].id)
   531  			nonces = append(nonces, txs[i].nonce)
   532  
   533  			p.stored -= uint64(txs[i].size)
   534  			delete(p.lookup, txs[i].hash)
   535  
    536  			// Included transactions' blobs need to be moved to the limbo
   537  			if filled && inclusions != nil {
   538  				p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
   539  			}
   540  		}
   541  		delete(p.index, addr)
   542  		delete(p.spent, addr)
   543  		if inclusions != nil { // only during reorgs will the heap be initialized
   544  			heap.Remove(p.evict, p.evict.index[addr])
   545  		}
   546  		p.reserve(addr, false)
   547  
   548  		if gapped {
   549  			log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
   550  			dropDanglingMeter.Mark(int64(len(ids)))
   551  		} else {
   552  			log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
   553  			dropFilledMeter.Mark(int64(len(ids)))
   554  		}
   555  		for _, id := range ids {
   556  			if err := p.store.Delete(id); err != nil {
   557  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   558  			}
   559  		}
   560  		return
   561  	}
   562  	// If there is overlap between the chain state and the blob pool, drop
   563  	// anything below the current state
   564  	if txs[0].nonce < next {
   565  		var (
   566  			ids    []uint64
   567  			nonces []uint64
   568  		)
   569  		for txs[0].nonce < next {
   570  			ids = append(ids, txs[0].id)
   571  			nonces = append(nonces, txs[0].nonce)
   572  
   573  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
   574  			p.stored -= uint64(txs[0].size)
   575  			delete(p.lookup, txs[0].hash)
   576  
    577  			// Included transactions' blobs need to be moved to the limbo
   578  			if inclusions != nil {
   579  				p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
   580  			}
   581  			txs = txs[1:]
   582  		}
   583  		log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
   584  		dropOverlappedMeter.Mark(int64(len(ids)))
   585  
   586  		for _, id := range ids {
   587  			if err := p.store.Delete(id); err != nil {
   588  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   589  			}
   590  		}
   591  		p.index[addr] = txs
   592  	}
   593  	// Iterate over the transactions to initialize their eviction thresholds
   594  	// and to detect any nonce gaps
   595  	txs[0].evictionExecTip = txs[0].execTipCap
   596  	txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
   597  	txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
   598  
   599  	for i := 1; i < len(txs); i++ {
   600  		// If there's no nonce gap, initialize the eviction thresholds as the
   601  		// minimum between the cumulative thresholds and the current tx fees
   602  		if txs[i].nonce == txs[i-1].nonce+1 {
   603  			txs[i].evictionExecTip = txs[i-1].evictionExecTip
   604  			if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
   605  				txs[i].evictionExecTip = txs[i].execTipCap
   606  			}
   607  			txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
   608  			if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
   609  				txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
   610  			}
   611  			txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
   612  			if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
   613  				txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
   614  			}
   615  			continue
   616  		}
   617  		// Sanity check that there's no double nonce. This case would generally
    618  		// be a coding error, so it's better to know about it.
   619  		//
   620  		// Also, Billy behind the blobpool does not journal deletes. A process
   621  		// crash would result in previously deleted entities being resurrected.
   622  		// That could potentially cause a duplicate nonce to appear.
   623  		if txs[i].nonce == txs[i-1].nonce {
   624  			id := p.lookup[txs[i].hash]
   625  
   626  			log.Error("Dropping repeat nonce blob transaction", "from", addr, "nonce", txs[i].nonce, "id", id)
   627  			dropRepeatedMeter.Mark(1)
   628  
   629  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
   630  			p.stored -= uint64(txs[i].size)
   631  			delete(p.lookup, txs[i].hash)
   632  
   633  			if err := p.store.Delete(id); err != nil {
   634  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   635  			}
   636  			txs = append(txs[:i], txs[i+1:]...)
   637  			p.index[addr] = txs
   638  
   639  			i--
   640  			continue
   641  		}
    642  		// Otherwise, if there's a nonce gap, evict all later transactions
   643  		var (
   644  			ids    []uint64
   645  			nonces []uint64
   646  		)
   647  		for j := i; j < len(txs); j++ {
   648  			ids = append(ids, txs[j].id)
   649  			nonces = append(nonces, txs[j].nonce)
   650  
   651  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
   652  			p.stored -= uint64(txs[j].size)
   653  			delete(p.lookup, txs[j].hash)
   654  		}
   655  		txs = txs[:i]
   656  
   657  		log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
   658  		dropGappedMeter.Mark(int64(len(ids)))
   659  
   660  		for _, id := range ids {
   661  			if err := p.store.Delete(id); err != nil {
   662  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   663  			}
   664  		}
   665  		p.index[addr] = txs
   666  		break
   667  	}
    668  	// Ensure that there's no over-draft; this is expected to happen when some
    669  	// transactions get included without being published on the network
   670  	var (
   671  		balance = p.state.GetBalance(addr)
   672  		spent   = p.spent[addr]
   673  	)
   674  	if spent.Cmp(balance) > 0 {
   675  		// Evict the highest nonce transactions until the pending set falls under
   676  		// the account's available balance
   677  		var (
   678  			ids    []uint64
   679  			nonces []uint64
   680  		)
   681  		for p.spent[addr].Cmp(balance) > 0 {
   682  			last := txs[len(txs)-1]
   683  			txs[len(txs)-1] = nil
   684  			txs = txs[:len(txs)-1]
   685  
   686  			ids = append(ids, last.id)
   687  			nonces = append(nonces, last.nonce)
   688  
   689  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
   690  			p.stored -= uint64(last.size)
   691  			delete(p.lookup, last.hash)
   692  		}
   693  		if len(txs) == 0 {
   694  			delete(p.index, addr)
   695  			delete(p.spent, addr)
   696  			if inclusions != nil { // only during reorgs will the heap be initialized
   697  				heap.Remove(p.evict, p.evict.index[addr])
   698  			}
   699  			p.reserve(addr, false)
   700  		} else {
   701  			p.index[addr] = txs
   702  		}
   703  		log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
   704  		dropOverdraftedMeter.Mark(int64(len(ids)))
   705  
   706  		for _, id := range ids {
   707  			if err := p.store.Delete(id); err != nil {
   708  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   709  			}
   710  		}
   711  	}
   712  	// Sanity check that no account can have more queued transactions than the
   713  	// DoS protection threshold.
   714  	if len(txs) > maxTxsPerAccount {
   715  		// Evict the highest nonce transactions until the pending set falls under
   716  		// the account's transaction cap
   717  		var (
   718  			ids    []uint64
   719  			nonces []uint64
   720  		)
   721  		for len(txs) > maxTxsPerAccount {
   722  			last := txs[len(txs)-1]
   723  			txs[len(txs)-1] = nil
   724  			txs = txs[:len(txs)-1]
   725  
   726  			ids = append(ids, last.id)
   727  			nonces = append(nonces, last.nonce)
   728  
   729  			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
   730  			p.stored -= uint64(last.size)
   731  			delete(p.lookup, last.hash)
   732  		}
   733  		p.index[addr] = txs
   734  
   735  		log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
   736  		dropOvercappedMeter.Mark(int64(len(ids)))
   737  
   738  		for _, id := range ids {
   739  			if err := p.store.Delete(id); err != nil {
   740  				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
   741  			}
   742  		}
   743  	}
    744  	// Included cheap transactions might have left the remaining ones better off
    745  	// from an eviction point of view, so fix any potential issues in the heap.
   746  	if _, ok := p.index[addr]; ok && inclusions != nil {
   747  		heap.Fix(p.evict, p.evict.index[addr])
   748  	}
   749  }
   750  
   751  // offload removes a tracked blob transaction from the pool and moves it into the
   752  // limbo for tracking until finality.
   753  //
   754  // The method may log errors for various unexpected scenarios but will not return
    755  // any of them since there's no clear error case. Some errors may be due to coding
   756  // issues, others caused by signers mining MEV stuff or swapping transactions. In
   757  // all cases, the pool needs to continue operating.
   758  func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusions map[common.Hash]uint64) {
   759  	data, err := p.store.Get(id)
   760  	if err != nil {
   761  		log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
   762  		return
   763  	}
   764  	var tx types.Transaction
   765  	if err = rlp.DecodeBytes(data, &tx); err != nil {
   766  		log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
   767  		return
   768  	}
   769  	block, ok := inclusions[tx.Hash()]
   770  	if !ok {
   771  		log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
   772  		return
   773  	}
   774  	if err := p.limbo.push(&tx, block); err != nil {
   775  		log.Warn("Failed to offload blob tx into limbo", "err", err)
   776  		return
   777  	}
   778  }
   779  
   780  // Reset implements txpool.SubPool, allowing the blob pool's internal state to be
   781  // kept in sync with the main transaction pool's internal state.
   782  func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
   783  	waitStart := time.Now()
   784  	p.lock.Lock()
   785  	resetwaitHist.Update(time.Since(waitStart).Nanoseconds())
   786  	defer p.lock.Unlock()
   787  
   788  	defer func(start time.Time) {
   789  		resettimeHist.Update(time.Since(start).Nanoseconds())
   790  	}(time.Now())
   791  
   792  	statedb, err := p.chain.StateAt(newHead.Root)
   793  	if err != nil {
   794  		log.Error("Failed to reset blobpool state", "err", err)
   795  		return
   796  	}
   797  	p.head = newHead
   798  	p.state = statedb
   799  
   800  	// Run the reorg between the old and new head and figure out which accounts
   801  	// need to be rechecked and which transactions need to be readded
   802  	if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
   803  		var adds []*types.Transaction
   804  		for addr, txs := range reinject {
   805  			// Blindly push all the lost transactions back into the pool
   806  			for _, tx := range txs {
   807  				if err := p.reinject(addr, tx.Hash()); err == nil {
   808  					adds = append(adds, tx.WithoutBlobTxSidecar())
   809  				}
   810  			}
   811  			// Recheck the account's pooled transactions to drop included and
   812  			// invalidated ones
   813  			p.recheck(addr, inclusions)
   814  		}
   815  		if len(adds) > 0 {
   816  			p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
   817  		}
   818  	}
   819  	// Flush out any blobs from limbo that are older than the latest finality
   820  	if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
   821  		p.limbo.finalize(p.chain.CurrentFinalBlock())
   822  	}
   823  	// Reset the price heap for the new set of basefee/blobfee pairs
   824  	var (
   825  		basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), newHead))
   826  		blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
   827  	)
   828  	if newHead.ExcessBlobGas != nil {
   829  		blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*newHead.ExcessBlobGas))
   830  	}
   831  	p.evict.reinit(basefee, blobfee, false)
   832  
   833  	basefeeGauge.Update(int64(basefee.Uint64()))
   834  	blobfeeGauge.Update(int64(blobfee.Uint64()))
   835  	p.updateStorageMetrics()
   836  }
   837  
   838  // reorg assembles all the transactors and missing transactions between an old
   839  // and new head to figure out which account's tx set needs to be rechecked and
   840  // which transactions need to be requeued.
   841  //
    842  // The transactions' block inclusion infos are also returned to allow tracking
    843  // any just-included transactions by block number in the limbo.
   844  func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*types.Transaction, map[common.Hash]uint64) {
   845  	// If the pool was not yet initialized, don't do anything
   846  	if oldHead == nil {
   847  		return nil, nil
   848  	}
   849  	// If the reorg is too deep, avoid doing it (will happen during snap sync)
   850  	oldNum := oldHead.Number.Uint64()
   851  	newNum := newHead.Number.Uint64()
   852  
   853  	if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
   854  		return nil, nil
   855  	}
    856  	// Reorg seems shallow enough to pull all transactions into memory
   857  	var (
   858  		transactors = make(map[common.Address]struct{})
   859  		discarded   = make(map[common.Address][]*types.Transaction)
   860  		included    = make(map[common.Address][]*types.Transaction)
   861  		inclusions  = make(map[common.Hash]uint64)
   862  
   863  		rem = p.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
   864  		add = p.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
   865  	)
   866  	if add == nil {
    867  		// If the new head is nil, it means that something happened between
    868  		// the firing of the newhead event and now: most likely a
    869  		// reorg caused by sync-reversion or an explicit sethead back to an
    870  		// earlier block.
   871  		log.Warn("Blobpool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
   872  		return nil, nil
   873  	}
   874  	if rem == nil {
   875  		// This can happen if a setHead is performed, where we simply discard
   876  		// the old head from the chain. If that is the case, we don't have the
   877  		// lost transactions anymore, and there's nothing to add.
   878  		if newNum >= oldNum {
    879  			// If we reorged to the same or a higher number, then it's not a case
   880  			// of setHead
   881  			log.Warn("Blobpool reset with missing old head",
   882  				"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
   883  			return nil, nil
   884  		}
   885  		// If the reorg ended up on a lower number, it's indicative of setHead
   886  		// being the cause
   887  		log.Debug("Skipping blobpool reset caused by setHead",
   888  			"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
   889  		return nil, nil
   890  	}
   891  	// Both old and new blocks exist, traverse through the progression chain
   892  	// and accumulate the transactors and transactions
   893  	for rem.NumberU64() > add.NumberU64() {
   894  		for _, tx := range rem.Transactions() {
   895  			from, _ := p.signer.Sender(tx)
   896  
   897  			discarded[from] = append(discarded[from], tx)
   898  			transactors[from] = struct{}{}
   899  		}
   900  		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
   901  			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
   902  			return nil, nil
   903  		}
   904  	}
   905  	for add.NumberU64() > rem.NumberU64() {
   906  		for _, tx := range add.Transactions() {
   907  			from, _ := p.signer.Sender(tx)
   908  
   909  			included[from] = append(included[from], tx)
   910  			inclusions[tx.Hash()] = add.NumberU64()
   911  			transactors[from] = struct{}{}
   912  		}
   913  		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
   914  			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
   915  			return nil, nil
   916  		}
   917  	}
   918  	for rem.Hash() != add.Hash() {
   919  		for _, tx := range rem.Transactions() {
   920  			from, _ := p.signer.Sender(tx)
   921  
   922  			discarded[from] = append(discarded[from], tx)
   923  			transactors[from] = struct{}{}
   924  		}
   925  		if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
   926  			log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash())
   927  			return nil, nil
   928  		}
   929  		for _, tx := range add.Transactions() {
   930  			from, _ := p.signer.Sender(tx)
   931  
   932  			included[from] = append(included[from], tx)
   933  			inclusions[tx.Hash()] = add.NumberU64()
   934  			transactors[from] = struct{}{}
   935  		}
   936  		if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
   937  			log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
   938  			return nil, nil
   939  		}
   940  	}
   941  	// Generate the set of transactions per address to pull back into the pool,
   942  	// also updating the rest along the way
   943  	reinject := make(map[common.Address][]*types.Transaction)
   944  	for addr := range transactors {
   945  		// Generate the set that was lost to reinject into the pool
   946  		lost := make([]*types.Transaction, 0, len(discarded[addr]))
   947  		for _, tx := range types.TxDifference(discarded[addr], included[addr]) {
   948  			if p.Filter(tx) {
   949  				lost = append(lost, tx)
   950  			}
   951  		}
   952  		reinject[addr] = lost
   953  
   954  		// Update the set that was already reincluded to track the blocks in limbo
   955  		for _, tx := range types.TxDifference(included[addr], discarded[addr]) {
   956  			if p.Filter(tx) {
   957  				p.limbo.update(tx.Hash(), inclusions[tx.Hash()])
   958  			}
   959  		}
   960  	}
   961  	return reinject, inclusions
   962  }
   963  
   964  // reinject blindly pushes a transaction previously included in the chain - and
   965  // just reorged out - into the pool. The transaction is assumed valid (having
   966  // been in the chain), thus the only validation needed is nonce sorting and over-
   967  // draft checks after injection.
   968  //
   969  // Note, the method will not initialize the eviction cache values as those will
   970  // be done once for all transactions belonging to an account after all individual
   971  // transactions are injected back into the pool.
   972  func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
   973  	// Retrieve the associated blob from the limbo. Without the blobs, we cannot
   974  	// add the transaction back into the pool as it is not mineable.
   975  	tx, err := p.limbo.pull(txhash)
   976  	if err != nil {
   977  		log.Error("Blobs unavailable, dropping reorged tx", "err", err)
   978  		return err
   979  	}
    980  	// TODO: an easy optimization here would be to fetch the serialized tx from the
    981  	// limbo instead of re-serializing it.
   982  
   983  	// Serialize the transaction back into the primary datastore.
   984  	blob, err := rlp.EncodeToBytes(tx)
   985  	if err != nil {
   986  		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
   987  		return err
   988  	}
   989  	id, err := p.store.Put(blob)
   990  	if err != nil {
   991  		log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
   992  		return err
   993  	}
   994  
   995  	// Update the indices and metrics
   996  	meta := newBlobTxMeta(id, p.store.Size(id), tx)
   997  	if _, ok := p.index[addr]; !ok {
   998  		if err := p.reserve(addr, true); err != nil {
   999  			log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
  1000  			return err
  1001  		}
  1002  		p.index[addr] = []*blobTxMeta{meta}
  1003  		p.spent[addr] = meta.costCap
  1004  		p.evict.Push(addr)
  1005  	} else {
  1006  		p.index[addr] = append(p.index[addr], meta)
  1007  		p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
  1008  	}
  1009  	p.lookup[meta.hash] = meta.id
  1010  	p.stored += uint64(meta.size)
  1011  	return nil
  1012  }
  1013  
  1014  // SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
  1015  // to be kept in sync with the main transaction pool's gas requirements.
  1016  func (p *BlobPool) SetGasTip(tip *big.Int) {
  1017  	p.lock.Lock()
  1018  	defer p.lock.Unlock()
  1019  
  1020  	// Store the new minimum gas tip
  1021  	old := p.gasTip
  1022  	p.gasTip = uint256.MustFromBig(tip)
  1023  
  1024  	// If the min miner fee increased, remove transactions below the new threshold
  1025  	if old == nil || p.gasTip.Cmp(old) > 0 {
  1026  		for addr, txs := range p.index {
  1027  			for i, tx := range txs {
  1028  				if tx.execTipCap.Cmp(p.gasTip) < 0 {
  1029  					// Drop the offending transaction
  1030  					var (
  1031  						ids    = []uint64{tx.id}
  1032  						nonces = []uint64{tx.nonce}
  1033  					)
  1034  					p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
  1035  					p.stored -= uint64(tx.size)
  1036  					delete(p.lookup, tx.hash)
  1037  					txs[i] = nil
  1038  
  1039  					// Drop everything afterwards, no gaps allowed
  1040  					for j, tx := range txs[i+1:] {
  1041  						ids = append(ids, tx.id)
  1042  						nonces = append(nonces, tx.nonce)
  1043  
  1044  						p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
  1045  						p.stored -= uint64(tx.size)
  1046  						delete(p.lookup, tx.hash)
  1047  						txs[i+1+j] = nil
  1048  					}
  1049  					// Clear out the dropped transactions from the index
  1050  					if i > 0 {
  1051  						p.index[addr] = txs[:i]
  1052  						heap.Fix(p.evict, p.evict.index[addr])
  1053  					} else {
  1054  						delete(p.index, addr)
  1055  						delete(p.spent, addr)
  1056  
  1057  						heap.Remove(p.evict, p.evict.index[addr])
  1058  						p.reserve(addr, false)
  1059  					}
  1060  					// Clear out the transactions from the data store
  1061  					log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
  1062  					dropUnderpricedMeter.Mark(int64(len(ids)))
  1063  
  1064  					for _, id := range ids {
  1065  						if err := p.store.Delete(id); err != nil {
  1066  							log.Error("Failed to delete dropped transaction", "id", id, "err", err)
  1067  						}
  1068  					}
  1069  					break
  1070  				}
  1071  			}
  1072  		}
  1073  	}
  1074  	log.Debug("Blobpool tip threshold updated", "tip", tip)
  1075  	pooltipGauge.Update(tip.Int64())
  1076  	p.updateStorageMetrics()
  1077  }
  1078  
  1079  // validateTx checks whether a transaction is valid according to the consensus
  1080  // rules and adheres to some heuristic limits of the local node (price and size).
  1081  func (p *BlobPool) validateTx(tx *types.Transaction) error {
  1082  	// Ensure the transaction adheres to basic pool filters (type, size, tip) and
  1083  	// consensus rules
  1084  	baseOpts := &txpool.ValidationOptions{
  1085  		Config:  p.chain.Config(),
  1086  		Accept:  1 << types.BlobTxType,
  1087  		MaxSize: txMaxSize,
  1088  		MinTip:  p.gasTip.ToBig(),
  1089  	}
  1090  	if err := txpool.ValidateTransaction(tx, p.head, p.signer, baseOpts); err != nil {
  1091  		return err
  1092  	}
  1093  	// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
  1094  	stateOpts := &txpool.ValidationOptionsWithState{
  1095  		State: p.state,
  1096  
  1097  		FirstNonceGap: func(addr common.Address) uint64 {
   1098  			// Nonce gaps are not permitted in the blob pool; the first gap will
  1099  			// be the next nonce shifted by however many transactions we already
  1100  			// have pooled.
  1101  			return p.state.GetNonce(addr) + uint64(len(p.index[addr]))
  1102  		},
  1103  		UsedAndLeftSlots: func(addr common.Address) (int, int) {
  1104  			have := len(p.index[addr])
  1105  			if have >= maxTxsPerAccount {
  1106  				return have, 0
  1107  			}
  1108  			return have, maxTxsPerAccount - have
  1109  		},
  1110  		ExistingExpenditure: func(addr common.Address) *big.Int {
  1111  			if spent := p.spent[addr]; spent != nil {
  1112  				return spent.ToBig()
  1113  			}
  1114  			return new(big.Int)
  1115  		},
  1116  		ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
  1117  			next := p.state.GetNonce(addr)
  1118  			if uint64(len(p.index[addr])) > nonce-next {
   1119  				return p.index[addr][int(nonce-next)].costCap.ToBig()
  1120  			}
  1121  			return nil
  1122  		},
  1123  	}
  1124  	if err := txpool.ValidateTransactionWithState(tx, p.signer, stateOpts); err != nil {
  1125  		return err
  1126  	}
  1127  	// If the transaction replaces an existing one, ensure that price bumps are
  1128  	// adhered to.
  1129  	var (
  1130  		from, _ = p.signer.Sender(tx) // already validated above
  1131  		next    = p.state.GetNonce(from)
  1132  	)
  1133  	if uint64(len(p.index[from])) > tx.Nonce()-next {
  1134  		prev := p.index[from][int(tx.Nonce()-next)]
  1135  		// Ensure the transaction is different than the one tracked locally
  1136  		if prev.hash == tx.Hash() {
  1137  			return txpool.ErrAlreadyKnown
  1138  		}
  1139  		// Account can support the replacement, but the price bump must also be met
  1140  		switch {
  1141  		case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
  1142  			return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
  1143  		case tx.GasTipCapIntCmp(prev.execTipCap.ToBig()) <= 0:
  1144  			return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap)
  1145  		case tx.BlobGasFeeCapIntCmp(prev.blobFeeCap.ToBig()) <= 0:
  1146  			return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap)
  1147  		}
  1148  		var (
  1149  			multiplier = uint256.NewInt(100 + p.config.PriceBump)
  1150  			onehundred = uint256.NewInt(100)
  1151  
  1152  			minGasFeeCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execFeeCap), onehundred)
  1153  			minGasTipCap     = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execTipCap), onehundred)
  1154  			minBlobGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.blobFeeCap), onehundred)
  1155  		)
  1156  		switch {
  1157  		case tx.GasFeeCapIntCmp(minGasFeeCap.ToBig()) < 0:
  1158  			return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap, p.config.PriceBump)
  1159  		case tx.GasTipCapIntCmp(minGasTipCap.ToBig()) < 0:
  1160  			return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap, p.config.PriceBump)
  1161  		case tx.BlobGasFeeCapIntCmp(minBlobGasFeeCap.ToBig()) < 0:
  1162  			return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap, p.config.PriceBump)
  1163  		}
  1164  	}
  1165  	return nil
  1166  }
  1167  
   1168  // Has returns an indicator of whether the subpool has a transaction cached
   1169  // with the given hash.
  1170  func (p *BlobPool) Has(hash common.Hash) bool {
  1171  	p.lock.RLock()
  1172  	defer p.lock.RUnlock()
  1173  
  1174  	_, ok := p.lookup[hash]
  1175  	return ok
  1176  }
  1177  
  1178  // Get returns a transaction if it is contained in the pool, or nil otherwise.
  1179  func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
  1180  	// Track the amount of time waiting to retrieve a fully resolved blob tx from
  1181  	// the pool and the amount of time actually spent on pulling the data from disk.
  1182  	getStart := time.Now()
  1183  	p.lock.RLock()
  1184  	getwaitHist.Update(time.Since(getStart).Nanoseconds())
  1185  	defer p.lock.RUnlock()
  1186  
  1187  	defer func(start time.Time) {
  1188  		gettimeHist.Update(time.Since(start).Nanoseconds())
  1189  	}(time.Now())
  1190  
  1191  	// Pull the blob from disk and return an assembled response
  1192  	id, ok := p.lookup[hash]
  1193  	if !ok {
  1194  		return nil
  1195  	}
  1196  	data, err := p.store.Get(id)
  1197  	if err != nil {
  1198  		log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
  1199  		return nil
  1200  	}
  1201  	item := new(types.Transaction)
  1202  	if err = rlp.DecodeBytes(data, item); err != nil {
  1203  		log.Error("Blobs corrupted for tracked transaction", "hash", hash, "id", id, "err", err)
  1204  		return nil
  1205  	}
  1206  	return item
  1207  }
  1208  
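        // As a hedged aside, the disk round-trip above mirrors what add writes below:
        // transactions are persisted as their RLP encoding and decoded back in full
        // on retrieval. Illustrative snippet, not pool API:
        //
        //	blob, err := rlp.EncodeToBytes(tx) // what add hands to p.store.Put
        //	if err == nil {
        //		item := new(types.Transaction)
        //		err = rlp.DecodeBytes(blob, item) // what Get reconstructs
        //	}
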
  1209  // Add inserts a set of blob transactions into the pool if they pass validation (both
  1210  // consensus validity and pool restrictions).
  1211  func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
  1212  	var (
  1213  		adds = make([]*types.Transaction, 0, len(txs))
  1214  		errs = make([]error, len(txs))
  1215  	)
  1216  	for i, tx := range txs {
  1217  		errs[i] = p.add(tx)
  1218  		if errs[i] == nil {
  1219  			adds = append(adds, tx.WithoutBlobTxSidecar())
  1220  		}
  1221  	}
  1222  	if len(adds) > 0 {
  1223  		p.discoverFeed.Send(core.NewTxsEvent{Txs: adds})
  1224  		p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
  1225  	}
  1226  	return errs
  1227  }
  1228  
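        // Note on the announcements above: the broadcast transactions are stripped of
        // their blob sidecars via WithoutBlobTxSidecar, since blob data is pulled on
        // demand by interested peers rather than pushed alongside the announcement.
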
  1229  // add inserts a new blob transaction into the pool if it passes validation (both
  1230  // consensus validity and pool restrictions).
  1231  func (p *BlobPool) add(tx *types.Transaction) (err error) {
  1232  	// The blob pool blocks on adding a transaction. This is because blob txs
  1233  	// are only ever pulled from the network, so this method will act as the
  1234  	// overload protection for fetches.
  1235  	waitStart := time.Now()
  1236  	p.lock.Lock()
  1237  	addwaitHist.Update(time.Since(waitStart).Nanoseconds())
  1238  	defer p.lock.Unlock()
  1239  
  1240  	defer func(start time.Time) {
  1241  		addtimeHist.Update(time.Since(start).Nanoseconds())
  1242  	}(time.Now())
  1243  
  1244  	// Ensure the transaction is valid from all perspectives
  1245  	if err := p.validateTx(tx); err != nil {
  1246  		log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
  1247  		switch {
  1248  		case errors.Is(err, txpool.ErrUnderpriced):
  1249  			addUnderpricedMeter.Mark(1)
  1250  		case errors.Is(err, core.ErrNonceTooLow):
  1251  			addStaleMeter.Mark(1)
  1252  		case errors.Is(err, core.ErrNonceTooHigh):
  1253  			addGappedMeter.Mark(1)
  1254  		case errors.Is(err, core.ErrInsufficientFunds):
  1255  			addOverdraftedMeter.Mark(1)
  1256  		case errors.Is(err, txpool.ErrAccountLimitExceeded):
  1257  			addOvercappedMeter.Mark(1)
  1258  		case errors.Is(err, txpool.ErrReplaceUnderpriced):
  1259  			addNoreplaceMeter.Mark(1)
  1260  		default:
  1261  			addInvalidMeter.Mark(1)
  1262  		}
  1263  		return err
  1264  	}
  1265  	// If the address is not yet known, request exclusivity to track the account
  1266  	// only by this subpool until all transactions are evicted
  1267  	from, _ := types.Sender(p.signer, tx) // already validated above
  1268  	if _, ok := p.index[from]; !ok {
  1269  		if err := p.reserve(from, true); err != nil {
  1270  			addNonExclusiveMeter.Mark(1)
  1271  			return err
  1272  		}
  1273  		defer func() {
  1274  			// If the transaction is rejected by some post-validation check, remove
  1275  			// the lock on the reservation set.
  1276  			//
  1277  			// Note, `err` here is the named error return, which will be initialized
  1278  			// by a return statement before running deferred methods. Take care with
  1279  			// removing or subscoping err as it will break this clause.
  1280  			if err != nil {
  1281  				p.reserve(from, false)
  1282  			}
  1283  		}()
  1284  	}
  1285  	// Transaction permitted into the pool from a nonce and cost perspective;
  1286  	// insert it into the database and update the indices
  1287  	blob, err := rlp.EncodeToBytes(tx)
  1288  	if err != nil {
  1289  		log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
  1290  		return err
  1291  	}
  1292  	id, err := p.store.Put(blob)
  1293  	if err != nil {
  1294  		return err
  1295  	}
  1296  	meta := newBlobTxMeta(id, p.store.Size(id), tx)
  1297  
  1298  	var (
  1299  		next   = p.state.GetNonce(from)
  1300  		offset = int(tx.Nonce() - next)
  1301  		newacc = false
  1302  	)
  1303  	var oldEvictionExecFeeJumps, oldEvictionBlobFeeJumps float64
  1304  	if txs, ok := p.index[from]; ok {
  1305  		oldEvictionExecFeeJumps = txs[len(txs)-1].evictionExecFeeJumps
  1306  		oldEvictionBlobFeeJumps = txs[len(txs)-1].evictionBlobFeeJumps
  1307  	}
  1308  	if len(p.index[from]) > offset {
  1309  		// Transaction replaces a previously queued one
  1310  		dropReplacedMeter.Mark(1)
  1311  
  1312  		prev := p.index[from][offset]
  1313  		if err := p.store.Delete(prev.id); err != nil {
  1314  			// Shitty situation, but try to recover gracefully instead of going boom
  1315  			log.Error("Failed to delete replaced transaction", "id", prev.id, "err", err)
  1316  		}
  1317  		// Update the transaction index
  1318  		p.index[from][offset] = meta
  1319  		p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap)
  1320  		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
  1321  
  1322  		delete(p.lookup, prev.hash)
  1323  		p.lookup[meta.hash] = meta.id
  1324  		p.stored += uint64(meta.size) - uint64(prev.size)
  1325  	} else {
  1326  		// Transaction extends previously scheduled ones
  1327  		p.index[from] = append(p.index[from], meta)
  1328  		if _, ok := p.spent[from]; !ok {
  1329  			p.spent[from] = new(uint256.Int)
  1330  			newacc = true
  1331  		}
  1332  		p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
  1333  		p.lookup[meta.hash] = meta.id
  1334  		p.stored += uint64(meta.size)
  1335  	}
  1336  	// Recompute the rolling eviction fields. In case of a replacement, this will
  1337  	// recompute all subsequent fields. In case of an append, this will only do
  1338  	// the fresh calculation.
  1339  	txs := p.index[from]
  1340  
  1341  	for i := offset; i < len(txs); i++ {
  1342  		// The first transaction will always use itself
  1343  		if i == 0 {
  1344  			txs[0].evictionExecTip = txs[0].execTipCap
  1345  			txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
  1346  			txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
  1347  
  1348  			continue
  1349  		}
  1350  		// Subsequent transactions will use a rolling calculation
  1351  		txs[i].evictionExecTip = txs[i-1].evictionExecTip
  1352  		if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
  1353  			txs[i].evictionExecTip = txs[i].execTipCap
  1354  		}
  1355  		txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
  1356  		if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
  1357  			txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
  1358  		}
  1359  		txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
  1360  		if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
  1361  			txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
  1362  		}
  1363  	}
  1364  	// Update the eviction heap with the new information:
  1365  	//   - If the transaction is from a new account, add it to the heap
  1366  	//   - If the account had a singleton tx replaced, update the heap (new price caps)
  1367  	//   - If the account has a transaction replaced or appended, update the heap if significantly changed
  1368  	switch {
  1369  	case newacc:
  1370  		heap.Push(p.evict, from)
  1371  
  1372  	case len(txs) == 1: // 1 tx and not a new acc, must be replacement
  1373  		heap.Fix(p.evict, p.evict.index[from])
  1374  
  1375  	default: // replacement or new append
  1376  		evictionExecFeeDiff := oldEvictionExecFeeJumps - txs[len(txs)-1].evictionExecFeeJumps
  1377  		evictionBlobFeeDiff := oldEvictionBlobFeeJumps - txs[len(txs)-1].evictionBlobFeeJumps
  1378  
  1379  		if math.Abs(evictionExecFeeDiff) > 0.001 || math.Abs(evictionBlobFeeDiff) > 0.001 { // need math.Abs, can go up and down
  1380  			heap.Fix(p.evict, p.evict.index[from])
  1381  		}
  1382  	}
  1383  	// If the pool went over the allowed data limit, evict transactions until
  1384  	// we're again below the threshold
  1385  	for p.stored > p.config.Datacap {
  1386  		p.drop()
  1387  	}
  1388  	p.updateStorageMetrics()
  1389  
  1390  	addValidMeter.Mark(1)
  1391  	return nil
  1392  }
  1393  
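        // exampleRollingMinima is a hedged, standalone sketch (not part of the pool's
        // API) of the eviction-field recomputation above: each position carries the
        // minimum of its own value and everything before it, so the account's tail
        // always reflects the weakest fee cap that an eviction decision must respect.
        func exampleRollingMinima(jumps []float64) []float64 {
        	mins := make([]float64, len(jumps))
        	for i, v := range jumps {
        		if i == 0 || v < mins[i-1] {
        			mins[i] = v // new low, the transaction's own value takes over
        		} else {
        			mins[i] = mins[i-1] // inherit the running minimum
        		}
        	}
        	return mins
        }
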
  1394  // drop removes the worst transaction from the pool. It is primarily used when a
  1395  // freshly added transaction overflows the pool and needs to evict something. The
  1396  // method is also called on startup if the user resizes their storage; that might
  1397  // be an expensive run, but it should be fine-ish.
  1398  func (p *BlobPool) drop() {
  1399  	// Peek at the account with the worst transaction set to evict from (Go's heap
  1400  	// stores the minimum at index zero of the heap slice) and retrieve its last
  1401  	// transaction.
  1402  	var (
  1403  		from = p.evict.addrs[0] // cannot call drop on empty pool
  1404  
  1405  		txs  = p.index[from]
  1406  		drop = txs[len(txs)-1]
  1407  		last = len(txs) == 1
  1408  	)
  1409  	// Remove the transaction from the pool's index
  1410  	if last {
  1411  		delete(p.index, from)
  1412  		delete(p.spent, from)
  1413  		p.reserve(from, false)
  1414  	} else {
  1415  		txs[len(txs)-1] = nil
  1416  		txs = txs[:len(txs)-1]
  1417  
  1418  		p.index[from] = txs
  1419  		p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
  1420  	}
  1421  	p.stored -= uint64(drop.size)
  1422  	delete(p.lookup, drop.hash)
  1423  
  1424  	// Remove the transaction from the pool's eviction heap:
  1425  	//   - If the entire account was dropped, pop off the address
  1426  	//   - Otherwise, if the new tail has better eviction caps, fix the heap
  1427  	if last {
  1428  		heap.Pop(p.evict)
  1429  	} else {
  1430  		tail := txs[len(txs)-1] // new tail, surely exists
  1431  
  1432  		evictionExecFeeDiff := tail.evictionExecFeeJumps - drop.evictionExecFeeJumps
  1433  		evictionBlobFeeDiff := tail.evictionBlobFeeJumps - drop.evictionBlobFeeJumps
  1434  
  1435  		if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
  1436  			heap.Fix(p.evict, 0)
  1437  		}
  1438  	}
  1439  	// Remove the transaction from the data store
  1440  	log.Debug("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
  1441  	dropOverflownMeter.Mark(1)
  1442  
  1443  	if err := p.store.Delete(drop.id); err != nil {
  1444  		log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
  1445  	}
  1446  }
  1447  
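        // Why math.Abs is unneeded in drop above: the eviction caps are rolling minima
        // along the nonce sequence (see the recompute loop in add), so earlier entries
        // always carry greater-or-equal values than the tail. Dropping the tail can
        // therefore only raise the remaining tail's caps. E.g. with per-tx basefee
        // jumps of [5, 3, 2], the rolling minima are [5, 3, 2]; dropping the last tx
        // moves the tail's cap from 2 to 3, a non-negative diff.
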
  1448  // Pending retrieves all currently processable transactions, grouped by origin
  1449  // account and sorted by nonce.
  1450  //
  1451  // The transactions can also be pre-filtered by the dynamic fee components to
  1452  // reduce allocations and load on downstream subsystems.
  1453  func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
  1454  	// If only plain transactions are requested, this pool is unsuitable as it
  1455  	// contains none; don't even bother.
  1456  	if filter.OnlyPlainTxs {
  1457  		return nil
  1458  	}
  1459  	// Track the amount of time waiting to retrieve the list of pending blob txs
  1460  	// from the pool and the amount of time actually spent on assembling the data.
  1461  	// The latter will be pretty much moot, but we've kept it to stay symmetric
  1462  	// across all user operations.
  1463  	pendStart := time.Now()
  1464  	p.lock.RLock()
  1465  	pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
  1466  	defer p.lock.RUnlock()
  1467  
  1468  	execStart := time.Now()
  1469  	defer func() {
  1470  		pendtimeHist.Update(time.Since(execStart).Nanoseconds())
  1471  	}()
  1472  
  1473  	pending := make(map[common.Address][]*txpool.LazyTransaction, len(p.index))
  1474  	for addr, txs := range p.index {
  1475  		lazies := make([]*txpool.LazyTransaction, 0, len(txs))
  1476  		for _, tx := range txs {
  1477  			// If transaction filtering was requested, discard badly priced ones
  1478  			if filter.MinTip != nil && filter.BaseFee != nil {
  1479  				if tx.execFeeCap.Lt(filter.BaseFee) {
  1480  					break // basefee too low, cannot be included, discard rest of txs from the account
  1481  				}
  1482  				tip := new(uint256.Int).Sub(tx.execFeeCap, filter.BaseFee)
  1483  				if tip.Gt(tx.execTipCap) {
  1484  					tip = tx.execTipCap
  1485  				}
  1486  				if tip.Lt(filter.MinTip) {
  1487  					break // allowed or remaining tip too low, cannot be included, discard rest of txs from the account
  1488  				}
  1489  			}
  1490  			if filter.BlobFee != nil {
  1491  				if tx.blobFeeCap.Lt(filter.BlobFee) {
  1492  					break // blobfee too low, cannot be included, discard rest of txs from the account
  1493  				}
  1494  			}
  1495  			// Transaction was accepted according to the filter, append to the pending list
  1496  			lazies = append(lazies, &txpool.LazyTransaction{
  1497  				Pool:      p,
  1498  				Hash:      tx.hash,
  1499  				Time:      execStart, // TODO(karalabe): Maybe save these and use that?
  1500  				GasFeeCap: tx.execFeeCap,
  1501  				GasTipCap: tx.execTipCap,
  1502  				Gas:       tx.execGas,
  1503  				BlobGas:   tx.blobGas,
  1504  			})
  1505  		}
  1506  		if len(lazies) > 0 {
  1507  			pending[addr] = lazies
  1508  		}
  1509  	}
  1510  	return pending
  1511  }
  1512  
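        // exampleEffectiveTip is a hedged, standalone sketch (not part of the pool's
        // API) of the tip filter in Pending above: the tip a transaction can actually
        // pay is its fee cap minus the current base fee, clamped to its declared tip
        // cap; a nil result means even the base fee is not covered.
        func exampleEffectiveTip(feeCap, tipCap, baseFee *uint256.Int) *uint256.Int {
        	if feeCap.Lt(baseFee) {
        		return nil // cannot cover the base fee, not includable at all
        	}
        	tip := new(uint256.Int).Sub(feeCap, baseFee)
        	if tip.Gt(tipCap) {
        		tip.Set(tipCap) // leftover exceeds the declared tip, clamp it
        	}
        	return tip
        }
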
  1513  // updateStorageMetrics retrieves a bunch of stats from the data store and pushes
  1514  // them out as metrics.
  1515  func (p *BlobPool) updateStorageMetrics() {
  1516  	stats := p.store.Infos()
  1517  
  1518  	var (
  1519  		dataused uint64
  1520  		datareal uint64
  1521  		slotused uint64
  1522  
  1523  		oversizedDataused uint64
  1524  		oversizedDatagaps uint64
  1525  		oversizedSlotused uint64
  1526  		oversizedSlotgaps uint64
  1527  	)
  1528  	for _, shelf := range stats.Shelves {
  1529  		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
  1530  		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
  1531  
  1532  		dataused += slotDataused
  1533  		datareal += slotDataused + slotDatagaps
  1534  		slotused += shelf.FilledSlots
  1535  
  1536  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
  1537  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
  1538  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
  1539  		metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
  1540  
  1541  		if shelf.SlotSize/blobSize > maxBlobsPerTransaction {
  1542  			oversizedDataused += slotDataused
  1543  			oversizedDatagaps += slotDatagaps
  1544  			oversizedSlotused += shelf.FilledSlots
  1545  			oversizedSlotgaps += shelf.GappedSlots
  1546  		}
  1547  	}
  1548  	datausedGauge.Update(int64(dataused))
  1549  	datarealGauge.Update(int64(datareal))
  1550  	slotusedGauge.Update(int64(slotused))
  1551  
  1552  	oversizedDatausedGauge.Update(int64(oversizedDataused))
  1553  	oversizedDatagapsGauge.Update(int64(oversizedDatagaps))
  1554  	oversizedSlotusedGauge.Update(int64(oversizedSlotused))
  1555  	oversizedSlotgapsGauge.Update(int64(oversizedSlotgaps))
  1556  
  1557  	p.updateLimboMetrics()
  1558  }
  1559  
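        // To illustrate the shelf accounting above with made-up numbers: a shelf with
        // 131072-byte slots, 10 filled and 2 gapped, contributes 1310720 bytes to
        // dataused and 1572864 bytes to datareal; the difference is fragmentation
        // wasted on since-deleted entries.
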
  1560  // updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
  1561  // them out as metrics.
  1562  func (p *BlobPool) updateLimboMetrics() {
  1563  	stats := p.limbo.store.Infos()
  1564  
  1565  	var (
  1566  		dataused uint64
  1567  		datareal uint64
  1568  		slotused uint64
  1569  	)
  1570  	for _, shelf := range stats.Shelves {
  1571  		slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
  1572  		slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
  1573  
  1574  		dataused += slotDataused
  1575  		datareal += slotDataused + slotDatagaps
  1576  		slotused += shelf.FilledSlots
  1577  
  1578  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
  1579  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
  1580  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
  1581  		metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
  1582  	}
  1583  	limboDatausedGauge.Update(int64(dataused))
  1584  	limboDatarealGauge.Update(int64(datareal))
  1585  	limboSlotusedGauge.Update(int64(slotused))
  1586  }
  1587  
  1588  // SubscribeTransactions registers a subscription for new transaction events,
  1589  // supporting feeding only newly seen or also resurrected transactions.
  1590  func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
  1591  	if reorgs {
  1592  		return p.insertFeed.Subscribe(ch)
  1593  	} else {
  1594  		return p.discoverFeed.Subscribe(ch)
  1595  	}
  1596  }
  1597  
  1598  // Nonce returns the next nonce of an account, with all transactions executable
  1599  // by the pool already applied on top.
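        //
        // For instance (illustrative numbers only): with a state nonce of 5 and
        // transactions with nonces 5, 6 and 7 already pooled, the method returns 8.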
  1600  func (p *BlobPool) Nonce(addr common.Address) uint64 {
  1601  	p.lock.Lock()
  1602  	defer p.lock.Unlock()
  1603  
  1604  	if txs, ok := p.index[addr]; ok {
  1605  		return txs[len(txs)-1].nonce + 1
  1606  	}
  1607  	return p.state.GetNonce(addr)
  1608  }
  1609  
  1610  // Stats retrieves the current pool stats, namely the number of pending and the
  1611  // number of queued (non-executable) transactions.
  1612  func (p *BlobPool) Stats() (int, int) {
  1613  	p.lock.Lock()
  1614  	defer p.lock.Unlock()
  1615  
  1616  	var pending int
  1617  	for _, txs := range p.index {
  1618  		pending += len(txs)
  1619  	}
  1620  	return pending, 0 // No non-executable txs in the blob pool
  1621  }
  1622  
  1623  // Content retrieves the data content of the transaction pool, returning all the
  1624  // pending as well as queued transactions, grouped by account and sorted by nonce.
  1625  //
  1626  // For the blob pool, this method will return nothing for now.
  1627  // TODO(karalabe): Abstract out the returned metadata.
  1628  func (p *BlobPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
  1629  	return make(map[common.Address][]*types.Transaction), make(map[common.Address][]*types.Transaction)
  1630  }
  1631  
  1632  // ContentFrom retrieves the data content of the transaction pool, returning the
  1633  // pending as well as queued transactions of this address, grouped by nonce.
  1634  //
  1635  // For the blob pool, this method will return nothing for now.
  1636  // TODO(karalabe): Abstract out the returned metadata.
  1637  func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
  1638  	return []*types.Transaction{}, []*types.Transaction{}
  1639  }
  1640  
  1641  // Locals retrieves the accounts currently considered local by the pool.
  1642  //
  1643  // There is no notion of local accounts in the blob pool.
  1644  func (p *BlobPool) Locals() []common.Address {
  1645  	return []common.Address{}
  1646  }
  1647  
  1648  // Status returns the known status (unknown/pending/queued) of a transaction
  1649  // identified by its hash.
  1650  func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
  1651  	if p.Has(hash) {
  1652  		return txpool.TxStatusPending
  1653  	}
  1654  	return txpool.TxStatusUnknown
  1655  }