github.com/ledgerwatch/erigon-lib@v1.0.0/txpool/pool.go

     1  /*
     2     Copyright 2022 The Erigon contributors
     3  
     4     Licensed under the Apache License, Version 2.0 (the "License");
     5     you may not use this file except in compliance with the License.
     6     You may obtain a copy of the License at
     7  
     8         http://www.apache.org/licenses/LICENSE-2.0
     9  
    10     Unless required by applicable law or agreed to in writing, software
    11     distributed under the License is distributed on an "AS IS" BASIS,
    12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13     See the License for the specific language governing permissions and
    14     limitations under the License.
    15  */
    16  
    17  package txpool
    18  
    19  import (
    20  	"bytes"
    21  	"container/heap"
    22  	"context"
    23  	"encoding/binary"
    24  	"encoding/hex"
    25  	"encoding/json"
    26  	"errors"
    27  	"fmt"
    28  	"math"
    29  	"math/big"
    30  	"runtime"
    31  	"sort"
    32  	"sync"
    33  	"sync/atomic"
    34  	"time"
    35  
    36  	"github.com/VictoriaMetrics/metrics"
    37  	gokzg4844 "github.com/crate-crypto/go-kzg-4844"
    38  	mapset "github.com/deckarep/golang-set/v2"
    39  	"github.com/go-stack/stack"
    40  	"github.com/google/btree"
    41  	"github.com/hashicorp/golang-lru/v2/simplelru"
    42  	"github.com/holiman/uint256"
    43  	"github.com/ledgerwatch/log/v3"
    44  
    45  	"github.com/ledgerwatch/erigon-lib/chain"
    46  	"github.com/ledgerwatch/erigon-lib/common"
    47  	"github.com/ledgerwatch/erigon-lib/common/assert"
    48  	"github.com/ledgerwatch/erigon-lib/common/cmp"
    49  	"github.com/ledgerwatch/erigon-lib/common/dbg"
    50  	"github.com/ledgerwatch/erigon-lib/common/fixedgas"
    51  	"github.com/ledgerwatch/erigon-lib/common/u256"
    52  	libkzg "github.com/ledgerwatch/erigon-lib/crypto/kzg"
    53  	"github.com/ledgerwatch/erigon-lib/gointerfaces"
    54  	"github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil"
    55  	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
    56  	proto_txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
    57  	"github.com/ledgerwatch/erigon-lib/kv"
    58  	"github.com/ledgerwatch/erigon-lib/kv/kvcache"
    59  	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
    60  	"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
    61  	"github.com/ledgerwatch/erigon-lib/types"
    62  )
    63  
    64  var (
    65  	processBatchTxsTimer    = metrics.NewSummary(`pool_process_remote_txs`)
    66  	addRemoteTxsTimer       = metrics.NewSummary(`pool_add_remote_txs`)
    67  	newBlockTimer           = metrics.NewSummary(`pool_new_block`)
    68  	writeToDBTimer          = metrics.NewSummary(`pool_write_to_db`)
    69  	propagateToNewPeerTimer = metrics.NewSummary(`pool_propagate_to_new_peer`)
    70  	propagateNewTxsTimer    = metrics.NewSummary(`pool_propagate_new_txs`)
    71  	writeToDBBytesCounter   = metrics.GetOrCreateCounter(`pool_write_to_db_bytes`)
    72  	pendingSubCounter       = metrics.GetOrCreateCounter(`txpool_pending`)
    73  	queuedSubCounter        = metrics.GetOrCreateCounter(`txpool_queued`)
    74  	basefeeSubCounter       = metrics.GetOrCreateCounter(`txpool_basefee`)
    75  )
    76  
    77  // Pool is interface for the transaction pool
    78  // This interface exists for the convenience of testing, and not yet because
    79  // there are multiple implementations
    80  type Pool interface {
    81  	ValidateSerializedTxn(serializedTxn []byte) error
    82  
    83  	// Handle 3 main events - new remote txs from p2p, new local txs from RPC, new blocks from execution layer
    84  	AddRemoteTxs(ctx context.Context, newTxs types.TxSlots)
    85  	AddLocalTxs(ctx context.Context, newTxs types.TxSlots, tx kv.Tx) ([]txpoolcfg.DiscardReason, error)
    86  	OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, tx kv.Tx) error
    87  	// IdHashKnown checks whether a transaction with the given ID hash is known to the pool
    88  	IdHashKnown(tx kv.Tx, hash []byte) (bool, error)
    89  	FilterKnownIdHashes(tx kv.Tx, hashes types.Hashes) (unknownHashes types.Hashes, err error)
    90  	Started() bool
    91  	GetRlp(tx kv.Tx, hash []byte) ([]byte, error)
    92  	GetKnownBlobTxn(tx kv.Tx, hash []byte) (*metaTx, error)
    93  
    94  	AddNewGoodPeer(peerID types.PeerID)
    95  }
    96  
    97  var _ Pool = (*TxPool)(nil) // compile-time interface check
    98  
    99  // SubPoolMarker is an ordered bitset of six bits that's used to sort transactions into sub-pools. Bit meanings:
   100  // 1. Minimum fee requirement. Set to 1 if feeCap of the transaction is no less than the in-protocol parameter of minimal base fee. Set to 0 if feeCap is less than the minimum base fee, which means this transaction will never be included into this particular chain.
   101  // 2. Absence of nonce gaps. Set to 1 for transactions whose nonce is N, state nonce for the sender is M, and there are transactions for all nonces between M and N from the same sender. Set to 0 if the transaction's nonce is separated from the state nonce by one or more nonce gaps.
   102  // 3. Sufficient balance for gas. Set to 1 if the balance of the sender's account in the state is B, nonce of the sender in the state is M, nonce of the transaction is N, and the sum of feeCap x gasLimit + transferred_value of all transactions from this sender with nonces M ... N is no more than B. Set to 0 otherwise. In other words, this bit is set if there is currently a guarantee that the transaction and all its required prior transactions will be able to pay for gas.
   103  // 4. Not too much gas: Set to 1 if the transaction's gas limit does not exceed the block gas limit.
   104  // 5. Dynamic fee requirement. Set to 1 if feeCap of the transaction is no less than baseFee of the currently pending block. Set to 0 otherwise.
   105  // 6. Local transaction. Set to 1 if transaction is local.
   106  type SubPoolMarker uint8
   107  
   108  const (
   109  	EnoughFeeCapProtocol = 0b100000
   110  	NoNonceGaps          = 0b010000
   111  	EnoughBalance        = 0b001000
   112  	NotTooMuchGas        = 0b000100
   113  	EnoughFeeCapBlock    = 0b000010
   114  	IsLocal              = 0b000001
   115  
   116  	BaseFeePoolBits = EnoughFeeCapProtocol + NoNonceGaps + EnoughBalance + NotTooMuchGas
   117  	QueuedPoolBits  = EnoughFeeCapProtocol
   118  )
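
// Illustrative sketch (not part of the original file): the bits above compose by
// simple OR, so a transaction that clears the protocol fee floor, has no nonce gap,
// enough balance and an acceptable gas limit, but whose feeCap is below the pending
// block's baseFee, carries exactly BaseFeePoolBits - broadly, a candidate for the
// BaseFee sub-pool rather than Pending.
func exampleSubPoolMarker() bool {
	var m SubPoolMarker = EnoughFeeCapProtocol | NoNonceGaps | EnoughBalance | NotTooMuchGas
	return m == BaseFeePoolBits // true; OR-ing in EnoughFeeCapBlock would make it Pending-eligible
}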
   119  
   120  // metaTx holds transaction and some metadata
   121  type metaTx struct {
   122  	Tx                        *types.TxSlot
   123  	minFeeCap                 uint256.Int
   124  	nonceDistance             uint64 // how far their nonces are from the state's nonce for the sender
   125  	cumulativeBalanceDistance uint64 // how far their cumulativeRequiredBalance are from the state's balance for the sender
   126  	minTip                    uint64
   127  	bestIndex                 int
   128  	worstIndex                int
   129  	timestamp                 uint64 // when it was added to pool
   130  	subPool                   SubPoolMarker
   131  	currentSubPool            SubPoolType
   132  	alreadyYielded            bool
   133  	minedBlockNum             uint64
   134  }
   135  
   136  func newMetaTx(slot *types.TxSlot, isLocal bool, timestamp uint64) *metaTx {
   137  	mt := &metaTx{Tx: slot, worstIndex: -1, bestIndex: -1, timestamp: timestamp}
   138  	if isLocal {
   139  		mt.subPool = IsLocal
   140  	}
   141  	return mt
   142  }
   143  
   144  type SubPoolType uint8
   145  
   146  const PendingSubPool SubPoolType = 1
   147  const BaseFeeSubPool SubPoolType = 2
   148  const QueuedSubPool SubPoolType = 3
   149  
   150  func (sp SubPoolType) String() string {
   151  	switch sp {
   152  	case PendingSubPool:
   153  		return "Pending"
   154  	case BaseFeeSubPool:
   155  		return "BaseFee"
   156  	case QueuedSubPool:
   157  		return "Queued"
   158  	}
   159  	return fmt.Sprintf("Unknown:%d", sp)
   160  }
   161  
   162  // sender - immutable structure which stores only nonce and balance of account
   163  type sender struct {
   164  	balance uint256.Int
   165  	nonce   uint64
   166  }
   167  
   168  func newSender(nonce uint64, balance uint256.Int) *sender {
   169  	return &sender{nonce: nonce, balance: balance}
   170  }
   171  
   172  var emptySender = newSender(0, *uint256.NewInt(0))
   173  
   174  func SortByNonceLess(a, b *metaTx) bool {
   175  	if a.Tx.SenderID != b.Tx.SenderID {
   176  		return a.Tx.SenderID < b.Tx.SenderID
   177  	}
   178  	return a.Tx.Nonce < b.Tx.Nonce
   179  }
   180  
   181  func calcProtocolBaseFee(baseFee uint64) uint64 {
   182  	return 7
   183  }
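
// A minimal sketch of why 7 acts as a floor (an inference, not stated in this file):
// EIP-1559 lowers the base fee by at most baseFee/8 per block, and with integer math
// 7 - 7/8 == 7, so the base fee can never decay below 7 wei.
func exampleBaseFeeFloor() uint64 {
	baseFee := uint64(7)
	return baseFee - baseFee/8 // 7 - 0 == 7, matching calcProtocolBaseFee's constant
}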
   184  
   185  // TxPool - holds all pool-related data structures and lock-based tiny methods
   186  // most of the logic is implemented by pure, test-friendly functions
   187  //
   188  // txpool doesn't start any goroutines - "leave concurrency to user" design
   189  // txpool has no DB-TX fields - "leave db transactions management to user" design
   190  // txpool has a _chainDB field - but it must maximize the local state cache hit-rate - and perform a minimum of _chainDB transactions
   191  //
   192  // It preserves TxSlot objects as immutable
   193  type TxPool struct {
   194  	_chainDB               kv.RoDB // remote db - use it wisely
   195  	_stateCache            kvcache.Cache
   196  	lock                   *sync.Mutex
   197  	recentlyConnectedPeers *recentlyConnectedPeers // all txs will eventually be propagated to these peers, and then the list is cleared
   198  	senders                *sendersBatch
   199  	// batch processing of remote transactions
   200  	// handling is fast enough without batching, but batching allows:
   201  	//   - fewer _chainDB transactions
   202  	//   - batch notifications about new txs (reduced P2P spam to other nodes about txs propagation)
   203  	//   - and as a result reducing lock contention
   204  	unprocessedRemoteTxs    *types.TxSlots
   205  	unprocessedRemoteByHash map[string]int                                  // to reject duplicates
   206  	byHash                  map[string]*metaTx                              // tx_hash => tx : only those records not committed to db yet
   207  	discardReasonsLRU       *simplelru.LRU[string, txpoolcfg.DiscardReason] // tx_hash => discard_reason : non-persisted
   208  	pending                 *PendingPool
   209  	baseFee                 *SubPool
   210  	queued                  *SubPool
   211  	minedBlobTxsByBlock     map[uint64][]*metaTx             // (blockNum => slice): cache of recently mined blobs
   212  	minedBlobTxsByHash      map[string]*metaTx               // (hash => mt): map of recently mined blobs
   213  	isLocalLRU              *simplelru.LRU[string, struct{}] // tx_hash => is_local : to restore isLocal flag of unwinded transactions
   214  	newPendingTxs           chan types.Announcements         // notifications about new txs in Pending sub-pool
   215  	all                     *BySenderAndNonce                // senderID => (sorted map of tx nonce => *metaTx)
   216  	deletedTxs              []*metaTx                        // list of discarded txs since last db commit
   217  	promoted                types.Announcements
   218  	cfg                     txpoolcfg.Config
   219  	chainID                 uint256.Int
   220  	lastSeenBlock           atomic.Uint64
   221  	lastFinalizedBlock      atomic.Uint64
   222  	started                 atomic.Bool
   223  	pendingBaseFee          atomic.Uint64
   224  	pendingBlobFee          atomic.Uint64 // For gas accounting for blobs, which has its own dimension
   225  	blockGasLimit           atomic.Uint64
   226  	shanghaiTime            *uint64
   227  	isPostShanghai          atomic.Bool
   228  	cancunTime              *uint64
   229  	isPostCancun            atomic.Bool
   230  	logger                  log.Logger
   231  }
   232  
   233  func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config, cache kvcache.Cache, chainID uint256.Int, shanghaiTime, cancunTime *big.Int, logger log.Logger) (*TxPool, error) {
   234  	var err error
   235  	localsHistory, err := simplelru.NewLRU[string, struct{}](10_000, nil)
   236  	if err != nil {
   237  		return nil, err
   238  	}
   239  	discardHistory, err := simplelru.NewLRU[string, txpoolcfg.DiscardReason](10_000, nil)
   240  	if err != nil {
   241  		return nil, err
   242  	}
   243  
   244  	byNonce := &BySenderAndNonce{
   245  		tree:              btree.NewG[*metaTx](32, SortByNonceLess),
   246  		search:            &metaTx{Tx: &types.TxSlot{}},
   247  		senderIDTxnCount:  map[uint64]int{},
   248  		senderIDBlobCount: map[uint64]uint64{},
   249  	}
   250  	tracedSenders := make(map[common.Address]struct{})
   251  	for _, sender := range cfg.TracedSenders {
   252  		tracedSenders[common.BytesToAddress([]byte(sender))] = struct{}{}
   253  	}
   254  
   255  	res := &TxPool{
   256  		lock:                    &sync.Mutex{},
   257  		byHash:                  map[string]*metaTx{},
   258  		isLocalLRU:              localsHistory,
   259  		discardReasonsLRU:       discardHistory,
   260  		all:                     byNonce,
   261  		recentlyConnectedPeers:  &recentlyConnectedPeers{},
   262  		pending:                 NewPendingSubPool(PendingSubPool, cfg.PendingSubPoolLimit),
   263  		baseFee:                 NewSubPool(BaseFeeSubPool, cfg.BaseFeeSubPoolLimit),
   264  		queued:                  NewSubPool(QueuedSubPool, cfg.QueuedSubPoolLimit),
   265  		newPendingTxs:           newTxs,
   266  		_stateCache:             cache,
   267  		senders:                 newSendersCache(tracedSenders),
   268  		_chainDB:                coreDB,
   269  		cfg:                     cfg,
   270  		chainID:                 chainID,
   271  		unprocessedRemoteTxs:    &types.TxSlots{},
   272  		unprocessedRemoteByHash: map[string]int{},
   273  		minedBlobTxsByBlock:     map[uint64][]*metaTx{},
   274  		minedBlobTxsByHash:      map[string]*metaTx{},
   275  		logger:                  logger,
   276  	}
   277  
   278  	if shanghaiTime != nil {
   279  		if !shanghaiTime.IsUint64() {
   280  			return nil, errors.New("shanghaiTime overflow")
   281  		}
   282  		shanghaiTimeU64 := shanghaiTime.Uint64()
   283  		res.shanghaiTime = &shanghaiTimeU64
   284  	}
   285  	if cancunTime != nil {
   286  		if !cancunTime.IsUint64() {
   287  			return nil, errors.New("cancunTime overflow")
   288  		}
   289  		cancunTimeU64 := cancunTime.Uint64()
   290  		res.cancunTime = &cancunTimeU64
   291  	}
   292  
   293  	return res, nil
   294  }
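
// Usage sketch (assumptions, not from the original file): the pool starts no
// goroutines and opens no DB transactions itself, so the caller supplies the
// announcements channel, a read-only chain DB and a state cache, and later drives
// OnNewBlock/AddLocalTxs/AddRemoteTxs with its own kv transactions.
// txpoolcfg.DefaultConfig and kvcache.NewDummy are assumed to exist as in other
// erigon-lib code; substitute your own config and cache implementation as needed.
func exampleNewPool(coreDB kv.RoDB, logger log.Logger) (*TxPool, chan types.Announcements, error) {
	newTxs := make(chan types.Announcements, 1024)
	cache := kvcache.NewDummy()   // caller-chosen kvcache.Cache implementation
	chainID := *uint256.NewInt(1) // mainnet, for illustration
	pool, err := New(newTxs, coreDB, txpoolcfg.DefaultConfig, cache, chainID,
		nil /* shanghaiTime */, nil /* cancunTime */, logger)
	return pool, newTxs, err
}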
   295  
   296  func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, tx kv.Tx) error {
   297  	if err := minedTxs.Valid(); err != nil {
   298  		return err
   299  	}
   300  
   301  	defer newBlockTimer.UpdateDuration(time.Now())
   302  	//t := time.Now()
   303  
   304  	coreDB, cache := p.coreDBWithCache()
   305  	cache.OnNewBlock(stateChanges)
   306  	coreTx, err := coreDB.BeginRo(ctx)
   307  	if err != nil {
   308  		return err
   309  	}
   310  	defer coreTx.Rollback()
   311  
   312  	p.lastSeenBlock.Store(stateChanges.ChangeBatch[len(stateChanges.ChangeBatch)-1].BlockHeight)
   313  	if !p.started.Load() {
   314  		if err := p.fromDBWithLock(ctx, tx, coreTx); err != nil {
   315  			return fmt.Errorf("OnNewBlock: loading txs from DB: %w", err)
   316  		}
   317  	}
   318  	cacheView, err := cache.View(ctx, coreTx)
   319  	if err != nil {
   320  		return err
   321  	}
   322  
   323  	p.lock.Lock()
   324  	defer p.lock.Unlock()
   325  
   326  	if assert.Enable {
   327  		if _, err := kvcache.AssertCheckValues(ctx, coreTx, cache); err != nil {
   328  			p.logger.Error("AssertCheckValues", "err", err, "stack", stack.Trace().String())
   329  		}
   330  	}
   331  	baseFee := stateChanges.PendingBlockBaseFee
   332  
   333  	pendingBaseFee, baseFeeChanged := p.setBaseFee(baseFee)
   334  	// Update pendingBase for all pool queues and slices
   335  	if baseFeeChanged {
   336  		p.pending.best.pendingBaseFee = pendingBaseFee
   337  		p.pending.worst.pendingBaseFee = pendingBaseFee
   338  		p.baseFee.best.pendingBastFee = pendingBaseFee
   339  		p.baseFee.worst.pendingBaseFee = pendingBaseFee
   340  		p.queued.best.pendingBastFee = pendingBaseFee
   341  		p.queued.worst.pendingBaseFee = pendingBaseFee
   342  	}
   343  
   344  	pendingBlobFee := stateChanges.PendingBlobFeePerGas
   345  	p.setBlobFee(pendingBlobFee)
   346  
   347  	p.blockGasLimit.Store(stateChanges.BlockGasLimit)
   348  	if err := p.senders.onNewBlock(stateChanges, unwindTxs, minedTxs, p.logger); err != nil {
   349  		return err
   350  	}
   351  	_, unwindTxs, err = p.validateTxs(&unwindTxs, cacheView)
   352  	if err != nil {
   353  		return err
   354  	}
   355  
   356  	if assert.Enable {
   357  		for _, txn := range unwindTxs.Txs {
   358  			if txn.SenderID == 0 {
   359  				panic(fmt.Errorf("onNewBlock.unwindTxs: senderID can't be zero"))
   360  			}
   361  		}
   362  		for _, txn := range minedTxs.Txs {
   363  			if txn.SenderID == 0 {
   364  				panic(fmt.Errorf("onNewBlock.minedTxs: senderID can't be zero"))
   365  			}
   366  		}
   367  	}
   368  
   369  	if err := p.processMinedFinalizedBlobs(coreTx, minedTxs.Txs, stateChanges.FinalizedBlock); err != nil {
   370  		return err
   371  	}
   372  	if err := removeMined(p.all, minedTxs.Txs, p.pending, p.baseFee, p.queued, p.discardLocked, p.logger); err != nil {
   373  		return err
   374  	}
   375  
   376  	//p.logger.Debug("[txpool] new block", "unwinded", len(unwindTxs.txs), "mined", len(minedTxs.txs), "baseFee", baseFee, "blockHeight", blockHeight)
   377  
   378  	announcements, err := addTxsOnNewBlock(p.lastSeenBlock.Load(), cacheView, stateChanges, p.senders, unwindTxs, /* newTxs */
   379  		pendingBaseFee, stateChanges.BlockGasLimit,
   380  		p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, p.logger)
   381  	if err != nil {
   382  		return err
   383  	}
   384  	p.pending.EnforceWorstInvariants()
   385  	p.baseFee.EnforceInvariants()
   386  	p.queued.EnforceInvariants()
   387  	promote(p.pending, p.baseFee, p.queued, pendingBaseFee, pendingBlobFee, p.discardLocked, &announcements, p.logger)
   388  	p.pending.EnforceBestInvariants()
   389  	p.promoted.Reset()
   390  	p.promoted.AppendOther(announcements)
   391  
   392  	if p.started.CompareAndSwap(false, true) {
   393  		p.logger.Info("[txpool] Started")
   394  	}
   395  
   396  	if p.promoted.Len() > 0 {
   397  		select {
   398  		case p.newPendingTxs <- p.promoted.Copy():
   399  		default:
   400  		}
   401  	}
   402  
   403  	//p.logger.Info("[txpool] new block", "number", p.lastSeenBlock.Load(), "pendngBaseFee", pendingBaseFee, "in", time.Since(t))
   404  	return nil
   405  }
   406  
   407  func (p *TxPool) processRemoteTxs(ctx context.Context) error {
   408  	if !p.started.Load() {
   409  		return fmt.Errorf("txpool not started yet")
   410  	}
   411  
   412  	defer processBatchTxsTimer.UpdateDuration(time.Now())
   413  	coreDB, cache := p.coreDBWithCache()
   414  	coreTx, err := coreDB.BeginRo(ctx)
   415  	if err != nil {
   416  		return err
   417  	}
   418  	defer coreTx.Rollback()
   419  	cacheView, err := cache.View(ctx, coreTx)
   420  	if err != nil {
   421  		return err
   422  	}
   423  
   424  	//t := time.Now()
   425  	p.lock.Lock()
   426  	defer p.lock.Unlock()
   427  
   428  	l := len(p.unprocessedRemoteTxs.Txs)
   429  	if l == 0 {
   430  		return nil
   431  	}
   432  
   433  	err = p.senders.registerNewSenders(p.unprocessedRemoteTxs, p.logger)
   434  	if err != nil {
   435  		return err
   436  	}
   437  
   438  	_, newTxs, err := p.validateTxs(p.unprocessedRemoteTxs, cacheView)
   439  	if err != nil {
   440  		return err
   441  	}
   442  
   443  	announcements, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs,
   444  		p.pendingBaseFee.Load(), p.pendingBlobFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, true, p.logger)
   445  	if err != nil {
   446  		return err
   447  	}
   448  	p.promoted.Reset()
   449  	p.promoted.AppendOther(announcements)
   450  
   451  	if p.promoted.Len() > 0 {
   452  		select {
   453  		case <-ctx.Done():
   454  			return nil
   455  		case p.newPendingTxs <- p.promoted.Copy():
   456  		default:
   457  		}
   458  	}
   459  
   460  	p.unprocessedRemoteTxs.Resize(0)
   461  	p.unprocessedRemoteByHash = map[string]int{}
   462  
   463  	//p.logger.Info("[txpool] on new txs", "amount", len(newPendingTxs.txs), "in", time.Since(t))
   464  	return nil
   465  }
   466  func (p *TxPool) getRlpLocked(tx kv.Tx, hash []byte) (rlpTxn []byte, sender common.Address, isLocal bool, err error) {
   467  	txn, ok := p.byHash[string(hash)]
   468  	if ok && txn.Tx.Rlp != nil {
   469  		return txn.Tx.Rlp, p.senders.senderID2Addr[txn.Tx.SenderID], txn.subPool&IsLocal > 0, nil
   470  	}
   471  	v, err := tx.GetOne(kv.PoolTransaction, hash)
   472  	if err != nil {
   473  		return nil, common.Address{}, false, err
   474  	}
   475  	if v == nil {
   476  		return nil, common.Address{}, false, nil
   477  	}
   478  	return v[20:], *(*[20]byte)(v[:20]), txn != nil && txn.subPool&IsLocal > 0, nil
   479  }
   480  func (p *TxPool) GetRlp(tx kv.Tx, hash []byte) ([]byte, error) {
   481  	p.lock.Lock()
   482  	defer p.lock.Unlock()
   483  	rlpTx, _, _, err := p.getRlpLocked(tx, hash)
   484  	return common.Copy(rlpTx), err
   485  }
   486  func (p *TxPool) AppendLocalAnnouncements(types []byte, sizes []uint32, hashes []byte) ([]byte, []uint32, []byte) {
   487  	p.lock.Lock()
   488  	defer p.lock.Unlock()
   489  	for hash, txn := range p.byHash {
   490  		if txn.subPool&IsLocal == 0 {
   491  			continue
   492  		}
   493  		types = append(types, txn.Tx.Type)
   494  		sizes = append(sizes, txn.Tx.Size)
   495  		hashes = append(hashes, hash...)
   496  	}
   497  	return types, sizes, hashes
   498  }
   499  func (p *TxPool) AppendRemoteAnnouncements(types []byte, sizes []uint32, hashes []byte) ([]byte, []uint32, []byte) {
   500  	p.lock.Lock()
   501  	defer p.lock.Unlock()
   502  
   503  	for hash, txn := range p.byHash {
   504  		if txn.subPool&IsLocal != 0 {
   505  			continue
   506  		}
   507  		types = append(types, txn.Tx.Type)
   508  		sizes = append(sizes, txn.Tx.Size)
   509  		hashes = append(hashes, hash...)
   510  	}
   511  	for hash, txIdx := range p.unprocessedRemoteByHash {
   512  		txSlot := p.unprocessedRemoteTxs.Txs[txIdx]
   513  		types = append(types, txSlot.Type)
   514  		sizes = append(sizes, txSlot.Size)
   515  		hashes = append(hashes, hash...)
   516  	}
   517  	return types, sizes, hashes
   518  }
   519  func (p *TxPool) AppendAllAnnouncements(types []byte, sizes []uint32, hashes []byte) ([]byte, []uint32, []byte) {
   520  	types, sizes, hashes = p.AppendLocalAnnouncements(types, sizes, hashes)
   521  	types, sizes, hashes = p.AppendRemoteAnnouncements(types, sizes, hashes)
   522  	return types, sizes, hashes
   523  }
   524  func (p *TxPool) idHashKnown(tx kv.Tx, hash []byte, hashS string) (bool, error) {
   525  	if _, ok := p.unprocessedRemoteByHash[hashS]; ok {
   526  		return true, nil
   527  	}
   528  	if _, ok := p.discardReasonsLRU.Get(hashS); ok {
   529  		return true, nil
   530  	}
   531  	if _, ok := p.byHash[hashS]; ok {
   532  		return true, nil
   533  	}
   534  	if _, ok := p.minedBlobTxsByHash[hashS]; ok {
   535  		return true, nil
   536  	}
   537  	return tx.Has(kv.PoolTransaction, hash)
   538  }
   539  func (p *TxPool) IdHashKnown(tx kv.Tx, hash []byte) (bool, error) {
   540  	hashS := string(hash)
   541  	p.lock.Lock()
   542  	defer p.lock.Unlock()
   543  	return p.idHashKnown(tx, hash, hashS)
   544  }
   545  func (p *TxPool) FilterKnownIdHashes(tx kv.Tx, hashes types.Hashes) (unknownHashes types.Hashes, err error) {
   546  	p.lock.Lock()
   547  	defer p.lock.Unlock()
   548  	for i := 0; i < len(hashes); i += 32 {
   549  		known, err := p.idHashKnown(tx, hashes[i:i+32], string(hashes[i:i+32]))
   550  		if err != nil {
   551  			return unknownHashes, err
   552  		}
   553  		if !known {
   554  			unknownHashes = append(unknownHashes, hashes[i:i+32]...)
   555  		}
   556  	}
   557  	return unknownHashes, err
   558  }
   559  
   560  func (p *TxPool) getUnprocessedTxn(hashS string) (*types.TxSlot, bool) {
   561  	if i, ok := p.unprocessedRemoteByHash[hashS]; ok {
   562  		return p.unprocessedRemoteTxs.Txs[i], true
   563  	}
   564  	return nil, false
   565  }
   566  
   567  func (p *TxPool) GetKnownBlobTxn(tx kv.Tx, hash []byte) (*metaTx, error) {
   568  	hashS := string(hash)
   569  	p.lock.Lock()
   570  	defer p.lock.Unlock()
   571  	if mt, ok := p.minedBlobTxsByHash[hashS]; ok {
   572  		return mt, nil
   573  	}
   574  	if txn, ok := p.getUnprocessedTxn(hashS); ok {
   575  		return newMetaTx(txn, false, 0), nil
   576  	}
   577  	if mt, ok := p.byHash[hashS]; ok {
   578  		return mt, nil
   579  	}
   580  	has, err := tx.Has(kv.PoolTransaction, hash)
   581  	if err != nil {
   582  		return nil, err
   583  	}
   584  	if !has {
   585  		return nil, nil
   586  	}
   587  
   588  	txn, err := tx.GetOne(kv.PoolTransaction, hash)
   589  	if err != nil {
   590  		return nil, err
   591  	}
   592  	parseCtx := types.NewTxParseContext(p.chainID)
   593  	parseCtx.WithSender(false)
   594  	txSlot := &types.TxSlot{}
   595  	parseCtx.ParseTransaction(txn, 0, txSlot, nil, false, true, nil)
   596  	return newMetaTx(txSlot, false, 0), nil
   597  }
   598  
   599  func (p *TxPool) IsLocal(idHash []byte) bool {
   600  	hashS := string(idHash)
   601  	p.lock.Lock()
   602  	defer p.lock.Unlock()
   603  	return p.isLocalLRU.Contains(hashS)
   604  }
   605  func (p *TxPool) AddNewGoodPeer(peerID types.PeerID) { p.recentlyConnectedPeers.AddPeer(peerID) }
   606  func (p *TxPool) Started() bool                      { return p.started.Load() }
   607  
   608  func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableBlobGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) {
   609  	// First wait for the corresponding block to arrive
   610  	if p.lastSeenBlock.Load() < onTopOf {
   611  		return false, 0, nil // Too early
   612  	}
   613  
   614  	isShanghai := p.isShanghai()
   615  	best := p.pending.best
   616  
   617  	txs.Resize(uint(cmp.Min(int(n), len(best.ms))))
   618  	var toRemove []*metaTx
   619  	count := 0
   620  
   621  	for i := 0; count < int(n) && i < len(best.ms); i++ {
   622  		// if we wouldn't have enough gas for a standard transaction then quit out early
   623  		if availableGas < fixedgas.TxGas {
   624  			break
   625  		}
   626  
   627  		mt := best.ms[i]
   628  
   629  		if toSkip.Contains(mt.Tx.IDHash) {
   630  			continue
   631  		}
   632  
   633  		if mt.Tx.Gas >= p.blockGasLimit.Load() {
   634  			// Skip transactions with very large gas limit
   635  			continue
   636  		}
   637  
   638  		rlpTx, sender, isLocal, err := p.getRlpLocked(tx, mt.Tx.IDHash[:])
   639  		if err != nil {
   640  			return false, count, err
   641  		}
   642  		if len(rlpTx) == 0 {
   643  			toRemove = append(toRemove, mt)
   644  			continue
   645  		}
   646  
   647  		// Skip transactions that require more blob gas than is available
   648  		blobCount := uint64(len(mt.Tx.BlobHashes))
   649  		if blobCount*fixedgas.BlobGasPerBlob > availableBlobGas {
   650  			continue
   651  		}
   652  		availableBlobGas -= blobCount * fixedgas.BlobGasPerBlob
   653  
   654  		// make sure we have enough gas in the caller to add this transaction.
   655  		// not an exact science using intrinsic gas but as close as we could hope for at
   656  		// this stage
   657  		intrinsicGas, _ := txpoolcfg.CalcIntrinsicGas(uint64(mt.Tx.DataLen), uint64(mt.Tx.DataNonZeroLen), nil, mt.Tx.Creation, true, true, isShanghai)
   658  		if intrinsicGas > availableGas {
   659  			// we might find another TX with a low enough intrinsic gas to include so carry on
   660  			continue
   661  		}
   662  		availableGas -= intrinsicGas
   663  
   664  		txs.Txs[count] = rlpTx
   665  		copy(txs.Senders.At(count), sender.Bytes())
   666  		txs.IsLocal[count] = isLocal
   667  		toSkip.Add(mt.Tx.IDHash) // TODO: Is this unnecessary
   668  		count++
   669  	}
   670  
   671  	txs.Resize(uint(count))
   672  	if len(toRemove) > 0 {
   673  		for _, mt := range toRemove {
   674  			p.pending.Remove(mt)
   675  		}
   676  	}
   677  	return true, count, nil
   678  }
   679  
   680  func (p *TxPool) ResetYieldedStatus() {
   681  	p.lock.Lock()
   682  	defer p.lock.Unlock()
   683  	best := p.pending.best
   684  	for i := 0; i < len(best.ms); i++ {
   685  		best.ms[i].alreadyYielded = false
   686  	}
   687  }
   688  
   689  func (p *TxPool) YieldBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableBlobGas uint64, toSkip mapset.Set[[32]byte]) (bool, int, error) {
   690  	p.lock.Lock()
   691  	defer p.lock.Unlock()
   692  	return p.best(n, txs, tx, onTopOf, availableGas, availableBlobGas, toSkip)
   693  }
   694  
   695  func (p *TxPool) PeekBest(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableGas, availableBlobGas uint64) (bool, error) {
   696  	set := mapset.NewThreadUnsafeSet[[32]byte]()
   697  	p.lock.Lock()
   698  	defer p.lock.Unlock()
   699  	onTime, _, err := p.best(n, txs, tx, onTopOf, availableGas, availableBlobGas, set)
   700  	return onTime, err
   701  }
   702  
   703  func (p *TxPool) CountContent() (int, int, int) {
   704  	p.lock.Lock()
   705  	defer p.lock.Unlock()
   706  	return p.pending.Len(), p.baseFee.Len(), p.queued.Len()
   707  }
   708  func (p *TxPool) AddRemoteTxs(_ context.Context, newTxs types.TxSlots) {
   709  	defer addRemoteTxsTimer.UpdateDuration(time.Now())
   710  	p.lock.Lock()
   711  	defer p.lock.Unlock()
   712  	for i, txn := range newTxs.Txs {
   713  		hashS := string(txn.IDHash[:])
   714  		_, ok := p.unprocessedRemoteByHash[hashS]
   715  		if ok {
   716  			continue
   717  		}
   718  		p.unprocessedRemoteByHash[hashS] = len(p.unprocessedRemoteTxs.Txs)
   719  		p.unprocessedRemoteTxs.Append(txn, newTxs.Senders.At(i), false)
   720  	}
   721  }
   722  
   723  func toBlobs(_blobs [][]byte) []gokzg4844.Blob {
   724  	blobs := make([]gokzg4844.Blob, len(_blobs))
   725  	for i, _blob := range _blobs {
   726  		var b gokzg4844.Blob
   727  		copy(b[:], _blob)
   728  		blobs[i] = b
   729  	}
   730  	return blobs
   731  }
   732  
   733  func (p *TxPool) validateTx(txn *types.TxSlot, isLocal bool, stateCache kvcache.CacheView) txpoolcfg.DiscardReason {
   734  	isShanghai := p.isShanghai()
   735  	if isShanghai {
   736  		if txn.DataLen > fixedgas.MaxInitCodeSize {
   737  			return txpoolcfg.InitCodeTooLarge
   738  		}
   739  	}
   740  	if txn.Type == types.BlobTxType {
   741  		if !p.isCancun() {
   742  			return txpoolcfg.TypeNotActivated
   743  		}
   744  		if txn.Creation {
   745  			return txpoolcfg.CreateBlobTxn
   746  		}
   747  		blobCount := uint64(len(txn.BlobHashes))
   748  		if blobCount == 0 {
   749  			return txpoolcfg.NoBlobs
   750  		}
   751  		if blobCount > fixedgas.MaxBlobsPerBlock {
   752  			return txpoolcfg.TooManyBlobs
   753  		}
   754  		equalNumber := len(txn.BlobHashes) == len(txn.Blobs) &&
   755  			len(txn.Blobs) == len(txn.Commitments) &&
   756  			len(txn.Commitments) == len(txn.Proofs)
   757  
   758  		if !equalNumber {
   759  			return txpoolcfg.UnequalBlobTxExt
   760  		}
   761  
   762  		for i := 0; i < len(txn.Commitments); i++ {
   763  			if libkzg.KZGToVersionedHash(txn.Commitments[i]) != libkzg.VersionedHash(txn.BlobHashes[i]) {
   764  				return txpoolcfg.BlobHashCheckFail
   765  			}
   766  		}
   767  
   768  		// https://github.com/ethereum/consensus-specs/blob/017a8495f7671f5fff2075a9bfc9238c1a0982f8/specs/deneb/polynomial-commitments.md#verify_blob_kzg_proof_batch
   769  		kzgCtx := libkzg.Ctx()
   770  		err := kzgCtx.VerifyBlobKZGProofBatch(toBlobs(txn.Blobs), txn.Commitments, txn.Proofs)
   771  		if err != nil {
   772  			return txpoolcfg.UnmatchedBlobTxExt
   773  		}
   774  	}
   775  
   776  	// Drop non-local transactions under our own minimal accepted gas price or tip
   777  	if !isLocal && uint256.NewInt(p.cfg.MinFeeCap).Cmp(&txn.FeeCap) == 1 {
   778  		if txn.Traced {
   779  			p.logger.Info(fmt.Sprintf("TX TRACING: validateTx underpriced idHash=%x local=%t, feeCap=%d, cfg.MinFeeCap=%d", txn.IDHash, isLocal, txn.FeeCap, p.cfg.MinFeeCap))
   780  		}
   781  		return txpoolcfg.UnderPriced
   782  	}
   783  	gas, reason := txpoolcfg.CalcIntrinsicGas(uint64(txn.DataLen), uint64(txn.DataNonZeroLen), nil, txn.Creation, true, true, isShanghai)
   784  	if txn.Traced {
   785  		p.logger.Info(fmt.Sprintf("TX TRACING: validateTx intrinsic gas idHash=%x gas=%d", txn.IDHash, gas))
   786  	}
   787  	if reason != txpoolcfg.Success {
   788  		if txn.Traced {
   789  			p.logger.Info(fmt.Sprintf("TX TRACING: validateTx intrinsic gas calculated failed idHash=%x reason=%s", txn.IDHash, reason))
   790  		}
   791  		return reason
   792  	}
   793  	if gas > txn.Gas {
   794  		if txn.Traced {
   795  			p.logger.Info(fmt.Sprintf("TX TRACING: validateTx intrinsic gas > txn.gas idHash=%x gas=%d, txn.gas=%d", txn.IDHash, gas, txn.Gas))
   796  		}
   797  		return txpoolcfg.IntrinsicGas
   798  	}
   799  	if !isLocal && uint64(p.all.count(txn.SenderID)) > p.cfg.AccountSlots {
   800  		if txn.Traced {
   801  			log.Info(fmt.Sprintf("TX TRACING: validateTx marked as spamming idHash=%x slots=%d, limit=%d", txn.IDHash, p.all.count(txn.SenderID), p.cfg.AccountSlots))
   802  		}
   803  		return txpoolcfg.Spammer
   804  	}
   805  	if p.all.blobCount(txn.SenderID) > p.cfg.BlobSlots {
   806  		if txn.Traced {
   807  			log.Info(fmt.Sprintf("TX TRACING: validateTx marked as spamming (too many blobs) idHash=%x blobs=%d, limit=%d", txn.IDHash, p.all.blobCount(txn.SenderID), p.cfg.BlobSlots))
   808  		}
   809  		return txpoolcfg.Spammer
   810  	}
   811  
   812  	// check nonce and balance
   813  	senderNonce, senderBalance, _ := p.senders.info(stateCache, txn.SenderID)
   814  	if senderNonce > txn.Nonce {
   815  		if txn.Traced {
   816  			p.logger.Info(fmt.Sprintf("TX TRACING: validateTx nonce too low idHash=%x nonce in state=%d, txn.nonce=%d", txn.IDHash, senderNonce, txn.Nonce))
   817  		}
   818  		return txpoolcfg.NonceTooLow
   819  	}
   820  	// Transactor should have enough funds to cover the costs
   821  	total := requiredBalance(txn)
   822  	if senderBalance.Cmp(total) < 0 {
   823  		if txn.Traced {
   824  			p.logger.Info(fmt.Sprintf("TX TRACING: validateTx insufficient funds idHash=%x balance in state=%d, txn.gas*txn.tip=%d", txn.IDHash, senderBalance, total))
   825  		}
   826  		return txpoolcfg.InsufficientFunds
   827  	}
   828  	return txpoolcfg.Success
   829  }
   830  
   831  var maxUint256 = new(uint256.Int).SetAllOne()
   832  
   833  // Sender should have enough balance for: gasLimit x feeCap + blobGas x blobFeeCap + transferred_value
   834  // See YP, Eq (61) in Section 6.2 "Execution"
   835  func requiredBalance(txn *types.TxSlot) *uint256.Int {
   836  	// See https://github.com/ethereum/EIPs/pull/3594
   837  	total := uint256.NewInt(txn.Gas)
   838  	_, overflow := total.MulOverflow(total, &txn.FeeCap)
   839  	if overflow {
   840  		return maxUint256
   841  	}
   842  	// and https://eips.ethereum.org/EIPS/eip-4844#gas-accounting
   843  	blobCount := uint64(len(txn.BlobHashes))
   844  	if blobCount != 0 {
   845  		maxBlobGasCost := uint256.NewInt(fixedgas.BlobGasPerBlob)
   846  		maxBlobGasCost.Mul(maxBlobGasCost, uint256.NewInt(blobCount))
   847  		_, overflow = maxBlobGasCost.MulOverflow(maxBlobGasCost, &txn.BlobFeeCap)
   848  		if overflow {
   849  			return maxUint256
   850  		}
   851  		_, overflow = total.AddOverflow(total, maxBlobGasCost)
   852  		if overflow {
   853  			return maxUint256
   854  		}
   855  	}
   856  
   857  	_, overflow = total.AddOverflow(total, &txn.Value)
   858  	if overflow {
   859  		return maxUint256
   860  	}
   861  	return total
   862  }
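
// Worked example (illustrative values, not from the original file): a plain transfer
// with gas=21000, feeCap=2 and value=10 must be backed by 21000*2 + 10 = 42010 wei;
// a blob transaction additionally reserves len(BlobHashes) * BlobGasPerBlob * BlobFeeCap.
func exampleRequiredBalance() *uint256.Int {
	txn := &types.TxSlot{Gas: 21000, FeeCap: *uint256.NewInt(2), Value: *uint256.NewInt(10)}
	return requiredBalance(txn) // 42010
}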
   863  
   864  func (p *TxPool) isShanghai() bool {
   865  	// once this flag has been set for the first time we no longer need to check the timestamp
   866  	set := p.isPostShanghai.Load()
   867  	if set {
   868  		return true
   869  	}
   870  	if p.shanghaiTime == nil {
   871  		return false
   872  	}
   873  	shanghaiTime := *p.shanghaiTime
   874  
   875  	// a zero here means Shanghai is always active
   876  	if shanghaiTime == 0 {
   877  		p.isPostShanghai.Swap(true)
   878  		return true
   879  	}
   880  
   881  	now := time.Now().Unix()
   882  	activated := uint64(now) >= shanghaiTime
   883  	if activated {
   884  		p.isPostShanghai.Swap(true)
   885  	}
   886  	return activated
   887  }
   888  
   889  func (p *TxPool) isCancun() bool {
   890  	// once this flag has been set for the first time we no longer need to check the timestamp
   891  	set := p.isPostCancun.Load()
   892  	if set {
   893  		return true
   894  	}
   895  	if p.cancunTime == nil {
   896  		return false
   897  	}
   898  	cancunTime := *p.cancunTime
   899  
   900  	// a zero here means Cancun is always active
   901  	if cancunTime == 0 {
   902  		p.isPostCancun.Swap(true)
   903  		return true
   904  	}
   905  
   906  	now := time.Now().Unix()
   907  	activated := uint64(now) >= cancunTime
   908  	if activated {
   909  		p.isPostCancun.Swap(true)
   910  	}
   911  	return activated
   912  }
   913  
   914  // Check that the serialized txn does not exceed a certain max size
   915  func (p *TxPool) ValidateSerializedTxn(serializedTxn []byte) error {
   916  	const (
   917  		// txSlotSize is used to calculate how many data slots a single transaction
   918  		// takes up based on its size. The slots are used as DoS protection, ensuring
   919  		// that validating a new transaction remains a constant operation (in reality
   920  		// O(maxslots), where max slots are 4 currently).
   921  		txSlotSize = 32 * 1024
   922  
   923  		// txMaxSize is the maximum size a single transaction can have. This field has
   924  		// non-trivial consequences: larger transactions are significantly harder and
   925  		// more expensive to propagate; larger transactions also take more resources
   926  		// to validate whether they fit into the pool or not.
   927  		txMaxSize = 4 * txSlotSize // 128KB
   928  
   929  		// Should be enough for a transaction with 6 blobs
   930  		blobTxMaxSize = 800_000
   931  	)
   932  	txType, err := types.PeekTransactionType(serializedTxn)
   933  	if err != nil {
   934  		return err
   935  	}
   936  	maxSize := txMaxSize
   937  	if txType == types.BlobTxType {
   938  		maxSize = blobTxMaxSize
   939  	}
   940  	if len(serializedTxn) > maxSize {
   941  		return types.ErrRlpTooBig
   942  	}
   943  	return nil
   944  }
   945  
   946  func (p *TxPool) validateTxs(txs *types.TxSlots, stateCache kvcache.CacheView) (reasons []txpoolcfg.DiscardReason, goodTxs types.TxSlots, err error) {
   947  	// reasons is pre-sized for direct indexing, with the default zero
   948  	// value DiscardReason of NotSet
   949  	reasons = make([]txpoolcfg.DiscardReason, len(txs.Txs))
   950  
   951  	if err := txs.Valid(); err != nil {
   952  		return reasons, goodTxs, err
   953  	}
   954  
   955  	goodCount := 0
   956  	for i, txn := range txs.Txs {
   957  		reason := p.validateTx(txn, txs.IsLocal[i], stateCache)
   958  		if reason == txpoolcfg.Success {
   959  			goodCount++
   960  			// Success here means no DiscardReason yet, so leave it NotSet
   961  			continue
   962  		}
   963  		if reason == txpoolcfg.Spammer {
   964  			p.punishSpammer(txn.SenderID)
   965  		}
   966  		reasons[i] = reason
   967  	}
   968  
   969  	goodTxs.Resize(uint(goodCount))
   970  
   971  	j := 0
   972  	for i, txn := range txs.Txs {
   973  		if reasons[i] == txpoolcfg.NotSet {
   974  			goodTxs.Txs[j] = txn
   975  			goodTxs.IsLocal[j] = txs.IsLocal[i]
   976  			copy(goodTxs.Senders.At(j), txs.Senders.At(i))
   977  			j++
   978  		}
   979  	}
   980  	return reasons, goodTxs, nil
   981  }
   982  
   983  // punishSpammer drops half of the spammer's transactions, starting from those with the highest nonces
   984  func (p *TxPool) punishSpammer(spammer uint64) {
   985  	count := p.all.count(spammer) / 2
   986  	if count > 0 {
   987  		txsToDelete := make([]*metaTx, 0, count)
   988  		p.all.descend(spammer, func(mt *metaTx) bool {
   989  			txsToDelete = append(txsToDelete, mt)
   990  			count--
   991  			return count > 0
   992  		})
   993  		for _, mt := range txsToDelete {
   994  			p.discardLocked(mt, txpoolcfg.Spammer) // can't call it while iterating by all
   995  		}
   996  	}
   997  }
   998  
   999  func fillDiscardReasons(reasons []txpoolcfg.DiscardReason, newTxs types.TxSlots, discardReasonsLRU *simplelru.LRU[string, txpoolcfg.DiscardReason]) []txpoolcfg.DiscardReason {
  1000  	for i := range reasons {
  1001  		if reasons[i] != txpoolcfg.NotSet {
  1002  			continue
  1003  		}
  1004  		reason, ok := discardReasonsLRU.Get(string(newTxs.Txs[i].IDHash[:]))
  1005  		if ok {
  1006  			reasons[i] = reason
  1007  		} else {
  1008  			reasons[i] = txpoolcfg.Success
  1009  		}
  1010  	}
  1011  	return reasons
  1012  }
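
// Taken together (a high-level reading of the functions in this file): validateTxs
// leaves reasons[i] as NotSet for transactions it accepts, addTxs fills in
// per-transaction outcomes such as DuplicateHash or NotReplaced, and
// fillDiscardReasons maps any remaining NotSet entry either to a previously recorded
// LRU reason or to Success, so AddLocalTxs can return a definite reason per slot.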
  1013  
  1014  func (p *TxPool) AddLocalTxs(ctx context.Context, newTransactions types.TxSlots, tx kv.Tx) ([]txpoolcfg.DiscardReason, error) {
  1015  	coreDb, cache := p.coreDBWithCache()
  1016  	coreTx, err := coreDb.BeginRo(ctx)
  1017  	if err != nil {
  1018  		return nil, err
  1019  	}
  1020  	defer coreTx.Rollback()
  1021  
  1022  	cacheView, err := cache.View(ctx, coreTx)
  1023  	if err != nil {
  1024  		return nil, err
  1025  	}
  1026  
  1027  	p.lock.Lock()
  1028  	defer p.lock.Unlock()
  1029  
  1030  	if !p.Started() {
  1031  		if err := p.fromDB(ctx, tx, coreTx); err != nil {
  1032  			return nil, fmt.Errorf("AddLocalTxs: loading txs from DB: %w", err)
  1033  		}
  1034  		if p.started.CompareAndSwap(false, true) {
  1035  			p.logger.Info("[txpool] Started")
  1036  		}
  1037  	}
  1038  
  1039  	if err = p.senders.registerNewSenders(&newTransactions, p.logger); err != nil {
  1040  		return nil, err
  1041  	}
  1042  
  1043  	reasons, newTxs, err := p.validateTxs(&newTransactions, cacheView)
  1044  	if err != nil {
  1045  		return nil, err
  1046  	}
  1047  
  1048  	announcements, addReasons, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs,
  1049  		p.pendingBaseFee.Load(), p.pendingBlobFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, true, p.logger)
  1050  	if err == nil {
  1051  		for i, reason := range addReasons {
  1052  			if reason != txpoolcfg.NotSet {
  1053  				reasons[i] = reason
  1054  			}
  1055  		}
  1056  	} else {
  1057  		return nil, err
  1058  	}
  1059  	p.promoted.Reset()
  1060  	p.promoted.AppendOther(announcements)
  1061  
  1062  	reasons = fillDiscardReasons(reasons, newTxs, p.discardReasonsLRU)
  1063  	for i, reason := range reasons {
  1064  		if reason == txpoolcfg.Success {
  1065  			txn := newTxs.Txs[i]
  1066  			if txn.Traced {
  1067  				p.logger.Info(fmt.Sprintf("TX TRACING: AddLocalTxs promotes idHash=%x, senderId=%d", txn.IDHash, txn.SenderID))
  1068  			}
  1069  			p.promoted.Append(txn.Type, txn.Size, txn.IDHash[:])
  1070  		}
  1071  	}
  1072  	if p.promoted.Len() > 0 {
  1073  		select {
  1074  		case p.newPendingTxs <- p.promoted.Copy():
  1075  		default:
  1076  		}
  1077  	}
  1078  	return reasons, nil
  1079  }
  1080  func (p *TxPool) coreDBWithCache() (kv.RoDB, kvcache.Cache) {
  1081  	p.lock.Lock()
  1082  	defer p.lock.Unlock()
  1083  	return p._chainDB, p._stateCache
  1084  }
  1085  func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch,
  1086  	newTxs types.TxSlots, pendingBaseFee, pendingBlobFee, blockGasLimit uint64,
  1087  	pending *PendingPool, baseFee, queued *SubPool,
  1088  	byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx, *types.Announcements) txpoolcfg.DiscardReason, discard func(*metaTx, txpoolcfg.DiscardReason), collect bool,
  1089  	logger log.Logger) (types.Announcements, []txpoolcfg.DiscardReason, error) {
  1090  	protocolBaseFee := calcProtocolBaseFee(pendingBaseFee)
  1091  	if assert.Enable {
  1092  		for _, txn := range newTxs.Txs {
  1093  			if txn.SenderID == 0 {
  1094  				panic(fmt.Errorf("senderID can't be zero"))
  1095  			}
  1096  		}
  1097  	}
  1098  	// This can be thought of as a reverse operation from the one described before.
  1099  	// When a block that was deemed "the best" of its height, is no longer deemed "the best", the
  1100  	// transactions contained in it, are now viable for inclusion in other blocks, and therefore should
  1101  	// be returned into the transaction pool.
  1102  	// An interesting note here is that if the block contained any transactions local to the node,
  1103  	// by being first removed from the pool (from the "local" part of it), and then re-injected,
  1104  	// they effectively lose their priority over the "remote" transactions. In order to prevent that,
  1105  	// somehow the fact that certain transactions were local needs to be remembered for some
  1106  	// time (up to some "immutability threshold").
  1107  	sendersWithChangedState := map[uint64]struct{}{}
  1108  	discardReasons := make([]txpoolcfg.DiscardReason, len(newTxs.Txs))
  1109  	announcements := types.Announcements{}
  1110  	for i, txn := range newTxs.Txs {
  1111  		if found, ok := byHash[string(txn.IDHash[:])]; ok {
  1112  			discardReasons[i] = txpoolcfg.DuplicateHash
  1113  			// In case the transaction is stuck, "poke" it to rebroadcast
  1114  			if collect && newTxs.IsLocal[i] && (found.currentSubPool == PendingSubPool || found.currentSubPool == BaseFeeSubPool) {
  1115  				announcements.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:])
  1116  			}
  1117  			continue
  1118  		}
  1119  		mt := newMetaTx(txn, newTxs.IsLocal[i], blockNum)
  1120  		if reason := add(mt, &announcements); reason != txpoolcfg.NotSet {
  1121  			discardReasons[i] = reason
  1122  			continue
  1123  		}
  1124  		discardReasons[i] = txpoolcfg.NotSet // unnecessary
  1125  		if txn.Traced {
  1126  			logger.Info(fmt.Sprintf("TX TRACING: schedule sendersWithChangedState idHash=%x senderId=%d", txn.IDHash, mt.Tx.SenderID))
  1127  		}
  1128  		sendersWithChangedState[mt.Tx.SenderID] = struct{}{}
  1129  	}
  1130  
  1131  	for senderID := range sendersWithChangedState {
  1132  		nonce, balance, err := senders.info(cacheView, senderID)
  1133  		if err != nil {
  1134  			return announcements, discardReasons, err
  1135  		}
  1136  		onSenderStateChange(senderID, nonce, balance, byNonce,
  1137  			protocolBaseFee, blockGasLimit, pending, baseFee, queued, discard, logger)
  1138  	}
  1139  
  1140  	promote(pending, baseFee, queued, pendingBaseFee, pendingBlobFee, discard, &announcements, logger)
  1141  	pending.EnforceBestInvariants()
  1142  
  1143  	return announcements, discardReasons, nil
  1144  }
  1145  
  1146  // TODO: Looks like a copy of the above
  1147  func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges *remote.StateChangeBatch,
  1148  	senders *sendersBatch, newTxs types.TxSlots, pendingBaseFee uint64, blockGasLimit uint64,
  1149  	pending *PendingPool, baseFee, queued *SubPool,
  1150  	byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx, *types.Announcements) txpoolcfg.DiscardReason, discard func(*metaTx, txpoolcfg.DiscardReason),
  1151  	logger log.Logger) (types.Announcements, error) {
  1152  	protocolBaseFee := calcProtocolBaseFee(pendingBaseFee)
  1153  	if assert.Enable {
  1154  		for _, txn := range newTxs.Txs {
  1155  			if txn.SenderID == 0 {
  1156  				panic(fmt.Errorf("senderID can't be zero"))
  1157  			}
  1158  		}
  1159  	}
  1160  	// This can be thought of as a reverse operation from the one described before.
  1161  	// When a block that was deemed "the best" of its height, is no longer deemed "the best", the
  1162  	// transactions contained in it, are now viable for inclusion in other blocks, and therefore should
  1163  	// be returned into the transaction pool.
  1164  	// An interesting note here is that if the block contained any transactions local to the node,
  1165  	// by being first removed from the pool (from the "local" part of it), and then re-injected,
  1166  	// they effectively lose their priority over the "remote" transactions. In order to prevent that,
  1167  	// somehow the fact that certain transactions were local needs to be remembered for some
  1168  	// time (up to some "immutability threshold").
  1169  	sendersWithChangedState := map[uint64]struct{}{}
  1170  	announcements := types.Announcements{}
  1171  	for i, txn := range newTxs.Txs {
  1172  		if _, ok := byHash[string(txn.IDHash[:])]; ok {
  1173  			continue
  1174  		}
  1175  		mt := newMetaTx(txn, newTxs.IsLocal[i], blockNum)
  1176  		if reason := add(mt, &announcements); reason != txpoolcfg.NotSet {
  1177  			discard(mt, reason)
  1178  			continue
  1179  		}
  1180  		sendersWithChangedState[mt.Tx.SenderID] = struct{}{}
  1181  	}
  1182  	// add senders changed in state to `sendersWithChangedState` list
  1183  	for _, changesList := range stateChanges.ChangeBatch {
  1184  		for _, change := range changesList.Changes {
  1185  			switch change.Action {
  1186  			case remote.Action_UPSERT, remote.Action_UPSERT_CODE:
  1187  				if change.Incarnation > 0 {
  1188  					continue
  1189  				}
  1190  				addr := gointerfaces.ConvertH160toAddress(change.Address)
  1191  				id, ok := senders.getID(addr)
  1192  				if !ok {
  1193  					continue
  1194  				}
  1195  				sendersWithChangedState[id] = struct{}{}
  1196  			}
  1197  		}
  1198  	}
  1199  
  1200  	for senderID := range sendersWithChangedState {
  1201  		nonce, balance, err := senders.info(cacheView, senderID)
  1202  		if err != nil {
  1203  			return announcements, err
  1204  		}
  1205  		onSenderStateChange(senderID, nonce, balance, byNonce,
  1206  			protocolBaseFee, blockGasLimit, pending, baseFee, queued, discard, logger)
  1207  	}
  1208  
  1209  	return announcements, nil
  1210  }
  1211  
  1212  func (p *TxPool) setBaseFee(baseFee uint64) (uint64, bool) {
  1213  	changed := false
  1214  	if baseFee > 0 {
  1215  		changed = baseFee != p.pendingBaseFee.Load()
  1216  		p.pendingBaseFee.Store(baseFee)
  1217  	}
  1218  	return p.pendingBaseFee.Load(), changed
  1219  }
  1220  
  1221  func (p *TxPool) setBlobFee(blobFee uint64) {
  1222  	if blobFee > 0 {
  1223  		p.pendingBlobFee.Store(blobFee)
  1224  	}
  1225  }
  1226  
  1227  func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) txpoolcfg.DiscardReason {
  1228  	// Insert to pending pool, if pool doesn't have txn with same Nonce and bigger Tip
  1229  	found := p.all.get(mt.Tx.SenderID, mt.Tx.Nonce)
  1230  	if found != nil {
  1231  		if found.Tx.Type == types.BlobTxType && mt.Tx.Type != types.BlobTxType {
  1232  			return txpoolcfg.BlobTxReplace
  1233  		}
  1234  		priceBump := p.cfg.PriceBump
  1235  
  1236  		//Blob txn threshold checks for replace txn
  1237  		if mt.Tx.Type == types.BlobTxType {
  1238  			priceBump = p.cfg.BlobPriceBump
  1239  			blobFeeThreshold, overflow := (&uint256.Int{}).MulDivOverflow(
  1240  				&found.Tx.BlobFeeCap,
  1241  				uint256.NewInt(100+priceBump),
  1242  				uint256.NewInt(100),
  1243  			)
  1244  			if mt.Tx.BlobFeeCap.Lt(blobFeeThreshold) && !overflow {
  1245  				if bytes.Equal(found.Tx.IDHash[:], mt.Tx.IDHash[:]) {
  1246  					return txpoolcfg.NotSet
  1247  				}
  1248  				return txpoolcfg.ReplaceUnderpriced // TODO: This is the same as NotReplaced
  1249  			}
  1250  		}
  1251  
  1252  		//Regular txn threshold checks
  1253  		tipThreshold := uint256.NewInt(0)
  1254  		tipThreshold = tipThreshold.Mul(&found.Tx.Tip, uint256.NewInt(100+priceBump))
  1255  		tipThreshold.Div(tipThreshold, u256.N100)
  1256  		feecapThreshold := uint256.NewInt(0)
  1257  		feecapThreshold.Mul(&found.Tx.FeeCap, uint256.NewInt(100+priceBump))
  1258  		feecapThreshold.Div(feecapThreshold, u256.N100)
  1259  		if mt.Tx.Tip.Cmp(tipThreshold) < 0 || mt.Tx.FeeCap.Cmp(feecapThreshold) < 0 {
  1260  			// Both tip and feecap need to be larger than previously to replace the transaction
  1261  			// In case the transaction is stuck, "poke" it to rebroadcast
  1262  			if mt.subPool&IsLocal != 0 && (found.currentSubPool == PendingSubPool || found.currentSubPool == BaseFeeSubPool) {
  1263  				announcements.Append(found.Tx.Type, found.Tx.Size, found.Tx.IDHash[:])
  1264  			}
  1265  			if bytes.Equal(found.Tx.IDHash[:], mt.Tx.IDHash[:]) {
  1266  				return txpoolcfg.NotSet
  1267  			}
  1268  			return txpoolcfg.NotReplaced
  1269  		}
  1270  
  1271  		switch found.currentSubPool {
  1272  		case PendingSubPool:
  1273  			p.pending.Remove(found)
  1274  		case BaseFeeSubPool:
  1275  			p.baseFee.Remove(found)
  1276  		case QueuedSubPool:
  1277  			p.queued.Remove(found)
  1278  		default:
  1279  			//already removed
  1280  		}
  1281  
  1282  		p.discardLocked(found, txpoolcfg.ReplacedByHigherTip)
  1283  	}
  1284  
  1285  	// Don't add a blob tx to queued if its blob fee cap is below the current pending blob base fee
  1286  	if mt.Tx.Type == types.BlobTxType && mt.Tx.BlobFeeCap.LtUint64(p.pendingBlobFee.Load()) {
  1287  		return txpoolcfg.FeeTooLow
  1288  	}
  1289  
  1290  	hashStr := string(mt.Tx.IDHash[:])
  1291  	p.byHash[hashStr] = mt
  1292  
  1293  	if replaced := p.all.replaceOrInsert(mt); replaced != nil {
  1294  		if assert.Enable {
  1295  			panic("must never happen")
  1296  		}
  1297  	}
  1298  
  1299  	if mt.subPool&IsLocal != 0 {
  1300  		p.isLocalLRU.Add(hashStr, struct{}{})
  1301  	}
  1302  	// All transactions are first added to the queued pool and then immediately promoted from there if required
  1303  	p.queued.Add(mt, p.logger)
  1304  	// Remove from mined cache as we are now "resurrecting" it to a sub-pool
  1305  	p.deleteMinedBlobTxn(hashStr)
  1306  	return txpoolcfg.NotSet
  1307  }
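
// Worked example of the price-bump replacement rule above (illustrative numbers, not
// from the original file): with a PriceBump of 10, an existing tx with tip=100 and
// feeCap=1000 is only displaced by a newcomer offering tip >= 110 and feeCap >= 1100;
// a blob transaction uses cfg.BlobPriceBump and must additionally clear the bumped
// BlobFeeCap threshold before the regular tip/feeCap checks apply.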
  1308  
  1309  // dropping transaction from all sub-structures and from db
  1310  // Important: don't call it while iterating by all
  1311  func (p *TxPool) discardLocked(mt *metaTx, reason txpoolcfg.DiscardReason) {
  1312  	hashStr := string(mt.Tx.IDHash[:])
  1313  	delete(p.byHash, hashStr)
  1314  	p.deletedTxs = append(p.deletedTxs, mt)
  1315  	p.all.delete(mt)
  1316  	p.discardReasonsLRU.Add(hashStr, reason)
  1317  }
  1318  
  1319  // Cache recently mined blobs in anticipation of reorg, delete finalized ones
  1320  func (p *TxPool) processMinedFinalizedBlobs(coreTx kv.Tx, minedTxs []*types.TxSlot, finalizedBlock uint64) error {
  1321  	p.lastFinalizedBlock.Store(finalizedBlock)
  1322  	// Remove blobs in the finalized block and older, loop through all entries
  1323  	for l := len(p.minedBlobTxsByBlock); l > 0 && finalizedBlock > 0; l-- {
  1324  		// delete individual hashes
  1325  		for _, mt := range p.minedBlobTxsByBlock[finalizedBlock] {
  1326  			delete(p.minedBlobTxsByHash, string(mt.Tx.IDHash[:]))
  1327  		}
  1328  		// delete the map entry for this block num
  1329  		delete(p.minedBlobTxsByBlock, finalizedBlock)
  1330  		// move on to older blocks, if present
  1331  		finalizedBlock--
  1332  	}
  1333  
  1334  	// Add mined blobs
  1335  	minedBlock := p.lastSeenBlock.Load()
  1336  	p.minedBlobTxsByBlock[minedBlock] = make([]*metaTx, 0)
  1337  	for _, txn := range minedTxs {
  1338  		if txn.Type == types.BlobTxType {
  1339  			mt := &metaTx{Tx: txn, minedBlockNum: minedBlock}
  1340  			p.minedBlobTxsByBlock[minedBlock] = append(p.minedBlobTxsByBlock[minedBlock], mt)
  1341  			mt.bestIndex = len(p.minedBlobTxsByBlock[minedBlock]) - 1
  1342  			p.minedBlobTxsByHash[string(txn.IDHash[:])] = mt
  1343  		}
  1344  	}
  1345  	return nil
  1346  }
  1347  
  1348  // Delete individual hash entries from minedBlobTxs cache
  1349  func (p *TxPool) deleteMinedBlobTxn(hash string) {
  1350  	mt, exists := p.minedBlobTxsByHash[hash]
  1351  	if !exists {
  1352  		return
  1353  	}
  1354  	l := len(p.minedBlobTxsByBlock[mt.minedBlockNum])
  1355  	if l > 1 {
  1356  		p.minedBlobTxsByBlock[mt.minedBlockNum][mt.bestIndex] = p.minedBlobTxsByBlock[mt.minedBlockNum][l-1]
        		p.minedBlobTxsByBlock[mt.minedBlockNum][mt.bestIndex].bestIndex = mt.bestIndex // keep the moved txn's index in sync
  1357  	}
  1358  	p.minedBlobTxsByBlock[mt.minedBlockNum] = p.minedBlobTxsByBlock[mt.minedBlockNum][:l-1]
  1359  	delete(p.minedBlobTxsByHash, hash)
  1360  }
  1361  
  1362  func (p *TxPool) NonceFromAddress(addr [20]byte) (nonce uint64, inPool bool) {
  1363  	p.lock.Lock()
  1364  	defer p.lock.Unlock()
  1365  	senderID, found := p.senders.getID(addr)
  1366  	if !found {
  1367  		return 0, false
  1368  	}
  1369  	return p.all.nonce(senderID)
  1370  }
  1371  
  1372  // removeMined - apply a new highest block (or batch of blocks) by removing the transactions mined in it.
  1373  //
  1374  // A new best block arrives, which potentially changes the balance and the nonce of some senders.
  1375  // We use the senderIds data structure to find relevant senderId values, and then use the senders data structure to
  1376  // modify state_balance and state_nonce, potentially remove some elements (if a transaction with some nonce is
  1377  // included into a block), and finally, walk over the transaction records and update SubPool fields depending on
  1378  // the actual presence of nonce gaps and what the balance is.
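// For example (an illustrative sketch; senderIDs and nonces are made up):
//
//	minedTxs:       {senderID: 7, nonce: 3}, {senderID: 7, nonce: 5}, {senderID: 9, nonce: 1}
//	noncesToRemove: {7: 5, 9: 1}
//
// so every pooled txn of sender 7 with nonce <= 5, and of sender 9 with nonce <= 1, is removed from its
// current sub-pool and discarded with reason Mined.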
  1379  func removeMined(byNonce *BySenderAndNonce, minedTxs []*types.TxSlot, pending *PendingPool, baseFee, queued *SubPool, discard func(*metaTx, txpoolcfg.DiscardReason), logger log.Logger) error {
  1380  	noncesToRemove := map[uint64]uint64{}
  1381  	for _, txn := range minedTxs {
  1382  		nonce, ok := noncesToRemove[txn.SenderID]
  1383  		if !ok || txn.Nonce > nonce {
  1384  			noncesToRemove[txn.SenderID] = txn.Nonce
  1385  		}
  1386  	}
  1387  
  1388  	var toDel []*metaTx // can't delete items while iterate them
  1389  	for senderID, nonce := range noncesToRemove {
  1390  		//if sender.all.Len() > 0 {
  1391  		//logger.Debug("[txpool] removing mined", "senderID", tx.senderID, "sender.all.len()", sender.all.Len())
  1392  		//}
  1393  		// delete mined transactions from everywhere
  1394  		byNonce.ascend(senderID, func(mt *metaTx) bool {
  1395  			//logger.Debug("[txpool] removing mined, cmp nonces", "tx.nonce", it.metaTx.Tx.nonce, "sender.nonce", sender.nonce)
  1396  			if mt.Tx.Nonce > nonce {
  1397  				return false
  1398  			}
  1399  			if mt.Tx.Traced {
  1400  				logger.Info(fmt.Sprintf("TX TRACING: removeMined idHash=%x senderId=%d, currentSubPool=%s", mt.Tx.IDHash, mt.Tx.SenderID, mt.currentSubPool))
  1401  			}
  1402  			toDel = append(toDel, mt)
  1403  			// del from sub-pool
  1404  			switch mt.currentSubPool {
  1405  			case PendingSubPool:
  1406  				pending.Remove(mt)
  1407  			case BaseFeeSubPool:
  1408  				baseFee.Remove(mt)
  1409  			case QueuedSubPool:
  1410  				queued.Remove(mt)
  1411  			default:
  1412  				//already removed
  1413  			}
  1414  			return true
  1415  		})
  1416  
  1417  		for _, mt := range toDel {
  1418  			discard(mt, txpoolcfg.Mined)
  1419  		}
  1420  		toDel = toDel[:0]
  1421  	}
  1422  	return nil
  1423  }
  1424  
  1425  // onSenderStateChange is the function that recalculates ephemeral fields of transactions and determines
  1426  // which sub pool they will need to go to. Since this depends on other transactions from the same sender with lower
  1427  // nonces, and also affects other transactions from the same sender with higher nonces, it loops through all transactions
  1428  // for a given senderID
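// For example (an illustrative sketch; the values are made up): with senderNonce=10 and a sufficient
// senderBalance, a txn with nonce=10, feeCap >= protocolBaseFee and gas < blockGasLimit ends up with
// subPool = EnoughFeeCapProtocol|NoNonceGaps|EnoughBalance|NotTooMuchGas and so can qualify for the
// pending sub-pool, while a txn with nonce=12 (a gap at 11) keeps NoNonceGaps unset and will not
// qualify for pending until the gap is filled.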
  1429  func onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint256.Int, byNonce *BySenderAndNonce,
  1430  	protocolBaseFee, blockGasLimit uint64, pending *PendingPool, baseFee, queued *SubPool, discard func(*metaTx, txpoolcfg.DiscardReason), logger log.Logger) {
  1431  	noGapsNonce := senderNonce
  1432  	cumulativeRequiredBalance := uint256.NewInt(0)
  1433  	minFeeCap := uint256.NewInt(0).SetAllOne()
  1434  	minTip := uint64(math.MaxUint64)
  1435  	var toDel []*metaTx // can't delete items while iterate them
  1436  	byNonce.ascend(senderID, func(mt *metaTx) bool {
  1437  		if mt.Tx.Traced {
  1438  			logger.Info(fmt.Sprintf("TX TRACING: onSenderStateChange loop iteration idHash=%x senderID=%d, senderNonce=%d, txn.nonce=%d, currentSubPool=%s", mt.Tx.IDHash, senderID, senderNonce, mt.Tx.Nonce, mt.currentSubPool))
  1439  		}
  1440  		deleteAndContinueReasonLog := ""
  1441  		if senderNonce > mt.Tx.Nonce {
  1442  			deleteAndContinueReasonLog = "low nonce"
  1443  		} else if mt.Tx.Nonce != noGapsNonce && mt.Tx.Type == types.BlobTxType { // Discard nonce-gapped blob txns
  1444  			deleteAndContinueReasonLog = "nonce-gapped blob txn"
  1445  		}
  1446  		if deleteAndContinueReasonLog != "" {
  1447  			if mt.Tx.Traced {
  1448  				logger.Info(fmt.Sprintf("TX TRACING: removing due to %s for idHash=%x senderID=%d, senderNonce=%d, txn.nonce=%d, currentSubPool=%s", deleteAndContinueReasonLog, mt.Tx.IDHash, senderID, senderNonce, mt.Tx.Nonce, mt.currentSubPool))
  1449  			}
  1450  			// del from sub-pool
  1451  			switch mt.currentSubPool {
  1452  			case PendingSubPool:
  1453  				pending.Remove(mt)
  1454  			case BaseFeeSubPool:
  1455  				baseFee.Remove(mt)
  1456  			case QueuedSubPool:
  1457  				queued.Remove(mt)
  1458  			default:
  1459  				//already removed
  1460  			}
  1461  			toDel = append(toDel, mt)
  1462  			return true
  1463  		}
  1464  
  1465  		if minFeeCap.Gt(&mt.Tx.FeeCap) {
  1466  			*minFeeCap = mt.Tx.FeeCap
  1467  		}
  1468  		mt.minFeeCap = *minFeeCap
  1469  		if mt.Tx.Tip.IsUint64() {
  1470  			minTip = cmp.Min(minTip, mt.Tx.Tip.Uint64())
  1471  		}
  1472  		mt.minTip = minTip
  1473  
  1474  		mt.nonceDistance = 0
  1475  		if mt.Tx.Nonce > senderNonce { // no uint underflow
  1476  			mt.nonceDistance = mt.Tx.Nonce - senderNonce
  1477  		}
  1478  
  1479  		needBalance := requiredBalance(mt.Tx)
  1480  		// 1. Minimum fee requirement. Set to 1 if the transaction's feeCap is no less than the in-protocol
  1481  		// minimal base fee parameter. Set to 0 if feeCap is less than the minimum base fee, which means
  1482  		// this transaction will never be included into this particular chain.
  1483  		mt.subPool &^= EnoughFeeCapProtocol
  1484  		if mt.minFeeCap.Cmp(uint256.NewInt(protocolBaseFee)) >= 0 {
  1485  			mt.subPool |= EnoughFeeCapProtocol
  1486  		} else {
  1487  			mt.subPool = 0 // TODO: we immediately drop all transactions if they have no first bit - then maybe we don't need this bit at all? And don't add such transactions to queue?
  1488  			return true
  1489  		}
  1490  
  1491  		// 2. Absence of nonce gaps. Set to 1 for transactions whose nonce is N, state nonce for
  1492  		// the sender is M, and there are transactions for all nonces between M and N from the same
  1493  		// sender. Set to 0 if the transaction's nonce is separated from the state nonce by one or more nonce gaps.
  1494  		mt.subPool &^= NoNonceGaps
  1495  		if noGapsNonce == mt.Tx.Nonce {
  1496  			mt.subPool |= NoNonceGaps
  1497  			noGapsNonce++
  1498  		}
  1499  
  1500  		// 3. Sufficient balance for gas. Set to 1 if the balance of sender's account in the
  1501  		// state is B, nonce of the sender in the state is M, nonce of the transaction is N, and the
  1502  		// sum of feeCap x gasLimit + transferred_value of all transactions from this sender with
  1503  		// nonces M ... N is no more than B. Set to 0 otherwise. In other words, this bit is
  1504  		// set if there is currently a guarantee that the transaction and all its required prior
  1505  		// transactions will be able to pay for gas.
  1506  		mt.subPool &^= EnoughBalance
  1507  		mt.cumulativeBalanceDistance = math.MaxUint64
  1508  		if mt.Tx.Nonce >= senderNonce {
  1509  			cumulativeRequiredBalance = cumulativeRequiredBalance.Add(cumulativeRequiredBalance, needBalance) // already deleted all transactions with nonce <= sender.nonce
  1510  			if senderBalance.Gt(cumulativeRequiredBalance) || senderBalance.Eq(cumulativeRequiredBalance) {
  1511  				mt.subPool |= EnoughBalance
  1512  			} else {
  1513  				if cumulativeRequiredBalance.IsUint64() && senderBalance.IsUint64() {
  1514  					mt.cumulativeBalanceDistance = cumulativeRequiredBalance.Uint64() - senderBalance.Uint64()
  1515  				}
  1516  			}
  1517  		}
  1518  
  1519  		mt.subPool &^= NotTooMuchGas
  1520  		if mt.Tx.Gas < blockGasLimit {
  1521  			mt.subPool |= NotTooMuchGas
  1522  		}
  1523  
  1524  		if mt.Tx.Traced {
  1525  			logger.Info(fmt.Sprintf("TX TRACING: onSenderStateChange loop iteration idHash=%x senderId=%d subPool=%b", mt.Tx.IDHash, mt.Tx.SenderID, mt.subPool))
  1526  		}
  1527  
  1528  		// Some fields of mt might have changed, need to fix the invariants in the subpool best and worst queues
  1529  		switch mt.currentSubPool {
  1530  		case PendingSubPool:
  1531  			pending.Updated(mt)
  1532  		case BaseFeeSubPool:
  1533  			baseFee.Updated(mt)
  1534  		case QueuedSubPool:
  1535  			queued.Updated(mt)
  1536  		}
  1537  		return true
  1538  	})
  1539  	for _, mt := range toDel {
  1540  		discard(mt, txpoolcfg.NonceTooLow)
  1541  	}
  1542  }
  1543  
  1544  // promote reasserts invariants of the subpool and returns the list of transactions that ended up
  1545  // being promoted to the pending or basefee pool, for re-broadcasting
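// For example (an illustrative sketch with made-up fees): if pendingBaseFee rises from 10 to 30 gwei, a
// pending txn with minFeeCap=20 gwei (that otherwise still qualifies) is popped via pending.PopWorst()
// and moved to the baseFee sub-pool, while a baseFee txn with minFeeCap=40 gwei is popped via
// baseFee.PopBest() and promoted to pending; both moves are appended to announcements for re-broadcast.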
  1546  func promote(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint64, pendingBlobFee uint64, discard func(*metaTx, txpoolcfg.DiscardReason), announcements *types.Announcements,
  1547  	logger log.Logger) {
  1548  	// Demote worst transactions that do not qualify for pending sub pool anymore, to other sub pools, or discard
  1549  	for worst := pending.Worst(); pending.Len() > 0 && (worst.subPool < BaseFeePoolBits || worst.minFeeCap.LtUint64(pendingBaseFee) || (worst.Tx.Type == types.BlobTxType && worst.Tx.BlobFeeCap.LtUint64(pendingBlobFee))); worst = pending.Worst() {
  1550  		if worst.subPool >= BaseFeePoolBits {
  1551  			tx := pending.PopWorst()
  1552  			announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:])
  1553  			baseFee.Add(tx, logger)
  1554  		} else if worst.subPool >= QueuedPoolBits {
  1555  			queued.Add(pending.PopWorst(), logger)
  1556  		} else {
  1557  			discard(pending.PopWorst(), txpoolcfg.FeeTooLow)
  1558  		}
  1559  	}
  1560  
  1561  	// Promote best transactions from base fee pool to pending pool while they qualify
  1562  	for best := baseFee.Best(); baseFee.Len() > 0 && best.subPool >= BaseFeePoolBits && best.minFeeCap.CmpUint64(pendingBaseFee) >= 0 && (best.Tx.Type != types.BlobTxType || best.Tx.BlobFeeCap.CmpUint64(pendingBlobFee) >= 0); best = baseFee.Best() {
  1563  		tx := baseFee.PopBest()
  1564  		announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:])
  1565  		pending.Add(tx, logger)
  1566  	}
  1567  
  1568  	// Demote worst transactions that do not qualify for base fee pool anymore, to queued sub pool, or discard
  1569  	for worst := baseFee.Worst(); baseFee.Len() > 0 && worst.subPool < BaseFeePoolBits; worst = baseFee.Worst() {
  1570  		if worst.subPool >= QueuedPoolBits {
  1571  			queued.Add(baseFee.PopWorst(), logger)
  1572  		} else {
  1573  			discard(baseFee.PopWorst(), txpoolcfg.FeeTooLow)
  1574  		}
  1575  	}
  1576  
  1577  	// Promote best transactions from the queued pool to either pending or base fee pool, while they qualify
  1578  	for best := queued.Best(); queued.Len() > 0 && best.subPool >= BaseFeePoolBits; best = queued.Best() {
  1579  		if best.minFeeCap.Cmp(uint256.NewInt(pendingBaseFee)) >= 0 {
  1580  			tx := queued.PopBest()
  1581  			announcements.Append(tx.Tx.Type, tx.Tx.Size, tx.Tx.IDHash[:])
  1582  			pending.Add(tx, logger)
  1583  		} else {
  1584  			baseFee.Add(queued.PopBest(), logger)
  1585  		}
  1586  	}
  1587  
  1588  	// Discard worst transactions from the queued sub pool if they do not qualify
  1589  	for worst := queued.Worst(); queued.Len() > 0 && worst.subPool < QueuedPoolBits; worst = queued.Worst() {
  1590  		discard(queued.PopWorst(), txpoolcfg.FeeTooLow)
  1591  	}
  1592  
  1593  	// Discard worst transactions from pending pool until it is within capacity limit
  1594  	for pending.Len() > pending.limit {
  1595  		discard(pending.PopWorst(), txpoolcfg.PendingPoolOverflow)
  1596  	}
  1597  
  1598  	// Discard worst transactions from the base fee sub pool until it is within its capacity limits
  1599  	for baseFee.Len() > baseFee.limit {
  1600  		discard(baseFee.PopWorst(), txpoolcfg.BaseFeePoolOverflow)
  1601  	}
  1602  
  1603  	// Discard worst transactions from the queued sub pool until it is within its capacity limits
  1604  	for queued.Len() > queued.limit {
  1605  		discard(queued.PopWorst(), txpoolcfg.QueuedPoolOverflow)
  1606  	}
  1607  }
  1608  
  1609  // MainLoop - does:
  1610  // send pending byHash to p2p:
  1611  //   - new byHash
  1612  //   - all pooled byHash to recently connected peers
  1613  //   - all local pooled byHash to random peers periodically
  1614  //
  1615  // promote/demote transactions
  1616  // reorgs
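// MainLoop blocks until ctx is cancelled (flushing once more on shutdown), so callers typically run it
// in its own goroutine, e.g. (sketch):
//
//	go MainLoop(ctx, db, coreDB, pool, newTxs, send, newSlotsStreams, notifyMiningAboutNewSlots)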
  1617  func MainLoop(ctx context.Context, db kv.RwDB, coreDB kv.RoDB, p *TxPool, newTxs chan types.Announcements, send *Send, newSlotsStreams *NewSlotsStreams, notifyMiningAboutNewSlots func()) {
  1618  	syncToNewPeersEvery := time.NewTicker(p.cfg.SyncToNewPeersEvery)
  1619  	defer syncToNewPeersEvery.Stop()
  1620  	processRemoteTxsEvery := time.NewTicker(p.cfg.ProcessRemoteTxsEvery)
  1621  	defer processRemoteTxsEvery.Stop()
  1622  	commitEvery := time.NewTicker(p.cfg.CommitEvery)
  1623  	defer commitEvery.Stop()
  1624  	logEvery := time.NewTicker(p.cfg.LogEvery)
  1625  	defer logEvery.Stop()
  1626  
  1627  	for {
  1628  		select {
  1629  		case <-ctx.Done():
  1630  			_, _ = p.flush(ctx, db)
  1631  			return
  1632  		case <-logEvery.C:
  1633  			p.logStats()
  1634  		case <-processRemoteTxsEvery.C:
  1635  			if !p.Started() {
  1636  				continue
  1637  			}
  1638  
  1639  			if err := p.processRemoteTxs(ctx); err != nil {
  1640  				if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) {
  1641  					time.Sleep(3 * time.Second)
  1642  					continue
  1643  				}
  1644  
  1645  				p.logger.Error("[txpool] process batch remote txs", "err", err)
  1646  			}
  1647  		case <-commitEvery.C:
  1648  			if db != nil && p.Started() {
  1649  				t := time.Now()
  1650  				written, err := p.flush(ctx, db)
  1651  				if err != nil {
  1652  					p.logger.Error("[txpool] flush", "err", err)
  1653  					continue
  1654  				}
  1655  				writeToDBBytesCounter.Set(written)
  1656  				p.logger.Debug("[txpool] Commit", "written_kb", written/1024, "in", time.Since(t))
  1657  			}
  1658  		case announcements := <-newTxs:
  1659  			go func() {
  1660  				for i := 0; i < 16; i++ { // drain more events from channel, then merge and dedup them
  1661  					select {
  1662  					case a := <-newTxs:
  1663  						announcements.AppendOther(a)
  1664  						continue
  1665  					default:
  1666  					}
  1667  					break
  1668  				}
  1669  				if announcements.Len() == 0 {
  1670  					return
  1671  				}
  1672  				defer propagateNewTxsTimer.UpdateDuration(time.Now())
  1673  
  1674  				announcements = announcements.DedupCopy()
  1675  
  1676  				notifyMiningAboutNewSlots()
  1677  
  1678  				var localTxTypes []byte
  1679  				var localTxSizes []uint32
  1680  				var localTxHashes types.Hashes
  1681  				var localTxRlps [][]byte
  1682  				var remoteTxTypes []byte
  1683  				var remoteTxSizes []uint32
  1684  				var remoteTxHashes types.Hashes
  1685  				var remoteTxRlps [][]byte
  1686  				var broadCastedHashes types.Hashes
  1687  				slotsRlp := make([][]byte, 0, announcements.Len())
  1688  
  1689  				if err := db.View(ctx, func(tx kv.Tx) error {
  1690  					for i := 0; i < announcements.Len(); i++ {
  1691  						t, size, hash := announcements.At(i)
  1692  						slotRlp, err := p.GetRlp(tx, hash)
  1693  						if err != nil {
  1694  							return err
  1695  						}
  1696  						// Empty rlp can happen if a transaction we want to broadcast has just been mined, for example
  1697  						if len(slotRlp) == 0 {
  1698  							continue
  1699  						}
  1700  
  1701  						slotsRlp = append(slotsRlp, slotRlp)
  1702  						if p.IsLocal(hash) {
  1703  							localTxTypes = append(localTxTypes, t)
  1704  							localTxSizes = append(localTxSizes, size)
  1705  							localTxHashes = append(localTxHashes, hash...)
  1706  
  1707  							if t != types.BlobTxType { // "Nodes MUST NOT automatically broadcast blob transactions to their peers" - EIP-4844
  1708  								localTxRlps = append(localTxRlps, slotRlp)
  1709  								broadCastedHashes = append(broadCastedHashes, hash...)
  1710  							}
  1711  						} else {
  1712  							remoteTxTypes = append(remoteTxTypes, t)
  1713  							remoteTxSizes = append(remoteTxSizes, size)
  1714  							remoteTxHashes = append(remoteTxHashes, hash...)
  1715  							if t != types.BlobTxType { // "Nodes MUST NOT automatically broadcast blob transactions to their peers" - EIP-4844
  1716  								remoteTxRlps = append(remoteTxRlps, slotRlp)
  1717  							}
  1718  						}
  1719  					}
  1720  					return nil
  1721  				}); err != nil {
  1722  					p.logger.Error("[txpool] collect info to propagate", "err", err)
  1723  					return
  1724  				}
  1725  				if newSlotsStreams != nil {
  1726  					// TODO(eip-4844) What is this for? Is it OK to broadcast blob transactions?
  1727  					newSlotsStreams.Broadcast(&proto_txpool.OnAddReply{RplTxs: slotsRlp}, p.logger)
  1728  				}
  1729  
  1730  				// first broadcast all local txs to all peers, then non-local to random sqrt(peersAmount) peers
  1731  				txSentTo := send.BroadcastPooledTxs(localTxRlps)
  1732  				for i, peer := range txSentTo {
  1733  					p.logger.Info("Local tx broadcasted", "txHash", hex.EncodeToString(broadCastedHashes.At(i)), "to peer", peer)
  1734  				}
  1735  				hashSentTo := send.AnnouncePooledTxs(localTxTypes, localTxSizes, localTxHashes)
  1736  				for i := 0; i < localTxHashes.Len(); i++ {
  1737  					hash := localTxHashes.At(i)
  1738  					p.logger.Info("local tx announced", "tx_hash", hex.EncodeToString(hash), "to peer", hashSentTo[i], "baseFee", p.pendingBaseFee.Load())
  1739  				}
  1740  				send.BroadcastPooledTxs(remoteTxRlps)
  1741  				send.AnnouncePooledTxs(remoteTxTypes, remoteTxSizes, remoteTxHashes)
  1742  			}()
  1743  		case <-syncToNewPeersEvery.C: // new peer
  1744  			newPeers := p.recentlyConnectedPeers.GetAndClean()
  1745  			if len(newPeers) == 0 {
  1746  				continue
  1747  			}
  1748  			t := time.Now()
  1749  			var hashes types.Hashes
  1750  			var txTypes []byte
  1751  			var sizes []uint32
  1752  			txTypes, sizes, hashes = p.AppendAllAnnouncements(txTypes, sizes, hashes[:0])
  1753  			go send.PropagatePooledTxsToPeersList(newPeers, txTypes, sizes, hashes)
  1754  			propagateToNewPeerTimer.UpdateDuration(t)
  1755  		}
  1756  	}
  1757  }
  1758  
  1759  func (p *TxPool) flushNoFsync(ctx context.Context, db kv.RwDB) (written uint64, err error) {
  1760  	p.lock.Lock()
  1761  	defer p.lock.Unlock()
  1762  	//it's important that write db tx is done inside lock, to make last writes visible for all read operations
  1763  	if err := db.UpdateNosync(ctx, func(tx kv.RwTx) error {
  1764  		err = p.flushLocked(tx)
  1765  		if err != nil {
  1766  			return err
  1767  		}
  1768  		written, _, err = tx.(*mdbx.MdbxTx).SpaceDirty()
  1769  		if err != nil {
  1770  			return err
  1771  		}
  1772  		return nil
  1773  	}); err != nil {
  1774  		return 0, err
  1775  	}
  1776  	return written, nil
  1777  }
  1778  
  1779  func (p *TxPool) flush(ctx context.Context, db kv.RwDB) (written uint64, err error) {
  1780  	defer writeToDBTimer.UpdateDuration(time.Now())
  1781  	// 1. get global lock on txpool and flush it to db, without fsync (to release lock asap)
  1782  	// 2. then fsync db without txpool lock
  1783  	written, err = p.flushNoFsync(ctx, db)
  1784  	if err != nil {
  1785  		return 0, err
  1786  	}
  1787  
  1788  	// fsync
  1789  	if err := db.Update(ctx, func(tx kv.RwTx) error { return nil }); err != nil {
  1790  		return 0, err
  1791  	}
  1792  	return written, nil
  1793  }
  1794  func (p *TxPool) flushLocked(tx kv.RwTx) (err error) {
  1795  	for i, mt := range p.deletedTxs {
  1796  		id := mt.Tx.SenderID
  1797  		idHash := mt.Tx.IDHash[:]
  1798  		if !p.all.hasTxs(id) {
  1799  			addr, ok := p.senders.senderID2Addr[id]
  1800  			if ok {
  1801  				delete(p.senders.senderID2Addr, id)
  1802  				delete(p.senders.senderIDs, addr)
  1803  			}
  1804  		}
  1805  		//fmt.Printf("del:%d,%d,%d\n", mt.Tx.senderID, mt.Tx.nonce, mt.Tx.tip)
  1806  		has, err := tx.Has(kv.PoolTransaction, idHash)
  1807  		if err != nil {
  1808  			return err
  1809  		}
  1810  		if has {
  1811  			if err := tx.Delete(kv.PoolTransaction, idHash); err != nil {
  1812  				return err
  1813  			}
  1814  		}
  1815  		p.deletedTxs[i] = nil // for gc
  1816  	}
  1817  
  1818  	txHashes := p.isLocalLRU.Keys()
  1819  	encID := make([]byte, 8)
  1820  	if err := tx.ClearBucket(kv.RecentLocalTransaction); err != nil {
  1821  		return err
  1822  	}
  1823  	for i, txHash := range txHashes {
  1824  		binary.BigEndian.PutUint64(encID, uint64(i))
  1825  		if err := tx.Append(kv.RecentLocalTransaction, encID, []byte(txHash)); err != nil {
  1826  			return err
  1827  		}
  1828  	}
  1829  
  1830  	v := make([]byte, 0, 1024)
  1831  	for txHash, metaTx := range p.byHash {
  1832  		if metaTx.Tx.Rlp == nil {
  1833  			continue
  1834  		}
  1835  		v = common.EnsureEnoughSize(v, 20+len(metaTx.Tx.Rlp))
  1836  
  1837  		addr, ok := p.senders.senderID2Addr[metaTx.Tx.SenderID]
  1838  		if !ok {
  1839  			p.logger.Warn("[txpool] flush: sender address not found by ID", "senderID", metaTx.Tx.SenderID)
  1840  			continue
  1841  		}
  1842  
  1843  		copy(v[:20], addr.Bytes())
  1844  		copy(v[20:], metaTx.Tx.Rlp)
  1845  
  1846  		has, err := tx.Has(kv.PoolTransaction, []byte(txHash))
  1847  		if err != nil {
  1848  			return err
  1849  		}
  1850  		if !has {
  1851  			if err := tx.Put(kv.PoolTransaction, []byte(txHash), v); err != nil {
  1852  				return err
  1853  			}
  1854  		}
  1855  		metaTx.Tx.Rlp = nil
  1856  	}
  1857  
  1858  	binary.BigEndian.PutUint64(encID, p.pendingBaseFee.Load())
  1859  	if err := tx.Put(kv.PoolInfo, PoolPendingBaseFeeKey, encID); err != nil {
  1860  		return err
  1861  	}
  1862  	binary.BigEndian.PutUint64(encID, p.pendingBlobFee.Load())
  1863  	if err := tx.Put(kv.PoolInfo, PoolPendingBlobFeeKey, encID); err != nil {
  1864  		return err
  1865  	}
  1866  	if err := PutLastSeenBlock(tx, p.lastSeenBlock.Load(), encID); err != nil {
  1867  		return err
  1868  	}
  1869  
  1870  	// Clean in-memory data structures as late as possible: if an error happens during this Tx,
  1871  	// the DB stays consistent, but some in-memory structures may already be cleaned and a retry would not work.
  1872  	// A failed write transaction must not create side effects.
  1873  	p.deletedTxs = p.deletedTxs[:0]
  1874  	return nil
  1875  }
  1876  
  1877  func (p *TxPool) fromDBWithLock(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error {
  1878  	p.lock.Lock()
  1879  	defer p.lock.Unlock()
  1880  	return p.fromDB(ctx, tx, coreTx)
  1881  }
  1882  func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error {
  1883  	if p.lastSeenBlock.Load() == 0 {
  1884  		lastSeenBlock, err := LastSeenBlock(tx)
  1885  		if err != nil {
  1886  			return err
  1887  		}
  1888  		p.lastSeenBlock.Store(lastSeenBlock)
  1889  	}
  1890  
  1891  	cacheView, err := p._stateCache.View(ctx, coreTx)
  1892  	if err != nil {
  1893  		return err
  1894  	}
  1895  	it, err := tx.Range(kv.RecentLocalTransaction, nil, nil)
  1896  	if err != nil {
  1897  		return err
  1898  	}
  1899  	for it.HasNext() {
  1900  		_, v, err := it.Next()
  1901  		if err != nil {
  1902  			return err
  1903  		}
  1904  		p.isLocalLRU.Add(string(v), struct{}{})
  1905  	}
  1906  
  1907  	txs := types.TxSlots{}
  1908  	parseCtx := types.NewTxParseContext(p.chainID)
  1909  	parseCtx.WithSender(false)
  1910  
  1911  	i := 0
  1912  	it, err = tx.Range(kv.PoolTransaction, nil, nil)
  1913  	if err != nil {
  1914  		return err
  1915  	}
  1916  	for it.HasNext() {
  1917  		k, v, err := it.Next()
  1918  		if err != nil {
  1919  			return err
  1920  		}
  1921  		addr, txRlp := *(*[20]byte)(v[:20]), v[20:]
  1922  		txn := &types.TxSlot{}
  1923  
  1924  		// TODO(eip-4844) ensure wrappedWithBlobs when transactions are saved to the DB
  1925  		_, err = parseCtx.ParseTransaction(txRlp, 0, txn, nil, false /* hasEnvelope */, true /*wrappedWithBlobs*/, nil)
  1926  		if err != nil {
  1927  			err = fmt.Errorf("err: %w, rlp: %x", err, txRlp)
  1928  			p.logger.Warn("[txpool] fromDB: parseTransaction", "err", err)
  1929  			continue
  1930  		}
  1931  		txn.Rlp = nil // means that we don't need to store it in the db anymore
  1932  
  1933  		txn.SenderID, txn.Traced = p.senders.getOrCreateID(addr, p.logger)
  1934  		binary.BigEndian.Uint64(v) // TODO - unnecessary line, remove
  1935  
  1936  		isLocalTx := p.isLocalLRU.Contains(string(k))
  1937  
  1938  		if reason := p.validateTx(txn, isLocalTx, cacheView); reason != txpoolcfg.NotSet && reason != txpoolcfg.Success {
  1939  			return nil // TODO: Clarify - if one of the txs has the wrong reason, no pooled txs!
  1940  		}
  1941  		txs.Resize(uint(i + 1))
  1942  		txs.Txs[i] = txn
  1943  		txs.IsLocal[i] = isLocalTx
  1944  		copy(txs.Senders.At(i), addr[:])
  1945  		i++
  1946  	}
  1947  
  1948  	var pendingBaseFee uint64
  1949  	{
  1950  		v, err := tx.GetOne(kv.PoolInfo, PoolPendingBaseFeeKey)
  1951  		if err != nil {
  1952  			return err
  1953  		}
  1954  		if len(v) > 0 {
  1955  			pendingBaseFee = binary.BigEndian.Uint64(v)
  1956  		}
  1957  	}
  1958  	var pendingBlobFee uint64 = 1 // MIN_BLOB_GAS_PRICE as per EIP-4844
  1959  	{
  1960  		v, err := tx.GetOne(kv.PoolInfo, PoolPendingBlobFeeKey)
  1961  		if err != nil {
  1962  			return err
  1963  		}
  1964  		if len(v) > 0 {
  1965  			pendingBlobFee = binary.BigEndian.Uint64(v)
  1966  		}
  1967  	}
  1968  
  1969  	err = p.senders.registerNewSenders(&txs, p.logger)
  1970  	if err != nil {
  1971  		return err
  1972  	}
  1973  	if _, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, txs,
  1974  		pendingBaseFee, pendingBlobFee, math.MaxUint64 /* blockGasLimit */, p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, false, p.logger); err != nil {
  1975  		return err
  1976  	}
  1977  	p.pendingBaseFee.Store(pendingBaseFee)
  1978  	p.pendingBlobFee.Store(pendingBlobFee)
  1979  	return nil
  1980  }
  1981  func LastSeenBlock(tx kv.Getter) (uint64, error) {
  1982  	v, err := tx.GetOne(kv.PoolInfo, PoolLastSeenBlockKey)
  1983  	if err != nil {
  1984  		return 0, err
  1985  	}
  1986  	if len(v) == 0 {
  1987  		return 0, nil
  1988  	}
  1989  	return binary.BigEndian.Uint64(v), nil
  1990  }
  1991  func PutLastSeenBlock(tx kv.Putter, n uint64, buf []byte) error {
  1992  	buf = common.EnsureEnoughSize(buf, 8)
  1993  	binary.BigEndian.PutUint64(buf, n)
  1994  	err := tx.Put(kv.PoolInfo, PoolLastSeenBlockKey, buf)
  1995  	if err != nil {
  1996  		return err
  1997  	}
  1998  	return nil
  1999  }
  2000  func ChainConfig(tx kv.Getter) (*chain.Config, error) {
  2001  	v, err := tx.GetOne(kv.PoolInfo, PoolChainConfigKey)
  2002  	if err != nil {
  2003  		return nil, err
  2004  	}
  2005  	if len(v) == 0 {
  2006  		return nil, nil
  2007  	}
  2008  	var config chain.Config
  2009  	if err := json.Unmarshal(v, &config); err != nil {
  2010  		return nil, fmt.Errorf("invalid chain config JSON in pool db: %w", err)
  2011  	}
  2012  	return &config, nil
  2013  }
  2014  func PutChainConfig(tx kv.Putter, cc *chain.Config, buf []byte) error {
  2015  	wr := bytes.NewBuffer(buf)
  2016  	if err := json.NewEncoder(wr).Encode(cc); err != nil {
  2017  		return fmt.Errorf("failed to encode chain config JSON for pool db: %w", err)
  2018  	}
  2019  	if err := tx.Put(kv.PoolInfo, PoolChainConfigKey, wr.Bytes()); err != nil {
  2020  		return err
  2021  	}
  2022  	return nil
  2023  }
  2024  
  2025  // nolint
  2026  func (p *TxPool) printDebug(prefix string) {
  2027  	fmt.Printf("%s.pool.byHash\n", prefix)
  2028  	for _, j := range p.byHash {
  2029  		fmt.Printf("\tsenderID=%d, nonce=%d, tip=%d\n", j.Tx.SenderID, j.Tx.Nonce, j.Tx.Tip)
  2030  	}
  2031  	fmt.Printf("%s.pool.queues.len: %d,%d,%d\n", prefix, p.pending.Len(), p.baseFee.Len(), p.queued.Len())
  2032  	for _, mt := range p.pending.best.ms {
  2033  		mt.Tx.PrintDebug(fmt.Sprintf("%s.pending: %b,%d,%d,%d", prefix, mt.subPool, mt.Tx.SenderID, mt.Tx.Nonce, mt.Tx.Tip))
  2034  	}
  2035  	for _, mt := range p.baseFee.best.ms {
  2036  		mt.Tx.PrintDebug(fmt.Sprintf("%s.baseFee : %b,%d,%d,%d", prefix, mt.subPool, mt.Tx.SenderID, mt.Tx.Nonce, mt.Tx.Tip))
  2037  	}
  2038  	for _, mt := range p.queued.best.ms {
  2039  		mt.Tx.PrintDebug(fmt.Sprintf("%s.queued : %b,%d,%d,%d", prefix, mt.subPool, mt.Tx.SenderID, mt.Tx.Nonce, mt.Tx.Tip))
  2040  	}
  2041  }
  2042  func (p *TxPool) logStats() {
  2043  	if !p.started.Load() {
  2044  		//p.logger.Info("[txpool] Not started yet, waiting for new blocks...")
  2045  		return
  2046  	}
  2047  
  2048  	p.lock.Lock()
  2049  	defer p.lock.Unlock()
  2050  
  2051  	var m runtime.MemStats
  2052  	dbg.ReadMemStats(&m)
  2053  	ctx := []interface{}{
  2054  		//"block", p.lastSeenBlock.Load(),
  2055  		"pending", p.pending.Len(),
  2056  		"baseFee", p.baseFee.Len(),
  2057  		"queued", p.queued.Len(),
  2058  	}
  2059  	cacheKeys := p._stateCache.Len()
  2060  	if cacheKeys > 0 {
  2061  		ctx = append(ctx, "cache_keys", cacheKeys)
  2062  	}
  2063  	ctx = append(ctx, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
  2064  	p.logger.Info("[txpool] stat", ctx...)
  2065  	pendingSubCounter.Set(uint64(p.pending.Len()))
  2066  	basefeeSubCounter.Set(uint64(p.baseFee.Len()))
  2067  	queuedSubCounter.Set(uint64(p.queued.Len()))
  2068  }
  2069  
  2070  // Deprecated: needs switching to a streaming-like interface
  2071  func (p *TxPool) deprecatedForEach(_ context.Context, f func(rlp []byte, sender common.Address, t SubPoolType), tx kv.Tx) {
  2072  	p.lock.Lock()
  2073  	defer p.lock.Unlock()
  2074  	p.all.ascendAll(func(mt *metaTx) bool {
  2075  		slot := mt.Tx
  2076  		slotRlp := slot.Rlp
  2077  		if slot.Rlp == nil {
  2078  			v, err := tx.GetOne(kv.PoolTransaction, slot.IDHash[:])
  2079  			if err != nil {
  2080  				p.logger.Warn("[txpool] foreach: get tx from db", "err", err)
  2081  				return true
  2082  			}
  2083  			if v == nil {
  2084  				p.logger.Warn("[txpool] foreach: tx not found in db")
  2085  				return true
  2086  			}
  2087  			slotRlp = v[20:]
  2088  		}
  2089  		if sender, found := p.senders.senderID2Addr[slot.SenderID]; found {
  2090  			f(slotRlp, sender, mt.currentSubPool)
  2091  		}
  2092  		return true
  2093  	})
  2094  }
  2095  
  2096  var PoolChainConfigKey = []byte("chain_config")
  2097  var PoolLastSeenBlockKey = []byte("last_seen_block")
  2098  var PoolPendingBaseFeeKey = []byte("pending_base_fee")
  2099  var PoolPendingBlobFeeKey = []byte("pending_blob_fee")
  2100  
  2101  // recentlyConnectedPeers buffers the IDs of recently connected good peers
  2102  // so that a sync of pooled transactions can happen to all of them at once
  2103  // (DoS protection and performance saving).
  2104  // It doesn't track whether a peer has disconnected; that's fine.
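// AddPeer buffers a newly connected peer; GetAndClean returns the buffered peers and resets the buffer,
// so MainLoop's syncToNewPeersEvery ticker propagates pooled txn announcements to each new peer once.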
  2105  type recentlyConnectedPeers struct {
  2106  	peers []types.PeerID
  2107  	lock  sync.Mutex
  2108  }
  2109  
  2110  func (l *recentlyConnectedPeers) AddPeer(p types.PeerID) {
  2111  	l.lock.Lock()
  2112  	defer l.lock.Unlock()
  2113  	l.peers = append(l.peers, p)
  2114  }
  2115  
  2116  func (l *recentlyConnectedPeers) GetAndClean() []types.PeerID {
  2117  	l.lock.Lock()
  2118  	defer l.lock.Unlock()
  2119  	peers := l.peers
  2120  	l.peers = nil
  2121  	return peers
  2122  }
  2123  
  2124  // nolint
  2125  func (sc *sendersBatch) printDebug(prefix string) {
  2126  	fmt.Printf("%s.sendersBatch.sender\n", prefix)
  2127  	//for i, j := range sc.senderInfo {
  2128  	//	fmt.Printf("\tid=%d,nonce=%d,balance=%d\n", i, j.nonce, j.balance.Uint64())
  2129  	//}
  2130  }
  2131  
  2132  // sendersBatch stores in-memory senders-related objects which differ from the DB (updated/dirty),
  2133  // flushing them to the db periodically. It doesn't act as a read-cache (the db is small and memory-mapped, so it doesn't need a cache).
  2134  // Not thread-safe.
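// For example (an illustrative sketch): id, traced := sc.getOrCreateID(addr, logger) allocates a fresh
// senderID the first time an address is seen and returns the same id afterwards; senderID2Addr is the
// reverse mapping, used e.g. when flushing pooled txns to the db (the sender address is prepended to the rlp).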
  2135  type sendersBatch struct {
  2136  	senderIDs     map[common.Address]uint64
  2137  	senderID2Addr map[uint64]common.Address
  2138  	tracedSenders map[common.Address]struct{}
  2139  	senderID      uint64
  2140  }
  2141  
  2142  func newSendersCache(tracedSenders map[common.Address]struct{}) *sendersBatch {
  2143  	return &sendersBatch{senderIDs: map[common.Address]uint64{}, senderID2Addr: map[uint64]common.Address{}, tracedSenders: tracedSenders}
  2144  }
  2145  
  2146  func (sc *sendersBatch) getID(addr common.Address) (uint64, bool) {
  2147  	id, ok := sc.senderIDs[addr]
  2148  	return id, ok
  2149  }
  2150  func (sc *sendersBatch) getOrCreateID(addr common.Address, logger log.Logger) (uint64, bool) {
  2151  	_, traced := sc.tracedSenders[addr]
  2152  	id, ok := sc.senderIDs[addr]
  2153  	if !ok {
  2154  		sc.senderID++
  2155  		id = sc.senderID
  2156  		sc.senderIDs[addr] = id
  2157  		sc.senderID2Addr[id] = addr
  2158  		if traced {
  2159  			logger.Info(fmt.Sprintf("TX TRACING: allocated senderID %d to sender %x", id, addr))
  2160  		}
  2161  	}
  2162  	return id, traced
  2163  }
  2164  func (sc *sendersBatch) info(cacheView kvcache.CacheView, id uint64) (nonce uint64, balance uint256.Int, err error) {
  2165  	addr, ok := sc.senderID2Addr[id]
  2166  	if !ok {
  2167  		panic("must not happen")
  2168  	}
  2169  	encoded, err := cacheView.Get(addr.Bytes())
  2170  	if err != nil {
  2171  		return 0, emptySender.balance, err
  2172  	}
  2173  	if len(encoded) == 0 {
  2174  		return emptySender.nonce, emptySender.balance, nil
  2175  	}
  2176  	nonce, balance, err = types.DecodeSender(encoded)
  2177  	if err != nil {
  2178  		return 0, emptySender.balance, err
  2179  	}
  2180  	return nonce, balance, nil
  2181  }
  2182  
  2183  func (sc *sendersBatch) registerNewSenders(newTxs *types.TxSlots, logger log.Logger) (err error) {
  2184  	for i, txn := range newTxs.Txs {
  2185  		txn.SenderID, txn.Traced = sc.getOrCreateID(newTxs.Senders.AddressAt(i), logger)
  2186  	}
  2187  	return nil
  2188  }
  2189  func (sc *sendersBatch) onNewBlock(stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, logger log.Logger) error {
  2190  	for _, diff := range stateChanges.ChangeBatch {
  2191  		for _, change := range diff.Changes { // merge state changes
  2192  			addrB := gointerfaces.ConvertH160toAddress(change.Address)
  2193  			sc.getOrCreateID(addrB, logger)
  2194  		}
  2195  
  2196  		for i, txn := range unwindTxs.Txs {
  2197  			txn.SenderID, txn.Traced = sc.getOrCreateID(unwindTxs.Senders.AddressAt(i), logger)
  2198  		}
  2199  
  2200  		for i, txn := range minedTxs.Txs {
  2201  			txn.SenderID, txn.Traced = sc.getOrCreateID(minedTxs.Senders.AddressAt(i), logger)
  2202  		}
  2203  	}
  2204  	return nil
  2205  }
  2206  
  2207  // BySenderAndNonce - designed to perform most expensive operation in TxPool:
  2208  // "recalculate all ephemeral fields of all transactions" by algo
  2209  //   - for all senders - iterate over all transactions in increasing nonce order
  2210  //
  2211  // Performance decisions:
  2212  //   - All senders stored inside 1 large BTree - because iterate over 1 BTree is faster than over map[senderId]BTree
  2213  //   - sortByNonce used as non-pointer wrapper - because iterate over BTree of pointers is 2x slower
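// For example (an illustrative sketch): for a sender whose pooled nonces are {4, 5, 7}, ascend(senderID, f)
// visits the txns in order 4, 5, 7 and stops as soon as f returns false or a txn of a different senderID is
// reached; get(senderID, 5) is a point lookup in the same tree.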
  2214  type BySenderAndNonce struct {
  2215  	tree              *btree.BTreeG[*metaTx]
  2216  	search            *metaTx
  2217  	senderIDTxnCount  map[uint64]int    // count of sender's txns in the pool - may differ from nonce
  2218  	senderIDBlobCount map[uint64]uint64 // count of sender's total number of blobs in the pool
  2219  }
  2220  
  2221  func (b *BySenderAndNonce) nonce(senderID uint64) (nonce uint64, ok bool) {
  2222  	s := b.search
  2223  	s.Tx.SenderID = senderID
  2224  	s.Tx.Nonce = math.MaxUint64
  2225  
  2226  	b.tree.DescendLessOrEqual(s, func(mt *metaTx) bool {
  2227  		if mt.Tx.SenderID == senderID {
  2228  			nonce = mt.Tx.Nonce
  2229  			ok = true
  2230  		}
  2231  		return false
  2232  	})
  2233  	return nonce, ok
  2234  }
  2235  func (b *BySenderAndNonce) ascendAll(f func(*metaTx) bool) {
  2236  	b.tree.Ascend(func(mt *metaTx) bool {
  2237  		return f(mt)
  2238  	})
  2239  }
  2240  func (b *BySenderAndNonce) ascend(senderID uint64, f func(*metaTx) bool) {
  2241  	s := b.search
  2242  	s.Tx.SenderID = senderID
  2243  	s.Tx.Nonce = 0
  2244  	b.tree.AscendGreaterOrEqual(s, func(mt *metaTx) bool {
  2245  		if mt.Tx.SenderID != senderID {
  2246  			return false
  2247  		}
  2248  		return f(mt)
  2249  	})
  2250  }
  2251  func (b *BySenderAndNonce) descend(senderID uint64, f func(*metaTx) bool) {
  2252  	s := b.search
  2253  	s.Tx.SenderID = senderID
  2254  	s.Tx.Nonce = math.MaxUint64
  2255  	b.tree.DescendLessOrEqual(s, func(mt *metaTx) bool {
  2256  		if mt.Tx.SenderID != senderID {
  2257  			return false
  2258  		}
  2259  		return f(mt)
  2260  	})
  2261  }
  2262  func (b *BySenderAndNonce) count(senderID uint64) int {
  2263  	return b.senderIDTxnCount[senderID]
  2264  }
  2265  func (b *BySenderAndNonce) blobCount(senderID uint64) uint64 {
  2266  	return b.senderIDBlobCount[senderID]
  2267  }
  2268  func (b *BySenderAndNonce) hasTxs(senderID uint64) bool {
  2269  	has := false
  2270  	b.ascend(senderID, func(*metaTx) bool {
  2271  		has = true
  2272  		return false
  2273  	})
  2274  	return has
  2275  }
  2276  func (b *BySenderAndNonce) get(senderID, txNonce uint64) *metaTx {
  2277  	s := b.search
  2278  	s.Tx.SenderID = senderID
  2279  	s.Tx.Nonce = txNonce
  2280  	if found, ok := b.tree.Get(s); ok {
  2281  		return found
  2282  	}
  2283  	return nil
  2284  }
  2285  
  2286  // nolint
  2287  func (b *BySenderAndNonce) has(mt *metaTx) bool {
  2288  	return b.tree.Has(mt)
  2289  }
  2290  func (b *BySenderAndNonce) delete(mt *metaTx) {
  2291  	if _, ok := b.tree.Delete(mt); ok {
  2292  		senderID := mt.Tx.SenderID
  2293  		count := b.senderIDTxnCount[senderID]
  2294  		if count > 1 {
  2295  			b.senderIDTxnCount[senderID] = count - 1
  2296  		} else {
  2297  			delete(b.senderIDTxnCount, senderID)
  2298  		}
  2299  
  2300  		if mt.Tx.Type == types.BlobTxType && mt.Tx.Blobs != nil {
  2301  			accBlobCount := b.senderIDBlobCount[senderID]
  2302  			txnBlobCount := len(mt.Tx.Blobs)
  2303  			if accBlobCount > uint64(txnBlobCount) {
  2304  				b.senderIDBlobCount[senderID] = accBlobCount - uint64(txnBlobCount)
  2305  			} else {
  2306  				delete(b.senderIDBlobCount, senderID)
  2307  			}
  2308  		}
  2309  	}
  2310  }
  2311  func (b *BySenderAndNonce) replaceOrInsert(mt *metaTx) *metaTx {
  2312  	it, ok := b.tree.ReplaceOrInsert(mt)
  2313  	if ok {
  2314  		return it
  2315  	}
  2316  	b.senderIDTxnCount[mt.Tx.SenderID]++
  2317  	if mt.Tx.Type == types.BlobTxType && mt.Tx.Blobs != nil {
  2318  		b.senderIDBlobCount[mt.Tx.SenderID] += uint64(len(mt.Tx.Blobs))
  2319  	}
  2320  	return nil
  2321  }
  2322  
  2323  // PendingPool - is different from other pools: its `best` is a Slice instead of a Heap.
  2324  // It's more expensive to maintain the "slice sort" invariant, but it allows a cheap copy of the
  2325  // pending.best slice for mining (because we consider txs and metaTx immutable)
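// For example (an illustrative sketch): after EnforceBestInvariants(), a miner can snapshot the ordered view
// via best := append([]*metaTx{}, p.best.ms...) and iterate that copy without re-sorting, relying on txs and
// metaTx being treated as immutable.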
  2326  type PendingPool struct {
  2327  	best  *bestSlice
  2328  	worst *WorstQueue
  2329  	limit int
  2330  	t     SubPoolType
  2331  }
  2332  
  2333  func NewPendingSubPool(t SubPoolType, limit int) *PendingPool {
  2334  	return &PendingPool{limit: limit, t: t, best: &bestSlice{ms: []*metaTx{}}, worst: &WorstQueue{ms: []*metaTx{}}}
  2335  }
  2336  
  2337  // bestSlice - is similar to the best queue, but uses a linear structure with O(n log n) sort complexity, and
  2338  // it maintains the element.bestIndex field
  2339  type bestSlice struct {
  2340  	ms             []*metaTx
  2341  	pendingBaseFee uint64
  2342  }
  2343  
  2344  func (s *bestSlice) Len() int { return len(s.ms) }
  2345  func (s *bestSlice) Swap(i, j int) {
  2346  	s.ms[i], s.ms[j] = s.ms[j], s.ms[i]
  2347  	s.ms[i].bestIndex, s.ms[j].bestIndex = i, j
  2348  }
  2349  func (s *bestSlice) Less(i, j int) bool {
  2350  	return s.ms[i].better(s.ms[j], *uint256.NewInt(s.pendingBaseFee))
  2351  }
  2352  func (s *bestSlice) UnsafeRemove(i *metaTx) {
  2353  	s.Swap(i.bestIndex, len(s.ms)-1)
  2354  	s.ms[len(s.ms)-1].bestIndex = -1
  2355  	s.ms[len(s.ms)-1] = nil
  2356  	s.ms = s.ms[:len(s.ms)-1]
  2357  }
  2358  func (s *bestSlice) UnsafeAdd(i *metaTx) {
  2359  	i.bestIndex = len(s.ms)
  2360  	s.ms = append(s.ms, i)
  2361  }
  2362  
  2363  func (p *PendingPool) EnforceWorstInvariants() {
  2364  	heap.Init(p.worst)
  2365  }
  2366  func (p *PendingPool) EnforceBestInvariants() {
  2367  	sort.Sort(p.best)
  2368  }
  2369  
  2370  func (p *PendingPool) Best() *metaTx { //nolint
  2371  	if len(p.best.ms) == 0 {
  2372  		return nil
  2373  	}
  2374  	return p.best.ms[0]
  2375  }
  2376  func (p *PendingPool) Worst() *metaTx { //nolint
  2377  	if len(p.worst.ms) == 0 {
  2378  		return nil
  2379  	}
  2380  	return (p.worst.ms)[0]
  2381  }
  2382  func (p *PendingPool) PopWorst() *metaTx { //nolint
  2383  	i := heap.Pop(p.worst).(*metaTx)
  2384  	if i.bestIndex >= 0 {
  2385  		p.best.UnsafeRemove(i)
  2386  	}
  2387  	return i
  2388  }
  2389  func (p *PendingPool) Updated(mt *metaTx) {
  2390  	heap.Fix(p.worst, mt.worstIndex)
  2391  }
  2392  func (p *PendingPool) Len() int { return len(p.best.ms) }
  2393  
  2394  func (p *PendingPool) Remove(i *metaTx) {
  2395  	if i.worstIndex >= 0 {
  2396  		heap.Remove(p.worst, i.worstIndex)
  2397  	}
  2398  	if i.bestIndex >= 0 {
  2399  		p.best.UnsafeRemove(i)
  2400  	}
  2401  	i.currentSubPool = 0
  2402  }
  2403  
  2404  func (p *PendingPool) Add(i *metaTx, logger log.Logger) {
  2405  	if i.Tx.Traced {
  2406  		logger.Info(fmt.Sprintf("TX TRACING: moved to subpool %s, IdHash=%x, sender=%d", p.t, i.Tx.IDHash, i.Tx.SenderID))
  2407  	}
  2408  	i.currentSubPool = p.t
  2409  	heap.Push(p.worst, i)
  2410  	p.best.UnsafeAdd(i)
  2411  }
  2412  func (p *PendingPool) DebugPrint(prefix string) {
  2413  	for i, it := range p.best.ms {
  2414  		fmt.Printf("%s.best: %d, %d, %d,%d\n", prefix, i, it.subPool, it.bestIndex, it.Tx.Nonce)
  2415  	}
  2416  	for i, it := range p.worst.ms {
  2417  		fmt.Printf("%s.worst: %d, %d, %d,%d\n", prefix, i, it.subPool, it.worstIndex, it.Tx.Nonce)
  2418  	}
  2419  }
  2420  
  2421  type SubPool struct {
  2422  	best  *BestQueue
  2423  	worst *WorstQueue
  2424  	limit int
  2425  	t     SubPoolType
  2426  }
  2427  
  2428  func NewSubPool(t SubPoolType, limit int) *SubPool {
  2429  	return &SubPool{limit: limit, t: t, best: &BestQueue{}, worst: &WorstQueue{}}
  2430  }
  2431  
  2432  func (p *SubPool) EnforceInvariants() {
  2433  	heap.Init(p.worst)
  2434  	heap.Init(p.best)
  2435  }
  2436  func (p *SubPool) Best() *metaTx { //nolint
  2437  	if len(p.best.ms) == 0 {
  2438  		return nil
  2439  	}
  2440  	return p.best.ms[0]
  2441  }
  2442  func (p *SubPool) Worst() *metaTx { //nolint
  2443  	if len(p.worst.ms) == 0 {
  2444  		return nil
  2445  	}
  2446  	return p.worst.ms[0]
  2447  }
  2448  func (p *SubPool) PopBest() *metaTx { //nolint
  2449  	i := heap.Pop(p.best).(*metaTx)
  2450  	heap.Remove(p.worst, i.worstIndex)
  2451  	return i
  2452  }
  2453  func (p *SubPool) PopWorst() *metaTx { //nolint
  2454  	i := heap.Pop(p.worst).(*metaTx)
  2455  	heap.Remove(p.best, i.bestIndex)
  2456  	return i
  2457  }
  2458  func (p *SubPool) Len() int { return p.best.Len() }
  2459  func (p *SubPool) Add(i *metaTx, logger log.Logger) {
  2460  	if i.Tx.Traced {
  2461  		logger.Info(fmt.Sprintf("TX TRACING: moved to subpool %s, IdHash=%x, sender=%d", p.t, i.Tx.IDHash, i.Tx.SenderID))
  2462  	}
  2463  	i.currentSubPool = p.t
  2464  	heap.Push(p.best, i)
  2465  	heap.Push(p.worst, i)
  2466  }
  2467  
  2468  func (p *SubPool) Remove(i *metaTx) {
  2469  	heap.Remove(p.best, i.bestIndex)
  2470  	heap.Remove(p.worst, i.worstIndex)
  2471  	i.currentSubPool = 0
  2472  }
  2473  
  2474  func (p *SubPool) Updated(i *metaTx) {
  2475  	heap.Fix(p.best, i.bestIndex)
  2476  	heap.Fix(p.worst, i.worstIndex)
  2477  }
  2478  
  2479  func (p *SubPool) DebugPrint(prefix string) {
  2480  	for i, it := range p.best.ms {
  2481  		fmt.Printf("%s.best: %d, %d, %d\n", prefix, i, it.subPool, it.bestIndex)
  2482  	}
  2483  	for i, it := range p.worst.ms {
  2484  		fmt.Printf("%s.worst: %d, %d, %d\n", prefix, i, it.subPool, it.worstIndex)
  2485  	}
  2486  }
  2487  
  2488  type BestQueue struct {
  2489  	ms             []*metaTx
  2490  	pendingBaseFee uint64
  2491  }
  2492  
  2493  // Returns true if the txn "mt" is better than the parameter txn "than"
  2494  // it first compares the subpool markers of the two meta txns, then,
  2495  // (since they have the same subpool marker, and thus same pool)
  2496  // depending on the pool - pending (P), basefee (B), queued (Q) -
  2497  // it compares the effective tip (for P), nonceDistance (for both P,Q)
  2498  // minFeeCap (for B), and cumulative balance distance (for P, Q)
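// For example (an illustrative sketch with made-up values): with pendingBaseFee=10, a pending txn with
// minFeeCap=30 and minTip=5 gets effectiveTip = min(30-10, 5) = 5, while one with minFeeCap=12 and minTip=5
// gets effectiveTip = min(12-10, 5) = 2, so the first one is "better" (assuming equal sub-pool markers).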
  2499  func (mt *metaTx) better(than *metaTx, pendingBaseFee uint256.Int) bool {
  2500  	subPool := mt.subPool
  2501  	thanSubPool := than.subPool
  2502  	if mt.minFeeCap.Cmp(&pendingBaseFee) >= 0 {
  2503  		subPool |= EnoughFeeCapBlock
  2504  	}
  2505  	if than.minFeeCap.Cmp(&pendingBaseFee) >= 0 {
  2506  		thanSubPool |= EnoughFeeCapBlock
  2507  	}
  2508  	if subPool != thanSubPool {
  2509  		return subPool > thanSubPool
  2510  	}
  2511  
  2512  	switch mt.currentSubPool {
  2513  	case PendingSubPool:
  2514  		var effectiveTip, thanEffectiveTip uint256.Int
  2515  		if mt.minFeeCap.Cmp(&pendingBaseFee) >= 0 {
  2516  			difference := uint256.NewInt(0)
  2517  			difference.Sub(&mt.minFeeCap, &pendingBaseFee)
  2518  			if difference.Cmp(uint256.NewInt(mt.minTip)) <= 0 {
  2519  				effectiveTip = *difference
  2520  			} else {
  2521  				effectiveTip = *uint256.NewInt(mt.minTip)
  2522  			}
  2523  		}
  2524  		if than.minFeeCap.Cmp(&pendingBaseFee) >= 0 {
  2525  			difference := uint256.NewInt(0)
  2526  			difference.Sub(&than.minFeeCap, &pendingBaseFee)
  2527  			if difference.Cmp(uint256.NewInt(than.minTip)) <= 0 {
  2528  				thanEffectiveTip = *difference
  2529  			} else {
  2530  				thanEffectiveTip = *uint256.NewInt(than.minTip)
  2531  			}
  2532  		}
  2533  		if effectiveTip.Cmp(&thanEffectiveTip) != 0 {
  2534  			return effectiveTip.Cmp(&thanEffectiveTip) > 0
  2535  		}
  2536  		// Compare nonce and cumulative balance. Just as a side note, it doesn't
  2537  		// matter if they're from same sender or not because we're comparing
  2538  		// nonce distance of the sender from state's nonce and not the actual
  2539  		// value of nonce.
  2540  		if mt.nonceDistance != than.nonceDistance {
  2541  			return mt.nonceDistance < than.nonceDistance
  2542  		}
  2543  		if mt.cumulativeBalanceDistance != than.cumulativeBalanceDistance {
  2544  			return mt.cumulativeBalanceDistance < than.cumulativeBalanceDistance
  2545  		}
  2546  	case BaseFeeSubPool:
  2547  		if mt.minFeeCap.Cmp(&than.minFeeCap) != 0 {
  2548  			return mt.minFeeCap.Cmp(&than.minFeeCap) > 0
  2549  		}
  2550  	case QueuedSubPool:
  2551  		if mt.nonceDistance != than.nonceDistance {
  2552  			return mt.nonceDistance < than.nonceDistance
  2553  		}
  2554  		if mt.cumulativeBalanceDistance != than.cumulativeBalanceDistance {
  2555  			return mt.cumulativeBalanceDistance < than.cumulativeBalanceDistance
  2556  		}
  2557  	}
  2558  	return mt.timestamp < than.timestamp
  2559  }
  2560  
  2561  func (mt *metaTx) worse(than *metaTx, pendingBaseFee uint256.Int) bool {
  2562  	subPool := mt.subPool
  2563  	thanSubPool := than.subPool
  2564  	if mt.minFeeCap.Cmp(&pendingBaseFee) >= 0 {
  2565  		subPool |= EnoughFeeCapBlock
  2566  	}
  2567  	if than.minFeeCap.Cmp(&pendingBaseFee) >= 0 {
  2568  		thanSubPool |= EnoughFeeCapBlock
  2569  	}
  2570  	if subPool != thanSubPool {
  2571  		return subPool < thanSubPool
  2572  	}
  2573  
  2574  	switch mt.currentSubPool {
  2575  	case PendingSubPool:
  2576  		if mt.minFeeCap != than.minFeeCap {
  2577  			return mt.minFeeCap.Cmp(&than.minFeeCap) < 0
  2578  		}
  2579  		if mt.nonceDistance != than.nonceDistance {
  2580  			return mt.nonceDistance > than.nonceDistance
  2581  		}
  2582  		if mt.cumulativeBalanceDistance != than.cumulativeBalanceDistance {
  2583  			return mt.cumulativeBalanceDistance > than.cumulativeBalanceDistance
  2584  		}
  2585  	case BaseFeeSubPool, QueuedSubPool:
  2586  		if mt.nonceDistance != than.nonceDistance {
  2587  			return mt.nonceDistance > than.nonceDistance
  2588  		}
  2589  		if mt.cumulativeBalanceDistance != than.cumulativeBalanceDistance {
  2590  			return mt.cumulativeBalanceDistance > than.cumulativeBalanceDistance
  2591  		}
  2592  	}
  2593  	return mt.timestamp > than.timestamp
  2594  }
  2595  
  2596  func (p BestQueue) Len() int { return len(p.ms) }
  2597  func (p BestQueue) Less(i, j int) bool {
  2598  	return p.ms[i].better(p.ms[j], *uint256.NewInt(p.pendingBaseFee))
  2599  }
  2600  func (p BestQueue) Swap(i, j int) {
  2601  	p.ms[i], p.ms[j] = p.ms[j], p.ms[i]
  2602  	p.ms[i].bestIndex = i
  2603  	p.ms[j].bestIndex = j
  2604  }
  2605  func (p *BestQueue) Push(x interface{}) {
  2606  	n := len(p.ms)
  2607  	item := x.(*metaTx)
  2608  	item.bestIndex = n
  2609  	p.ms = append(p.ms, item)
  2610  }
  2611  
  2612  func (p *BestQueue) Pop() interface{} {
  2613  	old := p.ms
  2614  	n := len(old)
  2615  	item := old[n-1]
  2616  	old[n-1] = nil          // avoid memory leak
  2617  	item.bestIndex = -1     // for safety
  2618  	item.currentSubPool = 0 // for safety
  2619  	p.ms = old[0 : n-1]
  2620  	return item
  2621  }
  2622  
  2623  type WorstQueue struct {
  2624  	ms             []*metaTx
  2625  	pendingBaseFee uint64
  2626  }
  2627  
  2628  func (p WorstQueue) Len() int { return len(p.ms) }
  2629  func (p WorstQueue) Less(i, j int) bool {
  2630  	return p.ms[i].worse(p.ms[j], *uint256.NewInt(p.pendingBaseFee))
  2631  }
  2632  func (p WorstQueue) Swap(i, j int) {
  2633  	p.ms[i], p.ms[j] = p.ms[j], p.ms[i]
  2634  	p.ms[i].worstIndex = i
  2635  	p.ms[j].worstIndex = j
  2636  }
  2637  func (p *WorstQueue) Push(x interface{}) {
  2638  	n := len(p.ms)
  2639  	item := x.(*metaTx)
  2640  	item.worstIndex = n
  2641  	p.ms = append(p.ms, item)
  2642  }
  2643  func (p *WorstQueue) Pop() interface{} {
  2644  	old := p.ms
  2645  	n := len(old)
  2646  	item := old[n-1]
  2647  	old[n-1] = nil          // avoid memory leak
  2648  	item.worstIndex = -1    // for safety
  2649  	item.currentSubPool = 0 // for safety
  2650  	p.ms = old[0 : n-1]
  2651  	return item
  2652  }