github.com/VietJr/bor@v1.0.3/core/tx_pool.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package core 18 19 import ( 20 "errors" 21 "math" 22 "math/big" 23 "sort" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 "github.com/ethereum/go-ethereum/common" 29 "github.com/ethereum/go-ethereum/common/prque" 30 "github.com/ethereum/go-ethereum/consensus/misc" 31 "github.com/ethereum/go-ethereum/core/state" 32 "github.com/ethereum/go-ethereum/core/types" 33 "github.com/ethereum/go-ethereum/event" 34 "github.com/ethereum/go-ethereum/log" 35 "github.com/ethereum/go-ethereum/metrics" 36 "github.com/ethereum/go-ethereum/params" 37 ) 38 39 const ( 40 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. 41 chainHeadChanSize = 10 42 43 // txSlotSize is used to calculate how many data slots a single transaction 44 // takes up based on its size. The slots are used as DoS protection, ensuring 45 // that validating a new transaction remains a constant operation (in reality 46 // O(maxslots), where max slots are 4 currently). 47 txSlotSize = 32 * 1024 48 49 // txMaxSize is the maximum size a single transaction can have. 
This field has 50 // non-trivial consequences: larger transactions are significantly harder and 51 // more expensive to propagate; larger transactions also take more resources 52 // to validate whether they fit into the pool or not. 53 txMaxSize = 4 * txSlotSize // 128KB 54 ) 55 56 var ( 57 // ErrAlreadyKnown is returned if the transactions is already contained 58 // within the pool. 59 ErrAlreadyKnown = errors.New("already known") 60 61 // ErrInvalidSender is returned if the transaction contains an invalid signature. 62 ErrInvalidSender = errors.New("invalid sender") 63 64 // ErrUnderpriced is returned if a transaction's gas price is below the minimum 65 // configured for the transaction pool. 66 ErrUnderpriced = errors.New("transaction underpriced") 67 68 // ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet 69 // another remote transaction. 70 ErrTxPoolOverflow = errors.New("txpool is full") 71 72 // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced 73 // with a different one without the required price bump. 74 ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") 75 76 // ErrGasLimit is returned if a transaction's requested gas limit exceeds the 77 // maximum allowance of the current block. 78 ErrGasLimit = errors.New("exceeds block gas limit") 79 80 // ErrNegativeValue is a sanity error to ensure no one is able to specify a 81 // transaction with a negative value. 82 ErrNegativeValue = errors.New("negative value") 83 84 // ErrOversizedData is returned if the input data of a transaction is greater 85 // than some meaningful limit a user might use. This is not a consensus error 86 // making the transaction invalid, rather a DOS protection. 
87 ErrOversizedData = errors.New("oversized data") 88 ) 89 90 var ( 91 evictionInterval = time.Minute // Time interval to check for evictable transactions 92 statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats 93 ) 94 95 var ( 96 // Metrics for the pending pool 97 pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) 98 pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) 99 pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting 100 pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds 101 102 // Metrics for the queued pool 103 queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) 104 queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) 105 queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting 106 queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds 107 queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime 108 109 // General tx metrics 110 knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) 111 validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) 112 invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) 113 underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) 114 overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) 115 // throttleTxMeter counts how many transactions are rejected due to too-many-changes between 116 // txpool reorgs. 117 throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil) 118 // reorgDurationTimer measures how long time a txpool reorg takes. 
119 reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil) 120 // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected 121 // that this number is pretty low, since txpool reorgs happen very frequently. 122 dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015)) 123 124 pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) 125 queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) 126 localGauge = metrics.NewRegisteredGauge("txpool/local", nil) 127 slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) 128 129 reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) 130 ) 131 132 // TxStatus is the current status of a transaction as seen by the pool. 133 type TxStatus uint 134 135 const ( 136 TxStatusUnknown TxStatus = iota 137 TxStatusQueued 138 TxStatusPending 139 TxStatusIncluded 140 ) 141 142 // blockChain provides the state of blockchain and current gas limit to do 143 // some pre checks in tx pool and event subscribers. 144 type blockChain interface { 145 CurrentBlock() *types.Block 146 GetBlock(hash common.Hash, number uint64) *types.Block 147 StateAt(root common.Hash) (*state.StateDB, error) 148 149 SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription 150 } 151 152 // TxPoolConfig are the configuration parameters of the transaction pool. 
153 type TxPoolConfig struct { 154 Locals []common.Address // Addresses that should be treated by default as local 155 NoLocals bool // Whether local transaction handling should be disabled 156 Journal string // Journal of local transactions to survive node restarts 157 Rejournal time.Duration // Time interval to regenerate the local transaction journal 158 159 PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool 160 PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) 161 162 AccountSlots uint64 // Number of executable transaction slots guaranteed per account 163 GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts 164 AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account 165 GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts 166 167 Lifetime time.Duration // Maximum amount of time non-executable transaction are queued 168 } 169 170 // DefaultTxPoolConfig contains the default configurations for the transaction 171 // pool. 172 var DefaultTxPoolConfig = TxPoolConfig{ 173 Journal: "transactions.rlp", 174 Rejournal: time.Hour, 175 176 PriceLimit: 1, 177 PriceBump: 10, 178 179 AccountSlots: 16, 180 GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio 181 AccountQueue: 64, 182 GlobalQueue: 1024, 183 184 Lifetime: 3 * time.Hour, 185 } 186 187 // sanitize checks the provided user configurations and changes anything that's 188 // unreasonable or unworkable. 
189 func (config *TxPoolConfig) sanitize() TxPoolConfig { 190 conf := *config 191 if conf.Rejournal < time.Second { 192 log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) 193 conf.Rejournal = time.Second 194 } 195 if conf.PriceLimit < 1 { 196 log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) 197 conf.PriceLimit = DefaultTxPoolConfig.PriceLimit 198 } 199 if conf.PriceBump < 1 { 200 log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) 201 conf.PriceBump = DefaultTxPoolConfig.PriceBump 202 } 203 if conf.AccountSlots < 1 { 204 log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) 205 conf.AccountSlots = DefaultTxPoolConfig.AccountSlots 206 } 207 if conf.GlobalSlots < 1 { 208 log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) 209 conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots 210 } 211 if conf.AccountQueue < 1 { 212 log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) 213 conf.AccountQueue = DefaultTxPoolConfig.AccountQueue 214 } 215 if conf.GlobalQueue < 1 { 216 log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) 217 conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue 218 } 219 if conf.Lifetime < 1 { 220 log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) 221 conf.Lifetime = DefaultTxPoolConfig.Lifetime 222 } 223 return conf 224 } 225 226 // TxPool contains all currently known transactions. Transactions 227 // enter the pool when they are received from the network or submitted 228 // locally. 
// They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex // protects the pending/queue/beats maps and pool-level fields below

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)

	changesSinceReorg int // A counter for how many drops we've performed in-between reorg.

	promoteTxCh chan struct{} // should be used only for tests
}

type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network. The optional `options` functions are applied
// after the pool is constructed and reset, but before the background loops start.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain, options ...func(pool *TxPool)) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.LatestSigner(chainconfig),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		initDoneCh:      make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}

	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock().Header())

	// apply options
	for _, fn := range options {
		fn(pool)
	}

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events. It exits when the chain-head subscription errors out
// (i.e. on shutdown), closing reorgShutdownCh so scheduleReorgLoop stops too.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	// Notify tests that the init phase is done
	close(pool.initDoneCh)
	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			pool.mu.RUnlock()
			stales := int(atomic.LoadInt64(&pool.priced.stales))

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool. It waits for both background
// goroutines (loop and scheduleReorgLoop) to exit before closing the journal.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
438 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { 439 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 440 } 441 442 // GasPrice returns the current gas price enforced by the transaction pool. 443 func (pool *TxPool) GasPrice() *big.Int { 444 pool.mu.RLock() 445 defer pool.mu.RUnlock() 446 447 return new(big.Int).Set(pool.gasPrice) 448 } 449 450 // SetGasPrice updates the minimum price required by the transaction pool for a 451 // new transaction, and drops all transactions below this threshold. 452 func (pool *TxPool) SetGasPrice(price *big.Int) { 453 pool.mu.Lock() 454 defer pool.mu.Unlock() 455 456 old := pool.gasPrice 457 pool.gasPrice = price 458 // if the min miner fee increased, remove transactions below the new threshold 459 if price.Cmp(old) > 0 { 460 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 461 drop := pool.all.RemotesBelowTip(price) 462 for _, tx := range drop { 463 pool.removeTx(tx.Hash(), false) 464 } 465 pool.priced.Removed(len(drop)) 466 } 467 468 log.Info("Transaction pool price threshold updated", "price", price) 469 } 470 471 // Nonce returns the next nonce of an account, with all transactions executable 472 // by the pool already applied on top. 473 func (pool *TxPool) Nonce(addr common.Address) uint64 { 474 pool.mu.RLock() 475 defer pool.mu.RUnlock() 476 477 return pool.pendingNonces.get(addr) 478 } 479 480 // Stats retrieves the current pool stats, namely the number of pending and the 481 // number of queued (non-executable) transactions. 482 func (pool *TxPool) Stats() (int, int) { 483 pool.mu.RLock() 484 defer pool.mu.RUnlock() 485 486 return pool.stats() 487 } 488 489 // stats retrieves the current pool stats, namely the number of pending and the 490 // number of queued (non-executable) transactions. 
491 func (pool *TxPool) stats() (int, int) { 492 pending := 0 493 for _, list := range pool.pending { 494 pending += list.Len() 495 } 496 queued := 0 497 for _, list := range pool.queue { 498 queued += list.Len() 499 } 500 return pending, queued 501 } 502 503 // Content retrieves the data content of the transaction pool, returning all the 504 // pending as well as queued transactions, grouped by account and sorted by nonce. 505 func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { 506 pool.mu.Lock() 507 defer pool.mu.Unlock() 508 509 pending := make(map[common.Address]types.Transactions) 510 for addr, list := range pool.pending { 511 pending[addr] = list.Flatten() 512 } 513 queued := make(map[common.Address]types.Transactions) 514 for addr, list := range pool.queue { 515 queued[addr] = list.Flatten() 516 } 517 return pending, queued 518 } 519 520 // ContentFrom retrieves the data content of the transaction pool, returning the 521 // pending as well as queued transactions of this address, grouped by nonce. 522 func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { 523 pool.mu.RLock() 524 defer pool.mu.RUnlock() 525 526 var pending types.Transactions 527 if list, ok := pool.pending[addr]; ok { 528 pending = list.Flatten() 529 } 530 var queued types.Transactions 531 if list, ok := pool.queue[addr]; ok { 532 queued = list.Flatten() 533 } 534 return pending, queued 535 } 536 537 // Pending retrieves all currently processable transactions, grouped by origin 538 // account and sorted by nonce. The returned transaction set is a copy and can be 539 // freely modified by calling code. 540 // 541 // The enforceTips parameter can be used to do an extra filtering on the pending 542 // transactions and only return those whose **effective** tip is large enough in 543 // the next pending execution environment. 
func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		txs := list.Flatten()

		// If the miner requests tip enforcement, cap the lists now.
		// txs is nonce-sorted, so everything from the first underpriced
		// entry onwards can be truncated in one slice cut.
		if enforceTips && !pool.locals.contains(addr) {
			for i, tx := range txs {
				if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
					txs = txs[:i]
					break
				}
			}
		}
		if len(txs) > 0 {
			pending[addr] = txs
		}
	}
	return pending
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
//
// Note: the caller is expected to hold the pool lock.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
// Checks are ordered cheapest-first; signature recovery and state lookups
// (nonce, balance) happen only after the static checks pass.
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Accept only legacy transactions until EIP-2718/2930 activates.
	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
		return ErrTxTypeNotSupported
	}
	// Reject dynamic fee transactions until EIP-1559 activates.
	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
		return ErrTxTypeNotSupported
	}
	// Reject transactions over defined size to prevent DOS attacks
	if uint64(tx.Size()) > txMaxSize {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Sanity check for extremely large numbers
	if tx.GasFeeCap().BitLen() > 256 {
		return ErrFeeCapVeryHigh
	}
	if tx.GasTipCap().BitLen() > 256 {
		return ErrTipVeryHigh
	}
	// Ensure gasFeeCap is greater than or equal to gasTipCap.
	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
		return ErrTipAboveFeeCap
	}
	// Make sure the transaction is signed properly.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price or tip
	if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// be added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
//
// Note: the caller is expected to hold the pool lock.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// Make the local flag. If it's from local source or it's from the network but
	// the sender is marked as local previously, treat it as the local transaction.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// We're about to replace a transaction. The reorg does a more thorough
		// analysis of what to remove and how, but it runs async. We don't want to
		// do too many replacements between reorg-runs, so we cap the number of
		// replacements to 25% of the slots
		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
			throttleTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}

		// New transaction is better than our worse ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise if we can't make enough room for new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case, we still can't make the room for the new remote one.
		if !isLocal && !success {
			log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}
		// Bump the counter of rejections-since-reorg
		pool.changesSinceReorg += len(drop)
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
//
// Returns whether an older transaction with the same nonce was replaced.
// When addAll is false the caller has already inserted tx into pool.all and
// pool.priced; the lookup consistency check below guards that assumption.
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in lookup set but it's expected to be there,
	// show the error log.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never record the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	// Journalling is best-effort: a failed insert is logged but does not fail
	// the pool operation that triggered it.
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Signal any listener (non-blocking, best-effort) that a promotion was
	// attempted, regardless of the outcome.
	defer func() {
		if pool.promoteTxCh == nil {
			return
		}

		select {
		case pool.promoteTxCh <- struct{}{}:
		default:
		}
	}()

	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as a local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}

// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}

// addRemoteSync is like AddRemotes with a single transaction, but waits for pool
// reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}

// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}

// addTxs attempts to queue a batch of transactions if they are valid.
// The returned error slice is parallel to txs: errs[i] describes the outcome of
// txs[i]. When sync is true the call blocks until the triggered promotion run
// has completed.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = ErrAlreadyKnown
			knownTxMeter.Mark(1)
			continue
		}
		// Exclude transactions with invalid signatures as soon as
		// possible and cache senders in transactions before
		// obtaining lock
		_, err := types.Sender(pool.signer, tx)
		if err != nil {
			errs[i] = ErrInvalidSender
			invalidTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}

	// Process all the new transaction and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	// Scatter the results back into the slots left nil by the pre-filter pass;
	// newErrs is ordered the same way news was built.
	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	// Collect the set of accounts whose queue contents changed, so the caller
	// can request promotion checks for exactly those addresses.
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included into a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}

// Has returns an indicator whether txpool has a transaction cached with the
// given hash.
func (pool *TxPool) Has(hash common.Hash) bool {
	return pool.all.Get(hash) != nil
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
// The outofbound flag indicates whether the priced index must be notified of
// the removal; internal callers that already adjusted the index pass false.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		// The reorg loop accepted the request; it replies with the done
		// channel of the run that will service it.
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		// Pool is shutting down; return the shutdown channel so callers
		// waiting on the result are released.
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)

	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			// Hand the requester the done channel of the run that will
			// service its request.
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer func(t0 time.Time) {
		reorgDurationTimer.Update(time.Since(t0))
	}(time.Now())
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		if reset.newHead != nil {
			// Check London at head+1 since the pool holds transactions for the
			// NEXT block, not the current one.
			if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
				// london fork enabled, reset given the base fee
				pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
				pool.priced.SetBaseFee(pendingBaseFee)
			} else {
				// london fork not enabled, reheap to "reset" the priced list
				pool.priced.Reheap()
			}
		}
		// Update all accounts to the latest known pending nonce
		nonces := make(map[common.Address]uint64, len(pool.pending))
		for addr, list := range pool.pending {
			highestPending := list.LastElement()
			nonces[addr] = highestPending.Nonce() + 1
		}
		pool.pendingNonces.setAll(nonces)
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
	pool.changesSinceReorg = 0 // Reset change counter
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state s.th. the lost transactions can be readded by the user
			} else {
				// Walk both branches back to the common ancestor, collecting the
				// transactions on each side as we go.
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Reinject only those dropped transactions not re-included on the new chain.
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicator by next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
	pool.eip2718 = pool.chainconfig.IsBerlin(next)
	pool.eip1559 = pool.chainconfig.IsLondon(next)
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					// Shave one transaction off each offender per round.
					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain.
	// Accounts are consumed from the tail of the sorted slice, i.e. stalest
	// heartbeat first.
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the pools
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
//
// Note: transactions are not marked as removed in the priced list because re-heaping
// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
// to trigger a re-heap in this function.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			pendingGauge.Dec(int64(len(gapped)))
			// This might happen in a reorg, so log it to the metering
			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

// addressesByHeartbeat implements sort.Interface, ordering accounts from the
// stalest heartbeat to the most recent.
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address // lazily built flat view, invalidated on mutation
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// empty reports whether the set holds no addresses.
func (as *accountSet) empty() bool {
	return len(as.accounts) == 0
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
1638 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1639 if addr, err := types.Sender(as.signer, tx); err == nil { 1640 return as.contains(addr) 1641 } 1642 return false 1643 } 1644 1645 // add inserts a new address into the set to track. 1646 func (as *accountSet) add(addr common.Address) { 1647 as.accounts[addr] = struct{}{} 1648 as.cache = nil 1649 } 1650 1651 // addTx adds the sender of tx into the set. 1652 func (as *accountSet) addTx(tx *types.Transaction) { 1653 if addr, err := types.Sender(as.signer, tx); err == nil { 1654 as.add(addr) 1655 } 1656 } 1657 1658 // flatten returns the list of addresses within this set, also caching it for later 1659 // reuse. The returned slice should not be changed! 1660 func (as *accountSet) flatten() []common.Address { 1661 if as.cache == nil { 1662 accounts := make([]common.Address, 0, len(as.accounts)) 1663 for account := range as.accounts { 1664 accounts = append(accounts, account) 1665 } 1666 as.cache = &accounts 1667 } 1668 return *as.cache 1669 } 1670 1671 // merge adds all addresses from the 'other' set into 'as'. 1672 func (as *accountSet) merge(other *accountSet) { 1673 for addr := range other.accounts { 1674 as.accounts[addr] = struct{}{} 1675 } 1676 as.cache = nil 1677 } 1678 1679 // txLookup is used internally by TxPool to track transactions while allowing 1680 // lookup without mutex contention. 1681 // 1682 // Note, although this type is properly protected against concurrent access, it 1683 // is **not** a type that should ever be mutated or even exposed outside of the 1684 // transaction pool, since its internal state is tightly coupled with the pools 1685 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1686 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1687 // TxPool.mu mutex. 1688 // 1689 // This lookup set combines the notion of "local transactions", which is useful 1690 // to build upper-level structure. 
1691 type txLookup struct { 1692 slots int 1693 lock sync.RWMutex 1694 locals map[common.Hash]*types.Transaction 1695 remotes map[common.Hash]*types.Transaction 1696 } 1697 1698 // newTxLookup returns a new txLookup structure. 1699 func newTxLookup() *txLookup { 1700 return &txLookup{ 1701 locals: make(map[common.Hash]*types.Transaction), 1702 remotes: make(map[common.Hash]*types.Transaction), 1703 } 1704 } 1705 1706 // Range calls f on each key and value present in the map. The callback passed 1707 // should return the indicator whether the iteration needs to be continued. 1708 // Callers need to specify which set (or both) to be iterated. 1709 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1710 t.lock.RLock() 1711 defer t.lock.RUnlock() 1712 1713 if local { 1714 for key, value := range t.locals { 1715 if !f(key, value, true) { 1716 return 1717 } 1718 } 1719 } 1720 if remote { 1721 for key, value := range t.remotes { 1722 if !f(key, value, false) { 1723 return 1724 } 1725 } 1726 } 1727 } 1728 1729 // Get returns a transaction if it exists in the lookup, or nil if not found. 1730 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1731 t.lock.RLock() 1732 defer t.lock.RUnlock() 1733 1734 if tx := t.locals[hash]; tx != nil { 1735 return tx 1736 } 1737 return t.remotes[hash] 1738 } 1739 1740 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1741 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { 1742 t.lock.RLock() 1743 defer t.lock.RUnlock() 1744 1745 return t.locals[hash] 1746 } 1747 1748 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1749 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { 1750 t.lock.RLock() 1751 defer t.lock.RUnlock() 1752 1753 return t.remotes[hash] 1754 } 1755 1756 // Count returns the current number of transactions in the lookup. 
1757 func (t *txLookup) Count() int { 1758 t.lock.RLock() 1759 defer t.lock.RUnlock() 1760 1761 return len(t.locals) + len(t.remotes) 1762 } 1763 1764 // LocalCount returns the current number of local transactions in the lookup. 1765 func (t *txLookup) LocalCount() int { 1766 t.lock.RLock() 1767 defer t.lock.RUnlock() 1768 1769 return len(t.locals) 1770 } 1771 1772 // RemoteCount returns the current number of remote transactions in the lookup. 1773 func (t *txLookup) RemoteCount() int { 1774 t.lock.RLock() 1775 defer t.lock.RUnlock() 1776 1777 return len(t.remotes) 1778 } 1779 1780 // Slots returns the current number of slots used in the lookup. 1781 func (t *txLookup) Slots() int { 1782 t.lock.RLock() 1783 defer t.lock.RUnlock() 1784 1785 return t.slots 1786 } 1787 1788 // Add adds a transaction to the lookup. 1789 func (t *txLookup) Add(tx *types.Transaction, local bool) { 1790 t.lock.Lock() 1791 defer t.lock.Unlock() 1792 1793 t.slots += numSlots(tx) 1794 slotsGauge.Update(int64(t.slots)) 1795 1796 if local { 1797 t.locals[tx.Hash()] = tx 1798 } else { 1799 t.remotes[tx.Hash()] = tx 1800 } 1801 } 1802 1803 // Remove removes a transaction from the lookup. 1804 func (t *txLookup) Remove(hash common.Hash) { 1805 t.lock.Lock() 1806 defer t.lock.Unlock() 1807 1808 tx, ok := t.locals[hash] 1809 if !ok { 1810 tx, ok = t.remotes[hash] 1811 } 1812 if !ok { 1813 log.Error("No transaction found to be deleted", "hash", hash) 1814 return 1815 } 1816 t.slots -= numSlots(tx) 1817 slotsGauge.Update(int64(t.slots)) 1818 1819 delete(t.locals, hash) 1820 delete(t.remotes, hash) 1821 } 1822 1823 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1824 // set. The assumption is held the locals set is thread-safe to be used. 
1825 func (t *txLookup) RemoteToLocals(locals *accountSet) int { 1826 t.lock.Lock() 1827 defer t.lock.Unlock() 1828 1829 var migrated int 1830 for hash, tx := range t.remotes { 1831 if locals.containsTx(tx) { 1832 t.locals[hash] = tx 1833 delete(t.remotes, hash) 1834 migrated += 1 1835 } 1836 } 1837 return migrated 1838 } 1839 1840 // RemotesBelowTip finds all remote transactions below the given tip threshold. 1841 func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { 1842 found := make(types.Transactions, 0, 128) 1843 t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { 1844 if tx.GasTipCapIntCmp(threshold) < 0 { 1845 found = append(found, tx) 1846 } 1847 return true 1848 }, false, true) // Only iterate remotes 1849 return found 1850 } 1851 1852 // numSlots calculates the number of slots needed for a single transaction. 1853 func numSlots(tx *types.Transaction) int { 1854 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1855 }