// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"time"

	"github.com/DxChainNetwork/dxc/common"
	"github.com/DxChainNetwork/dxc/common/prque"
	"github.com/DxChainNetwork/dxc/consensus/misc"
	"github.com/DxChainNetwork/dxc/core/state"
	"github.com/DxChainNetwork/dxc/core/types"
	"github.com/DxChainNetwork/dxc/event"
	"github.com/DxChainNetwork/dxc/log"
	"github.com/DxChainNetwork/dxc/metrics"
	"github.com/DxChainNetwork/dxc/params"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * txSlotSize // 128KB
)

var (
	// ErrAlreadyKnown is returned if the transaction is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)

	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// exTxValidator allows a consensus engine to perform extra, engine-specific
// validation of a transaction before it is admitted into the pool.
type exTxValidator interface {
	ValidateTx(sender common.Address, tx *types.Transaction, header *types.Header, parentState *state.StateDB) error
}

// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued

	JamConfig TxJamConfig // Configuration for the transaction-jam indexer
}

// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,

	JamConfig: DefaultJamConfig,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable. It returns a corrected copy and never mutates
// the receiver, so the original user-supplied config stays intact.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}

// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex // guards all mutable pool state below

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	jamIndexer *txJamIndexer // tx jam indexer

	txValidator    exTxValidator // A specific consensus can use this to do some extra validation to a transaction
	nextFakeHeader *types.Header // A fake header of next block for extra transaction validation
	// disableExValidate will disable the extra tx validation during a period if it's true,
	// there's a special case we need this:
	// during a large chain insertion, the ChainHeadEvent will not be fired in time, then some old trie-nodes
	// will be discarded due to GC, and it will cause failure to get blacklist.
	disableExValidate bool

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
}

// txpoolResetRequest carries the old and new chain heads for a pool reset
// scheduled on the reorg loop.
type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.LatestSigner(chainconfig),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.jamIndexer = newTxJamIndexer(config.JamConfig, pool)
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	// Initialize internal state (currentState, pendingNonces, fork flags)
	// against the current chain head before accepting any transactions.
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// InitExTxValidator sets the extra validator. The fake header is prepared
// first so the validator always sees a usable next-block header.
func (pool *TxPool) InitExTxValidator(v exTxValidator) {
	pool.makeFakeHeader(pool.chain.CurrentBlock().Header())
	pool.txValidator = v
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
				pool.jamIndexer.UpdateHeader(head.Header())
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			// Closing reorgShutdownCh tells scheduleReorgLoop to terminate too.
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			// Only log when something actually changed since the last tick.
			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain; this makes
	// chainHeadSub.Err() fire, which in turn shuts down loop and
	// scheduleReorgLoop — hence the Wait below terminates.
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	pool.jamIndexer.Stop()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
443 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { 444 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 445 } 446 447 // GasPrice returns the current gas price enforced by the transaction pool. 448 func (pool *TxPool) GasPrice() *big.Int { 449 pool.mu.RLock() 450 defer pool.mu.RUnlock() 451 452 return new(big.Int).Set(pool.gasPrice) 453 } 454 455 // SetGasPrice updates the minimum price required by the transaction pool for a 456 // new transaction, and drops all transactions below this threshold. 457 func (pool *TxPool) SetGasPrice(price *big.Int) { 458 pool.mu.Lock() 459 defer pool.mu.Unlock() 460 461 old := pool.gasPrice 462 pool.gasPrice = price 463 // if the min miner fee increased, remove transactions below the new threshold 464 if price.Cmp(old) > 0 { 465 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 466 drop := pool.all.RemotesBelowTip(price) 467 for _, tx := range drop { 468 pool.removeTx(tx.Hash(), false) 469 } 470 pool.priced.Removed(len(drop)) 471 } 472 473 log.Info("Transaction pool price threshold updated", "price", price) 474 } 475 476 // Nonce returns the next nonce of an account, with all transactions executable 477 // by the pool already applied on top. 478 func (pool *TxPool) Nonce(addr common.Address) uint64 { 479 pool.mu.RLock() 480 defer pool.mu.RUnlock() 481 482 return pool.pendingNonces.get(addr) 483 } 484 485 // Stats retrieves the current pool stats, namely the number of pending and the 486 // number of queued (non-executable) transactions. 487 func (pool *TxPool) Stats() (int, int) { 488 pool.mu.RLock() 489 defer pool.mu.RUnlock() 490 491 return pool.stats() 492 } 493 494 // stats retrieves the current pool stats, namely the number of pending and the 495 // number of queued (non-executable) transactions. 
496 func (pool *TxPool) stats() (int, int) { 497 pending := 0 498 for _, list := range pool.pending { 499 pending += list.Len() 500 } 501 queued := 0 502 for _, list := range pool.queue { 503 queued += list.Len() 504 } 505 return pending, queued 506 } 507 508 // Content retrieves the data content of the transaction pool, returning all the 509 // pending as well as queued transactions, grouped by account and sorted by nonce. 510 func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { 511 pool.mu.Lock() 512 defer pool.mu.Unlock() 513 514 pending := make(map[common.Address]types.Transactions) 515 for addr, list := range pool.pending { 516 pending[addr] = list.Flatten() 517 } 518 queued := make(map[common.Address]types.Transactions) 519 for addr, list := range pool.queue { 520 queued[addr] = list.Flatten() 521 } 522 return pending, queued 523 } 524 525 // ContentFrom retrieves the data content of the transaction pool, returning the 526 // pending as well as queued transactions of this address, grouped by nonce. 527 func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { 528 pool.mu.RLock() 529 defer pool.mu.RUnlock() 530 531 var pending types.Transactions 532 if list, ok := pool.pending[addr]; ok { 533 pending = list.Flatten() 534 } 535 var queued types.Transactions 536 if list, ok := pool.queue[addr]; ok { 537 queued = list.Flatten() 538 } 539 return pending, queued 540 } 541 542 // Pending retrieves all currently processable transactions, grouped by origin 543 // account and sorted by nonce. The returned transaction set is a copy and can be 544 // freely modified by calling code. 545 // 546 // The enforceTips parameter can be used to do an extra filtering on the pending 547 // transactions and only return those whose **effective** tip is large enough in 548 // the next pending execution environment. 
549 func (pool *TxPool) Pending(enforceTips bool) (map[common.Address]types.Transactions, error) { 550 pool.mu.Lock() 551 defer pool.mu.Unlock() 552 553 pending := make(map[common.Address]types.Transactions) 554 for addr, list := range pool.pending { 555 txs := list.Flatten() 556 557 // If the miner requests tip enforcement, cap the lists now 558 if enforceTips && !pool.locals.contains(addr) { 559 for i, tx := range txs { 560 if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { 561 txs = txs[:i] 562 break 563 } 564 } 565 } 566 if len(txs) > 0 { 567 pending[addr] = txs 568 } 569 } 570 return pending, nil 571 } 572 573 // Locals retrieves the accounts currently considered local by the pool. 574 func (pool *TxPool) Locals() []common.Address { 575 pool.mu.Lock() 576 defer pool.mu.Unlock() 577 578 return pool.locals.flatten() 579 } 580 581 // JamIndex returns the jam index which is evaluated by current pending transactions. 582 func (pool *TxPool) JamIndex() int { 583 return pool.jamIndexer.JamIndex() 584 } 585 586 // local retrieves all currently known local transactions, grouped by origin 587 // account and sorted by nonce. The returned transaction set is a copy and can be 588 // freely modified by calling code. 589 func (pool *TxPool) local() map[common.Address]types.Transactions { 590 txs := make(map[common.Address]types.Transactions) 591 for addr := range pool.locals.accounts { 592 if pending := pool.pending[addr]; pending != nil { 593 txs[addr] = append(txs[addr], pending.Flatten()...) 594 } 595 if queued := pool.queue[addr]; queued != nil { 596 txs[addr] = append(txs[addr], queued.Flatten()...) 597 } 598 } 599 return txs 600 } 601 602 // validateTx checks whether a transaction is valid according to the consensus 603 // rules and adheres to some heuristic limits of the local node (price and size). 
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Accept only legacy transactions until EIP-2718/2930 activates.
	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
		return ErrTxTypeNotSupported
	}
	// Reject dynamic fee transactions until EIP-1559 activates.
	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
		return ErrTxTypeNotSupported
	}
	// Reject transactions over defined size to prevent DOS attacks
	if uint64(tx.Size()) > txMaxSize {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Sanity check for extremely large numbers
	if tx.GasFeeCap().BitLen() > 256 {
		return ErrFeeCapVeryHigh
	}
	if tx.GasTipCap().BitLen() > 256 {
		return ErrTipVeryHigh
	}
	// Ensure gasFeeCap is greater than or equal to gasTipCap.
	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
		return ErrTipAboveFeeCap
	}
	// Make sure the transaction is signed properly.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price or tip
	if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}

	// do some extra validation if needed
	if pool.txValidator != nil && !pool.disableExValidate {
		err := pool.txValidator.ValidateTx(from, tx, pool.nextFakeHeader, pool.currentState)
		if err == types.ErrAddressDenied {
			return err
		}
		// Any other validator failure (e.g. stale trie nodes during a large
		// chain insertion) disables extra validation instead of rejecting the
		// transaction; see the disableExValidate field comment.
		if err != nil {
			log.Info("ValidateTx error", "err", err)
			pool.disableExValidate = true
		}
	}
	return nil
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// Make the local flag. If it's from local source or it's from the network but
	// the sender is marked as local previously, treat it as the local transaction.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			pool.jamIndexer.UnderPricedInc()
			return false, ErrUnderpriced
		}
		// New transaction is better than our worse ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise if we can't make enough room for new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case, we still can't make the room for the new remote one.
		if !isLocal && !success {
			log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			pool.jamIndexer.UnderPricedInc()
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
//
// The addAll flag controls whether the transaction is also inserted into the
// global lookup set and price index; callers that have already registered the
// transaction (e.g. during promotion demotes) pass false.
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in lookup set but it's expected to be there,
	// show the error log.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never record the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
818 func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { 819 // Only journal if it's enabled and the transaction is local 820 if pool.journal == nil || !pool.locals.contains(from) { 821 return 822 } 823 if err := pool.journal.insert(tx); err != nil { 824 log.Warn("Failed to journal local transaction", "err", err) 825 } 826 } 827 828 // promoteTx adds a transaction to the pending (processable) list of transactions 829 // and returns whether it was inserted or an older was better. 830 // 831 // Note, this method assumes the pool lock is held! 832 func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { 833 // Try to insert the transaction into the pending queue 834 if pool.pending[addr] == nil { 835 pool.pending[addr] = newTxList(true) 836 } 837 list := pool.pending[addr] 838 839 inserted, old := list.Add(tx, pool.config.PriceBump) 840 if !inserted { 841 // An older transaction was better, discard this 842 pool.all.Remove(hash) 843 pool.priced.Removed(1) 844 pendingDiscardMeter.Mark(1) 845 return false 846 } 847 // Otherwise discard any previous transaction and mark this 848 if old != nil { 849 pool.all.Remove(old.Hash()) 850 pool.priced.Removed(1) 851 pendingReplaceMeter.Mark(1) 852 } else { 853 // Nothing was replaced, bump the pending counter 854 pendingGauge.Inc(1) 855 } 856 // Set the potentially new pending nonce and notify any subsystems of the new tx 857 pool.pendingNonces.set(addr, tx.Nonce()+1) 858 859 // Successful promotion, bump the heartbeat 860 pool.beats[addr] = time.Now() 861 return true 862 } 863 864 // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the 865 // senders as a local ones, ensuring they go around the local pricing constraints. 866 // 867 // This method is used to add transactions from the RPC API and performs synchronous pool 868 // reorganization and event propagation. 
869 func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { 870 return pool.addTxs(txs, !pool.config.NoLocals, true) 871 } 872 873 // AddLocal enqueues a single local transaction into the pool if it is valid. This is 874 // a convenience wrapper aroundd AddLocals. 875 func (pool *TxPool) AddLocal(tx *types.Transaction) error { 876 errs := pool.AddLocals([]*types.Transaction{tx}) 877 return errs[0] 878 } 879 880 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the 881 // senders are not among the locally tracked ones, full pricing constraints will apply. 882 // 883 // This method is used to add transactions from the p2p network and does not wait for pool 884 // reorganization and internal event propagation. 885 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 886 return pool.addTxs(txs, false, false) 887 } 888 889 // This is like AddRemotes, but waits for pool reorganization. Tests use this method. 890 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 891 return pool.addTxs(txs, false, true) 892 } 893 894 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 895 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 896 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 897 return errs[0] 898 } 899 900 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 901 // wrapper around AddRemotes. 902 // 903 // Deprecated: use AddRemotes 904 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 905 errs := pool.AddRemotes([]*types.Transaction{tx}) 906 return errs[0] 907 } 908 909 // addTxs attempts to queue a batch of transactions if they are valid. 
910 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { 911 // Filter out known ones without obtaining the pool lock or recovering signatures 912 var ( 913 errs = make([]error, len(txs)) 914 news = make([]*types.Transaction, 0, len(txs)) 915 ) 916 for i, tx := range txs { 917 // If the transaction is known, pre-set the error slot 918 if pool.all.Get(tx.Hash()) != nil { 919 errs[i] = ErrAlreadyKnown 920 knownTxMeter.Mark(1) 921 continue 922 } 923 // Exclude transactions with invalid signatures as soon as 924 // possible and cache senders in transactions before 925 // obtaining lock 926 _, err := types.Sender(pool.signer, tx) 927 if err != nil { 928 errs[i] = ErrInvalidSender 929 invalidTxMeter.Mark(1) 930 continue 931 } 932 // Accumulate all unknown transactions for deeper processing 933 news = append(news, tx) 934 } 935 if len(news) == 0 { 936 return errs 937 } 938 939 // Process all the new transaction and merge any errors into the original slice 940 pool.mu.Lock() 941 newErrs, dirtyAddrs := pool.addTxsLocked(news, local) 942 pool.mu.Unlock() 943 944 var nilSlot = 0 945 for _, err := range newErrs { 946 for errs[nilSlot] != nil { 947 nilSlot++ 948 } 949 errs[nilSlot] = err 950 nilSlot++ 951 } 952 // Reorg the pool internals if needed and return 953 done := pool.requestPromoteExecutables(dirtyAddrs) 954 if sync { 955 <-done 956 } 957 return errs 958 } 959 960 // addTxsLocked attempts to queue a batch of transactions if they are valid. 961 // The transaction pool lock must be held. 
962 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 963 dirty := newAccountSet(pool.signer) 964 errs := make([]error, len(txs)) 965 for i, tx := range txs { 966 replaced, err := pool.add(tx, local) 967 errs[i] = err 968 if err == nil && !replaced { 969 dirty.addTx(tx) 970 } 971 } 972 validTxMeter.Mark(int64(len(dirty.accounts))) 973 return errs, dirty 974 } 975 976 // Status returns the status (unknown/pending/queued) of a batch of transactions 977 // identified by their hashes. 978 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 979 status := make([]TxStatus, len(hashes)) 980 for i, hash := range hashes { 981 tx := pool.Get(hash) 982 if tx == nil { 983 continue 984 } 985 from, _ := types.Sender(pool.signer, tx) // already validated 986 pool.mu.RLock() 987 if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 988 status[i] = TxStatusPending 989 } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 990 status[i] = TxStatusQueued 991 } 992 // implicit else: the tx may have been included into a block between 993 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 994 pool.mu.RUnlock() 995 } 996 return status 997 } 998 999 // Get returns a transaction if it is contained in the pool and nil otherwise. 1000 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 1001 return pool.all.Get(hash) 1002 } 1003 1004 // Has returns an indicator whether txpool has a transaction cached with the 1005 // given hash. 1006 func (pool *TxPool) Has(hash common.Hash) bool { 1007 return pool.all.Get(hash) != nil 1008 } 1009 1010 // removeTx removes a single transaction from the queue, moving all subsequent 1011 // transactions back to the future queue. 
1012 func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { 1013 // Fetch the transaction we wish to delete 1014 tx := pool.all.Get(hash) 1015 if tx == nil { 1016 return 1017 } 1018 addr, _ := types.Sender(pool.signer, tx) // already validated during insertion 1019 1020 // Remove it from the list of known transactions 1021 pool.all.Remove(hash) 1022 if outofbound { 1023 pool.priced.Removed(1) 1024 } 1025 if pool.locals.contains(addr) { 1026 localGauge.Dec(1) 1027 } 1028 // Remove the transaction from the pending lists and reset the account nonce 1029 if pending := pool.pending[addr]; pending != nil { 1030 if removed, invalids := pending.Remove(tx); removed { 1031 // If no more pending transactions are left, remove the list 1032 if pending.Empty() { 1033 delete(pool.pending, addr) 1034 } 1035 // Postpone any invalidated transactions 1036 for _, tx := range invalids { 1037 // Internal shuffle shouldn't touch the lookup set. 1038 pool.enqueueTx(tx.Hash(), tx, false, false) 1039 } 1040 // Update the account nonce if needed 1041 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1042 // Reduce the pending counter 1043 pendingGauge.Dec(int64(1 + len(invalids))) 1044 return 1045 } 1046 } 1047 // Transaction is in the future queue 1048 if future := pool.queue[addr]; future != nil { 1049 if removed, _ := future.Remove(tx); removed { 1050 // Reduce the queued counter 1051 queuedGauge.Dec(1) 1052 } 1053 if future.Empty() { 1054 delete(pool.queue, addr) 1055 delete(pool.beats, addr) 1056 } 1057 } 1058 } 1059 1060 // requestReset requests a pool reset to the new head block. 1061 // The returned channel is closed when the reset has occurred. 
1062 func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { 1063 select { 1064 case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: 1065 return <-pool.reorgDoneCh 1066 case <-pool.reorgShutdownCh: 1067 return pool.reorgShutdownCh 1068 } 1069 } 1070 1071 // requestPromoteExecutables requests transaction promotion checks for the given addresses. 1072 // The returned channel is closed when the promotion checks have occurred. 1073 func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { 1074 select { 1075 case pool.reqPromoteCh <- set: 1076 return <-pool.reorgDoneCh 1077 case <-pool.reorgShutdownCh: 1078 return pool.reorgShutdownCh 1079 } 1080 } 1081 1082 // queueTxEvent enqueues a transaction event to be sent in the next reorg run. 1083 func (pool *TxPool) queueTxEvent(tx *types.Transaction) { 1084 select { 1085 case pool.queueTxEventCh <- tx: 1086 case <-pool.reorgShutdownCh: 1087 } 1088 } 1089 1090 // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not 1091 // call those methods directly, but request them being run using requestReset and 1092 // requestPromoteExecutables instead. 
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool // whether a reorg should be started once the current one finishes
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			// Callers of requestReset wait on this send for their done channel
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: merge the address set if a request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			// The active reorg finished; a queued one (if any) starts next iteration
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for the current run to finish before shutting down.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		// Recompute the pending-block base fee once London is active at head+1
		if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
			pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
			pool.priced.SetBaseFee(pendingBaseFee)
		}
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	// Update all accounts to the latest known pending nonce
	for addr, list := range pool.pending {
		highestPending := list.LastElement()
		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
	}
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state s.th. the lost transactions can be readded by the user
			} else {
				// Walk the longer side down to the height of the shorter one,
				// collecting every transaction seen along the way.
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Descend both chains in lockstep until they meet at the common ancestor.
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Reinject only the transactions dropped by the old chain and
				// not re-included by the new one.
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Update fake next header if necessary
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	if pool.txValidator != nil {
		pool.makeFakeHeader(newHead)
		pool.disableExValidate = false
	}

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))

	// NOTE(review): senderCacher presumably pre-warms the sender cache for the
	// reinjected transactions before addTxsLocked — confirm against its definition.
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicator by next pending block number.
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
	pool.eip2718 = pool.chainconfig.IsBerlin(next)
	pool.eip1559 = pool.chainconfig.IsLondon(next)
}

// makeFakeHeader builds a synthetic header for the next block (number+1,
// time+1) on top of currHead, used by the external transaction validator.
func (pool *TxPool) makeFakeHeader(currHead *types.Header) {
	next := new(big.Int).Add(currHead.Number, big.NewInt(1))
	pool.nextFakeHeader = &types.Header{
		ParentHash: currHead.Hash(),
		Difficulty: new(big.Int).Set(currHead.Difficulty),
		Number:     next,
		GasLimit:   currHead.GasLimit,
		Time:       currHead.Time + 1,
	}
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit (locals are exempt)
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all for accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	// Tally the total number of pending transactions across all accounts.
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					// Cap removes exactly one transaction (the highest nonce) here
					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
1494 func (pool *TxPool) truncateQueue() { 1495 queued := uint64(0) 1496 for _, list := range pool.queue { 1497 queued += uint64(list.Len()) 1498 } 1499 if queued <= pool.config.GlobalQueue { 1500 return 1501 } 1502 1503 // Sort all accounts with queued transactions by heartbeat 1504 addresses := make(addressesByHeartbeat, 0, len(pool.queue)) 1505 for addr := range pool.queue { 1506 if !pool.locals.contains(addr) { // don't drop locals 1507 addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) 1508 } 1509 } 1510 sort.Sort(addresses) 1511 1512 // Drop transactions until the total is below the limit or only locals remain 1513 for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { 1514 addr := addresses[len(addresses)-1] 1515 list := pool.queue[addr.address] 1516 1517 addresses = addresses[:len(addresses)-1] 1518 1519 // Drop all transactions if they are less than the overflow 1520 if size := uint64(list.Len()); size <= drop { 1521 for _, tx := range list.Flatten() { 1522 pool.removeTx(tx.Hash(), true) 1523 } 1524 drop -= size 1525 queuedRateLimitMeter.Mark(int64(size)) 1526 continue 1527 } 1528 // Otherwise drop only last few transactions 1529 txs := list.Flatten() 1530 for i := len(txs) - 1; i >= 0 && drop > 0; i-- { 1531 pool.removeTx(txs[i].Hash(), true) 1532 drop-- 1533 queuedRateLimitMeter.Mark(1) 1534 } 1535 } 1536 } 1537 1538 // demoteUnexecutables removes invalid and processed transactions from the pools 1539 // executable/pending queue and any subsequent transactions that become unexecutable 1540 // are moved back into the future queue. 
//
// Note: transactions are not marked as removed in the priced list because re-heaping
// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
// to trigger a re-heap in this function
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pendingNofundsMeter.Mark(int64(len(drops)))

		// Re-queue transactions invalidated by the drops above
		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
			pool.enqueueTx(hash, tx, false, false)
		}
		ln := len(olds) + len(drops) + len(invalids)
		pendingGauge.Dec(int64(ln))

		if pool.locals.contains(addr) {
			localGauge.Dec(int64(ln))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			pendingGauge.Dec(int64(len(gapped)))
			// This might happen in a reorg, so log it to the metering
			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

// addressesByHeartbeat sorts accounts by their last activity, oldest first.
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address // lazily built flat view, invalidated on mutation
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// empty reports whether the set holds no addresses.
func (as *accountSet) empty() bool {
	return len(as.accounts) == 0
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
1645 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1646 if addr, err := types.Sender(as.signer, tx); err == nil { 1647 return as.contains(addr) 1648 } 1649 return false 1650 } 1651 1652 // add inserts a new address into the set to track. 1653 func (as *accountSet) add(addr common.Address) { 1654 as.accounts[addr] = struct{}{} 1655 as.cache = nil 1656 } 1657 1658 // addTx adds the sender of tx into the set. 1659 func (as *accountSet) addTx(tx *types.Transaction) { 1660 if addr, err := types.Sender(as.signer, tx); err == nil { 1661 as.add(addr) 1662 } 1663 } 1664 1665 // flatten returns the list of addresses within this set, also caching it for later 1666 // reuse. The returned slice should not be changed! 1667 func (as *accountSet) flatten() []common.Address { 1668 if as.cache == nil { 1669 accounts := make([]common.Address, 0, len(as.accounts)) 1670 for account := range as.accounts { 1671 accounts = append(accounts, account) 1672 } 1673 as.cache = &accounts 1674 } 1675 return *as.cache 1676 } 1677 1678 // merge adds all addresses from the 'other' set into 'as'. 1679 func (as *accountSet) merge(other *accountSet) { 1680 for addr := range other.accounts { 1681 as.accounts[addr] = struct{}{} 1682 } 1683 as.cache = nil 1684 } 1685 1686 // txLookup is used internally by TxPool to track transactions while allowing 1687 // lookup without mutex contention. 1688 // 1689 // Note, although this type is properly protected against concurrent access, it 1690 // is **not** a type that should ever be mutated or even exposed outside of the 1691 // transaction pool, since its internal state is tightly coupled with the pools 1692 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1693 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1694 // TxPool.mu mutex. 1695 // 1696 // This lookup set combines the notion of "local transactions", which is useful 1697 // to build upper-level structure. 
1698 type txLookup struct { 1699 slots int 1700 lock sync.RWMutex 1701 locals map[common.Hash]*types.Transaction 1702 remotes map[common.Hash]*types.Transaction 1703 } 1704 1705 // newTxLookup returns a new txLookup structure. 1706 func newTxLookup() *txLookup { 1707 return &txLookup{ 1708 locals: make(map[common.Hash]*types.Transaction), 1709 remotes: make(map[common.Hash]*types.Transaction), 1710 } 1711 } 1712 1713 // Range calls f on each key and value present in the map. The callback passed 1714 // should return the indicator whether the iteration needs to be continued. 1715 // Callers need to specify which set (or both) to be iterated. 1716 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1717 t.lock.RLock() 1718 defer t.lock.RUnlock() 1719 1720 if local { 1721 for key, value := range t.locals { 1722 if !f(key, value, true) { 1723 return 1724 } 1725 } 1726 } 1727 if remote { 1728 for key, value := range t.remotes { 1729 if !f(key, value, false) { 1730 return 1731 } 1732 } 1733 } 1734 } 1735 1736 // Get returns a transaction if it exists in the lookup, or nil if not found. 1737 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1738 t.lock.RLock() 1739 defer t.lock.RUnlock() 1740 1741 if tx := t.locals[hash]; tx != nil { 1742 return tx 1743 } 1744 return t.remotes[hash] 1745 } 1746 1747 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1748 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { 1749 t.lock.RLock() 1750 defer t.lock.RUnlock() 1751 1752 return t.locals[hash] 1753 } 1754 1755 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1756 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { 1757 t.lock.RLock() 1758 defer t.lock.RUnlock() 1759 1760 return t.remotes[hash] 1761 } 1762 1763 // Count returns the current number of transactions in the lookup. 
1764 func (t *txLookup) Count() int { 1765 t.lock.RLock() 1766 defer t.lock.RUnlock() 1767 1768 return len(t.locals) + len(t.remotes) 1769 } 1770 1771 // LocalCount returns the current number of local transactions in the lookup. 1772 func (t *txLookup) LocalCount() int { 1773 t.lock.RLock() 1774 defer t.lock.RUnlock() 1775 1776 return len(t.locals) 1777 } 1778 1779 // RemoteCount returns the current number of remote transactions in the lookup. 1780 func (t *txLookup) RemoteCount() int { 1781 t.lock.RLock() 1782 defer t.lock.RUnlock() 1783 1784 return len(t.remotes) 1785 } 1786 1787 // Slots returns the current number of slots used in the lookup. 1788 func (t *txLookup) Slots() int { 1789 t.lock.RLock() 1790 defer t.lock.RUnlock() 1791 1792 return t.slots 1793 } 1794 1795 // Add adds a transaction to the lookup. 1796 func (t *txLookup) Add(tx *types.Transaction, local bool) { 1797 t.lock.Lock() 1798 defer t.lock.Unlock() 1799 1800 t.slots += numSlots(tx) 1801 slotsGauge.Update(int64(t.slots)) 1802 1803 if local { 1804 t.locals[tx.Hash()] = tx 1805 } else { 1806 t.remotes[tx.Hash()] = tx 1807 } 1808 } 1809 1810 // Remove removes a transaction from the lookup. 1811 func (t *txLookup) Remove(hash common.Hash) { 1812 t.lock.Lock() 1813 defer t.lock.Unlock() 1814 1815 tx, ok := t.locals[hash] 1816 if !ok { 1817 tx, ok = t.remotes[hash] 1818 } 1819 if !ok { 1820 log.Error("No transaction found to be deleted", "hash", hash) 1821 return 1822 } 1823 t.slots -= numSlots(tx) 1824 slotsGauge.Update(int64(t.slots)) 1825 1826 delete(t.locals, hash) 1827 delete(t.remotes, hash) 1828 } 1829 1830 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1831 // set. The assumption is held the locals set is thread-safe to be used. 
1832 func (t *txLookup) RemoteToLocals(locals *accountSet) int { 1833 t.lock.Lock() 1834 defer t.lock.Unlock() 1835 1836 var migrated int 1837 for hash, tx := range t.remotes { 1838 if locals.containsTx(tx) { 1839 t.locals[hash] = tx 1840 delete(t.remotes, hash) 1841 migrated += 1 1842 } 1843 } 1844 return migrated 1845 } 1846 1847 // RemotesBelowTip finds all remote transactions below the given tip threshold. 1848 func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { 1849 found := make(types.Transactions, 0, 128) 1850 t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { 1851 if tx.GasTipCapIntCmp(threshold) < 0 { 1852 found = append(found, tx) 1853 } 1854 return true 1855 }, false, true) // Only iterate remotes 1856 return found 1857 } 1858 1859 // numSlots calculates the number of slots needed for a single transaction. 1860 func numSlots(tx *types.Transaction) int { 1861 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1862 }