github.com/theQRL/go-zond@v0.2.1/core/txpool/legacypool/legacypool.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package legacypool implements the normal ZVM execution transaction pool.
package legacypool

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/common/prque"
	"github.com/theQRL/go-zond/consensus/misc/eip1559"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/state"
	"github.com/theQRL/go-zond/core/txpool"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/event"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/metrics"
	"github.com/theQRL/go-zond/params"
	"golang.org/x/exp/maps"
)

const (
	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * txSlotSize // 128KB
)
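// An illustrative sketch (not part of the original source): a transaction's
// slot count is presumably its encoded size rounded up to whole 32KiB slots,
// which is what the numSlots helper used further down in this file computes:
//
//	slots := (txSize + txSlotSize - 1) / txSlotSize
//	// e.g. a 70KB transaction would occupy 3 slots; txMaxSize caps this at 4.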
var (
	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)

	// throttleTxMeter counts how many transactions are rejected due to too many
	// changes between txpool reorgs.
	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
	// reorgDurationTimer measures how long a txpool reorg takes.
	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
	// that this number is pretty low, since txpool reorgs happen very frequently.
	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)

	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)

// BlockChain defines the minimal set of methods needed to back a tx pool with
// a chain. Exists to allow mocking the live chain out of tests.
type BlockChain interface {
	// Config retrieves the chain's fork configuration.
	Config() *params.ChainConfig

	// CurrentBlock returns the current head of the chain.
	CurrentBlock() *types.Header

	// GetBlock retrieves a specific block, used during pool resets.
	GetBlock(hash common.Hash, number uint64) *types.Block

	// StateAt returns a state database for a given root hash (generally the head).
	StateAt(root common.Hash) (*state.StateDB, error)
}
// Config are the configuration parameters of the transaction pool.
type Config struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
}

// DefaultConfig contains the default configurations for the transaction pool.
var DefaultConfig = Config{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (config *Config) sanitize() Config {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
		conf.PriceLimit = DefaultConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
		conf.PriceBump = DefaultConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
		conf.AccountSlots = DefaultConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
		conf.GlobalSlots = DefaultConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
		conf.AccountQueue = DefaultConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
		conf.GlobalQueue = DefaultConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
		conf.Lifetime = DefaultConfig.Lifetime
	}
	return conf
}
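// A minimal configuration sketch (illustrative values, not recommendations):
//
//	cfg := DefaultConfig
//	cfg.GlobalSlots = 8192          // more executable capacity
//	cfg.Lifetime = 30 * time.Minute // evict idle queued remotes sooner
//	cfg = (&cfg).sanitize()         // clamps anything unreasonable, as above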
// LegacyPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type LegacyPool struct {
	config      Config
	chainconfig *params.ChainConfig
	chain       BlockChain
	gasTip      atomic.Pointer[big.Int]
	txFeed      event.Feed
	signer      types.Signer
	mu          sync.RWMutex

	currentHead   atomic.Pointer[types.Header] // Current head of the blockchain
	currentState  *state.StateDB               // Current state in the blockchain head
	pendingNonces *noncer                      // Pending state tracking virtual nonces

	locals  *accountSet // Set of local transactions to exempt from eviction rules
	journal *journal    // Journal of local transactions to back up to disk

	reserve txpool.AddressReserver       // Address reserver to ensure exclusivity across subpools
	pending map[common.Address]*list     // All currently processable transactions
	queue   map[common.Address]*list     // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *lookup                      // All transactions to allow lookups
	priced  *pricedList                  // All transactions sorted by price

	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)

	changesSinceReorg int // A counter of how many drops we've performed in between reorg runs.
}

type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func New(config Config, chain BlockChain) *LegacyPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &LegacyPool{
		config:          config,
		chain:           chain,
		chainconfig:     chain.Config(),
		signer:          types.LatestSigner(chain.Config()),
		pending:         make(map[common.Address]*list),
		queue:           make(map[common.Address]*list),
		beats:           make(map[common.Address]time.Time),
		all:             newLookup(),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		initDoneCh:      make(chan struct{}),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newPricedList(pool.all)

	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)
	}
	return pool
}

// Filter returns whether the given transaction can be consumed by the legacy
// pool.
func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
	switch tx.Type() {
	case types.DynamicFeeTxType:
		return true
	default:
		return false
	}
}
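// A construction sketch, assuming chain satisfies BlockChain and reserver is
// the txpool.AddressReserver handed out by the main pool (both hypothetical
// variables here):
//
//	pool := New(DefaultConfig, chain)
//	if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), reserver); err != nil {
//		log.Crit("Failed to initialize legacy pool", "err", err)
//	}
//	defer pool.Close()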
// Init sets the gas price needed to keep a transaction in the pool and the chain
// head to allow balance / nonce checks. The transaction journal will be loaded
// from disk and filtered based on the provided starting settings. The internal
// goroutines will be spun up and the pool deemed operational afterwards.
func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
	// Set the address reserver to request exclusive access to pooled accounts
	pool.reserve = reserve

	// Set the basic pool parameters
	pool.gasTip.Store(gasTip)

	// Initialize the state with the head block, or fall back to an empty one in
	// case the head state is not available (might occur when the node is not
	// fully synced).
	statedb, err := pool.chain.StateAt(head.Root)
	if err != nil {
		statedb, err = pool.chain.StateAt(types.EmptyRootHash)
	}
	if err != nil {
		return err
	}
	pool.currentHead.Store(head)
	pool.currentState = statedb
	pool.pendingNonces = newNoncer(statedb)

	// Start the reorg loop early, so it can handle requests generated during
	// journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling are enabled, load from disk
	if pool.journal != nil {
		if err := pool.journal.load(pool.addLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}
	pool.wg.Add(1)
	go pool.loop()
	return nil
}
"err", err) 394 } 395 pool.mu.Unlock() 396 } 397 } 398 } 399 } 400 401 // Close terminates the transaction pool. 402 func (pool *LegacyPool) Close() error { 403 // Terminate the pool reorger and return 404 close(pool.reorgShutdownCh) 405 pool.wg.Wait() 406 407 if pool.journal != nil { 408 pool.journal.close() 409 } 410 log.Info("Transaction pool stopped") 411 return nil 412 } 413 414 // Reset implements txpool.SubPool, allowing the legacy pool's internal state to be 415 // kept in sync with the main transaction pool's internal state. 416 func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { 417 wait := pool.requestReset(oldHead, newHead) 418 <-wait 419 } 420 421 // SubscribeTransactions registers a subscription for new transaction events, 422 // supporting feeding only newly seen or also resurrected transactions. 423 func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { 424 // The legacy pool has a very messed up internal shuffling, so it's kind of 425 // hard to separate newly discovered transaction from resurrected ones. This 426 // is because the new txs are added to the queue, resurrected ones too and 427 // reorgs run lazily, so separating the two would need a marker. 428 return pool.txFeed.Subscribe(ch) 429 } 430 431 // SetGasTip updates the minimum gas tip required by the transaction pool for a 432 // new transaction, and drops all transactions below this threshold. 433 func (pool *LegacyPool) SetGasTip(tip *big.Int) { 434 pool.mu.Lock() 435 defer pool.mu.Unlock() 436 437 old := pool.gasTip.Load() 438 pool.gasTip.Store(new(big.Int).Set(tip)) 439 440 // If the min miner fee increased, remove transactions below the new threshold 441 if tip.Cmp(old) > 0 { 442 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 443 drop := pool.all.RemotesBelowTip(tip) 444 for _, tx := range drop { 445 pool.removeTx(tx.Hash(), false, true) 446 } 447 pool.priced.Removed(len(drop)) 448 } 449 log.Info("Legacy pool tip threshold updated", "tip", tip) 450 } 451 452 // Nonce returns the next nonce of an account, with all transactions executable 453 // by the pool already applied on top. 454 func (pool *LegacyPool) Nonce(addr common.Address) uint64 { 455 pool.mu.RLock() 456 defer pool.mu.RUnlock() 457 458 return pool.pendingNonces.get(addr) 459 } 460 461 // Stats retrieves the current pool stats, namely the number of pending and the 462 // number of queued (non-executable) transactions. 463 func (pool *LegacyPool) Stats() (int, int) { 464 pool.mu.RLock() 465 defer pool.mu.RUnlock() 466 467 return pool.stats() 468 } 469 470 // stats retrieves the current pool stats, namely the number of pending and the 471 // number of queued (non-executable) transactions. 472 func (pool *LegacyPool) stats() (int, int) { 473 pending := 0 474 for _, list := range pool.pending { 475 pending += list.Len() 476 } 477 queued := 0 478 for _, list := range pool.queue { 479 queued += list.Len() 480 } 481 return pending, queued 482 } 483 484 // Content retrieves the data content of the transaction pool, returning all the 485 // pending as well as queued transactions, grouped by account and sorted by nonce. 
// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address][]*types.Transaction, len(pool.queue))
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}

// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	var pending []*types.Transaction
	if list, ok := pool.pending[addr]; ok {
		pending = list.Flatten()
	}
	var queued []*types.Transaction
	if list, ok := pool.queue[addr]; ok {
		queued = list.Flatten()
	}
	return pending, queued
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
//
// The transactions can also be pre-filtered by the dynamic fee components to
// reduce allocations and load on downstream subsystems.
func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	var (
		minTipBig  *big.Int
		baseFeeBig *big.Int
	)
	if filter.MinTip != nil {
		minTipBig = filter.MinTip
	}
	if filter.BaseFee != nil {
		baseFeeBig = filter.BaseFee
	}
	pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
	for addr, list := range pool.pending {
		txs := list.Flatten()

		// If the miner requests tip enforcement, cap the lists now
		if minTipBig != nil && !pool.locals.contains(addr) {
			for i, tx := range txs {
				if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 {
					txs = txs[:i]
					break
				}
			}
		}
		if len(txs) > 0 {
			lazies := make([]*txpool.LazyTransaction, len(txs))
			for i := 0; i < len(txs); i++ {
				lazies[i] = &txpool.LazyTransaction{
					Pool:      pool,
					Hash:      txs[i].Hash(),
					Tx:        txs[i],
					Time:      txs[i].Time(),
					GasFeeCap: txs[i].GasFeeCap(),
					GasTipCap: txs[i].GasTipCap(),
					Gas:       txs[i].Gas(),
				}
			}
			pending[addr] = lazies
		}
	}
	return pending
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *LegacyPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *LegacyPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}
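// A retrieval sketch: ask only for transactions paying at least a 1 wei
// effective tip under an assumed current base fee (hypothetical values; the
// head variable is illustrative, assuming the header exposes its base fee):
//
//	lazies := pool.Pending(txpool.PendingFilter{
//		MinTip:  big.NewInt(1),
//		BaseFee: head.BaseFee,
//	})
//	for addr, txs := range lazies {
//		log.Debug("Minable account", "addr", addr, "count", len(txs))
//	}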
// validateTxBasics checks whether a transaction is valid according to the consensus
// rules, but does not check state-dependent validation such as sufficient balance.
// This check is meant as an early check which only needs to be performed once,
// and does not require the pool mutex to be held.
func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error {
	opts := &txpool.ValidationOptions{
		Config: pool.chainconfig,
		Accept: 0 |
			1<<types.DynamicFeeTxType,
		MaxSize: txMaxSize,
		MinTip:  pool.gasTip.Load(),
	}
	if local {
		opts.MinTip = new(big.Int)
	}
	if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil {
		return err
	}
	return nil
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error {
	opts := &txpool.ValidationOptionsWithState{
		State: pool.currentState,

		FirstNonceGap: nil, // Pool allows arbitrary arrival order, don't invalidate nonce gaps
		UsedAndLeftSlots: func(addr common.Address) (int, int) {
			var have int
			if list := pool.pending[addr]; list != nil {
				have += list.Len()
			}
			if list := pool.queue[addr]; list != nil {
				have += list.Len()
			}
			return have, math.MaxInt
		},
		ExistingExpenditure: func(addr common.Address) *big.Int {
			if list := pool.pending[addr]; list != nil {
				return list.totalcost
			}
			return new(big.Int)
		},
		ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
			if list := pool.pending[addr]; list != nil {
				if tx := list.txs.Get(nonce); tx != nil {
					return tx.Cost()
				}
			}
			return nil
		},
	}
	if err := txpool.ValidateTransactionWithState(tx, pool.signer, opts); err != nil {
		return err
	}
	return nil
}
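// The Accept field above is a bitmask over transaction type IDs. Admitting an
// additional (purely hypothetical) type would OR in another bit:
//
//	opts.Accept = 1<<types.DynamicFeeTxType | 1<<someFutureTxType // someFutureTxType is illustrative only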
// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, txpool.ErrAlreadyKnown
	}
	// Make the local flag. If the tx came from a local source, or it came from
	// the network but the sender was previously marked as local, treat it as a
	// local transaction.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// already validated by this point
	from, _ := types.Sender(pool.signer, tx)

	// If the address is not yet known, request exclusivity to track the account
	// only by this subpool until all transactions are evicted
	var (
		_, hasPending = pool.pending[from]
		_, hasQueued  = pool.queue[from]
	)
	if !hasPending && !hasQueued {
		if err := pool.reserve(from, true); err != nil {
			return false, err
		}
		defer func() {
			// If the transaction is rejected by some post-validation check, remove
			// the lock on the reservation set.
			//
			// Note, `err` here is the named error return, which will be initialized
			// by a return statement before running deferred methods. Take care with
			// removing or subscoping err as it will break this clause.
			if err != nil {
				pool.reserve(from, false)
			}
		}()
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			return false, txpool.ErrUnderpriced
		}

		// We're about to replace a transaction. The reorg does a more thorough
		// analysis of what to remove and how, but it runs async. We don't want to
		// do too many replacements between reorg runs, so we cap the number of
		// replacements to 25% of the slots
		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
			throttleTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}

		// The new transaction is better than our worst ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise, if we can't make enough room for the new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case: we still can't make room for the new remote one.
		if !isLocal && !success {
			log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}

		// If the new transaction is a future transaction, it should never churn pending transactions
		if !isLocal && pool.isGapped(from, tx) {
			var replacesPending bool
			for _, dropTx := range drop {
				dropSender, _ := types.Sender(pool.signer, dropTx)
				if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
					replacesPending = true
					break
				}
			}
			// Add all transactions back to the priced queue
			if replacesPending {
				for _, dropTx := range drop {
					pool.priced.Put(dropTx, false)
				}
				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
				return false, txpool.ErrFutureReplacePending
			}
		}
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)

			sender, _ := types.Sender(pool.signer, tx)
			dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if last from the acc

			pool.changesSinceReorg += dropped
		}
	}

	// Try to replace an existing transaction in the pending pool
	if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, txpool.ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// isGapped reports whether the given transaction has a nonce gap with the
// pending state, i.e. whether it is not immediately executable.
func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
	// Short circuit if the transaction falls within the scope of the pending list
	// or matches the next pending nonce, which means it can be promoted as an
	// executable transaction afterwards. Note, the tx staleness is already
	// checked in the 'validateTx' function previously.
	next := pool.pendingNonces.get(from)
	if tx.Nonce() <= next {
		return false
	}
	// The transaction has a nonce gap with the pending list; it's only considered
	// executable if the transactions in the queue can fill up the nonce gap.
	queue, ok := pool.queue[from]
	if !ok {
		return true
	}
	for nonce := next; nonce < tx.Nonce(); nonce++ {
		if !queue.Contains(nonce) {
			return true // txs in queue can't fill up the nonce gap
		}
	}
	return false
}
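// A worked example of the gap check above: with a pending nonce of 5 and
// queued nonces {5, 6, 7}, a new transaction with nonce 8 is not gapped (the
// queue fills every slot up to it); with queued nonces {5, 7} it is gapped,
// because nonce 6 is missing:
//
//	gapped := pool.isGapped(from, tx) // true iff some nonce in [next, tx.Nonce()) is absent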
// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, txpool.ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in the lookup set but it's expected to be there,
	// show the error log.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we haven't recorded a heartbeat yet, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or whether an older one was better.
//
// Note, this method assumes the pool lock is held!
func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}
// addLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
	return pool.Add(txs, !pool.config.NoLocals, true)
}

// addLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around addLocals.
func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
	return pool.addLocals([]*types.Transaction{tx})[0]
}

// addRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
	return pool.Add(txs, false, false)
}

// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around addRemotes.
func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
	return pool.addRemotes([]*types.Transaction{tx})[0]
}

// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
	return pool.Add(txs, false, true)
}

// addRemoteSync is like addRemotes with a single transaction, but waits for pool
// reorganization. Tests use this method.
func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
	return pool.Add([]*types.Transaction{tx}, false, true)[0]
}
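// The practical difference between the two families of helpers, sketched with
// hypothetical signedTx and batch values:
//
//	err := pool.addLocal(signedTx) // journals, exempts from pricing, waits for promotion
//	errs := pool.addRemotes(batch) // applies full pricing rules, returns before promotion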
// Add enqueues a batch of transactions into the pool if they are valid. Depending
// on the local flag, full pricing constraints will or will not be applied.
//
// If sync is set, the method will block until all internal maintenance related
// to the add is finished. Only use this during tests for determinism!
func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
	// Do not treat as local if local transactions have been disabled
	local = local && !pool.config.NoLocals

	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = txpool.ErrAlreadyKnown
			knownTxMeter.Mark(1)
			continue
		}
		// Exclude transactions with basic errors, e.g. invalid signatures and
		// insufficient intrinsic gas, as soon as possible and cache senders
		// in transactions before obtaining the lock
		if err := pool.validateTxBasics(tx, local); err != nil {
			errs[i] = err
			log.Trace("Discarding invalid transaction", "hash", tx.Hash(), "err", err)
			invalidTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}

	// Process all the new transactions and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a transaction identified
// by its hash.
func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
	tx := pool.get(hash)
	if tx == nil {
		return txpool.TxStatusUnknown
	}
	from, _ := types.Sender(pool.signer, tx) // already validated

	pool.mu.RLock()
	defer pool.mu.RUnlock()

	if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
		return txpool.TxStatusPending
	} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
		return txpool.TxStatusQueued
	}
	return txpool.TxStatusUnknown
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *LegacyPool) Get(hash common.Hash) *types.Transaction {
	tx := pool.get(hash)
	if tx == nil {
		return nil
	}
	return tx
}

// get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}
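// A lookup sketch tying the accessors above together (hypothetical hash value):
//
//	switch pool.Status(hash) {
//	case txpool.TxStatusPending:
//		log.Debug("Executable", "nonce", pool.Get(hash).Nonce())
//	case txpool.TxStatusQueued:
//		log.Debug("Waiting on a nonce gap or future promotion")
//	default:
//		log.Debug("Not in this pool")
//	}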
// Has returns an indicator whether the txpool has a transaction cached with the
// given hash.
func (pool *LegacyPool) Has(hash common.Hash) bool {
	return pool.all.Get(hash) != nil
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
//
// If unreserve is false, the account will not be relinquished to the main txpool
// even if there are no more references to it. This is used to handle a race when
// a tx is being added and evicts a previously scheduled tx from the same account,
// which could lead to a premature release of the lock.
//
// Returns the number of transactions removed from the pending queue.
func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return 0
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// If after deletion there are no more transactions belonging to this account,
	// relinquish the address reservation. It's a bit convoluted to do this via a
	// defer, but it's safer vs. the many return pathways.
	if unreserve {
		defer func() {
			var (
				_, hasPending = pool.pending[addr]
				_, hasQueued  = pool.queue[addr]
			)
			if !hasPending && !hasQueued {
				pool.reserve(addr, false)
			}
		}()
	}
	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return 1 + len(invalids)
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return 0
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}
// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *LegacyPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*sortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*sortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}
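// The handshake above hands every requester the done channel of the *next*
// run, so a single reorg can batch many requests; a caller sketch (dirty is a
// hypothetical accountSet):
//
//	done := pool.requestPromoteExecutables(dirty)
//	<-done // closed once the batched runReorg covering this request finishes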
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) {
	defer func(t0 time.Time) {
		reorgDurationTimer.Update(time.Since(t0))
	}(time.Now())
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		if reset.newHead != nil {
			pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead)
			pool.priced.SetBaseFee(pendingBaseFee)
		}
		// Update all accounts to the latest known pending nonce
		nonces := make(map[common.Address]uint64, len(pool.pending))
		for addr, list := range pool.pending {
			highestPending := list.LastElement()
			nonces[addr] = highestPending.Nonce() + 1
		}
		pool.pendingNonces.setAll(nonces)
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
	pool.changesSinceReorg = 0 // Reset change counter
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard
				// the old head from the chain.
				// If that is the case, we don't have the lost transactions anymore, and
				// there's nothing to add.
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing old head",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state so that the lost transactions can be re-added by the user
			} else {
				if add == nil {
					// if the new head is nil, it means that something happened between
					// the firing of the newhead event and _now_: most likely a
					// reorg caused by sync-reversion or an explicit sethead back to an
					// earlier block.
					log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
					return
				}
				var discarded, included types.Transactions
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				lost := make([]*types.Transaction, 0, len(discarded))
				for _, tx := range types.TxDifference(discarded, included) {
					if pool.Filter(tx) {
						lost = append(lost, tx)
					}
				}
				reinject = lost
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentHead.Store(newHead)
	pool.currentState = statedb
	pool.pendingNonces = newNoncer(statedb)

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	core.SenderCacher.Recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)
}
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	gasLimit := pool.currentHead.Load().GasLimit
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non-existent account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
			if _, ok := pool.pending[addr]; !ok {
				pool.reserve(addr, false)
			}
		}
	}
	return promoted
}
// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *LegacyPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New[int64, common.Address](nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender (local addresses were never queued as spammers)
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender)

		// Equalize transaction counts until all are the same or the pool is below the limit
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}
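
// Illustrative sketch, not part of the original source: the equalization in
// truncatePending can be reasoned about on plain counts. Offenders arrive in
// descending size from the priority queue; every time a new offender is
// popped, all previously popped ones are trimmed down towards its size until
// the global cap is met (the second phase, trimming all offenders towards
// AccountSlots, is omitted here).
func equalizeCounts(counts []int, globalSlots int) []int {
	total := 0
	for _, c := range counts { // counts must be sorted in descending order
		total += c
	}
	for i := 1; i < len(counts) && total > globalSlots; i++ {
		threshold := counts[i]
		for total > globalSlots && counts[i-1] > threshold {
			for j := 0; j < i; j++ { // drop one tx from each earlier offender
				counts[j]--
				total--
			}
		}
	}
	return counts
}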
// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *LegacyPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(sort.Reverse(addresses))

	// Drop transactions until the total is below the limit or only locals remain
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true, true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only the last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true, true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the
// pool's executable/pending queue; any subsequent transactions that become
// unexecutable are moved back into the future queue.
//
// Note: transactions are not marked as removed in the priced list, because
// re-heaping is always explicitly triggered by SetBaseFee, and it would be
// unnecessary and wasteful to trigger a re-heap in this function.
func (pool *LegacyPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	gasLimit := pool.currentHead.Load().GasLimit
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
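			// (Editorial note, not part of the original source: in the
			// upstream go-ethereum signature these two flags are local and
			// addAll. The demoted transaction is not re-flagged as local,
			// and it is not re-added to pool.all, since it never left the
			// lookup during this internal move.)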
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			pendingGauge.Dec(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
			if _, ok := pool.queue[addr]; !ok {
				pool.reserve(addr, false)
			}
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    []common.Address
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}, len(addrs)),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		return as.contains(addr)
	}
	return false
}

// add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) {
	as.accounts[addr] = struct{}{}
	as.cache = nil
}

// addTx adds the sender of tx into the set.
func (as *accountSet) addTx(tx *types.Transaction) {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		as.add(addr)
	}
}

// flatten returns the list of addresses within this set, also caching it for later
// reuse. The returned slice should not be changed!
func (as *accountSet) flatten() []common.Address {
	if as.cache == nil {
		as.cache = maps.Keys(as.accounts)
	}
	return as.cache
}
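
// Illustrative sketch, not part of the original source: typical accountSet
// usage, assuming a types.Signer is available (the pool derives one from its
// chain configuration).
func accountSetExample(signer types.Signer, tx *types.Transaction) bool {
	set := newAccountSet(signer) // optionally seeded with addresses
	set.addTx(tx)                // derive and track the sender, if recoverable
	return set.containsTx(tx)    // true whenever the sender could be derived
}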
// merge adds all addresses from the 'other' set into 'as'.
func (as *accountSet) merge(other *accountSet) {
	maps.Copy(as.accounts, other.accounts)
	as.cache = nil
}

// lookup is used internally by LegacyPool to track transactions while allowing
// lookup without mutex contention.
//
// Note, although this type is properly protected against concurrent access, it
// is **not** a type that should ever be mutated or even exposed outside of the
// transaction pool, since its internal state is tightly coupled with the pool's
// internal mechanisms. The sole purpose of the type is to permit out-of-band
// peeking into the pool in LegacyPool.Get without having to acquire the widely
// scoped LegacyPool.mu mutex.
//
// This lookup set also tracks the notion of "local transactions", which is
// useful for building upper-level structures.
type lookup struct {
	slots   int
	lock    sync.RWMutex
	locals  map[common.Hash]*types.Transaction
	remotes map[common.Hash]*types.Transaction
}

// newLookup returns a new lookup structure.
func newLookup() *lookup {
	return &lookup{
		locals:  make(map[common.Hash]*types.Transaction),
		remotes: make(map[common.Hash]*types.Transaction),
	}
}

// Range calls f on each key and value present in the map. The passed callback
// should return whether the iteration should continue. Callers need to specify
// which set (or both) to iterate over.
func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if local {
		for key, value := range t.locals {
			if !f(key, value, true) {
				return
			}
		}
	}
	if remote {
		for key, value := range t.remotes {
			if !f(key, value, false) {
				return
			}
		}
	}
}

// Get returns a transaction if it exists in the lookup, or nil if not found.
func (t *lookup) Get(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if tx := t.locals[hash]; tx != nil {
		return tx
	}
	return t.remotes[hash]
}

// GetLocal returns a local transaction if it exists in the lookup, or nil if not found.
func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.locals[hash]
}

// GetRemote returns a remote transaction if it exists in the lookup, or nil if not found.
func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.remotes[hash]
}

// Count returns the current number of transactions in the lookup.
func (t *lookup) Count() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.locals) + len(t.remotes)
}

// LocalCount returns the current number of local transactions in the lookup.
func (t *lookup) LocalCount() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.locals)
}

// RemoteCount returns the current number of remote transactions in the lookup.
func (t *lookup) RemoteCount() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.remotes)
}
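
// Illustrative sketch, not part of the original source: tallying the slot
// usage of all tracked transactions with Range, iterating both the local and
// the remote set.
func countSlotsExample(t *lookup) int {
	slots := 0
	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		slots += numSlots(tx)
		return true // keep iterating over both sets
	}, true, true)
	return slots
}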
// Slots returns the current number of slots used in the lookup.
func (t *lookup) Slots() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.slots
}

// Add adds a transaction to the lookup.
func (t *lookup) Add(tx *types.Transaction, local bool) {
	t.lock.Lock()
	defer t.lock.Unlock()

	t.slots += numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	if local {
		t.locals[tx.Hash()] = tx
	} else {
		t.remotes[tx.Hash()] = tx
	}
}

// Remove removes a transaction from the lookup.
func (t *lookup) Remove(hash common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	tx, ok := t.locals[hash]
	if !ok {
		tx, ok = t.remotes[hash]
	}
	if !ok {
		log.Error("No transaction found to be deleted", "hash", hash)
		return
	}
	t.slots -= numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	delete(t.locals, hash)
	delete(t.remotes, hash)
}

// RemoteToLocals migrates the transactions belonging to the given locals into
// the locals set. The assumption is that the locals set is thread-safe.
func (t *lookup) RemoteToLocals(locals *accountSet) int {
	t.lock.Lock()
	defer t.lock.Unlock()

	var migrated int
	for hash, tx := range t.remotes {
		if locals.containsTx(tx) {
			t.locals[hash] = tx
			delete(t.remotes, hash)
			migrated++
		}
	}
	return migrated
}

// RemotesBelowTip finds all remote transactions below the given tip threshold.
func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
	found := make(types.Transactions, 0, 128)
	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		if tx.GasTipCapIntCmp(threshold) < 0 {
			found = append(found, tx)
		}
		return true
	}, false, true) // Only iterate remotes
	return found
}

// numSlots calculates the number of slots needed for a single transaction.
func numSlots(tx *types.Transaction) int {
	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}
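
// Illustrative sketch, not part of the original source: numSlots is a ceiling
// division by txSlotSize (32KB), so a 1-byte transaction occupies one slot, a
// 32769-byte one occupies two, and a txMaxSize transaction occupies four.
func slotExamples() [3]int {
	ceil := func(size uint64) int { return int((size + txSlotSize - 1) / txSlotSize) }
	return [3]int{ceil(1), ceil(32*1024 + 1), ceil(txMaxSize)} // 1, 2, 4
}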