github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/core/tx_pool.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package core 18 19 import ( 20 "errors" 21 "fmt" 22 "math" 23 "math/big" 24 "sort" 25 "sync" 26 "sync/atomic" 27 "time" 28 29 "github.com/scroll-tech/go-ethereum/common" 30 "github.com/scroll-tech/go-ethereum/common/prque" 31 "github.com/scroll-tech/go-ethereum/consensus/misc" 32 "github.com/scroll-tech/go-ethereum/core/state" 33 "github.com/scroll-tech/go-ethereum/core/types" 34 "github.com/scroll-tech/go-ethereum/event" 35 "github.com/scroll-tech/go-ethereum/log" 36 "github.com/scroll-tech/go-ethereum/metrics" 37 "github.com/scroll-tech/go-ethereum/params" 38 "github.com/scroll-tech/go-ethereum/rollup/fees" 39 ) 40 41 const ( 42 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. 43 chainHeadChanSize = 10 44 45 // txSlotSize is used to calculate how many data slots a single transaction 46 // takes up based on its size. The slots are used as DoS protection, ensuring 47 // that validating a new transaction remains a constant operation (in reality 48 // O(maxslots), where max slots are 4 currently). 49 txSlotSize = 32 * 1024 50 51 // txMaxSize is the maximum size a single transaction can have. This field has 52 // non-trivial consequences: larger transactions are significantly harder and 53 // more expensive to propagate; larger transactions also take more resources 54 // to validate whether they fit into the pool or not. 55 txMaxSize = 4 * txSlotSize // 128KB 56 ) 57 58 var ( 59 // ErrAlreadyKnown is returned if the transactions is already contained 60 // within the pool. 61 ErrAlreadyKnown = errors.New("already known") 62 63 // ErrInvalidSender is returned if the transaction contains an invalid signature. 64 ErrInvalidSender = errors.New("invalid sender") 65 66 // ErrUnderpriced is returned if a transaction's gas price is below the minimum 67 // configured for the transaction pool. 68 ErrUnderpriced = errors.New("transaction underpriced") 69 70 // ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet 71 // another remote transaction. 72 ErrTxPoolOverflow = errors.New("txpool is full") 73 74 // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced 75 // with a different one without the required price bump. 76 ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") 77 78 // ErrGasLimit is returned if a transaction's requested gas limit exceeds the 79 // maximum allowance of the current block. 80 ErrGasLimit = errors.New("exceeds block gas limit") 81 82 // ErrNegativeValue is a sanity error to ensure no one is able to specify a 83 // transaction with a negative value. 84 ErrNegativeValue = errors.New("negative value") 85 86 // ErrOversizedData is returned if the input data of a transaction is greater 87 // than some meaningful limit a user might use. This is not a consensus error 88 // making the transaction invalid, rather a DOS protection. 89 ErrOversizedData = errors.New("oversized data") 90 ) 91 92 var ( 93 evictionInterval = time.Minute // Time interval to check for evictable transactions 94 statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats 95 ) 96 97 var ( 98 // Metrics for the pending pool 99 pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) 100 pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) 101 pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting 102 pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds 103 104 // Metrics for the queued pool 105 queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) 106 queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) 107 queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting 108 queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds 109 queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime 110 111 // General tx metrics 112 knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) 113 validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) 114 invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) 115 underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) 116 overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) 117 // throttleTxMeter counts how many transactions are rejected due to too-many-changes between 118 // txpool reorgs. 119 throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil) 120 // reorgDurationTimer measures how long time a txpool reorg takes. 121 reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil) 122 // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected 123 // that this number is pretty low, since txpool reorgs happen very frequently. 124 dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015)) 125 126 pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) 127 queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) 128 localGauge = metrics.NewRegisteredGauge("txpool/local", nil) 129 slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) 130 131 reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) 132 ) 133 134 var ( 135 addrsPool = sync.Pool{ 136 New: func() interface{} { 137 return make([]common.Address, 0, 8) 138 }, 139 } 140 addrBeatPool = sync.Pool{ 141 New: func() interface{} { 142 return make(addressesByHeartbeat, 0, 8) 143 }, 144 } 145 ) 146 147 // TxStatus is the current status of a transaction as seen by the pool. 148 type TxStatus uint 149 150 const ( 151 TxStatusUnknown TxStatus = iota 152 TxStatusQueued 153 TxStatusPending 154 TxStatusIncluded 155 ) 156 157 // blockChain provides the state of blockchain and current gas limit to do 158 // some pre checks in tx pool and event subscribers. 159 type blockChain interface { 160 CurrentBlock() *types.Block 161 GetBlock(hash common.Hash, number uint64) *types.Block 162 StateAt(root common.Hash) (*state.StateDB, error) 163 164 SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription 165 } 166 167 // TxPoolConfig are the configuration parameters of the transaction pool. 168 type TxPoolConfig struct { 169 Locals []common.Address // Addresses that should be treated by default as local 170 NoLocals bool // Whether local transaction handling should be disabled 171 Journal string // Journal of local transactions to survive node restarts 172 Rejournal time.Duration // Time interval to regenerate the local transaction journal 173 174 PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool 175 PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) 176 177 AccountSlots uint64 // Number of executable transaction slots guaranteed per account 178 GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts 179 AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account 180 GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts 181 182 Lifetime time.Duration // Maximum amount of time non-executable transaction are queued 183 } 184 185 // DefaultTxPoolConfig contains the default configurations for the transaction 186 // pool. 187 var DefaultTxPoolConfig = TxPoolConfig{ 188 Journal: "transactions.rlp", 189 Rejournal: time.Hour, 190 191 PriceLimit: 1, 192 PriceBump: 10, 193 194 AccountSlots: 16, 195 GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio 196 AccountQueue: 64, 197 GlobalQueue: 1024, 198 199 Lifetime: 3 * time.Hour, 200 } 201 202 // sanitize checks the provided user configurations and changes anything that's 203 // unreasonable or unworkable. 204 func (config *TxPoolConfig) sanitize() TxPoolConfig { 205 conf := *config 206 if conf.Rejournal < time.Second { 207 log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) 208 conf.Rejournal = time.Second 209 } 210 if conf.PriceLimit < 1 { 211 log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) 212 conf.PriceLimit = DefaultTxPoolConfig.PriceLimit 213 } 214 if conf.PriceBump < 1 { 215 log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) 216 conf.PriceBump = DefaultTxPoolConfig.PriceBump 217 } 218 if conf.AccountSlots < 1 { 219 log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) 220 conf.AccountSlots = DefaultTxPoolConfig.AccountSlots 221 } 222 if conf.GlobalSlots < 1 { 223 log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) 224 conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots 225 } 226 if conf.AccountQueue < 1 { 227 log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) 228 conf.AccountQueue = DefaultTxPoolConfig.AccountQueue 229 } 230 if conf.GlobalQueue < 1 { 231 log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) 232 conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue 233 } 234 if conf.Lifetime < 1 { 235 log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) 236 conf.Lifetime = DefaultTxPoolConfig.Lifetime 237 } 238 return conf 239 } 240 241 // TxPool contains all currently known transactions. Transactions 242 // enter the pool when they are received from the network or submitted 243 // locally. They exit the pool when they are included in the blockchain. 244 // 245 // The pool separates processable transactions (which can be applied to the 246 // current state) and future transactions. Transactions move between those 247 // two states over time as they are received and processed. 248 type TxPool struct { 249 config TxPoolConfig 250 chainconfig *params.ChainConfig 251 chain blockChain 252 gasPrice *big.Int 253 txFeed event.Feed 254 scope event.SubscriptionScope 255 signer types.Signer 256 mu sync.RWMutex 257 258 istanbul bool // Fork indicator whether we are in the istanbul stage. 259 eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. 260 eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions. 261 shanghai bool // Fork indicator whether we are in the Shanghai stage. 262 263 currentState *state.StateDB // Current state in the blockchain head 264 pendingNonces *txNoncer // Pending state tracking virtual nonces 265 currentMaxGas uint64 // Current gas limit for transaction caps 266 267 locals *accountSet // Set of local transaction to exempt from eviction rules 268 journal *txJournal // Journal of local transaction to back up to disk 269 270 pending map[common.Address]*txList // All currently processable transactions 271 queue map[common.Address]*txList // Queued but non-processable transactions 272 beats map[common.Address]time.Time // Last heartbeat from each known account 273 all *txLookup // All transactions to allow lookups 274 priced *txPricedList // All transactions sorted by price 275 276 chainHeadCh chan ChainHeadEvent 277 chainHeadSub event.Subscription 278 reqResetCh chan *txpoolResetRequest 279 reqPromoteCh chan *accountSet 280 queueTxEventCh chan *types.Transaction 281 reorgDoneCh chan chan struct{} 282 reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop 283 wg sync.WaitGroup // tracks loop, scheduleReorgLoop 284 initDoneCh chan struct{} // is closed once the pool is initialized (for tests) 285 286 spammers *prque.Prque 287 288 changesSinceReorg int // A counter for how many drops we've performed in-between reorg. 289 } 290 291 type txpoolResetRequest struct { 292 oldHead, newHead *types.Header 293 } 294 295 // NewTxPool creates a new transaction pool to gather, sort and filter inbound 296 // transactions from the network. 297 func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool { 298 // Sanitize the input to ensure no vulnerable gas prices are set 299 config = (&config).sanitize() 300 301 // Create the transaction pool with its initial settings 302 pool := &TxPool{ 303 config: config, 304 chainconfig: chainconfig, 305 chain: chain, 306 signer: types.LatestSigner(chainconfig), 307 pending: make(map[common.Address]*txList), 308 queue: make(map[common.Address]*txList), 309 beats: make(map[common.Address]time.Time), 310 all: newTxLookup(), 311 chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), 312 reqResetCh: make(chan *txpoolResetRequest), 313 reqPromoteCh: make(chan *accountSet), 314 queueTxEventCh: make(chan *types.Transaction), 315 reorgDoneCh: make(chan chan struct{}), 316 reorgShutdownCh: make(chan struct{}), 317 initDoneCh: make(chan struct{}), 318 gasPrice: new(big.Int).SetUint64(config.PriceLimit), 319 spammers: prque.New(nil), 320 } 321 pool.locals = newAccountSet(pool.signer) 322 for _, addr := range config.Locals { 323 log.Info("Setting new local account", "address", addr) 324 pool.locals.add(addr) 325 } 326 pool.priced = newTxPricedList(pool.all) 327 pool.reset(nil, chain.CurrentBlock().Header()) 328 329 // Start the reorg loop early so it can handle requests generated during journal loading. 330 pool.wg.Add(1) 331 go pool.scheduleReorgLoop() 332 333 // If local transactions and journaling is enabled, load from disk 334 if !config.NoLocals && config.Journal != "" { 335 pool.journal = newTxJournal(config.Journal) 336 337 if err := pool.journal.load(pool.AddLocals); err != nil { 338 log.Warn("Failed to load transaction journal", "err", err) 339 } 340 if err := pool.journal.rotate(pool.local()); err != nil { 341 log.Warn("Failed to rotate transaction journal", "err", err) 342 } 343 } 344 345 // Subscribe events from blockchain and start the main event loop. 346 pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) 347 pool.wg.Add(1) 348 go pool.loop() 349 350 return pool 351 } 352 353 // loop is the transaction pool's main event loop, waiting for and reacting to 354 // outside blockchain events as well as for various reporting and transaction 355 // eviction events. 356 func (pool *TxPool) loop() { 357 defer pool.wg.Done() 358 359 var ( 360 prevPending, prevQueued, prevStales int 361 // Start the stats reporting and transaction eviction tickers 362 report = time.NewTicker(statsReportInterval) 363 evict = time.NewTicker(evictionInterval) 364 journal = time.NewTicker(pool.config.Rejournal) 365 // Track the previous head headers for transaction reorgs 366 head = pool.chain.CurrentBlock() 367 ) 368 defer report.Stop() 369 defer evict.Stop() 370 defer journal.Stop() 371 372 // Notify tests that the init phase is done 373 close(pool.initDoneCh) 374 for { 375 select { 376 // Handle ChainHeadEvent 377 case ev := <-pool.chainHeadCh: 378 if ev.Block != nil { 379 pool.requestReset(head.Header(), ev.Block.Header()) 380 head = ev.Block 381 } 382 383 // System shutdown. 384 case <-pool.chainHeadSub.Err(): 385 close(pool.reorgShutdownCh) 386 return 387 388 // Handle stats reporting ticks 389 case <-report.C: 390 pool.mu.RLock() 391 pending, queued := pool.stats() 392 pool.mu.RUnlock() 393 stales := int(atomic.LoadInt64(&pool.priced.stales)) 394 395 if pending != prevPending || queued != prevQueued || stales != prevStales { 396 log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) 397 prevPending, prevQueued, prevStales = pending, queued, stales 398 } 399 400 // Handle inactive account transaction eviction 401 case <-evict.C: 402 pool.mu.Lock() 403 for addr := range pool.queue { 404 // Skip local transactions from the eviction mechanism 405 if pool.locals.contains(addr) { 406 continue 407 } 408 // Any non-locals old enough should be removed 409 if time.Since(pool.beats[addr]) > pool.config.Lifetime { 410 list := pool.queue[addr].Flatten() 411 for _, tx := range list { 412 pool.removeTx(tx.Hash(), true) 413 } 414 queuedEvictionMeter.Mark(int64(len(list))) 415 } 416 } 417 pool.mu.Unlock() 418 419 // Handle local transaction journal rotation 420 case <-journal.C: 421 if pool.journal != nil { 422 pool.mu.Lock() 423 if err := pool.journal.rotate(pool.local()); err != nil { 424 log.Warn("Failed to rotate local tx journal", "err", err) 425 } 426 pool.mu.Unlock() 427 } 428 } 429 } 430 } 431 432 // Stop terminates the transaction pool. 433 func (pool *TxPool) Stop() { 434 // Unsubscribe all subscriptions registered from txpool 435 pool.scope.Close() 436 437 // Unsubscribe subscriptions registered from blockchain 438 pool.chainHeadSub.Unsubscribe() 439 pool.wg.Wait() 440 441 if pool.journal != nil { 442 pool.journal.close() 443 } 444 log.Info("Transaction pool stopped") 445 } 446 447 // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and 448 // starts sending event to the given channel. 449 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { 450 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 451 } 452 453 // GasPrice returns the current gas price enforced by the transaction pool. 454 func (pool *TxPool) GasPrice() *big.Int { 455 pool.mu.RLock() 456 defer pool.mu.RUnlock() 457 458 return new(big.Int).Set(pool.gasPrice) 459 } 460 461 // SetGasPrice updates the minimum price required by the transaction pool for a 462 // new transaction, and drops all transactions below this threshold. 463 func (pool *TxPool) SetGasPrice(price *big.Int) { 464 pool.mu.Lock() 465 defer pool.mu.Unlock() 466 467 old := pool.gasPrice 468 pool.gasPrice = price 469 // if the min miner fee increased, remove transactions below the new threshold 470 if price.Cmp(old) > 0 { 471 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 472 drop := pool.all.RemotesBelowTip(price) 473 for _, tx := range drop { 474 pool.removeTx(tx.Hash(), false) 475 } 476 pool.priced.Removed(len(drop)) 477 } 478 479 log.Info("Transaction pool price threshold updated", "price", price) 480 } 481 482 // Nonce returns the next nonce of an account, with all transactions executable 483 // by the pool already applied on top. 484 func (pool *TxPool) Nonce(addr common.Address) uint64 { 485 pool.mu.RLock() 486 defer pool.mu.RUnlock() 487 488 return pool.pendingNonces.get(addr) 489 } 490 491 // Stats retrieves the current pool stats, namely the number of pending and the 492 // number of queued (non-executable) transactions. 493 func (pool *TxPool) Stats() (int, int) { 494 pool.mu.RLock() 495 defer pool.mu.RUnlock() 496 497 return pool.stats() 498 } 499 500 // stats retrieves the current pool stats, namely the number of pending and the 501 // number of queued (non-executable) transactions. 502 func (pool *TxPool) stats() (int, int) { 503 pending := 0 504 for _, list := range pool.pending { 505 pending += list.Len() 506 } 507 queued := 0 508 for _, list := range pool.queue { 509 queued += list.Len() 510 } 511 return pending, queued 512 } 513 514 // Content retrieves the data content of the transaction pool, returning all the 515 // pending as well as queued transactions, grouped by account and sorted by nonce. 516 func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { 517 pool.mu.Lock() 518 defer pool.mu.Unlock() 519 520 pending := make(map[common.Address]types.Transactions) 521 for addr, list := range pool.pending { 522 pending[addr] = list.Flatten() 523 } 524 queued := make(map[common.Address]types.Transactions) 525 for addr, list := range pool.queue { 526 queued[addr] = list.Flatten() 527 } 528 return pending, queued 529 } 530 531 // ContentFrom retrieves the data content of the transaction pool, returning the 532 // pending as well as queued transactions of this address, grouped by nonce. 533 func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { 534 pool.mu.RLock() 535 defer pool.mu.RUnlock() 536 537 var pending types.Transactions 538 if list, ok := pool.pending[addr]; ok { 539 pending = list.Flatten() 540 } 541 var queued types.Transactions 542 if list, ok := pool.queue[addr]; ok { 543 queued = list.Flatten() 544 } 545 return pending, queued 546 } 547 548 // Pending retrieves all currently processable transactions, grouped by origin 549 // account and sorted by nonce. The returned transaction set is a copy and can be 550 // freely modified by calling code. 551 // 552 // The enforceTips parameter can be used to do an extra filtering on the pending 553 // transactions and only return those whose **effective** tip is large enough in 554 // the next pending execution environment. 555 func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { 556 pool.mu.Lock() 557 defer pool.mu.Unlock() 558 559 pending := make(map[common.Address]types.Transactions) 560 for addr, list := range pool.pending { 561 txs := list.Flatten() 562 563 // If the miner requests tip enforcement, cap the lists now 564 if enforceTips && !pool.locals.contains(addr) { 565 for i, tx := range txs { 566 if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { 567 txs = txs[:i] 568 break 569 } 570 } 571 } 572 if len(txs) > 0 { 573 pending[addr] = txs 574 } 575 } 576 return pending 577 } 578 579 // Locals retrieves the accounts currently considered local by the pool. 580 func (pool *TxPool) Locals() []common.Address { 581 pool.mu.Lock() 582 defer pool.mu.Unlock() 583 584 return pool.locals.flatten() 585 } 586 587 // local retrieves all currently known local transactions, grouped by origin 588 // account and sorted by nonce. The returned transaction set is a copy and can be 589 // freely modified by calling code. 590 func (pool *TxPool) local() map[common.Address]types.Transactions { 591 txs := make(map[common.Address]types.Transactions) 592 for addr := range pool.locals.accounts { 593 if pending := pool.pending[addr]; pending != nil { 594 txs[addr] = append(txs[addr], pending.Flatten()...) 595 } 596 if queued := pool.queue[addr]; queued != nil { 597 txs[addr] = append(txs[addr], queued.Flatten()...) 598 } 599 } 600 return txs 601 } 602 603 // validateTx checks whether a transaction is valid according to the consensus 604 // rules and adheres to some heuristic limits of the local node (price and size). 605 func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { 606 // No unauthenticated deposits allowed in the transaction pool. 607 if tx.IsL1MessageTx() { 608 return ErrTxTypeNotSupported 609 } 610 611 // Accept only legacy transactions until EIP-2718/2930 activates. 612 if !pool.eip2718 && tx.Type() != types.LegacyTxType { 613 return ErrTxTypeNotSupported 614 } 615 // Reject dynamic fee transactions until EIP-1559 activates. 616 if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { 617 return ErrTxTypeNotSupported 618 } 619 // Reject transactions over defined size to prevent DOS attacks 620 if uint64(tx.Size()) > txMaxSize { 621 return ErrOversizedData 622 } 623 // Check whether the init code size has been exceeded. 624 if pool.shanghai && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { 625 return fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) 626 } 627 // Transactions can't be negative. This may never happen using RLP decoded 628 // transactions but may occur if you create a transaction using the RPC. 629 if tx.Value().Sign() < 0 { 630 return ErrNegativeValue 631 } 632 // Ensure the transaction doesn't exceed the current block limit gas. 633 if pool.currentMaxGas < tx.Gas() { 634 return ErrGasLimit 635 } 636 // Sanity check for extremely large numbers 637 if tx.GasFeeCap().BitLen() > 256 { 638 return ErrFeeCapVeryHigh 639 } 640 if tx.GasTipCap().BitLen() > 256 { 641 return ErrTipVeryHigh 642 } 643 // Ensure gasFeeCap is greater than or equal to gasTipCap. 644 if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { 645 return ErrTipAboveFeeCap 646 } 647 // Make sure the transaction is signed properly. 648 from, err := types.Sender(pool.signer, tx) 649 if err != nil { 650 return ErrInvalidSender 651 } 652 // Drop non-local transactions under our own minimal accepted gas price or tip. 653 pendingBaseFee := pool.priced.urgent.baseFee 654 if !local && tx.EffectiveGasTipIntCmp(pool.gasPrice, pendingBaseFee) < 0 { 655 return ErrUnderpriced 656 } 657 // Ensure the transaction adheres to nonce ordering 658 if pool.currentState.GetNonce(from) > tx.Nonce() { 659 return ErrNonceTooLow 660 } 661 // Transactor should have enough funds to cover the costs 662 // cost == V + GP * GL 663 if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { 664 return ErrInsufficientFunds 665 } 666 // Ensure the transaction has more gas than the basic tx fee. 667 intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.shanghai) 668 if err != nil { 669 return err 670 } 671 if tx.Gas() < intrGas { 672 return ErrIntrinsicGas 673 } 674 return nil 675 } 676 677 // add validates a transaction and inserts it into the non-executable queue for later 678 // pending promotion and execution. If the transaction is a replacement for an already 679 // pending or queued one, it overwrites the previous transaction if its price is higher. 680 // 681 // If a newly added transaction is marked as local, its sending account will be 682 // be added to the allowlist, preventing any associated transaction from being dropped 683 // out of the pool due to pricing constraints. 684 func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { 685 // If the transaction is already known, discard it 686 hash := tx.Hash() 687 if pool.all.Get(hash) != nil { 688 log.Trace("Discarding already known transaction", "hash", hash) 689 knownTxMeter.Mark(1) 690 return false, ErrAlreadyKnown 691 } 692 // Make the local flag. If it's from local source or it's from the network but 693 // the sender is marked as local previously, treat it as the local transaction. 694 isLocal := local || pool.locals.containsTx(tx) 695 696 if pool.chainconfig.Scroll.FeeVaultEnabled() { 697 if err := fees.VerifyFee(pool.signer, tx, pool.currentState); err != nil { 698 log.Trace("Discarding insufficient l1DataFee transaction", "hash", hash, "err", err) 699 invalidTxMeter.Mark(1) 700 return false, err 701 } 702 } 703 704 // If the transaction fails basic validation, discard it 705 if err := pool.validateTx(tx, isLocal); err != nil { 706 log.Trace("Discarding invalid transaction", "hash", hash, "err", err) 707 invalidTxMeter.Mark(1) 708 return false, err 709 } 710 // If the transaction pool is full, discard underpriced transactions 711 if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { 712 // If the new transaction is underpriced, don't accept it 713 if !isLocal && pool.priced.Underpriced(tx) { 714 log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 715 underpricedTxMeter.Mark(1) 716 return false, ErrUnderpriced 717 } 718 // We're about to replace a transaction. The reorg does a more thorough 719 // analysis of what to remove and how, but it runs async. We don't want to 720 // do too many replacements between reorg-runs, so we cap the number of 721 // replacements to 25% of the slots 722 if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) { 723 throttleTxMeter.Mark(1) 724 return false, ErrTxPoolOverflow 725 } 726 727 // New transaction is better than our worse ones, make room for it. 728 // If it's a local transaction, forcibly discard all available transactions. 729 // Otherwise if we can't make enough room for new one, abort the operation. 730 drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) 731 732 // Special case, we still can't make the room for the new remote one. 733 if !isLocal && !success { 734 log.Trace("Discarding overflown transaction", "hash", hash) 735 overflowedTxMeter.Mark(1) 736 return false, ErrTxPoolOverflow 737 } 738 // Bump the counter of rejections-since-reorg 739 pool.changesSinceReorg += len(drop) 740 // Kick out the underpriced remote transactions. 741 for _, tx := range drop { 742 log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 743 underpricedTxMeter.Mark(1) 744 pool.removeTx(tx.Hash(), false) 745 } 746 } 747 // Try to replace an existing transaction in the pending pool 748 from, _ := types.Sender(pool.signer, tx) // already validated 749 if list := pool.pending[from]; list != nil && list.Overlaps(tx) { 750 // Nonce already pending, check if required price bump is met 751 inserted, old := list.Add(tx, pool.config.PriceBump) 752 if !inserted { 753 pendingDiscardMeter.Mark(1) 754 return false, ErrReplaceUnderpriced 755 } 756 // New transaction is better, replace old one 757 if old != nil { 758 pool.all.Remove(old.Hash()) 759 pool.priced.Removed(1) 760 pendingReplaceMeter.Mark(1) 761 } 762 pool.all.Add(tx, isLocal) 763 pool.priced.Put(tx, isLocal) 764 pool.journalTx(from, tx) 765 pool.queueTxEvent(tx) 766 log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) 767 768 // Successful promotion, bump the heartbeat 769 pool.beats[from] = time.Now() 770 return old != nil, nil 771 } 772 // New transaction isn't replacing a pending one, push into queue 773 replaced, err = pool.enqueueTx(hash, tx, isLocal, true) 774 if err != nil { 775 return false, err 776 } 777 // Mark local addresses and journal local transactions 778 if local && !pool.locals.contains(from) { 779 log.Info("Setting new local account", "address", from) 780 pool.locals.add(from) 781 pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. 782 } 783 if isLocal { 784 localGauge.Inc(1) 785 } 786 pool.journalTx(from, tx) 787 788 log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) 789 return replaced, nil 790 } 791 792 // enqueueTx inserts a new transaction into the non-executable transaction queue. 793 // 794 // Note, this method assumes the pool lock is held! 795 func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { 796 // Try to insert the transaction into the future queue 797 from, _ := types.Sender(pool.signer, tx) // already validated 798 if pool.queue[from] == nil { 799 pool.queue[from] = newTxList(false) 800 } 801 inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) 802 if !inserted { 803 // An older transaction was better, discard this 804 queuedDiscardMeter.Mark(1) 805 return false, ErrReplaceUnderpriced 806 } 807 // Discard any previous transaction and mark this 808 if old != nil { 809 pool.all.Remove(old.Hash()) 810 pool.priced.Removed(1) 811 queuedReplaceMeter.Mark(1) 812 } else { 813 // Nothing was replaced, bump the queued counter 814 queuedGauge.Inc(1) 815 } 816 // If the transaction isn't in lookup set but it's expected to be there, 817 // show the error log. 818 if pool.all.Get(hash) == nil && !addAll { 819 log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) 820 } 821 if addAll { 822 pool.all.Add(tx, local) 823 pool.priced.Put(tx, local) 824 } 825 // If we never record the heartbeat, do it right now. 826 if _, exist := pool.beats[from]; !exist { 827 pool.beats[from] = time.Now() 828 } 829 return old != nil, nil 830 } 831 832 // journalTx adds the specified transaction to the local disk journal if it is 833 // deemed to have been sent from a local account. 834 func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { 835 // Only journal if it's enabled and the transaction is local 836 if pool.journal == nil || !pool.locals.contains(from) { 837 return 838 } 839 if err := pool.journal.insert(tx); err != nil { 840 log.Warn("Failed to journal local transaction", "err", err) 841 } 842 } 843 844 // promoteTx adds a transaction to the pending (processable) list of transactions 845 // and returns whether it was inserted or an older was better. 846 // 847 // Note, this method assumes the pool lock is held! 848 func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { 849 // Try to insert the transaction into the pending queue 850 if pool.pending[addr] == nil { 851 pool.pending[addr] = newTxList(true) 852 } 853 list := pool.pending[addr] 854 855 inserted, old := list.Add(tx, pool.config.PriceBump) 856 if !inserted { 857 // An older transaction was better, discard this 858 pool.all.Remove(hash) 859 pool.priced.Removed(1) 860 pendingDiscardMeter.Mark(1) 861 return false 862 } 863 // Otherwise discard any previous transaction and mark this 864 if old != nil { 865 pool.all.Remove(old.Hash()) 866 pool.priced.Removed(1) 867 pendingReplaceMeter.Mark(1) 868 } else { 869 // Nothing was replaced, bump the pending counter 870 pendingGauge.Inc(1) 871 } 872 // Set the potentially new pending nonce and notify any subsystems of the new tx 873 pool.pendingNonces.set(addr, tx.Nonce()+1) 874 875 // Successful promotion, bump the heartbeat 876 pool.beats[addr] = time.Now() 877 return true 878 } 879 880 // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the 881 // senders as a local ones, ensuring they go around the local pricing constraints. 882 // 883 // This method is used to add transactions from the RPC API and performs synchronous pool 884 // reorganization and event propagation. 885 func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { 886 return pool.addTxs(txs, !pool.config.NoLocals, true) 887 } 888 889 // AddLocal enqueues a single local transaction into the pool if it is valid. This is 890 // a convenience wrapper aroundd AddLocals. 891 func (pool *TxPool) AddLocal(tx *types.Transaction) error { 892 errs := pool.AddLocals([]*types.Transaction{tx}) 893 return errs[0] 894 } 895 896 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the 897 // senders are not among the locally tracked ones, full pricing constraints will apply. 898 // 899 // This method is used to add transactions from the p2p network and does not wait for pool 900 // reorganization and internal event propagation. 901 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 902 return pool.addTxs(txs, false, false) 903 } 904 905 // This is like AddRemotes, but waits for pool reorganization. Tests use this method. 906 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 907 return pool.addTxs(txs, false, true) 908 } 909 910 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 911 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 912 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 913 return errs[0] 914 } 915 916 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 917 // wrapper around AddRemotes. 918 // 919 // Deprecated: use AddRemotes 920 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 921 errs := pool.AddRemotes([]*types.Transaction{tx}) 922 return errs[0] 923 } 924 925 // addTxs attempts to queue a batch of transactions if they are valid. 926 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { 927 // Filter out known ones without obtaining the pool lock or recovering signatures 928 var ( 929 errs = make([]error, len(txs)) 930 news = make([]*types.Transaction, 0, len(txs)) 931 ) 932 for i, tx := range txs { 933 // If the transaction is known, pre-set the error slot 934 if pool.all.Get(tx.Hash()) != nil { 935 errs[i] = ErrAlreadyKnown 936 knownTxMeter.Mark(1) 937 continue 938 } 939 // Exclude transactions with invalid signatures as soon as 940 // possible and cache senders in transactions before 941 // obtaining lock 942 _, err := types.Sender(pool.signer, tx) 943 if err != nil { 944 errs[i] = ErrInvalidSender 945 invalidTxMeter.Mark(1) 946 continue 947 } 948 // Accumulate all unknown transactions for deeper processing 949 news = append(news, tx) 950 } 951 if len(news) == 0 { 952 return errs 953 } 954 955 // Process all the new transaction and merge any errors into the original slice 956 pool.mu.Lock() 957 newErrs, dirtyAddrs := pool.addTxsLocked(news, local) 958 pool.mu.Unlock() 959 960 var nilSlot = 0 961 for _, err := range newErrs { 962 for errs[nilSlot] != nil { 963 nilSlot++ 964 } 965 errs[nilSlot] = err 966 nilSlot++ 967 } 968 // Reorg the pool internals if needed and return 969 done := pool.requestPromoteExecutables(dirtyAddrs) 970 if sync { 971 <-done 972 } 973 return errs 974 } 975 976 // addTxsLocked attempts to queue a batch of transactions if they are valid. 977 // The transaction pool lock must be held. 978 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 979 dirty := newAccountSet(pool.signer) 980 errs := make([]error, len(txs)) 981 for i, tx := range txs { 982 replaced, err := pool.add(tx, local) 983 errs[i] = err 984 if err == nil && !replaced { 985 dirty.addTx(tx) 986 } 987 } 988 validTxMeter.Mark(int64(len(dirty.accounts))) 989 return errs, dirty 990 } 991 992 // Status returns the status (unknown/pending/queued) of a batch of transactions 993 // identified by their hashes. 994 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 995 status := make([]TxStatus, len(hashes)) 996 for i, hash := range hashes { 997 tx := pool.Get(hash) 998 if tx == nil { 999 continue 1000 } 1001 from, _ := types.Sender(pool.signer, tx) // already validated 1002 pool.mu.RLock() 1003 if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 1004 status[i] = TxStatusPending 1005 } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 1006 status[i] = TxStatusQueued 1007 } 1008 // implicit else: the tx may have been included into a block between 1009 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 1010 pool.mu.RUnlock() 1011 } 1012 return status 1013 } 1014 1015 // Get returns a transaction if it is contained in the pool and nil otherwise. 1016 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 1017 return pool.all.Get(hash) 1018 } 1019 1020 // Has returns an indicator whether txpool has a transaction cached with the 1021 // given hash. 1022 func (pool *TxPool) Has(hash common.Hash) bool { 1023 return pool.all.Get(hash) != nil 1024 } 1025 1026 // RemoveTx is similar to removeTx, but with locking to prevent concurrency. 1027 // Note: currently should only be called by miner/worker.go. 1028 func (pool *TxPool) RemoveTx(hash common.Hash, outofbound bool) { 1029 pool.mu.Lock() 1030 defer pool.mu.Unlock() 1031 1032 pool.removeTx(hash, outofbound) 1033 } 1034 1035 // removeTx removes a single transaction from the queue, moving all subsequent 1036 // transactions back to the future queue. 1037 func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { 1038 // Fetch the transaction we wish to delete 1039 tx := pool.all.Get(hash) 1040 if tx == nil { 1041 return 1042 } 1043 addr, _ := types.Sender(pool.signer, tx) // already validated during insertion 1044 1045 // Remove it from the list of known transactions 1046 pool.all.Remove(hash) 1047 if outofbound { 1048 pool.priced.Removed(1) 1049 } 1050 if pool.locals.contains(addr) { 1051 localGauge.Dec(1) 1052 } 1053 // Remove the transaction from the pending lists and reset the account nonce 1054 if pending := pool.pending[addr]; pending != nil { 1055 if removed, invalids := pending.Remove(tx); removed { 1056 // If no more pending transactions are left, remove the list 1057 if pending.Empty() { 1058 delete(pool.pending, addr) 1059 } 1060 // Postpone any invalidated transactions 1061 for _, tx := range invalids { 1062 // Internal shuffle shouldn't touch the lookup set. 1063 pool.enqueueTx(tx.Hash(), tx, false, false) 1064 } 1065 // Update the account nonce if needed 1066 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1067 // Reduce the pending counter 1068 pendingGauge.Dec(int64(1 + len(invalids))) 1069 return 1070 } 1071 } 1072 // Transaction is in the future queue 1073 if future := pool.queue[addr]; future != nil { 1074 if removed, _ := future.Remove(tx); removed { 1075 // Reduce the queued counter 1076 queuedGauge.Dec(1) 1077 } 1078 if future.Empty() { 1079 delete(pool.queue, addr) 1080 delete(pool.beats, addr) 1081 } 1082 } 1083 } 1084 1085 // requestReset requests a pool reset to the new head block. 1086 // The returned channel is closed when the reset has occurred. 1087 func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { 1088 select { 1089 case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: 1090 return <-pool.reorgDoneCh 1091 case <-pool.reorgShutdownCh: 1092 return pool.reorgShutdownCh 1093 } 1094 } 1095 1096 // requestPromoteExecutables requests transaction promotion checks for the given addresses. 1097 // The returned channel is closed when the promotion checks have occurred. 1098 func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { 1099 select { 1100 case pool.reqPromoteCh <- set: 1101 return <-pool.reorgDoneCh 1102 case <-pool.reorgShutdownCh: 1103 return pool.reorgShutdownCh 1104 } 1105 } 1106 1107 // queueTxEvent enqueues a transaction event to be sent in the next reorg run. 1108 func (pool *TxPool) queueTxEvent(tx *types.Transaction) { 1109 select { 1110 case pool.queueTxEventCh <- tx: 1111 case <-pool.reorgShutdownCh: 1112 } 1113 } 1114 1115 // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not 1116 // call those methods directly, but request them being run using requestReset and 1117 // requestPromoteExecutables instead. 1118 func (pool *TxPool) scheduleReorgLoop() { 1119 defer pool.wg.Done() 1120 1121 var ( 1122 curDone chan struct{} // non-nil while runReorg is active 1123 nextDone = make(chan struct{}) 1124 launchNextRun bool 1125 reset *txpoolResetRequest 1126 dirtyAccounts *accountSet 1127 queuedEvents = make(map[common.Address]*txSortedMap) 1128 ) 1129 for { 1130 // Launch next background reorg if needed 1131 if curDone == nil && launchNextRun { 1132 // Run the background reorg and announcements 1133 go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) 1134 1135 // Prepare everything for the next round of reorg 1136 curDone, nextDone = nextDone, make(chan struct{}) 1137 launchNextRun = false 1138 1139 reset, dirtyAccounts = nil, nil 1140 queuedEvents = make(map[common.Address]*txSortedMap) 1141 } 1142 1143 select { 1144 case req := <-pool.reqResetCh: 1145 // Reset request: update head if request is already pending. 1146 if reset == nil { 1147 reset = req 1148 } else { 1149 reset.newHead = req.newHead 1150 } 1151 launchNextRun = true 1152 pool.reorgDoneCh <- nextDone 1153 1154 case req := <-pool.reqPromoteCh: 1155 // Promote request: update address set if request is already pending. 1156 if dirtyAccounts == nil { 1157 dirtyAccounts = req 1158 } else { 1159 dirtyAccounts.merge(req) 1160 } 1161 launchNextRun = true 1162 pool.reorgDoneCh <- nextDone 1163 1164 case tx := <-pool.queueTxEventCh: 1165 // Queue up the event, but don't schedule a reorg. It's up to the caller to 1166 // request one later if they want the events sent. 1167 addr, _ := types.Sender(pool.signer, tx) 1168 if _, ok := queuedEvents[addr]; !ok { 1169 queuedEvents[addr] = newTxSortedMap() 1170 } 1171 queuedEvents[addr].Put(tx) 1172 1173 case <-curDone: 1174 curDone = nil 1175 1176 case <-pool.reorgShutdownCh: 1177 // Wait for current run to finish. 1178 if curDone != nil { 1179 <-curDone 1180 } 1181 close(nextDone) 1182 return 1183 } 1184 } 1185 } 1186 1187 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. 1188 func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { 1189 defer func(t0 time.Time) { 1190 reorgDurationTimer.Update(time.Since(t0)) 1191 }(time.Now()) 1192 defer close(done) 1193 1194 var promoteAddrs []common.Address 1195 if dirtyAccounts != nil && reset == nil { 1196 // Only dirty accounts need to be promoted, unless we're resetting. 1197 // For resets, all addresses in the tx queue will be promoted and 1198 // the flatten operation can be avoided. 1199 promoteAddrs = dirtyAccounts.flatten() 1200 } 1201 pool.mu.Lock() 1202 if reset != nil { 1203 // Reset from the old head to the new, rescheduling any reorged transactions 1204 pool.reset(reset.oldHead, reset.newHead) 1205 1206 // Nonces were reset, discard any events that became stale 1207 for addr := range events { 1208 events[addr].Forward(pool.pendingNonces.get(addr)) 1209 if events[addr].Len() == 0 { 1210 delete(events, addr) 1211 } 1212 } 1213 // Reset needs promote for all addresses 1214 promoteAddrs = addrsPool.Get().([]common.Address) 1215 for addr := range pool.queue { 1216 promoteAddrs = append(promoteAddrs, addr) 1217 } 1218 } 1219 // Check for pending transactions for every account that sent new ones 1220 promoted := pool.promoteExecutables(promoteAddrs) 1221 defer addrsPool.Put(promoteAddrs[:0]) 1222 1223 // If a new block appeared, validate the pool of pending transactions. This will 1224 // remove any transaction that has been included in the block or was invalidated 1225 // because of another transaction (e.g. higher gas price). 1226 if reset != nil { 1227 pool.demoteUnexecutables() 1228 if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { 1229 pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead) 1230 pool.priced.SetBaseFee(pendingBaseFee) 1231 } 1232 // Update all accounts to the latest known pending nonce 1233 nonces := make(map[common.Address]uint64, len(pool.pending)) 1234 for addr, list := range pool.pending { 1235 highestPending := list.LastElement() 1236 nonces[addr] = highestPending.Nonce() + 1 1237 } 1238 pool.pendingNonces.setAll(nonces) 1239 } 1240 // Ensure pool.queue and pool.pending sizes stay within the configured limits. 1241 pool.truncatePending() 1242 pool.truncateQueue() 1243 1244 dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) 1245 pool.changesSinceReorg = 0 // Reset change counter 1246 pool.mu.Unlock() 1247 1248 // Notify subsystems for newly added transactions 1249 for _, tx := range promoted { 1250 addr, _ := types.Sender(pool.signer, tx) 1251 if _, ok := events[addr]; !ok { 1252 events[addr] = newTxSortedMap() 1253 } 1254 events[addr].Put(tx) 1255 } 1256 if len(events) > 0 { 1257 var txs []*types.Transaction 1258 for _, set := range events { 1259 txs = append(txs, set.Flatten()...) 1260 } 1261 pool.txFeed.Send(NewTxsEvent{txs}) 1262 } 1263 } 1264 1265 // reset retrieves the current state of the blockchain and ensures the content 1266 // of the transaction pool is valid with regard to the chain state. 1267 func (pool *TxPool) reset(oldHead, newHead *types.Header) { 1268 // If we're reorging an old state, reinject all dropped transactions 1269 var reinject types.Transactions 1270 1271 if oldHead != nil && oldHead.Hash() != newHead.ParentHash { 1272 // If the reorg is too deep, avoid doing it (will happen during fast sync) 1273 oldNum := oldHead.Number.Uint64() 1274 newNum := newHead.Number.Uint64() 1275 1276 if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { 1277 log.Debug("Skipping deep transaction reorg", "depth", depth) 1278 } else { 1279 // Reorg seems shallow enough to pull in all transactions into memory 1280 var discarded, included types.Transactions 1281 var ( 1282 rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) 1283 add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) 1284 ) 1285 if rem == nil { 1286 // This can happen if a setHead is performed, where we simply discard the old 1287 // head from the chain. 1288 // If that is the case, we don't have the lost transactions any more, and 1289 // there's nothing to add 1290 if newNum >= oldNum { 1291 // If we reorged to a same or higher number, then it's not a case of setHead 1292 log.Warn("Transaction pool reset with missing oldhead", 1293 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1294 return 1295 } 1296 // If the reorg ended up on a lower number, it's indicative of setHead being the cause 1297 log.Debug("Skipping transaction reset caused by setHead", 1298 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1299 // We still need to update the current state s.th. the lost transactions can be readded by the user 1300 } else { 1301 for rem.NumberU64() > add.NumberU64() { 1302 discarded = append(discarded, rem.Transactions()...) 1303 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1304 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) 1305 return 1306 } 1307 } 1308 for add.NumberU64() > rem.NumberU64() { 1309 included = append(included, add.Transactions()...) 1310 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1311 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) 1312 return 1313 } 1314 } 1315 for rem.Hash() != add.Hash() { 1316 discarded = append(discarded, rem.Transactions()...) 1317 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1318 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) 1319 return 1320 } 1321 included = append(included, add.Transactions()...) 1322 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1323 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) 1324 return 1325 } 1326 } 1327 reinject = types.TxDifference(discarded, included) 1328 } 1329 } 1330 } 1331 // Initialize the internal state to the current head 1332 if newHead == nil { 1333 newHead = pool.chain.CurrentBlock().Header() // Special case during testing 1334 } 1335 statedb, err := pool.chain.StateAt(newHead.Root) 1336 if err != nil { 1337 log.Error("Failed to reset txpool state", "err", err) 1338 return 1339 } 1340 pool.currentState = statedb 1341 pool.pendingNonces = newTxNoncer(statedb) 1342 pool.currentMaxGas = newHead.GasLimit 1343 1344 // Inject any transactions discarded due to reorgs 1345 log.Debug("Reinjecting stale transactions", "count", len(reinject)) 1346 senderCacher.recover(pool.signer, reinject) 1347 pool.addTxsLocked(reinject, false) 1348 1349 // Update all fork indicator by next pending block number. 1350 next := new(big.Int).Add(newHead.Number, big.NewInt(1)) 1351 pool.istanbul = pool.chainconfig.IsIstanbul(next) 1352 1353 pool.eip2718 = pool.chainconfig.Scroll.EnableEIP2718 && pool.chainconfig.IsBerlin(next) 1354 pool.eip1559 = pool.chainconfig.Scroll.EnableEIP1559 && pool.chainconfig.IsLondon(next) 1355 pool.shanghai = pool.chainconfig.IsShanghai(next) 1356 } 1357 1358 // promoteExecutables moves transactions that have become processable from the 1359 // future queue to the set of pending transactions. During this process, all 1360 // invalidated transactions (low nonce, low balance) are deleted. 1361 func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction { 1362 // Track the promoted transactions to broadcast them at once 1363 var promoted []*types.Transaction 1364 1365 // Iterate over all accounts and promote any executable transactions 1366 for _, addr := range accounts { 1367 list := pool.queue[addr] 1368 if list == nil { 1369 continue // Just in case someone calls with a non existing account 1370 } 1371 // Drop all transactions that are deemed too old (low nonce) 1372 forwards := list.Forward(pool.currentState.GetNonce(addr)) 1373 for _, tx := range forwards { 1374 hash := tx.Hash() 1375 pool.all.Remove(hash) 1376 } 1377 log.Trace("Removed old queued transactions", "count", len(forwards)) 1378 // Drop all transactions that are too costly (low balance or out of gas) 1379 drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1380 for _, tx := range drops { 1381 hash := tx.Hash() 1382 pool.all.Remove(hash) 1383 } 1384 log.Trace("Removed unpayable queued transactions", "count", len(drops)) 1385 queuedNofundsMeter.Mark(int64(len(drops))) 1386 1387 // Gather all executable transactions and promote them 1388 readies := list.Ready(pool.pendingNonces.get(addr)) 1389 for _, tx := range readies { 1390 hash := tx.Hash() 1391 if pool.promoteTx(addr, hash, tx) { 1392 promoted = append(promoted, tx) 1393 } 1394 } 1395 log.Trace("Promoted queued transactions", "count", len(promoted)) 1396 queuedGauge.Dec(int64(len(readies))) 1397 1398 // Drop all transactions over the allowed limit 1399 var caps types.Transactions 1400 if !pool.locals.contains(addr) { 1401 caps = list.Cap(int(pool.config.AccountQueue)) 1402 for _, tx := range caps { 1403 hash := tx.Hash() 1404 pool.all.Remove(hash) 1405 log.Trace("Removed cap-exceeding queued transaction", "hash", hash) 1406 } 1407 queuedRateLimitMeter.Mark(int64(len(caps))) 1408 } 1409 // Mark all the items dropped as removed 1410 pool.priced.Removed(len(forwards) + len(drops) + len(caps)) 1411 queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1412 if pool.locals.contains(addr) { 1413 localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1414 } 1415 // Delete the entire queue entry if it became empty. 1416 if list.Empty() { 1417 delete(pool.queue, addr) 1418 delete(pool.beats, addr) 1419 } 1420 } 1421 return promoted 1422 } 1423 1424 // truncatePending removes transactions from the pending queue if the pool is above the 1425 // pending limit. The algorithm tries to reduce transaction counts by an approximately 1426 // equal number for all for accounts with many pending transactions. 1427 func (pool *TxPool) truncatePending() { 1428 pending := uint64(0) 1429 for _, list := range pool.pending { 1430 pending += uint64(list.Len()) 1431 } 1432 if pending <= pool.config.GlobalSlots { 1433 return 1434 } 1435 1436 pendingBeforeCap := pending 1437 // Assemble a spam order to penalize large transactors first 1438 pool.spammers.Reset() 1439 for addr, list := range pool.pending { 1440 // Only evict transactions from high rollers 1441 if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { 1442 pool.spammers.Push(addr, int64(list.Len())) 1443 } 1444 } 1445 // Gradually drop transactions from offenders 1446 offenders := addrsPool.Get().([]common.Address) 1447 defer addrsPool.Put(offenders[:0]) 1448 for pending > pool.config.GlobalSlots && !pool.spammers.Empty() { 1449 // Retrieve the next offender if not local address 1450 offender, _ := pool.spammers.Pop() 1451 offenders = append(offenders, offender.(common.Address)) 1452 1453 // Equalize balances until all the same or below threshold 1454 if len(offenders) > 1 { 1455 // Calculate the equalization threshold for all current offenders 1456 threshold := pool.pending[offender.(common.Address)].Len() 1457 1458 // Iteratively reduce all offenders until below limit or threshold reached 1459 for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { 1460 for i := 0; i < len(offenders)-1; i++ { 1461 list := pool.pending[offenders[i]] 1462 1463 caps := list.Cap(list.Len() - 1) 1464 for _, tx := range caps { 1465 // Drop the transaction from the global pools too 1466 hash := tx.Hash() 1467 pool.all.Remove(hash) 1468 1469 // Update the account nonce to the dropped transaction 1470 pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) 1471 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1472 } 1473 pool.priced.Removed(len(caps)) 1474 pendingGauge.Dec(int64(len(caps))) 1475 if pool.locals.contains(offenders[i]) { 1476 localGauge.Dec(int64(len(caps))) 1477 } 1478 pending-- 1479 } 1480 } 1481 } 1482 } 1483 1484 // If still above threshold, reduce to limit or min allowance 1485 if pending > pool.config.GlobalSlots && len(offenders) > 0 { 1486 for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { 1487 for _, addr := range offenders { 1488 list := pool.pending[addr] 1489 1490 caps := list.Cap(list.Len() - 1) 1491 for _, tx := range caps { 1492 // Drop the transaction from the global pools too 1493 hash := tx.Hash() 1494 pool.all.Remove(hash) 1495 1496 // Update the account nonce to the dropped transaction 1497 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1498 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1499 } 1500 pool.priced.Removed(len(caps)) 1501 pendingGauge.Dec(int64(len(caps))) 1502 if pool.locals.contains(addr) { 1503 localGauge.Dec(int64(len(caps))) 1504 } 1505 pending-- 1506 } 1507 } 1508 } 1509 pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) 1510 } 1511 1512 // truncateQueue drops the oldes transactions in the queue if the pool is above the global queue limit. 1513 func (pool *TxPool) truncateQueue() { 1514 queued := uint64(0) 1515 for _, list := range pool.queue { 1516 queued += uint64(list.Len()) 1517 } 1518 if queued <= pool.config.GlobalQueue { 1519 return 1520 } 1521 1522 // Sort all accounts with queued transactions by heartbeat 1523 addresses := addrBeatPool.Get().(addressesByHeartbeat) 1524 defer addrBeatPool.Put(addresses[:0]) 1525 for addr := range pool.queue { 1526 if !pool.locals.contains(addr) { // don't drop locals 1527 addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) 1528 } 1529 } 1530 sort.Sort(addresses) 1531 1532 // Drop transactions until the total is below the limit or only locals remain 1533 for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { 1534 addr := addresses[len(addresses)-1] 1535 list := pool.queue[addr.address] 1536 1537 addresses = addresses[:len(addresses)-1] 1538 1539 // Drop all transactions if they are less than the overflow 1540 if size := uint64(list.Len()); size <= drop { 1541 for _, tx := range list.Flatten() { 1542 pool.removeTx(tx.Hash(), true) 1543 } 1544 drop -= size 1545 queuedRateLimitMeter.Mark(int64(size)) 1546 continue 1547 } 1548 // Otherwise drop only last few transactions 1549 txs := list.Flatten() 1550 for i := len(txs) - 1; i >= 0 && drop > 0; i-- { 1551 pool.removeTx(txs[i].Hash(), true) 1552 drop-- 1553 queuedRateLimitMeter.Mark(1) 1554 } 1555 } 1556 } 1557 1558 // demoteUnexecutables removes invalid and processed transactions from the pools 1559 // executable/pending queue and any subsequent transactions that become unexecutable 1560 // are moved back into the future queue. 1561 // 1562 // Note: transactions are not marked as removed in the priced list because re-heaping 1563 // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful 1564 // to trigger a re-heap is this function 1565 func (pool *TxPool) demoteUnexecutables() { 1566 // Iterate over all accounts and demote any non-executable transactions 1567 for addr, list := range pool.pending { 1568 nonce := pool.currentState.GetNonce(addr) 1569 1570 // Drop all transactions that are deemed too old (low nonce) 1571 olds := list.Forward(nonce) 1572 for _, tx := range olds { 1573 hash := tx.Hash() 1574 pool.all.Remove(hash) 1575 log.Trace("Removed old pending transaction", "hash", hash) 1576 } 1577 // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later 1578 drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1579 for _, tx := range drops { 1580 hash := tx.Hash() 1581 log.Trace("Removed unpayable pending transaction", "hash", hash) 1582 pool.all.Remove(hash) 1583 } 1584 pendingNofundsMeter.Mark(int64(len(drops))) 1585 1586 for _, tx := range invalids { 1587 hash := tx.Hash() 1588 log.Trace("Demoting pending transaction", "hash", hash) 1589 1590 // Internal shuffle shouldn't touch the lookup set. 1591 pool.enqueueTx(hash, tx, false, false) 1592 } 1593 pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1594 if pool.locals.contains(addr) { 1595 localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1596 } 1597 // If there's a gap in front, alert (should never happen) and postpone all transactions 1598 if list.Len() > 0 && list.txs.Get(nonce) == nil { 1599 gapped := list.Cap(0) 1600 for _, tx := range gapped { 1601 hash := tx.Hash() 1602 log.Error("Demoting invalidated transaction", "hash", hash) 1603 1604 // Internal shuffle shouldn't touch the lookup set. 1605 pool.enqueueTx(hash, tx, false, false) 1606 } 1607 pendingGauge.Dec(int64(len(gapped))) 1608 // This might happen in a reorg, so log it to the metering 1609 blockReorgInvalidatedTx.Mark(int64(len(gapped))) 1610 } 1611 // Delete the entire pending entry if it became empty. 1612 if list.Empty() { 1613 delete(pool.pending, addr) 1614 } 1615 } 1616 } 1617 1618 // addressByHeartbeat is an account address tagged with its last activity timestamp. 1619 type addressByHeartbeat struct { 1620 address common.Address 1621 heartbeat time.Time 1622 } 1623 1624 type addressesByHeartbeat []addressByHeartbeat 1625 1626 func (a addressesByHeartbeat) Len() int { return len(a) } 1627 func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } 1628 func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 1629 1630 // accountSet is simply a set of addresses to check for existence, and a signer 1631 // capable of deriving addresses from transactions. 1632 type accountSet struct { 1633 accounts map[common.Address]struct{} 1634 signer types.Signer 1635 cache *[]common.Address 1636 } 1637 1638 // newAccountSet creates a new address set with an associated signer for sender 1639 // derivations. 1640 func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { 1641 as := &accountSet{ 1642 accounts: make(map[common.Address]struct{}), 1643 signer: signer, 1644 } 1645 for _, addr := range addrs { 1646 as.add(addr) 1647 } 1648 return as 1649 } 1650 1651 // contains checks if a given address is contained within the set. 1652 func (as *accountSet) contains(addr common.Address) bool { 1653 _, exist := as.accounts[addr] 1654 return exist 1655 } 1656 1657 func (as *accountSet) empty() bool { 1658 return len(as.accounts) == 0 1659 } 1660 1661 // containsTx checks if the sender of a given tx is within the set. If the sender 1662 // cannot be derived, this method returns false. 1663 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1664 if addr, err := types.Sender(as.signer, tx); err == nil { 1665 return as.contains(addr) 1666 } 1667 return false 1668 } 1669 1670 // add inserts a new address into the set to track. 1671 func (as *accountSet) add(addr common.Address) { 1672 as.accounts[addr] = struct{}{} 1673 if as.cache != nil { 1674 addrsPool.Put((*as.cache)[:0]) 1675 as.cache = nil 1676 } 1677 } 1678 1679 // addTx adds the sender of tx into the set. 1680 func (as *accountSet) addTx(tx *types.Transaction) { 1681 if addr, err := types.Sender(as.signer, tx); err == nil { 1682 as.add(addr) 1683 } 1684 } 1685 1686 // flatten returns the list of addresses within this set, also caching it for later 1687 // reuse. The returned slice should not be changed! 1688 func (as *accountSet) flatten() []common.Address { 1689 if as.cache == nil { 1690 accounts := addrsPool.Get().([]common.Address) 1691 for account := range as.accounts { 1692 accounts = append(accounts, account) 1693 } 1694 as.cache = &accounts 1695 } 1696 return *as.cache 1697 } 1698 1699 // merge adds all addresses from the 'other' set into 'as'. 1700 func (as *accountSet) merge(other *accountSet) { 1701 for addr := range other.accounts { 1702 as.accounts[addr] = struct{}{} 1703 } 1704 if as.cache != nil { 1705 addrsPool.Put((*as.cache)[:0]) 1706 as.cache = nil 1707 } 1708 } 1709 1710 // txLookup is used internally by TxPool to track transactions while allowing 1711 // lookup without mutex contention. 1712 // 1713 // Note, although this type is properly protected against concurrent access, it 1714 // is **not** a type that should ever be mutated or even exposed outside of the 1715 // transaction pool, since its internal state is tightly coupled with the pools 1716 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1717 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1718 // TxPool.mu mutex. 1719 // 1720 // This lookup set combines the notion of "local transactions", which is useful 1721 // to build upper-level structure. 1722 type txLookup struct { 1723 slots int 1724 lock sync.RWMutex 1725 locals map[common.Hash]*types.Transaction 1726 remotes map[common.Hash]*types.Transaction 1727 } 1728 1729 // newTxLookup returns a new txLookup structure. 1730 func newTxLookup() *txLookup { 1731 return &txLookup{ 1732 locals: make(map[common.Hash]*types.Transaction), 1733 remotes: make(map[common.Hash]*types.Transaction), 1734 } 1735 } 1736 1737 // Range calls f on each key and value present in the map. The callback passed 1738 // should return the indicator whether the iteration needs to be continued. 1739 // Callers need to specify which set (or both) to be iterated. 1740 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1741 t.lock.RLock() 1742 defer t.lock.RUnlock() 1743 1744 if local { 1745 for key, value := range t.locals { 1746 if !f(key, value, true) { 1747 return 1748 } 1749 } 1750 } 1751 if remote { 1752 for key, value := range t.remotes { 1753 if !f(key, value, false) { 1754 return 1755 } 1756 } 1757 } 1758 } 1759 1760 // Get returns a transaction if it exists in the lookup, or nil if not found. 1761 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1762 t.lock.RLock() 1763 defer t.lock.RUnlock() 1764 1765 if tx := t.locals[hash]; tx != nil { 1766 return tx 1767 } 1768 return t.remotes[hash] 1769 } 1770 1771 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1772 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { 1773 t.lock.RLock() 1774 defer t.lock.RUnlock() 1775 1776 return t.locals[hash] 1777 } 1778 1779 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1780 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { 1781 t.lock.RLock() 1782 defer t.lock.RUnlock() 1783 1784 return t.remotes[hash] 1785 } 1786 1787 // Count returns the current number of transactions in the lookup. 1788 func (t *txLookup) Count() int { 1789 t.lock.RLock() 1790 defer t.lock.RUnlock() 1791 1792 return len(t.locals) + len(t.remotes) 1793 } 1794 1795 // LocalCount returns the current number of local transactions in the lookup. 1796 func (t *txLookup) LocalCount() int { 1797 t.lock.RLock() 1798 defer t.lock.RUnlock() 1799 1800 return len(t.locals) 1801 } 1802 1803 // RemoteCount returns the current number of remote transactions in the lookup. 1804 func (t *txLookup) RemoteCount() int { 1805 t.lock.RLock() 1806 defer t.lock.RUnlock() 1807 1808 return len(t.remotes) 1809 } 1810 1811 // Slots returns the current number of slots used in the lookup. 1812 func (t *txLookup) Slots() int { 1813 t.lock.RLock() 1814 defer t.lock.RUnlock() 1815 1816 return t.slots 1817 } 1818 1819 // Add adds a transaction to the lookup. 1820 func (t *txLookup) Add(tx *types.Transaction, local bool) { 1821 t.lock.Lock() 1822 defer t.lock.Unlock() 1823 1824 t.slots += numSlots(tx) 1825 slotsGauge.Update(int64(t.slots)) 1826 1827 if local { 1828 t.locals[tx.Hash()] = tx 1829 } else { 1830 t.remotes[tx.Hash()] = tx 1831 } 1832 } 1833 1834 // Remove removes a transaction from the lookup. 1835 func (t *txLookup) Remove(hash common.Hash) { 1836 t.lock.Lock() 1837 defer t.lock.Unlock() 1838 1839 tx, ok := t.locals[hash] 1840 if !ok { 1841 tx, ok = t.remotes[hash] 1842 } 1843 if !ok { 1844 log.Error("No transaction found to be deleted", "hash", hash) 1845 return 1846 } 1847 t.slots -= numSlots(tx) 1848 slotsGauge.Update(int64(t.slots)) 1849 1850 delete(t.locals, hash) 1851 delete(t.remotes, hash) 1852 } 1853 1854 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1855 // set. The assumption is held the locals set is thread-safe to be used. 1856 func (t *txLookup) RemoteToLocals(locals *accountSet) int { 1857 t.lock.Lock() 1858 defer t.lock.Unlock() 1859 1860 var migrated int 1861 for hash, tx := range t.remotes { 1862 if locals.containsTx(tx) { 1863 t.locals[hash] = tx 1864 delete(t.remotes, hash) 1865 migrated += 1 1866 } 1867 } 1868 return migrated 1869 } 1870 1871 // RemotesBelowTip finds all remote transactions below the given tip threshold. 1872 func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { 1873 found := make(types.Transactions, 0, 128) 1874 t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { 1875 if tx.GasTipCapIntCmp(threshold) < 0 { 1876 found = append(found, tx) 1877 } 1878 return true 1879 }, false, true) // Only iterate remotes 1880 return found 1881 } 1882 1883 // numSlots calculates the number of slots needed for a single transaction. 1884 func numSlots(tx *types.Transaction) int { 1885 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1886 }