github.com/tacshi/go-ethereum@v0.0.0-20230616113857-84a434e20921/core/txpool/txpool.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package txpool

import (
	"container/heap"
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/tacshi/go-ethereum/common"
	"github.com/tacshi/go-ethereum/common/prque"
	"github.com/tacshi/go-ethereum/consensus/misc"
	"github.com/tacshi/go-ethereum/core"
	"github.com/tacshi/go-ethereum/core/state"
	"github.com/tacshi/go-ethereum/core/types"
	"github.com/tacshi/go-ethereum/event"
	"github.com/tacshi/go-ethereum/log"
	"github.com/tacshi/go-ethereum/metrics"
	"github.com/tacshi/go-ethereum/params"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * txSlotSize // 128KB
)
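
// Illustrative sketch (not part of the original file): pool capacity is
// accounted in txSlotSize-sized slots, rounding up, which is what numSlots
// (defined later in this package) computes. A hypothetical equivalent:
//
//	// slotsFor returns how many 32KiB slots a payload of n bytes occupies:
//	// a 1-byte transaction costs 1 slot, a txMaxSize one costs 4.
//	func slotsFor(n uint64) int {
//		return int((n + txSlotSize - 1) / txSlotSize)
//	}
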
var (
	// ErrAlreadyKnown is returned if the transaction is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")

	// ErrFutureReplacePending is returned if a future transaction replaces a pending
	// transaction. Future transactions should only be able to replace other future transactions.
	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")

	// ErrOverdraft is returned if a transaction would cause the sender's balance to go negative,
	// thus invalidating a potentially large number of transactions.
	ErrOverdraft = errors.New("transaction would cause overdraft")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)

	// throttleTxMeter counts how many transactions are rejected due to too many changes between
	// txpool reorgs.
	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
	// reorgDurationTimer measures how long a txpool reorg takes.
	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
	// that this number is pretty low, since txpool reorgs happen very frequently.
	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)

	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Header
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)

	SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}

// Config are the configuration parameters of the transaction pool.
type Config struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
}

// DefaultConfig contains the default configurations for the transaction
// pool.
var DefaultConfig = Config{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}
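
// Illustrative sketch (not part of the original file): a caller would
// typically copy DefaultConfig and tweak individual fields before handing the
// result to NewTxPool; sanitize (below) clamps anything unworkable back to
// the defaults. The chainConfig and chain values are assumed to exist in the
// caller:
//
//	cfg := DefaultConfig
//	cfg.PriceLimit = 2     // insist on a minimum tip of 2 wei
//	cfg.AccountQueue = 128 // allow more non-executable txs per account
//	pool := NewTxPool(cfg, chainConfig, chain)
//	defer pool.Stop()
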
// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (config *Config) sanitize() Config {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
		conf.PriceLimit = DefaultConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
		conf.PriceBump = DefaultConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
		conf.AccountSlots = DefaultConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
		conf.GlobalSlots = DefaultConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
		conf.AccountQueue = DefaultConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
		conf.GlobalQueue = DefaultConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
		conf.Lifetime = DefaultConfig.Lifetime
	}
	return conf
}

// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      Config
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.
	shanghai bool // Fork indicator whether we are in the Shanghai stage.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *noncer        // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transactions to exempt from eviction rules
	journal *journal    // Journal of local transactions to back up to disk

	pending map[common.Address]*list     // All currently processable transactions
	queue   map[common.Address]*list     // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *lookup                      // All transactions to allow lookups
	priced  *pricedList                  // All transactions sorted by price

	chainHeadCh     chan core.ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)

	changesSinceReorg int // A counter for how many drops we've performed in-between reorgs.
}

type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.LatestSigner(chainconfig),
		pending:         make(map[common.Address]*list),
		queue:           make(map[common.Address]*list),
		beats:           make(map[common.Address]time.Time),
		all:             newLookup(),
		chainHeadCh:     make(chan core.ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		initDoneCh:      make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling are enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	// Notify tests that the init phase is done
	close(pool.initDoneCh)
	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head, ev.Block.Header())
				head = ev.Block.Header()
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			pool.mu.RUnlock()
			stales := int(atomic.LoadInt64(&pool.priced.stales))

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
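
// Illustrative sketch (not part of the original file): consumers typically
// subscribe with a buffered channel and release the subscription when done.
// The pool variable is assumed to come from NewTxPool:
//
//	ch := make(chan core.NewTxsEvent, 128)
//	sub := pool.SubscribeNewTxsEvent(ch)
//	defer sub.Unsubscribe()
//	for ev := range ch {
//		log.Info("new executable transactions", "count", len(ev.Txs))
//	}
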
// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return new(big.Int).Set(pool.gasPrice)
}

// SetGasPrice updates the minimum price required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *TxPool) SetGasPrice(price *big.Int) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	old := pool.gasPrice
	pool.gasPrice = price
	// if the min miner fee increased, remove transactions below the new threshold
	if price.Cmp(old) > 0 {
		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
		drop := pool.all.RemotesBelowTip(price)
		for _, tx := range drop {
			pool.removeTx(tx.Hash(), false)
		}
		pool.priced.Removed(len(drop))
	}

	log.Info("Transaction pool price threshold updated", "price", price)
}

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (pool *TxPool) Nonce(addr common.Address) uint64 {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.pendingNonces.get(addr)
}

// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) Stats() (int, int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.stats()
}

// stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) stats() (int, int) {
	pending := 0
	for _, list := range pool.pending {
		pending += list.Len()
	}
	queued := 0
	for _, list := range pool.queue {
		queued += list.Len()
	}
	return pending, queued
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions, len(pool.pending))
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address]types.Transactions, len(pool.queue))
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}

// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	var pending types.Transactions
	if list, ok := pool.pending[addr]; ok {
		pending = list.Flatten()
	}
	var queued types.Transactions
	if list, ok := pool.queue[addr]; ok {
		queued = list.Flatten()
	}
	return pending, queued
}
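
// Illustrative sketch (not part of the original file): the Stats/Content/
// ContentFrom family is how RPC handlers inspect the pool. A hypothetical
// dump of one account's view, assuming pool and addr exist in the caller:
//
//	pending, queued := pool.Stats()
//	log.Info("pool size", "pending", pending, "queued", queued)
//	execTxs, futureTxs := pool.ContentFrom(addr)
//	for _, tx := range execTxs {
//		log.Info("executable", "nonce", tx.Nonce(), "hash", tx.Hash())
//	}
//	log.Info("parked for later", "count", len(futureTxs))
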
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
//
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		txs := list.Flatten()

		// If the miner requests tip enforcement, cap the lists now
		if enforceTips && !pool.locals.contains(addr) {
			for i, tx := range txs {
				if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
					txs = txs[:i]
					break
				}
			}
		}
		if len(txs) > 0 {
			pending[addr] = txs
		}
	}
	return pending
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Accept only legacy transactions until EIP-2718/2930 activates.
	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
		return core.ErrTxTypeNotSupported
	}
	// Reject dynamic fee transactions until EIP-1559 activates.
	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
		return core.ErrTxTypeNotSupported
	}
	// Reject transactions over defined size to prevent DOS attacks
	if tx.Size() > txMaxSize {
		return ErrOversizedData
	}
	// Check whether the init code size has been exceeded.
	if pool.shanghai && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
		return fmt.Errorf("%w: code size %v limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Sanity check for extremely large numbers
	if tx.GasFeeCap().BitLen() > 256 {
		return core.ErrFeeCapVeryHigh
	}
	if tx.GasTipCap().BitLen() > 256 {
		return core.ErrTipVeryHigh
	}
	// Ensure gasFeeCap is greater than or equal to gasTipCap.
	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
		return core.ErrTipAboveFeeCap
	}
	// Make sure the transaction is signed properly.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price or tip
	if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return core.ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	balance := pool.currentState.GetBalance(from)
	if balance.Cmp(tx.Cost()) < 0 {
		return core.ErrInsufficientFunds
	}

	// Verify that replacing transactions will not result in overdraft
	list := pool.pending[from]
	if list != nil { // Sender already has pending txs
		sum := new(big.Int).Add(tx.Cost(), list.totalcost)
		if repl := list.txs.Get(tx.Nonce()); repl != nil {
			// Deduct the cost of a transaction replaced by this
			sum.Sub(sum, repl.Cost())
		}
		if balance.Cmp(sum) < 0 {
			log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum)
			return ErrOverdraft
		}
	}

	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.shanghai)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return core.ErrIntrinsicGas
	}
	return nil
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// Set the local flag. If the transaction comes from a local source, or from
	// the network but the sender was previously marked as local, treat it as local.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}

	// already validated by this point
	from, _ := types.Sender(pool.signer, tx)

	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}

		// We're about to replace a transaction. The reorg does a more thorough
		// analysis of what to remove and how, but it runs async. We don't want to
		// do too many replacements between reorg-runs, so we cap the number of
		// replacements to 25% of the slots
		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
			throttleTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}

		// New transaction is better than our worst ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise if we can't make enough room for the new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case, we still can't make room for the new remote one.
		if !isLocal && !success {
			log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}

		// If the new transaction is a future transaction it should never churn pending transactions
		if pool.isFuture(from, tx) {
			var replacesPending bool
			for _, dropTx := range drop {
				dropSender, _ := types.Sender(pool.signer, dropTx)
				if list := pool.pending[dropSender]; list != nil && list.Overlaps(dropTx) {
					replacesPending = true
					break
				}
			}
			// Add all transactions back to the priced queue
			if replacesPending {
				for _, dropTx := range drop {
					heap.Push(&pool.priced.urgent, dropTx)
				}
				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
				return false, ErrFutureReplacePending
			}
		}

		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			dropped := pool.removeTx(tx.Hash(), false)
			pool.changesSinceReorg += dropped
		}
	}

	// Try to replace an existing transaction in the pending pool
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}
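
// Worked example of the price-bump rule enforced by list.Add above
// (illustrative sketch, not part of the original file): with the default
// PriceBump of 10, replacing a pending transaction requires the newcomer to
// improve both caps by at least 10%:
//
//	oldTip := big.NewInt(100_000_000_000) // 100 gwei
//	bump := new(big.Int).Div(new(big.Int).Mul(oldTip, big.NewInt(100+10)), big.NewInt(100))
//	// bump == 110 gwei: offering less than this for either the tip cap or
//	// the fee cap yields ErrReplaceUnderpriced.
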
// isFuture reports whether the given transaction would be queued as a future
// (non-executable) transaction rather than extending or replacing the sender's
// pending list.
func (pool *TxPool) isFuture(from common.Address, tx *types.Transaction) bool {
	list := pool.pending[from]
	if list == nil {
		return pool.pendingNonces.get(from) != tx.Nonce()
	}
	// Sender has pending transactions.
	if old := list.txs.Get(tx.Nonce()); old != nil {
		return false // It replaces a pending transaction.
	}
	// Not replacing, check if parent nonce exists in pending.
	return list.txs.Get(tx.Nonce()-1) == nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in the lookup set but it's expected to be there,
	// show the error log.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never recorded the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older one was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}

// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}

// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}

// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}
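
// Illustrative sketch (not part of the original file): RPC submission uses
// the synchronous local path, while p2p ingestion batches remotes. The tx
// and txs values are assumed to be signed transactions from the caller:
//
//	if err := pool.AddLocal(tx); err != nil {
//		log.Warn("local tx rejected", "err", err)
//	}
//	for i, err := range pool.AddRemotes(txs) {
//		if err != nil && err != ErrAlreadyKnown {
//			log.Trace("remote tx rejected", "index", i, "err", err)
//		}
//	}
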
// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = ErrAlreadyKnown
			knownTxMeter.Mark(1)
			continue
		}
		// Exclude transactions with invalid signatures as soon as
		// possible and cache senders in transactions before
		// obtaining lock
		_, err := types.Sender(pool.signer, tx)
		if err != nil {
			errs[i] = ErrInvalidSender
			invalidTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}

	// Process all the new transactions and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included into a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}

// Has returns an indicator whether txpool has a transaction cached with the
// given hash.
func (pool *TxPool) Has(hash common.Hash) bool {
	return pool.all.Get(hash) != nil
}
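
// Illustrative sketch (not part of the original file): a hypothetical status
// probe built on Status/Get/Has, assuming hash comes from the caller:
//
//	if pool.Has(hash) {
//		switch pool.Status([]common.Hash{hash})[0] {
//		case TxStatusPending:
//			log.Info("tx is executable from the current state")
//		case TxStatusQueued:
//			log.Info("tx is parked until a nonce gap fills")
//		default:
//			log.Info("tx was dropped or mined since the lookup")
//		}
//	}
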
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
// Returns the number of transactions removed from the pending queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return 0
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return 1 + len(invalids)
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return 0
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}
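
// Both request helpers above share a handshake worth spelling out
// (illustrative note, not part of the original file): the request is sent to
// scheduleReorgLoop, which replies with the done channel of the next reorg
// run; a caller that needs synchronous behaviour simply blocks on it, exactly
// as addTxs does when sync is true. Here dirtyAddrs is assumed to be an
// *accountSet held by the caller:
//
//	done := pool.requestPromoteExecutables(dirtyAddrs)
//	<-done // promotion (and event delivery) has now happened
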
// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*sortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*sortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) {
	defer func(t0 time.Time) {
		reorgDurationTimer.Update(time.Since(t0))
	}(time.Now())
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
			pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
			pool.priced.SetBaseFee(pendingBaseFee)
		}
		// Update all accounts to the latest known pending nonce
		nonces := make(map[common.Address]uint64, len(pool.pending))
		for addr, list := range pool.pending {
			highestPending := list.LastElement()
			nonces[addr] = highestPending.Nonce() + 1
		}
		pool.pendingNonces.setAll(nonces)
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
	pool.changesSinceReorg = 0 // Reset change counter
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions anymore, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state such that the lost transactions can be re-added by the user
			} else {
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	core.SenderCacher.Recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicators by the next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
	pool.eip2718 = pool.chainconfig.IsBerlin(next)
	pool.eip1559 = pool.chainconfig.IsLondon(next)
	pool.shanghai = pool.chainconfig.IsShanghai(uint64(time.Now().Unix()), types.DeserializeHeaderExtraInformation(newHead).ArbOSFormatVersion)
}
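
// Worked example for the shallow-reorg bookkeeping in reset above
// (illustrative, not part of the original file): if the abandoned chain
// segment contained transactions {A, B, C} and the newly adopted segment
// contains {B, D}, then discarded = {A, B, C}, included = {B, D}, and
// types.TxDifference(discarded, included) = {A, C}. Only A and C are
// reinjected; B and D are mined on the new chain and must not re-enter the pool.
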
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New[int64, common.Address](nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender)

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}
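
// Worked example for the equalization above (illustrative, not part of the
// original file): with GlobalSlots = 4096 and three non-local senders holding
// 2000, 1500 and 1200 pending txs (4700 total), the largest offender is first
// capped down to the second's size (2000 -> 1500, pool at 4200); the top two
// then shed one tx each per round toward 1200, stopping after 52 rounds when
// the pool reaches 4096 (1500 -> 1448 each), so the final min-allowance phase
// never runs in this scenario.
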
// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(sort.Reverse(addresses))

	// Drop transactions until the total is below the limit or only locals remain
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all of the account's transactions if there are fewer of them than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only the last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}
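// The eviction order above, seen in isolation: addresses are sorted with the
// most recent heartbeat first and then consumed from the tail, so the least
// recently active account loses its queued transactions first. An illustrative
// sketch (not used by the pool) over the addressesByHeartbeat type defined below:
func exampleEvictionOrder(beats map[common.Address]time.Time) []common.Address {
	addresses := make(addressesByHeartbeat, 0, len(beats))
	for addr, beat := range beats {
		addresses = append(addresses, addressByHeartbeat{addr, beat})
	}
	sort.Sort(sort.Reverse(addresses)) // newest heartbeat first
	victims := make([]common.Address, 0, len(addresses))
	for len(addresses) > 0 {
		victims = append(victims, addresses[len(addresses)-1].address) // stalest account first
		addresses = addresses[:len(addresses)-1]
	}
	return victims
}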
// demoteUnexecutables removes invalid and processed transactions from the pool's
// executable/pending queue, and any subsequent transactions that become
// unexecutable are moved back into the future queue.
//
// Note: transactions are not marked as removed in the priced list because re-heaping
// is always explicitly triggered by SetBaseFee, and it would be unnecessary and
// wasteful to trigger a re-heap in this function.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			pendingGauge.Dec(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}, len(addrs)),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		return as.contains(addr)
	}
	return false
}

// add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) {
	as.accounts[addr] = struct{}{}
	as.cache = nil
}

// addTx adds the sender of tx into the set.
func (as *accountSet) addTx(tx *types.Transaction) {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		as.add(addr)
	}
}

// flatten returns the list of addresses within this set, also caching it for later
// reuse. The returned slice should not be changed!
func (as *accountSet) flatten() []common.Address {
	if as.cache == nil {
		accounts := make([]common.Address, 0, len(as.accounts))
		for account := range as.accounts {
			accounts = append(accounts, account)
		}
		as.cache = &accounts
	}
	return *as.cache
}

// merge adds all addresses from the 'other' set into 'as'.
func (as *accountSet) merge(other *accountSet) {
	for addr := range other.accounts {
		as.accounts[addr] = struct{}{}
	}
	as.cache = nil
}
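// accountSet usage in a nutshell (illustrative sketch; the addresses are
// hypothetical): membership is keyed purely on the address, containsTx
// piggybacks on sender recovery through the configured signer, and any
// mutation invalidates the flatten cache.
func exampleAccountSet(signer types.Signer, alice, bob common.Address) []common.Address {
	as := newAccountSet(signer, alice)
	_ = as.contains(alice) // true: seeded at construction
	_ = as.contains(bob)   // false: never added
	as.add(bob)            // drops any cached flatten result
	return as.flatten()    // both addresses, in undefined map order
}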
// lookup is used internally by TxPool to track transactions while allowing
// lookup without mutex contention.
//
// Note, although this type is properly protected against concurrent access, it
// is **not** a type that should ever be mutated or even exposed outside of the
// transaction pool, since its internal state is tightly coupled with the pool's
// internal mechanisms. The sole purpose of the type is to permit out-of-bound
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
//
// This lookup set also tracks the notion of "local transactions", which is useful
// for building upper-level structures.
type lookup struct {
	slots   int
	lock    sync.RWMutex
	locals  map[common.Hash]*types.Transaction
	remotes map[common.Hash]*types.Transaction
}

// newLookup returns a new lookup structure.
func newLookup() *lookup {
	return &lookup{
		locals:  make(map[common.Hash]*types.Transaction),
		remotes: make(map[common.Hash]*types.Transaction),
	}
}

// Range calls f on each key and value present in the map. The callback should
// return whether the iteration should continue. Callers need to specify which
// set (or both) to iterate over.
func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if local {
		for key, value := range t.locals {
			if !f(key, value, true) {
				return
			}
		}
	}
	if remote {
		for key, value := range t.remotes {
			if !f(key, value, false) {
				return
			}
		}
	}
}

// Get returns a transaction if it exists in the lookup, or nil if not found.
func (t *lookup) Get(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if tx := t.locals[hash]; tx != nil {
		return tx
	}
	return t.remotes[hash]
}

// GetLocal returns a transaction if it exists in the local set, or nil if not found.
func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.locals[hash]
}

// GetRemote returns a transaction if it exists in the remote set, or nil if not found.
func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.remotes[hash]
}

// Count returns the current number of transactions in the lookup.
func (t *lookup) Count() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.locals) + len(t.remotes)
}

// LocalCount returns the current number of local transactions in the lookup.
func (t *lookup) LocalCount() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.locals)
}

// RemoteCount returns the current number of remote transactions in the lookup.
func (t *lookup) RemoteCount() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.remotes)
}

// Slots returns the current number of slots used in the lookup.
func (t *lookup) Slots() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.slots
}

// Add adds a transaction to the lookup.
func (t *lookup) Add(tx *types.Transaction, local bool) {
	t.lock.Lock()
	defer t.lock.Unlock()

	t.slots += numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	if local {
		t.locals[tx.Hash()] = tx
	} else {
		t.remotes[tx.Hash()] = tx
	}
}
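// Range usage sketch (illustrative only): count remote transactions at or
// above a nonce without ever taking the pool-wide TxPool.mu mutex, which is
// exactly the kind of out-of-band peeking the lookup type exists for.
func exampleLookupRange(t *lookup, minNonce uint64) int {
	var n int
	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		if tx.Nonce() >= minNonce {
			n++
		}
		return true // keep iterating
	}, false, true) // skip locals, walk remotes
	return n
}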
// Remove removes a transaction from the lookup.
func (t *lookup) Remove(hash common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	tx, ok := t.locals[hash]
	if !ok {
		tx, ok = t.remotes[hash]
	}
	if !ok {
		log.Error("No transaction found to be deleted", "hash", hash)
		return
	}
	t.slots -= numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	delete(t.locals, hash)
	delete(t.remotes, hash)
}

// RemoteToLocals migrates the transactions belonging to the given locals into
// the locals set. The assumption is that the locals set is thread-safe to use.
func (t *lookup) RemoteToLocals(locals *accountSet) int {
	t.lock.Lock()
	defer t.lock.Unlock()

	var migrated int
	for hash, tx := range t.remotes {
		if locals.containsTx(tx) {
			t.locals[hash] = tx
			delete(t.remotes, hash)
			migrated++
		}
	}
	return migrated
}

// RemotesBelowTip finds all remote transactions below the given tip threshold.
func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
	found := make(types.Transactions, 0, 128)
	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		if tx.GasTipCapIntCmp(threshold) < 0 {
			found = append(found, tx)
		}
		return true
	}, false, true) // Only iterate remotes
	return found
}

// numSlots calculates the number of slots needed for a single transaction.
func numSlots(tx *types.Transaction) int {
	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}
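// The slot computation above is a plain ceiling division: anything up to
// txSlotSize (32KB) occupies one slot, and a transaction at txMaxSize (128KB)
// occupies exactly four. The sketch below works through two representative
// sizes (the byte counts are assumed, illustrative values):
func exampleSlotUsage() (small, large int) {
	small = int((200 + txSlotSize - 1) / txSlotSize)    // = 1: a typical small transfer
	large = int((102400 + txSlotSize - 1) / txSlotSize) // = 4: a 100KB payload
	return small, large
}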