github.com/dim4egster/coreth@v0.10.2/core/tx_pool.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/dim4egster/coreth/consensus/dummy"
	"github.com/dim4egster/coreth/core/state"
	"github.com/dim4egster/coreth/core/types"
	"github.com/dim4egster/coreth/metrics"
	"github.com/dim4egster/coreth/params"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	//
	// Note: the max contract size is 24KB
	txMaxSize = 32 * 1024 // 32 KB
)

var (
	// ErrAlreadyKnown is returned if the transaction is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)

var (
	evictionInterval      = time.Minute      // Time interval to check for evictable transactions
	statsReportInterval   = 8 * time.Second  // Time interval to report transaction pool stats
	baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after Apricot Phase 3 is enabled
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
	// txpool reorgs.
	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
	// reorgDurationTimer measures how long a txpool reorg takes.
	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
	// that this number is pretty low, since txpool reorgs happen very frequently.
	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)

	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)
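
// The "replacement transaction underpriced" rule above is parameterized by
// TxPoolConfig.PriceBump (defined below). A minimal illustrative sketch of the
// threshold arithmetic, added here for documentation only; the actual check
// lives in txList.Add: with PriceBump = 10, a pending transaction with a
// 100 wei tip cap is only displaced by a replacement bidding at least 110 wei.
func requiredReplacementBump(old *big.Int, priceBump uint64) *big.Int {
	// threshold = old * (100 + priceBump) / 100, applied to both the fee cap
	// and the tip cap of the transaction being replaced.
	threshold := new(big.Int).Mul(old, big.NewInt(100+int64(priceBump)))
	return threshold.Div(threshold, big.NewInt(100))
}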
// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)
	SenderCacher() *TxSenderCacher

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
}

// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}
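
// Illustrative sketch of customizing the defaults above; the field values are
// arbitrary examples, not recommendations. Out-of-range values are corrected
// by sanitize below, so a partially filled config is safe to use.
func exampleCustomConfig() TxPoolConfig {
	cfg := DefaultTxPoolConfig
	cfg.PriceLimit = 25_000_000_000 // raise the minimum accepted gas price (example value)
	cfg.Lifetime = time.Hour        // evict non-executable transactions sooner
	return cfg.sanitize()
}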
// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}

// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	minimumFee  *big.Int
	txFeed      event.Feed
	headFeed    event.Feed
	reorgFeed   event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.

	currentHead *types.Header
	// [currentState] is the state of the blockchain head. It is reset whenever
	// head changes.
	currentState *state.StateDB
	// [currentStateLock] is required to allow concurrent access to address nonces
	// and balances during reorgs and gossip handling.
	currentStateLock sync.Mutex

	pendingNonces *txNoncer // Pending state tracking virtual nonces
	currentMaxGas uint64    // Current gas limit for transaction caps

	locals  *accountSet // Set of local transactions to exempt from eviction rules
	journal *txJournal  // Journal of local transactions to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh         chan ChainHeadEvent
	chainHeadSub        event.Subscription
	reqResetCh          chan *txpoolResetRequest
	reqPromoteCh        chan *accountSet
	queueTxEventCh      chan *types.Transaction
	reorgDoneCh         chan chan struct{}
	reorgShutdownCh     chan struct{} // requests shutdown of scheduleReorgLoop
	generalShutdownChan chan struct{} // closed when the transaction pool is stopped. Any goroutine can
	// listen to this to be notified if it should shut down.
	wg         sync.WaitGroup // tracks loop, scheduleReorgLoop
	initDoneCh chan struct{}  // is closed once the pool is initialized (for tests)

	changesSinceReorg int // A counter for how many drops we've performed in-between reorgs.
}

type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:              config,
		chainconfig:         chainconfig,
		chain:               chain,
		signer:              types.LatestSigner(chainconfig),
		pending:             make(map[common.Address]*txList),
		queue:               make(map[common.Address]*txList),
		beats:               make(map[common.Address]time.Time),
		all:                 newTxLookup(),
		chainHeadCh:         make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:          make(chan *txpoolResetRequest),
		reqPromoteCh:        make(chan *accountSet),
		queueTxEventCh:      make(chan *types.Transaction),
		reorgDoneCh:         make(chan chan struct{}),
		reorgShutdownCh:     make(chan struct{}),
		initDoneCh:          make(chan struct{}),
		generalShutdownChan: make(chan struct{}),
		gasPrice:            new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	pool.startPeriodicFeeUpdate()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	// Notify tests that the init phase is done
	close(pool.initDoneCh)
	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
				pool.headFeed.Send(NewTxPoolHeadEvent{Block: head})
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			pool.mu.RUnlock()
			stales := int(atomic.LoadInt64(&pool.priced.stales))

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	close(pool.generalShutdownChan)
	// Unsubscribe subscriptions registered from blockchain
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
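
// Illustrative sketch (not part of the original pool API) of consuming the
// feed above: subscribe with a buffered channel and drain it until the
// subscription errors out. The buffer size of 16 is an arbitrary choice.
func examplePoolSubscription(pool *TxPool) {
	txsCh := make(chan NewTxsEvent, 16)
	sub := pool.SubscribeNewTxsEvent(txsCh)
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-txsCh:
			log.Info("new executable transactions", "count", len(ev.Txs))
		case <-sub.Err():
			return
		}
	}
}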
// SubscribeNewHeadEvent registers a subscription of NewTxPoolHeadEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- NewTxPoolHeadEvent) event.Subscription {
	return pool.scope.Track(pool.headFeed.Subscribe(ch))
}

// SubscribeNewReorgEvent registers a subscription of NewTxPoolReorgEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- NewTxPoolReorgEvent) event.Subscription {
	return pool.scope.Track(pool.reorgFeed.Subscribe(ch))
}

// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return new(big.Int).Set(pool.gasPrice)
}

// SetGasPrice updates the minimum price required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *TxPool) SetGasPrice(price *big.Int) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	old := pool.gasPrice
	pool.gasPrice = price
	// if the min miner fee increased, remove transactions below the new threshold
	if price.Cmp(old) > 0 {
		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
		drop := pool.all.RemotesBelowTip(price)
		for _, tx := range drop {
			pool.removeTx(tx.Hash(), false)
		}
		pool.priced.Removed(len(drop))
	}

	log.Info("Transaction pool price threshold updated", "price", price)
}

func (pool *TxPool) SetMinFee(minFee *big.Int) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pool.minimumFee = minFee
}

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (pool *TxPool) Nonce(addr common.Address) uint64 {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.pendingNonces.get(addr)
}

// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) Stats() (int, int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.stats()
}

// stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) stats() (int, int) {
	pending := 0
	for _, list := range pool.pending {
		pending += list.Len()
	}
	queued := 0
	for _, list := range pool.queue {
		queued += list.Len()
	}
	return pending, queued
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address]types.Transactions)
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}
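
// Illustrative sketch: a read-only monitor built from the accessors above.
// The 5-second interval and log fields are arbitrary example choices.
func examplePoolMonitor(pool *TxPool, quit <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			pending, queued := pool.Stats()
			log.Info("txpool size", "pending", pending, "queued", queued, "minGasPrice", pool.GasPrice())
		case <-quit:
			return
		}
	}
}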
// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	var pending types.Transactions
	if list, ok := pool.pending[addr]; ok {
		pending = list.Flatten()
	}
	var queued types.Transactions
	if list, ok := pool.queue[addr]; ok {
		queued = list.Flatten()
	}
	return pending, queued
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
//
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		txs := list.Flatten()

		// If the miner requests tip enforcement, cap the lists now
		if enforceTips && !pool.locals.contains(addr) {
			for i, tx := range txs {
				if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
					txs = txs[:i]
					break
				}
			}
		}
		if len(txs) > 0 {
			pending[addr] = txs
		}
	}
	return pending
}

// PendingSize returns the number of pending txs in the tx pool.
func (pool *TxPool) PendingSize() int {
	pending := pool.Pending(true)
	count := 0
	for _, txs := range pending {
		count += len(txs)
	}
	return count
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// checkTxState checks transaction validity against the current state.
func (pool *TxPool) checkTxState(from common.Address, tx *types.Transaction) error {
	pool.currentStateLock.Lock()
	defer pool.currentStateLock.Unlock()

	// cost == V + GP * GL
	if balance, cost := pool.currentState.GetBalance(from), tx.Cost(); balance.Cmp(cost) < 0 {
		return fmt.Errorf("%w: address %s have (%d) want (%d)", ErrInsufficientFunds, from.Hex(), balance, cost)
	}

	txNonce := tx.Nonce()
	// Ensure the transaction adheres to nonce ordering
	if currentNonce := pool.currentState.GetNonce(from); currentNonce > txNonce {
		return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)",
			ErrNonceTooLow, from.Hex(), currentNonce, txNonce)
	}
	return nil
}
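
// Worked example of the balance check above, with illustrative numbers: a
// transfer of 1e18 wei with a 21000 gas limit and a 100 gwei fee cap must be
// backed by 1e18 + 21000*100e9 wei, i.e. the sender prepays the worst-case
// gas burn on top of the transferred value. This mirrors what tx.Cost()
// computes for a dynamic fee transaction.
func exampleWorstCaseCost() *big.Int {
	value := new(big.Int).SetUint64(1_000_000_000_000_000_000) // 1e18 wei
	feeCap := new(big.Int).SetUint64(100_000_000_000)          // 100 gwei
	gas := new(big.Int).SetUint64(21000)
	return new(big.Int).Add(value, new(big.Int).Mul(feeCap, gas))
}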
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Accept only legacy transactions until EIP-2718/2930 activates.
	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
		return ErrTxTypeNotSupported
	}
	// Reject dynamic fee transactions until EIP-1559 activates.
	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
		return ErrTxTypeNotSupported
	}
	// Reject transactions over defined size to prevent DOS attacks
	if txSize := uint64(tx.Size()); txSize > txMaxSize {
		return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, txSize, txMaxSize)
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if txGas := tx.Gas(); pool.currentMaxGas < txGas {
		return fmt.Errorf("%w: tx gas (%d) > current max gas (%d)", ErrGasLimit, txGas, pool.currentMaxGas)
	}
	// Sanity check for extremely large numbers
	if tx.GasFeeCap().BitLen() > 256 {
		return ErrFeeCapVeryHigh
	}
	if tx.GasTipCap().BitLen() > 256 {
		return ErrTipVeryHigh
	}
	// Ensure gasFeeCap is greater than or equal to gasTipCap.
	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
		return ErrTipAboveFeeCap
	}
	// Make sure the transaction is signed properly.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price or tip
	if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
		return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice)
	}
	// Drop the transaction if the gas fee cap is below the pool's minimum fee
	if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 {
		return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee)
	}

	// Ensure the transaction adheres to nonce ordering and that the transactor
	// has enough funds to cover the costs
	if err := pool.checkTxState(from, tx); err != nil {
		return err
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if txGas := tx.Gas(); txGas < intrGas {
		return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", ErrIntrinsicGas, from.Hex(), txGas, intrGas)
	}
	return nil
}
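
// Illustrative sketch of a transaction shaped to pass the static checks in
// validateTx: fee cap >= tip cap, gas at the intrinsic cost of a plain
// transfer, size far below txMaxSize. All values are examples, and the
// transaction still needs to be signed before types.Sender will accept it.
func exampleWellFormedTx(nonce uint64, to common.Address, chainID *big.Int) *types.Transaction {
	return types.NewTx(&types.DynamicFeeTx{
		ChainID:   chainID,
		Nonce:     nonce,
		GasTipCap: big.NewInt(2_000_000_000),   // 2 gwei tip
		GasFeeCap: big.NewInt(100_000_000_000), // 100 gwei cap, >= tip cap
		Gas:       21000,                       // intrinsic gas of a simple transfer
		To:        &to,
		Value:     big.NewInt(0),
	})
}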
// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// Make the local flag. If it's from a local source, or it's from the network but
	// the sender was previously marked as local, treat it as a local transaction.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// We're about to replace a transaction. The reorg does a more thorough
		// analysis of what to remove and how, but it runs async. We don't want to
		// do too many replacements between reorg-runs, so we cap the number of
		// replacements to 25% of the slots
		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
			throttleTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}

		// New transaction is better than our worst ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise if we can't make enough room for the new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case: we still can't make room for the new remote one.
		if !isLocal && !success {
			log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}
		// Bump the counter of rejections-since-reorg
		pool.changesSinceReorg += len(drop)
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local for the first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in the lookup set but it's expected to be there,
	// show the error log.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never recorded the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}
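
// Worked example of the slot accounting referenced in add above (illustrative
// helper; numSlots elsewhere in this package is assumed to implement the same
// ceiling division): with 32 KiB slots, a 200-byte transfer occupies one slot,
// while a transaction just over 32 KiB would need two (and is rejected by the
// txMaxSize check before slots ever matter).
func exampleSlotCount(txSize uint64) int {
	return int((txSize + txSlotSize - 1) / txSlotSize)
}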
// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}

// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}

// addRemoteSync is like AddRemotes with a single transaction, but waits for pool
// reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}
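
// Illustrative sketch of the submission path used by RPC-style callers:
// AddLocal validates, queues and journals the transaction, and the returned
// error distinguishes a harmless duplicate from a real rejection. signedTx is
// assumed to be fully signed; the signing step is elided.
func exampleSubmit(pool *TxPool, signedTx *types.Transaction) error {
	if err := pool.AddLocal(signedTx); err != nil && !errors.Is(err, ErrAlreadyKnown) {
		return fmt.Errorf("txpool rejected transaction: %w", err)
	}
	return nil
}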
// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}

// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = ErrAlreadyKnown
			knownTxMeter.Mark(1)
			continue
		}
		// Exclude transactions with invalid signatures as soon as
		// possible and cache senders in transactions before
		// obtaining lock
		_, err := types.Sender(pool.signer, tx)
		if err != nil {
			errs[i] = ErrInvalidSender
			invalidTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}

	// Process all the new transactions and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	nilSlot := 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included into a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}
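
// Illustrative sketch: polling the fate of previously submitted hashes via
// Status above. TxStatusUnknown covers dropped, never-seen and already-mined
// transactions alike.
func exampleTrackHashes(pool *TxPool, hashes []common.Hash) {
	for i, st := range pool.Status(hashes) {
		switch st {
		case TxStatusPending:
			log.Debug("tx executable", "hash", hashes[i])
		case TxStatusQueued:
			log.Debug("tx waiting on a nonce gap or funds", "hash", hashes[i])
		default:
			log.Debug("tx not in pool", "hash", hashes[i])
		}
	}
}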
// Has returns an indicator whether txpool has a transaction cached with the
// given hash.
func (pool *TxPool) Has(hash common.Hash) bool {
	return pool.all.Get(hash) != nil
}

// HasLocal returns an indicator whether txpool has a local transaction cached with
// the given hash.
func (pool *TxPool) HasLocal(hash common.Hash) bool {
	return pool.all.GetLocal(hash) != nil
}

func (pool *TxPool) RemoveTx(hash common.Hash) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pool.removeTx(hash, true)
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}
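
// Both request helpers above follow the same hand-off pattern: the request is
// sent to scheduleReorgLoop, which replies with the done channel of the reorg
// run that will service it. A caller that needs synchronous behavior simply
// blocks on that channel (this is what addTxs does when sync is true):
//
//	done := pool.requestPromoteExecutables(dirty)
//	<-done // closed once runReorg has processed the dirty accounts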
// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer func(t0 time.Time) {
		reorgDurationTimer.Update(time.Since(t0))
	}(time.Now())
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		if reset.newHead != nil && pool.chainconfig.IsApricotPhase3(new(big.Int).SetUint64(reset.newHead.Time)) {
			_, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, reset.newHead, uint64(time.Now().Unix()))
			if err == nil {
				pool.priced.SetBaseFee(baseFeeEstimate)
			}
		}

		// Update all accounts to the latest known pending nonce
		nonces := make(map[common.Address]uint64, len(pool.pending))
		for addr, list := range pool.pending {
			highestPending := list.LastElement()
			nonces[addr] = highestPending.Nonce() + 1
		}
		pool.pendingNonces.setAll(nonces)
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
	pool.changesSinceReorg = 0 // Reset change counter
	pool.mu.Unlock()

	if reset != nil && reset.newHead != nil {
		pool.reorgFeed.Send(NewTxPoolReorgEvent{reset.newHead})
	}

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state so that
				// the lost transactions can be re-added by the user.
			} else {
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
		return
	}
	pool.currentHead = newHead
	pool.currentStateLock.Lock()
	pool.currentState = statedb
	pool.currentStateLock.Unlock()
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	pool.chain.SenderCacher().Recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicators by the next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)

	timestamp := new(big.Int).SetUint64(newHead.Time)
	pool.eip2718 = pool.chainconfig.IsApricotPhase2(timestamp)
	pool.eip1559 = pool.chainconfig.IsApricotPhase3(timestamp)
}
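
// Worked example of the reinjection set computed in reset above: if the
// abandoned side of a fork carried transactions {A, B, C} and the newly
// canonical side carried {B, D}, types.TxDifference(discarded, included)
// yields {A, C}; B is already mined and D was never lost. An illustrative
// hash-set difference equivalent to that call:
func exampleTxDifference(discarded, included types.Transactions) types.Transactions {
	seen := make(map[common.Hash]struct{}, len(included))
	for _, tx := range included {
		seen[tx.Hash()] = struct{}{}
	}
	keep := make(types.Transactions, 0, len(discarded))
	for _, tx := range discarded {
		if _, ok := seen[tx.Hash()]; !ok {
			keep = append(keep, tx)
		}
	}
	return keep
}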
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	pool.currentStateLock.Lock()
	defer pool.currentStateLock.Unlock()

	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non-existent account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}
// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not a local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all are the same or below the threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}
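// A worked example of the equalization above, with illustrative numbers only:
// with GlobalSlots = 10, AccountSlots = 2, and three non-local offenders
// holding 8, 6 and 4 pending transactions (18 total), the loop pops the worst
// offender first, trims the 8-account down toward 6, then trims both leaders
// toward 4, and only if the pool is still over the limit does the final phase
// shave all offenders in lock-step until the total reaches 10 or every
// offender is down to AccountSlots.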
// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(sort.Reverse(addresses))

	// Drop transactions until the total is below the limit or only locals remain
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only the last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the pool's
// executable/pending queue, and any subsequent transactions that become
// unexecutable are moved back into the future queue.
//
// Note: transactions are not marked as removed in the priced list because re-heaping
// is always explicitly triggered by SetBaseFee, and it would be unnecessary and wasteful
// to trigger a re-heap in this function.
func (pool *TxPool) demoteUnexecutables() {
	pool.currentStateLock.Lock()
	defer pool.currentStateLock.Unlock()

	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
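			// The two boolean arguments below are (local, addAll), assuming
			// enqueueTx keeps upstream go-ethereum's signature: the demoted
			// transaction is re-queued as non-local and, since it never left
			// the pool, it is not re-added to the global lookup set.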
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			// This might happen in a reorg, so log it to the metering
			pendingGauge.Dec(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

func (pool *TxPool) startPeriodicFeeUpdate() {
	if pool.chainconfig.ApricotPhase3BlockTimestamp == nil {
		return
	}

	// Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
	// when starting up in ApricotPhase3 before the base fee is updated.
	if time.Now().After(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0)) {
		pool.updateBaseFee()
	}

	pool.wg.Add(1)
	go pool.periodicBaseFeeUpdate()
}

func (pool *TxPool) periodicBaseFeeUpdate() {
	defer pool.wg.Done()

	// Sleep until it's time to start the periodic base fee update or the tx pool is shutting down
	select {
	case <-time.After(time.Until(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0))):
	case <-pool.generalShutdownChan:
		return // Return early if shutting down
	}

	// Update the base fee every [baseFeeUpdateInterval]
	// and shutdown when [generalShutdownChan] is closed by Stop()
	for {
		select {
		case <-time.After(baseFeeUpdateInterval):
			pool.updateBaseFee()
		case <-pool.generalShutdownChan:
			return
		}
	}
}

func (pool *TxPool) updateBaseFee() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	_, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, pool.currentHead, uint64(time.Now().Unix()))
	if err == nil {
		pool.priced.SetBaseFee(baseFeeEstimate)
	} else {
		log.Error("failed to update base fee", "currentHead", pool.currentHead.Hash(), "err", err)
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
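// For illustration: addressesByHeartbeat sorts ascending by heartbeat, so the
// sort.Sort(sort.Reverse(addresses)) call in truncateQueue leaves the most
// recently active accounts at the front and the stalest at the tail, which is
// why eviction pops from the end of the slice. A minimal sketch with
// hypothetical values:
//
//	addrs := addressesByHeartbeat{
//		{addrA, time.Unix(200, 0)},
//		{addrB, time.Unix(100, 0)},
//	}
//	sort.Sort(sort.Reverse(addrs))
//	// addrs[len(addrs)-1] is now {addrB, 100}: the least recently active
//	// account, and the first to have its queue truncated.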
// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		return as.contains(addr)
	}
	return false
}

// add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) {
	as.accounts[addr] = struct{}{}
	as.cache = nil
}

// addTx adds the sender of tx into the set.
func (as *accountSet) addTx(tx *types.Transaction) {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		as.add(addr)
	}
}

// flatten returns the list of addresses within this set, also caching it for later
// reuse. The returned slice should not be changed!
func (as *accountSet) flatten() []common.Address {
	if as.cache == nil {
		accounts := make([]common.Address, 0, len(as.accounts))
		for account := range as.accounts {
			accounts = append(accounts, account)
		}
		as.cache = &accounts
	}
	return *as.cache
}

// merge adds all addresses from the 'other' set into 'as'.
func (as *accountSet) merge(other *accountSet) {
	for addr := range other.accounts {
		as.accounts[addr] = struct{}{}
	}
	as.cache = nil
}
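// A minimal sketch of the flatten caching behaviour above, with hypothetical
// addresses addrA and addrB:
//
//	as := newAccountSet(signer)
//	as.add(addrA)
//	_ = as.flatten() // builds and caches the []common.Address slice
//	as.add(addrB)    // add resets as.cache to nil
//	_ = as.flatten() // rebuilds the cache with both addresses
//
// This is why callers must not mutate the returned slice: it is shared between
// calls until the next add or merge invalidates it.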
// txLookup is used internally by TxPool to track transactions while allowing
// lookup without mutex contention.
//
// Note, although this type is properly protected against concurrent access, it
// is **not** a type that should ever be mutated or even exposed outside of the
// transaction pool, since its internal state is tightly coupled with the pool's
// internal mechanisms. The sole purpose of the type is to permit out-of-bound
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
//
// This lookup set also tracks the notion of "local transactions", which is
// useful for building upper-level structures.
type txLookup struct {
	slots   int
	lock    sync.RWMutex
	locals  map[common.Hash]*types.Transaction
	remotes map[common.Hash]*types.Transaction
}

// newTxLookup returns a new txLookup structure.
func newTxLookup() *txLookup {
	return &txLookup{
		locals:  make(map[common.Hash]*types.Transaction),
		remotes: make(map[common.Hash]*types.Transaction),
	}
}

// Range calls f on each key and value present in the map. The callback passed in
// should return whether the iteration should continue. Callers need to specify
// which set (or both) to iterate over.
func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if local {
		for key, value := range t.locals {
			if !f(key, value, true) {
				return
			}
		}
	}
	if remote {
		for key, value := range t.remotes {
			if !f(key, value, false) {
				return
			}
		}
	}
}

// Get returns a transaction if it exists in the lookup, or nil if not found.
func (t *txLookup) Get(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	if tx := t.locals[hash]; tx != nil {
		return tx
	}
	return t.remotes[hash]
}

// GetLocal returns a local transaction if it exists in the lookup, or nil if not found.
func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.locals[hash]
}

// GetRemote returns a remote transaction if it exists in the lookup, or nil if not found.
func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.remotes[hash]
}

// Count returns the current number of transactions in the lookup.
func (t *txLookup) Count() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.locals) + len(t.remotes)
}

// LocalCount returns the current number of local transactions in the lookup.
func (t *txLookup) LocalCount() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.locals)
}

// RemoteCount returns the current number of remote transactions in the lookup.
func (t *txLookup) RemoteCount() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.remotes)
}

// Slots returns the current number of slots used in the lookup.
func (t *txLookup) Slots() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.slots
}

// Add adds a transaction to the lookup.
func (t *txLookup) Add(tx *types.Transaction, local bool) {
	t.lock.Lock()
	defer t.lock.Unlock()

	t.slots += numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	if local {
		t.locals[tx.Hash()] = tx
	} else {
		t.remotes[tx.Hash()] = tx
	}
}

// Remove removes a transaction from the lookup.
func (t *txLookup) Remove(hash common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	tx, ok := t.locals[hash]
	if !ok {
		tx, ok = t.remotes[hash]
	}
	if !ok {
		log.Error("No transaction found to be deleted", "hash", hash)
		return
	}
	t.slots -= numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	delete(t.locals, hash)
	delete(t.remotes, hash)
}

// RemoteToLocals migrates the transactions that belong to the given locals into
// the locals set. The assumption is that the locals set is thread-safe to use.
func (t *txLookup) RemoteToLocals(locals *accountSet) int {
	t.lock.Lock()
	defer t.lock.Unlock()

	var migrated int
	for hash, tx := range t.remotes {
		if locals.containsTx(tx) {
			t.locals[hash] = tx
			delete(t.remotes, hash)
			migrated += 1
		}
	}
	return migrated
}
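// For illustration, RemotesBelowTip below is itself a typical Range consumer:
// it walks only the remote set and collects matches. A caller-side sketch of
// the same pattern over both sets (hypothetical pool variable):
//
//	pool.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
//		log.Trace("Pooled transaction", "hash", hash, "local", local)
//		return true // keep iterating over both sets
//	}, true, true)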
// RemotesBelowTip finds all remote transactions below the given tip threshold.
func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
	found := make(types.Transactions, 0, 128)
	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		if tx.GasTipCapIntCmp(threshold) < 0 {
			found = append(found, tx)
		}
		return true
	}, false, true) // Only iterate remotes
	return found
}

// numSlots calculates the number of slots needed for a single transaction.
func numSlots(tx *types.Transaction) int {
	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}
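// For illustration: numSlots rounds up, so with txSlotSize = 32KB a 10KB
// transaction occupies one slot, while a hypothetical 100KB payload would
// occupy (102400 + 32768 - 1) / 32768 = 4 slots.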