github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/core/txpool/txpool.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package txpool 18 19 import ( 20 "errors" 21 "math" 22 "math/big" 23 "sort" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 "github.com/tirogen/go-ethereum/common" 29 "github.com/tirogen/go-ethereum/common/prque" 30 "github.com/tirogen/go-ethereum/consensus/misc" 31 "github.com/tirogen/go-ethereum/core" 32 "github.com/tirogen/go-ethereum/core/state" 33 "github.com/tirogen/go-ethereum/core/types" 34 "github.com/tirogen/go-ethereum/event" 35 "github.com/tirogen/go-ethereum/log" 36 "github.com/tirogen/go-ethereum/metrics" 37 "github.com/tirogen/go-ethereum/params" 38 ) 39 40 const ( 41 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. 42 chainHeadChanSize = 10 43 44 // txSlotSize is used to calculate how many data slots a single transaction 45 // takes up based on its size. The slots are used as DoS protection, ensuring 46 // that validating a new transaction remains a constant operation (in reality 47 // O(maxslots), where max slots are 4 currently). 48 txSlotSize = 32 * 1024 49 50 // txMaxSize is the maximum size a single transaction can have. This field has 51 // non-trivial consequences: larger transactions are significantly harder and 52 // more expensive to propagate; larger transactions also take more resources 53 // to validate whether they fit into the pool or not. 54 txMaxSize = 4 * txSlotSize // 128KB 55 ) 56 57 var ( 58 // ErrAlreadyKnown is returned if the transactions is already contained 59 // within the pool. 60 ErrAlreadyKnown = errors.New("already known") 61 62 // ErrInvalidSender is returned if the transaction contains an invalid signature. 63 ErrInvalidSender = errors.New("invalid sender") 64 65 // ErrUnderpriced is returned if a transaction's gas price is below the minimum 66 // configured for the transaction pool. 67 ErrUnderpriced = errors.New("transaction underpriced") 68 69 // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept 70 // another remote transaction. 71 ErrTxPoolOverflow = errors.New("txpool is full") 72 73 // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced 74 // with a different one without the required price bump. 75 ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") 76 77 // ErrGasLimit is returned if a transaction's requested gas limit exceeds the 78 // maximum allowance of the current block. 79 ErrGasLimit = errors.New("exceeds block gas limit") 80 81 // ErrNegativeValue is a sanity error to ensure no one is able to specify a 82 // transaction with a negative value. 83 ErrNegativeValue = errors.New("negative value") 84 85 // ErrOversizedData is returned if the input data of a transaction is greater 86 // than some meaningful limit a user might use. This is not a consensus error 87 // making the transaction invalid, rather a DOS protection. 88 ErrOversizedData = errors.New("oversized data") 89 ) 90 91 var ( 92 evictionInterval = time.Minute // Time interval to check for evictable transactions 93 statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats 94 ) 95 96 var ( 97 // Metrics for the pending pool 98 pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) 99 pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) 100 pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting 101 pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds 102 103 // Metrics for the queued pool 104 queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) 105 queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) 106 queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting 107 queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds 108 queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime 109 110 // General tx metrics 111 knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) 112 validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) 113 invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) 114 underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) 115 overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) 116 117 // throttleTxMeter counts how many transactions are rejected due to too-many-changes between 118 // txpool reorgs. 119 throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil) 120 // reorgDurationTimer measures how long time a txpool reorg takes. 121 reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil) 122 // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected 123 // that this number is pretty low, since txpool reorgs happen very frequently. 124 dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015)) 125 126 pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) 127 queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) 128 localGauge = metrics.NewRegisteredGauge("txpool/local", nil) 129 slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) 130 131 reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) 132 ) 133 134 // TxStatus is the current status of a transaction as seen by the pool. 135 type TxStatus uint 136 137 const ( 138 TxStatusUnknown TxStatus = iota 139 TxStatusQueued 140 TxStatusPending 141 TxStatusIncluded 142 ) 143 144 // blockChain provides the state of blockchain and current gas limit to do 145 // some pre checks in tx pool and event subscribers. 146 type blockChain interface { 147 CurrentBlock() *types.Block 148 GetBlock(hash common.Hash, number uint64) *types.Block 149 StateAt(root common.Hash) (*state.StateDB, error) 150 151 SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription 152 } 153 154 // Config are the configuration parameters of the transaction pool. 155 type Config struct { 156 Locals []common.Address // Addresses that should be treated by default as local 157 NoLocals bool // Whether local transaction handling should be disabled 158 Journal string // Journal of local transactions to survive node restarts 159 Rejournal time.Duration // Time interval to regenerate the local transaction journal 160 161 PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool 162 PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) 163 164 AccountSlots uint64 // Number of executable transaction slots guaranteed per account 165 GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts 166 AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account 167 GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts 168 169 Lifetime time.Duration // Maximum amount of time non-executable transaction are queued 170 } 171 172 // DefaultConfig contains the default configurations for the transaction 173 // pool. 174 var DefaultConfig = Config{ 175 Journal: "transactions.rlp", 176 Rejournal: time.Hour, 177 178 PriceLimit: 1, 179 PriceBump: 10, 180 181 AccountSlots: 16, 182 GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio 183 AccountQueue: 64, 184 GlobalQueue: 1024, 185 186 Lifetime: 3 * time.Hour, 187 } 188 189 // sanitize checks the provided user configurations and changes anything that's 190 // unreasonable or unworkable. 191 func (config *Config) sanitize() Config { 192 conf := *config 193 if conf.Rejournal < time.Second { 194 log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) 195 conf.Rejournal = time.Second 196 } 197 if conf.PriceLimit < 1 { 198 log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) 199 conf.PriceLimit = DefaultConfig.PriceLimit 200 } 201 if conf.PriceBump < 1 { 202 log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump) 203 conf.PriceBump = DefaultConfig.PriceBump 204 } 205 if conf.AccountSlots < 1 { 206 log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots) 207 conf.AccountSlots = DefaultConfig.AccountSlots 208 } 209 if conf.GlobalSlots < 1 { 210 log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots) 211 conf.GlobalSlots = DefaultConfig.GlobalSlots 212 } 213 if conf.AccountQueue < 1 { 214 log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue) 215 conf.AccountQueue = DefaultConfig.AccountQueue 216 } 217 if conf.GlobalQueue < 1 { 218 log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue) 219 conf.GlobalQueue = DefaultConfig.GlobalQueue 220 } 221 if conf.Lifetime < 1 { 222 log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime) 223 conf.Lifetime = DefaultConfig.Lifetime 224 } 225 return conf 226 } 227 228 // TxPool contains all currently known transactions. Transactions 229 // enter the pool when they are received from the network or submitted 230 // locally. They exit the pool when they are included in the blockchain. 231 // 232 // The pool separates processable transactions (which can be applied to the 233 // current state) and future transactions. Transactions move between those 234 // two states over time as they are received and processed. 235 type TxPool struct { 236 config Config 237 chainconfig *params.ChainConfig 238 chain blockChain 239 gasPrice *big.Int 240 txFeed event.Feed 241 scope event.SubscriptionScope 242 signer types.Signer 243 mu sync.RWMutex 244 245 istanbul bool // Fork indicator whether we are in the istanbul stage. 246 eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. 247 eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions. 248 249 currentState *state.StateDB // Current state in the blockchain head 250 pendingNonces *noncer // Pending state tracking virtual nonces 251 currentMaxGas uint64 // Current gas limit for transaction caps 252 253 locals *accountSet // Set of local transaction to exempt from eviction rules 254 journal *journal // Journal of local transaction to back up to disk 255 256 pending map[common.Address]*list // All currently processable transactions 257 queue map[common.Address]*list // Queued but non-processable transactions 258 beats map[common.Address]time.Time // Last heartbeat from each known account 259 all *lookup // All transactions to allow lookups 260 priced *pricedList // All transactions sorted by price 261 262 chainHeadCh chan core.ChainHeadEvent 263 chainHeadSub event.Subscription 264 reqResetCh chan *txpoolResetRequest 265 reqPromoteCh chan *accountSet 266 queueTxEventCh chan *types.Transaction 267 reorgDoneCh chan chan struct{} 268 reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop 269 wg sync.WaitGroup // tracks loop, scheduleReorgLoop 270 initDoneCh chan struct{} // is closed once the pool is initialized (for tests) 271 272 changesSinceReorg int // A counter for how many drops we've performed in-between reorg. 273 } 274 275 type txpoolResetRequest struct { 276 oldHead, newHead *types.Header 277 } 278 279 // NewTxPool creates a new transaction pool to gather, sort and filter inbound 280 // transactions from the network. 281 func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool { 282 // Sanitize the input to ensure no vulnerable gas prices are set 283 config = (&config).sanitize() 284 285 // Create the transaction pool with its initial settings 286 pool := &TxPool{ 287 config: config, 288 chainconfig: chainconfig, 289 chain: chain, 290 signer: types.LatestSigner(chainconfig), 291 pending: make(map[common.Address]*list), 292 queue: make(map[common.Address]*list), 293 beats: make(map[common.Address]time.Time), 294 all: newLookup(), 295 chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), 296 reqResetCh: make(chan *txpoolResetRequest), 297 reqPromoteCh: make(chan *accountSet), 298 queueTxEventCh: make(chan *types.Transaction), 299 reorgDoneCh: make(chan chan struct{}), 300 reorgShutdownCh: make(chan struct{}), 301 initDoneCh: make(chan struct{}), 302 gasPrice: new(big.Int).SetUint64(config.PriceLimit), 303 } 304 pool.locals = newAccountSet(pool.signer) 305 for _, addr := range config.Locals { 306 log.Info("Setting new local account", "address", addr) 307 pool.locals.add(addr) 308 } 309 pool.priced = newPricedList(pool.all) 310 pool.reset(nil, chain.CurrentBlock().Header()) 311 312 // Start the reorg loop early so it can handle requests generated during journal loading. 313 pool.wg.Add(1) 314 go pool.scheduleReorgLoop() 315 316 // If local transactions and journaling is enabled, load from disk 317 if !config.NoLocals && config.Journal != "" { 318 pool.journal = newTxJournal(config.Journal) 319 320 if err := pool.journal.load(pool.AddLocals); err != nil { 321 log.Warn("Failed to load transaction journal", "err", err) 322 } 323 if err := pool.journal.rotate(pool.local()); err != nil { 324 log.Warn("Failed to rotate transaction journal", "err", err) 325 } 326 } 327 328 // Subscribe events from blockchain and start the main event loop. 329 pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) 330 pool.wg.Add(1) 331 go pool.loop() 332 333 return pool 334 } 335 336 // loop is the transaction pool's main event loop, waiting for and reacting to 337 // outside blockchain events as well as for various reporting and transaction 338 // eviction events. 339 func (pool *TxPool) loop() { 340 defer pool.wg.Done() 341 342 var ( 343 prevPending, prevQueued, prevStales int 344 // Start the stats reporting and transaction eviction tickers 345 report = time.NewTicker(statsReportInterval) 346 evict = time.NewTicker(evictionInterval) 347 journal = time.NewTicker(pool.config.Rejournal) 348 // Track the previous head headers for transaction reorgs 349 head = pool.chain.CurrentBlock() 350 ) 351 defer report.Stop() 352 defer evict.Stop() 353 defer journal.Stop() 354 355 // Notify tests that the init phase is done 356 close(pool.initDoneCh) 357 for { 358 select { 359 // Handle ChainHeadEvent 360 case ev := <-pool.chainHeadCh: 361 if ev.Block != nil { 362 pool.requestReset(head.Header(), ev.Block.Header()) 363 head = ev.Block 364 } 365 366 // System shutdown. 367 case <-pool.chainHeadSub.Err(): 368 close(pool.reorgShutdownCh) 369 return 370 371 // Handle stats reporting ticks 372 case <-report.C: 373 pool.mu.RLock() 374 pending, queued := pool.stats() 375 pool.mu.RUnlock() 376 stales := int(atomic.LoadInt64(&pool.priced.stales)) 377 378 if pending != prevPending || queued != prevQueued || stales != prevStales { 379 log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) 380 prevPending, prevQueued, prevStales = pending, queued, stales 381 } 382 383 // Handle inactive account transaction eviction 384 case <-evict.C: 385 pool.mu.Lock() 386 for addr := range pool.queue { 387 // Skip local transactions from the eviction mechanism 388 if pool.locals.contains(addr) { 389 continue 390 } 391 // Any non-locals old enough should be removed 392 if time.Since(pool.beats[addr]) > pool.config.Lifetime { 393 list := pool.queue[addr].Flatten() 394 for _, tx := range list { 395 pool.removeTx(tx.Hash(), true) 396 } 397 queuedEvictionMeter.Mark(int64(len(list))) 398 } 399 } 400 pool.mu.Unlock() 401 402 // Handle local transaction journal rotation 403 case <-journal.C: 404 if pool.journal != nil { 405 pool.mu.Lock() 406 if err := pool.journal.rotate(pool.local()); err != nil { 407 log.Warn("Failed to rotate local tx journal", "err", err) 408 } 409 pool.mu.Unlock() 410 } 411 } 412 } 413 } 414 415 // Stop terminates the transaction pool. 416 func (pool *TxPool) Stop() { 417 // Unsubscribe all subscriptions registered from txpool 418 pool.scope.Close() 419 420 // Unsubscribe subscriptions registered from blockchain 421 pool.chainHeadSub.Unsubscribe() 422 pool.wg.Wait() 423 424 if pool.journal != nil { 425 pool.journal.close() 426 } 427 log.Info("Transaction pool stopped") 428 } 429 430 // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and 431 // starts sending event to the given channel. 432 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { 433 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 434 } 435 436 // GasPrice returns the current gas price enforced by the transaction pool. 437 func (pool *TxPool) GasPrice() *big.Int { 438 pool.mu.RLock() 439 defer pool.mu.RUnlock() 440 441 return new(big.Int).Set(pool.gasPrice) 442 } 443 444 // SetGasPrice updates the minimum price required by the transaction pool for a 445 // new transaction, and drops all transactions below this threshold. 446 func (pool *TxPool) SetGasPrice(price *big.Int) { 447 pool.mu.Lock() 448 defer pool.mu.Unlock() 449 450 old := pool.gasPrice 451 pool.gasPrice = price 452 // if the min miner fee increased, remove transactions below the new threshold 453 if price.Cmp(old) > 0 { 454 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 455 drop := pool.all.RemotesBelowTip(price) 456 for _, tx := range drop { 457 pool.removeTx(tx.Hash(), false) 458 } 459 pool.priced.Removed(len(drop)) 460 } 461 462 log.Info("Transaction pool price threshold updated", "price", price) 463 } 464 465 // Nonce returns the next nonce of an account, with all transactions executable 466 // by the pool already applied on top. 467 func (pool *TxPool) Nonce(addr common.Address) uint64 { 468 pool.mu.RLock() 469 defer pool.mu.RUnlock() 470 471 return pool.pendingNonces.get(addr) 472 } 473 474 // Stats retrieves the current pool stats, namely the number of pending and the 475 // number of queued (non-executable) transactions. 476 func (pool *TxPool) Stats() (int, int) { 477 pool.mu.RLock() 478 defer pool.mu.RUnlock() 479 480 return pool.stats() 481 } 482 483 // stats retrieves the current pool stats, namely the number of pending and the 484 // number of queued (non-executable) transactions. 485 func (pool *TxPool) stats() (int, int) { 486 pending := 0 487 for _, list := range pool.pending { 488 pending += list.Len() 489 } 490 queued := 0 491 for _, list := range pool.queue { 492 queued += list.Len() 493 } 494 return pending, queued 495 } 496 497 // Content retrieves the data content of the transaction pool, returning all the 498 // pending as well as queued transactions, grouped by account and sorted by nonce. 499 func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { 500 pool.mu.Lock() 501 defer pool.mu.Unlock() 502 503 pending := make(map[common.Address]types.Transactions, len(pool.pending)) 504 for addr, list := range pool.pending { 505 pending[addr] = list.Flatten() 506 } 507 queued := make(map[common.Address]types.Transactions, len(pool.queue)) 508 for addr, list := range pool.queue { 509 queued[addr] = list.Flatten() 510 } 511 return pending, queued 512 } 513 514 // ContentFrom retrieves the data content of the transaction pool, returning the 515 // pending as well as queued transactions of this address, grouped by nonce. 516 func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { 517 pool.mu.RLock() 518 defer pool.mu.RUnlock() 519 520 var pending types.Transactions 521 if list, ok := pool.pending[addr]; ok { 522 pending = list.Flatten() 523 } 524 var queued types.Transactions 525 if list, ok := pool.queue[addr]; ok { 526 queued = list.Flatten() 527 } 528 return pending, queued 529 } 530 531 // Pending retrieves all currently processable transactions, grouped by origin 532 // account and sorted by nonce. The returned transaction set is a copy and can be 533 // freely modified by calling code. 534 // 535 // The enforceTips parameter can be used to do an extra filtering on the pending 536 // transactions and only return those whose **effective** tip is large enough in 537 // the next pending execution environment. 538 func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { 539 pool.mu.Lock() 540 defer pool.mu.Unlock() 541 542 pending := make(map[common.Address]types.Transactions) 543 for addr, list := range pool.pending { 544 txs := list.Flatten() 545 546 // If the miner requests tip enforcement, cap the lists now 547 if enforceTips && !pool.locals.contains(addr) { 548 for i, tx := range txs { 549 if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { 550 txs = txs[:i] 551 break 552 } 553 } 554 } 555 if len(txs) > 0 { 556 pending[addr] = txs 557 } 558 } 559 return pending 560 } 561 562 // Locals retrieves the accounts currently considered local by the pool. 563 func (pool *TxPool) Locals() []common.Address { 564 pool.mu.Lock() 565 defer pool.mu.Unlock() 566 567 return pool.locals.flatten() 568 } 569 570 // local retrieves all currently known local transactions, grouped by origin 571 // account and sorted by nonce. The returned transaction set is a copy and can be 572 // freely modified by calling code. 573 func (pool *TxPool) local() map[common.Address]types.Transactions { 574 txs := make(map[common.Address]types.Transactions) 575 for addr := range pool.locals.accounts { 576 if pending := pool.pending[addr]; pending != nil { 577 txs[addr] = append(txs[addr], pending.Flatten()...) 578 } 579 if queued := pool.queue[addr]; queued != nil { 580 txs[addr] = append(txs[addr], queued.Flatten()...) 581 } 582 } 583 return txs 584 } 585 586 // validateTx checks whether a transaction is valid according to the consensus 587 // rules and adheres to some heuristic limits of the local node (price and size). 588 func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { 589 // Accept only legacy transactions until EIP-2718/2930 activates. 590 if !pool.eip2718 && tx.Type() != types.LegacyTxType { 591 return core.ErrTxTypeNotSupported 592 } 593 // Reject dynamic fee transactions until EIP-1559 activates. 594 if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { 595 return core.ErrTxTypeNotSupported 596 } 597 // Reject transactions over defined size to prevent DOS attacks 598 if tx.Size() > txMaxSize { 599 return ErrOversizedData 600 } 601 // Transactions can't be negative. This may never happen using RLP decoded 602 // transactions but may occur if you create a transaction using the RPC. 603 if tx.Value().Sign() < 0 { 604 return ErrNegativeValue 605 } 606 // Ensure the transaction doesn't exceed the current block limit gas. 607 if pool.currentMaxGas < tx.Gas() { 608 return ErrGasLimit 609 } 610 // Sanity check for extremely large numbers 611 if tx.GasFeeCap().BitLen() > 256 { 612 return core.ErrFeeCapVeryHigh 613 } 614 if tx.GasTipCap().BitLen() > 256 { 615 return core.ErrTipVeryHigh 616 } 617 // Ensure gasFeeCap is greater than or equal to gasTipCap. 618 if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { 619 return core.ErrTipAboveFeeCap 620 } 621 // Make sure the transaction is signed properly. 622 from, err := types.Sender(pool.signer, tx) 623 if err != nil { 624 return ErrInvalidSender 625 } 626 // Drop non-local transactions under our own minimal accepted gas price or tip 627 if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { 628 return ErrUnderpriced 629 } 630 // Ensure the transaction adheres to nonce ordering 631 if pool.currentState.GetNonce(from) > tx.Nonce() { 632 return core.ErrNonceTooLow 633 } 634 // Transactor should have enough funds to cover the costs 635 // cost == V + GP * GL 636 if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { 637 return core.ErrInsufficientFunds 638 } 639 // Ensure the transaction has more gas than the basic tx fee. 640 intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) 641 if err != nil { 642 return err 643 } 644 if tx.Gas() < intrGas { 645 return core.ErrIntrinsicGas 646 } 647 return nil 648 } 649 650 // add validates a transaction and inserts it into the non-executable queue for later 651 // pending promotion and execution. If the transaction is a replacement for an already 652 // pending or queued one, it overwrites the previous transaction if its price is higher. 653 // 654 // If a newly added transaction is marked as local, its sending account will be 655 // be added to the allowlist, preventing any associated transaction from being dropped 656 // out of the pool due to pricing constraints. 657 func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { 658 // If the transaction is already known, discard it 659 hash := tx.Hash() 660 if pool.all.Get(hash) != nil { 661 log.Trace("Discarding already known transaction", "hash", hash) 662 knownTxMeter.Mark(1) 663 return false, ErrAlreadyKnown 664 } 665 // Make the local flag. If it's from local source or it's from the network but 666 // the sender is marked as local previously, treat it as the local transaction. 667 isLocal := local || pool.locals.containsTx(tx) 668 669 // If the transaction fails basic validation, discard it 670 if err := pool.validateTx(tx, isLocal); err != nil { 671 log.Trace("Discarding invalid transaction", "hash", hash, "err", err) 672 invalidTxMeter.Mark(1) 673 return false, err 674 } 675 // If the transaction pool is full, discard underpriced transactions 676 if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { 677 // If the new transaction is underpriced, don't accept it 678 if !isLocal && pool.priced.Underpriced(tx) { 679 log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 680 underpricedTxMeter.Mark(1) 681 return false, ErrUnderpriced 682 } 683 // We're about to replace a transaction. The reorg does a more thorough 684 // analysis of what to remove and how, but it runs async. We don't want to 685 // do too many replacements between reorg-runs, so we cap the number of 686 // replacements to 25% of the slots 687 if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) { 688 throttleTxMeter.Mark(1) 689 return false, ErrTxPoolOverflow 690 } 691 692 // New transaction is better than our worse ones, make room for it. 693 // If it's a local transaction, forcibly discard all available transactions. 694 // Otherwise if we can't make enough room for new one, abort the operation. 695 drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) 696 697 // Special case, we still can't make the room for the new remote one. 698 if !isLocal && !success { 699 log.Trace("Discarding overflown transaction", "hash", hash) 700 overflowedTxMeter.Mark(1) 701 return false, ErrTxPoolOverflow 702 } 703 // Bump the counter of rejections-since-reorg 704 pool.changesSinceReorg += len(drop) 705 // Kick out the underpriced remote transactions. 706 for _, tx := range drop { 707 log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 708 underpricedTxMeter.Mark(1) 709 pool.removeTx(tx.Hash(), false) 710 } 711 } 712 // Try to replace an existing transaction in the pending pool 713 from, _ := types.Sender(pool.signer, tx) // already validated 714 if list := pool.pending[from]; list != nil && list.Overlaps(tx) { 715 // Nonce already pending, check if required price bump is met 716 inserted, old := list.Add(tx, pool.config.PriceBump) 717 if !inserted { 718 pendingDiscardMeter.Mark(1) 719 return false, ErrReplaceUnderpriced 720 } 721 // New transaction is better, replace old one 722 if old != nil { 723 pool.all.Remove(old.Hash()) 724 pool.priced.Removed(1) 725 pendingReplaceMeter.Mark(1) 726 } 727 pool.all.Add(tx, isLocal) 728 pool.priced.Put(tx, isLocal) 729 pool.journalTx(from, tx) 730 pool.queueTxEvent(tx) 731 log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) 732 733 // Successful promotion, bump the heartbeat 734 pool.beats[from] = time.Now() 735 return old != nil, nil 736 } 737 // New transaction isn't replacing a pending one, push into queue 738 replaced, err = pool.enqueueTx(hash, tx, isLocal, true) 739 if err != nil { 740 return false, err 741 } 742 // Mark local addresses and journal local transactions 743 if local && !pool.locals.contains(from) { 744 log.Info("Setting new local account", "address", from) 745 pool.locals.add(from) 746 pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. 747 } 748 if isLocal { 749 localGauge.Inc(1) 750 } 751 pool.journalTx(from, tx) 752 753 log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) 754 return replaced, nil 755 } 756 757 // enqueueTx inserts a new transaction into the non-executable transaction queue. 758 // 759 // Note, this method assumes the pool lock is held! 760 func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { 761 // Try to insert the transaction into the future queue 762 from, _ := types.Sender(pool.signer, tx) // already validated 763 if pool.queue[from] == nil { 764 pool.queue[from] = newList(false) 765 } 766 inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) 767 if !inserted { 768 // An older transaction was better, discard this 769 queuedDiscardMeter.Mark(1) 770 return false, ErrReplaceUnderpriced 771 } 772 // Discard any previous transaction and mark this 773 if old != nil { 774 pool.all.Remove(old.Hash()) 775 pool.priced.Removed(1) 776 queuedReplaceMeter.Mark(1) 777 } else { 778 // Nothing was replaced, bump the queued counter 779 queuedGauge.Inc(1) 780 } 781 // If the transaction isn't in lookup set but it's expected to be there, 782 // show the error log. 783 if pool.all.Get(hash) == nil && !addAll { 784 log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) 785 } 786 if addAll { 787 pool.all.Add(tx, local) 788 pool.priced.Put(tx, local) 789 } 790 // If we never record the heartbeat, do it right now. 791 if _, exist := pool.beats[from]; !exist { 792 pool.beats[from] = time.Now() 793 } 794 return old != nil, nil 795 } 796 797 // journalTx adds the specified transaction to the local disk journal if it is 798 // deemed to have been sent from a local account. 799 func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { 800 // Only journal if it's enabled and the transaction is local 801 if pool.journal == nil || !pool.locals.contains(from) { 802 return 803 } 804 if err := pool.journal.insert(tx); err != nil { 805 log.Warn("Failed to journal local transaction", "err", err) 806 } 807 } 808 809 // promoteTx adds a transaction to the pending (processable) list of transactions 810 // and returns whether it was inserted or an older was better. 811 // 812 // Note, this method assumes the pool lock is held! 813 func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { 814 // Try to insert the transaction into the pending queue 815 if pool.pending[addr] == nil { 816 pool.pending[addr] = newList(true) 817 } 818 list := pool.pending[addr] 819 820 inserted, old := list.Add(tx, pool.config.PriceBump) 821 if !inserted { 822 // An older transaction was better, discard this 823 pool.all.Remove(hash) 824 pool.priced.Removed(1) 825 pendingDiscardMeter.Mark(1) 826 return false 827 } 828 // Otherwise discard any previous transaction and mark this 829 if old != nil { 830 pool.all.Remove(old.Hash()) 831 pool.priced.Removed(1) 832 pendingReplaceMeter.Mark(1) 833 } else { 834 // Nothing was replaced, bump the pending counter 835 pendingGauge.Inc(1) 836 } 837 // Set the potentially new pending nonce and notify any subsystems of the new tx 838 pool.pendingNonces.set(addr, tx.Nonce()+1) 839 840 // Successful promotion, bump the heartbeat 841 pool.beats[addr] = time.Now() 842 return true 843 } 844 845 // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the 846 // senders as a local ones, ensuring they go around the local pricing constraints. 847 // 848 // This method is used to add transactions from the RPC API and performs synchronous pool 849 // reorganization and event propagation. 850 func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { 851 return pool.addTxs(txs, !pool.config.NoLocals, true) 852 } 853 854 // AddLocal enqueues a single local transaction into the pool if it is valid. This is 855 // a convenience wrapper around AddLocals. 856 func (pool *TxPool) AddLocal(tx *types.Transaction) error { 857 errs := pool.AddLocals([]*types.Transaction{tx}) 858 return errs[0] 859 } 860 861 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the 862 // senders are not among the locally tracked ones, full pricing constraints will apply. 863 // 864 // This method is used to add transactions from the p2p network and does not wait for pool 865 // reorganization and internal event propagation. 866 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 867 return pool.addTxs(txs, false, false) 868 } 869 870 // AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method. 871 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 872 return pool.addTxs(txs, false, true) 873 } 874 875 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 876 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 877 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 878 return errs[0] 879 } 880 881 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 882 // wrapper around AddRemotes. 883 // 884 // Deprecated: use AddRemotes 885 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 886 errs := pool.AddRemotes([]*types.Transaction{tx}) 887 return errs[0] 888 } 889 890 // addTxs attempts to queue a batch of transactions if they are valid. 891 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { 892 // Filter out known ones without obtaining the pool lock or recovering signatures 893 var ( 894 errs = make([]error, len(txs)) 895 news = make([]*types.Transaction, 0, len(txs)) 896 ) 897 for i, tx := range txs { 898 // If the transaction is known, pre-set the error slot 899 if pool.all.Get(tx.Hash()) != nil { 900 errs[i] = ErrAlreadyKnown 901 knownTxMeter.Mark(1) 902 continue 903 } 904 // Exclude transactions with invalid signatures as soon as 905 // possible and cache senders in transactions before 906 // obtaining lock 907 _, err := types.Sender(pool.signer, tx) 908 if err != nil { 909 errs[i] = ErrInvalidSender 910 invalidTxMeter.Mark(1) 911 continue 912 } 913 // Accumulate all unknown transactions for deeper processing 914 news = append(news, tx) 915 } 916 if len(news) == 0 { 917 return errs 918 } 919 920 // Process all the new transaction and merge any errors into the original slice 921 pool.mu.Lock() 922 newErrs, dirtyAddrs := pool.addTxsLocked(news, local) 923 pool.mu.Unlock() 924 925 var nilSlot = 0 926 for _, err := range newErrs { 927 for errs[nilSlot] != nil { 928 nilSlot++ 929 } 930 errs[nilSlot] = err 931 nilSlot++ 932 } 933 // Reorg the pool internals if needed and return 934 done := pool.requestPromoteExecutables(dirtyAddrs) 935 if sync { 936 <-done 937 } 938 return errs 939 } 940 941 // addTxsLocked attempts to queue a batch of transactions if they are valid. 942 // The transaction pool lock must be held. 943 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 944 dirty := newAccountSet(pool.signer) 945 errs := make([]error, len(txs)) 946 for i, tx := range txs { 947 replaced, err := pool.add(tx, local) 948 errs[i] = err 949 if err == nil && !replaced { 950 dirty.addTx(tx) 951 } 952 } 953 validTxMeter.Mark(int64(len(dirty.accounts))) 954 return errs, dirty 955 } 956 957 // Status returns the status (unknown/pending/queued) of a batch of transactions 958 // identified by their hashes. 959 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 960 status := make([]TxStatus, len(hashes)) 961 for i, hash := range hashes { 962 tx := pool.Get(hash) 963 if tx == nil { 964 continue 965 } 966 from, _ := types.Sender(pool.signer, tx) // already validated 967 pool.mu.RLock() 968 if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 969 status[i] = TxStatusPending 970 } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 971 status[i] = TxStatusQueued 972 } 973 // implicit else: the tx may have been included into a block between 974 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 975 pool.mu.RUnlock() 976 } 977 return status 978 } 979 980 // Get returns a transaction if it is contained in the pool and nil otherwise. 981 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 982 return pool.all.Get(hash) 983 } 984 985 // Has returns an indicator whether txpool has a transaction cached with the 986 // given hash. 987 func (pool *TxPool) Has(hash common.Hash) bool { 988 return pool.all.Get(hash) != nil 989 } 990 991 // removeTx removes a single transaction from the queue, moving all subsequent 992 // transactions back to the future queue. 993 func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { 994 // Fetch the transaction we wish to delete 995 tx := pool.all.Get(hash) 996 if tx == nil { 997 return 998 } 999 addr, _ := types.Sender(pool.signer, tx) // already validated during insertion 1000 1001 // Remove it from the list of known transactions 1002 pool.all.Remove(hash) 1003 if outofbound { 1004 pool.priced.Removed(1) 1005 } 1006 if pool.locals.contains(addr) { 1007 localGauge.Dec(1) 1008 } 1009 // Remove the transaction from the pending lists and reset the account nonce 1010 if pending := pool.pending[addr]; pending != nil { 1011 if removed, invalids := pending.Remove(tx); removed { 1012 // If no more pending transactions are left, remove the list 1013 if pending.Empty() { 1014 delete(pool.pending, addr) 1015 } 1016 // Postpone any invalidated transactions 1017 for _, tx := range invalids { 1018 // Internal shuffle shouldn't touch the lookup set. 1019 pool.enqueueTx(tx.Hash(), tx, false, false) 1020 } 1021 // Update the account nonce if needed 1022 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1023 // Reduce the pending counter 1024 pendingGauge.Dec(int64(1 + len(invalids))) 1025 return 1026 } 1027 } 1028 // Transaction is in the future queue 1029 if future := pool.queue[addr]; future != nil { 1030 if removed, _ := future.Remove(tx); removed { 1031 // Reduce the queued counter 1032 queuedGauge.Dec(1) 1033 } 1034 if future.Empty() { 1035 delete(pool.queue, addr) 1036 delete(pool.beats, addr) 1037 } 1038 } 1039 } 1040 1041 // requestReset requests a pool reset to the new head block. 1042 // The returned channel is closed when the reset has occurred. 1043 func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { 1044 select { 1045 case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: 1046 return <-pool.reorgDoneCh 1047 case <-pool.reorgShutdownCh: 1048 return pool.reorgShutdownCh 1049 } 1050 } 1051 1052 // requestPromoteExecutables requests transaction promotion checks for the given addresses. 1053 // The returned channel is closed when the promotion checks have occurred. 1054 func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { 1055 select { 1056 case pool.reqPromoteCh <- set: 1057 return <-pool.reorgDoneCh 1058 case <-pool.reorgShutdownCh: 1059 return pool.reorgShutdownCh 1060 } 1061 } 1062 1063 // queueTxEvent enqueues a transaction event to be sent in the next reorg run. 1064 func (pool *TxPool) queueTxEvent(tx *types.Transaction) { 1065 select { 1066 case pool.queueTxEventCh <- tx: 1067 case <-pool.reorgShutdownCh: 1068 } 1069 } 1070 1071 // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not 1072 // call those methods directly, but request them being run using requestReset and 1073 // requestPromoteExecutables instead. 1074 func (pool *TxPool) scheduleReorgLoop() { 1075 defer pool.wg.Done() 1076 1077 var ( 1078 curDone chan struct{} // non-nil while runReorg is active 1079 nextDone = make(chan struct{}) 1080 launchNextRun bool 1081 reset *txpoolResetRequest 1082 dirtyAccounts *accountSet 1083 queuedEvents = make(map[common.Address]*sortedMap) 1084 ) 1085 for { 1086 // Launch next background reorg if needed 1087 if curDone == nil && launchNextRun { 1088 // Run the background reorg and announcements 1089 go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) 1090 1091 // Prepare everything for the next round of reorg 1092 curDone, nextDone = nextDone, make(chan struct{}) 1093 launchNextRun = false 1094 1095 reset, dirtyAccounts = nil, nil 1096 queuedEvents = make(map[common.Address]*sortedMap) 1097 } 1098 1099 select { 1100 case req := <-pool.reqResetCh: 1101 // Reset request: update head if request is already pending. 1102 if reset == nil { 1103 reset = req 1104 } else { 1105 reset.newHead = req.newHead 1106 } 1107 launchNextRun = true 1108 pool.reorgDoneCh <- nextDone 1109 1110 case req := <-pool.reqPromoteCh: 1111 // Promote request: update address set if request is already pending. 1112 if dirtyAccounts == nil { 1113 dirtyAccounts = req 1114 } else { 1115 dirtyAccounts.merge(req) 1116 } 1117 launchNextRun = true 1118 pool.reorgDoneCh <- nextDone 1119 1120 case tx := <-pool.queueTxEventCh: 1121 // Queue up the event, but don't schedule a reorg. It's up to the caller to 1122 // request one later if they want the events sent. 1123 addr, _ := types.Sender(pool.signer, tx) 1124 if _, ok := queuedEvents[addr]; !ok { 1125 queuedEvents[addr] = newSortedMap() 1126 } 1127 queuedEvents[addr].Put(tx) 1128 1129 case <-curDone: 1130 curDone = nil 1131 1132 case <-pool.reorgShutdownCh: 1133 // Wait for current run to finish. 1134 if curDone != nil { 1135 <-curDone 1136 } 1137 close(nextDone) 1138 return 1139 } 1140 } 1141 } 1142 1143 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. 1144 func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { 1145 defer func(t0 time.Time) { 1146 reorgDurationTimer.Update(time.Since(t0)) 1147 }(time.Now()) 1148 defer close(done) 1149 1150 var promoteAddrs []common.Address 1151 if dirtyAccounts != nil && reset == nil { 1152 // Only dirty accounts need to be promoted, unless we're resetting. 1153 // For resets, all addresses in the tx queue will be promoted and 1154 // the flatten operation can be avoided. 1155 promoteAddrs = dirtyAccounts.flatten() 1156 } 1157 pool.mu.Lock() 1158 if reset != nil { 1159 // Reset from the old head to the new, rescheduling any reorged transactions 1160 pool.reset(reset.oldHead, reset.newHead) 1161 1162 // Nonces were reset, discard any events that became stale 1163 for addr := range events { 1164 events[addr].Forward(pool.pendingNonces.get(addr)) 1165 if events[addr].Len() == 0 { 1166 delete(events, addr) 1167 } 1168 } 1169 // Reset needs promote for all addresses 1170 promoteAddrs = make([]common.Address, 0, len(pool.queue)) 1171 for addr := range pool.queue { 1172 promoteAddrs = append(promoteAddrs, addr) 1173 } 1174 } 1175 // Check for pending transactions for every account that sent new ones 1176 promoted := pool.promoteExecutables(promoteAddrs) 1177 1178 // If a new block appeared, validate the pool of pending transactions. This will 1179 // remove any transaction that has been included in the block or was invalidated 1180 // because of another transaction (e.g. higher gas price). 1181 if reset != nil { 1182 pool.demoteUnexecutables() 1183 if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { 1184 pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead) 1185 pool.priced.SetBaseFee(pendingBaseFee) 1186 } 1187 // Update all accounts to the latest known pending nonce 1188 nonces := make(map[common.Address]uint64, len(pool.pending)) 1189 for addr, list := range pool.pending { 1190 highestPending := list.LastElement() 1191 nonces[addr] = highestPending.Nonce() + 1 1192 } 1193 pool.pendingNonces.setAll(nonces) 1194 } 1195 // Ensure pool.queue and pool.pending sizes stay within the configured limits. 1196 pool.truncatePending() 1197 pool.truncateQueue() 1198 1199 dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) 1200 pool.changesSinceReorg = 0 // Reset change counter 1201 pool.mu.Unlock() 1202 1203 // Notify subsystems for newly added transactions 1204 for _, tx := range promoted { 1205 addr, _ := types.Sender(pool.signer, tx) 1206 if _, ok := events[addr]; !ok { 1207 events[addr] = newSortedMap() 1208 } 1209 events[addr].Put(tx) 1210 } 1211 if len(events) > 0 { 1212 var txs []*types.Transaction 1213 for _, set := range events { 1214 txs = append(txs, set.Flatten()...) 1215 } 1216 pool.txFeed.Send(core.NewTxsEvent{Txs: txs}) 1217 } 1218 } 1219 1220 // reset retrieves the current state of the blockchain and ensures the content 1221 // of the transaction pool is valid with regard to the chain state. 1222 func (pool *TxPool) reset(oldHead, newHead *types.Header) { 1223 // If we're reorging an old state, reinject all dropped transactions 1224 var reinject types.Transactions 1225 1226 if oldHead != nil && oldHead.Hash() != newHead.ParentHash { 1227 // If the reorg is too deep, avoid doing it (will happen during fast sync) 1228 oldNum := oldHead.Number.Uint64() 1229 newNum := newHead.Number.Uint64() 1230 1231 if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { 1232 log.Debug("Skipping deep transaction reorg", "depth", depth) 1233 } else { 1234 // Reorg seems shallow enough to pull in all transactions into memory 1235 var discarded, included types.Transactions 1236 var ( 1237 rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) 1238 add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) 1239 ) 1240 if rem == nil { 1241 // This can happen if a setHead is performed, where we simply discard the old 1242 // head from the chain. 1243 // If that is the case, we don't have the lost transactions anymore, and 1244 // there's nothing to add 1245 if newNum >= oldNum { 1246 // If we reorged to a same or higher number, then it's not a case of setHead 1247 log.Warn("Transaction pool reset with missing oldhead", 1248 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1249 return 1250 } 1251 // If the reorg ended up on a lower number, it's indicative of setHead being the cause 1252 log.Debug("Skipping transaction reset caused by setHead", 1253 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1254 // We still need to update the current state s.th. the lost transactions can be readded by the user 1255 } else { 1256 for rem.NumberU64() > add.NumberU64() { 1257 discarded = append(discarded, rem.Transactions()...) 1258 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1259 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) 1260 return 1261 } 1262 } 1263 for add.NumberU64() > rem.NumberU64() { 1264 included = append(included, add.Transactions()...) 1265 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1266 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) 1267 return 1268 } 1269 } 1270 for rem.Hash() != add.Hash() { 1271 discarded = append(discarded, rem.Transactions()...) 1272 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1273 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) 1274 return 1275 } 1276 included = append(included, add.Transactions()...) 1277 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1278 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) 1279 return 1280 } 1281 } 1282 reinject = types.TxDifference(discarded, included) 1283 } 1284 } 1285 } 1286 // Initialize the internal state to the current head 1287 if newHead == nil { 1288 newHead = pool.chain.CurrentBlock().Header() // Special case during testing 1289 } 1290 statedb, err := pool.chain.StateAt(newHead.Root) 1291 if err != nil { 1292 log.Error("Failed to reset txpool state", "err", err) 1293 return 1294 } 1295 pool.currentState = statedb 1296 pool.pendingNonces = newNoncer(statedb) 1297 pool.currentMaxGas = newHead.GasLimit 1298 1299 // Inject any transactions discarded due to reorgs 1300 log.Debug("Reinjecting stale transactions", "count", len(reinject)) 1301 core.SenderCacher.Recover(pool.signer, reinject) 1302 pool.addTxsLocked(reinject, false) 1303 1304 // Update all fork indicator by next pending block number. 1305 next := new(big.Int).Add(newHead.Number, big.NewInt(1)) 1306 pool.istanbul = pool.chainconfig.IsIstanbul(next) 1307 pool.eip2718 = pool.chainconfig.IsBerlin(next) 1308 pool.eip1559 = pool.chainconfig.IsLondon(next) 1309 } 1310 1311 // promoteExecutables moves transactions that have become processable from the 1312 // future queue to the set of pending transactions. During this process, all 1313 // invalidated transactions (low nonce, low balance) are deleted. 1314 func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction { 1315 // Track the promoted transactions to broadcast them at once 1316 var promoted []*types.Transaction 1317 1318 // Iterate over all accounts and promote any executable transactions 1319 for _, addr := range accounts { 1320 list := pool.queue[addr] 1321 if list == nil { 1322 continue // Just in case someone calls with a non existing account 1323 } 1324 // Drop all transactions that are deemed too old (low nonce) 1325 forwards := list.Forward(pool.currentState.GetNonce(addr)) 1326 for _, tx := range forwards { 1327 hash := tx.Hash() 1328 pool.all.Remove(hash) 1329 } 1330 log.Trace("Removed old queued transactions", "count", len(forwards)) 1331 // Drop all transactions that are too costly (low balance or out of gas) 1332 drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1333 for _, tx := range drops { 1334 hash := tx.Hash() 1335 pool.all.Remove(hash) 1336 } 1337 log.Trace("Removed unpayable queued transactions", "count", len(drops)) 1338 queuedNofundsMeter.Mark(int64(len(drops))) 1339 1340 // Gather all executable transactions and promote them 1341 readies := list.Ready(pool.pendingNonces.get(addr)) 1342 for _, tx := range readies { 1343 hash := tx.Hash() 1344 if pool.promoteTx(addr, hash, tx) { 1345 promoted = append(promoted, tx) 1346 } 1347 } 1348 log.Trace("Promoted queued transactions", "count", len(promoted)) 1349 queuedGauge.Dec(int64(len(readies))) 1350 1351 // Drop all transactions over the allowed limit 1352 var caps types.Transactions 1353 if !pool.locals.contains(addr) { 1354 caps = list.Cap(int(pool.config.AccountQueue)) 1355 for _, tx := range caps { 1356 hash := tx.Hash() 1357 pool.all.Remove(hash) 1358 log.Trace("Removed cap-exceeding queued transaction", "hash", hash) 1359 } 1360 queuedRateLimitMeter.Mark(int64(len(caps))) 1361 } 1362 // Mark all the items dropped as removed 1363 pool.priced.Removed(len(forwards) + len(drops) + len(caps)) 1364 queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1365 if pool.locals.contains(addr) { 1366 localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1367 } 1368 // Delete the entire queue entry if it became empty. 1369 if list.Empty() { 1370 delete(pool.queue, addr) 1371 delete(pool.beats, addr) 1372 } 1373 } 1374 return promoted 1375 } 1376 1377 // truncatePending removes transactions from the pending queue if the pool is above the 1378 // pending limit. The algorithm tries to reduce transaction counts by an approximately 1379 // equal number for all for accounts with many pending transactions. 1380 func (pool *TxPool) truncatePending() { 1381 pending := uint64(0) 1382 for _, list := range pool.pending { 1383 pending += uint64(list.Len()) 1384 } 1385 if pending <= pool.config.GlobalSlots { 1386 return 1387 } 1388 1389 pendingBeforeCap := pending 1390 // Assemble a spam order to penalize large transactors first 1391 spammers := prque.New(nil) 1392 for addr, list := range pool.pending { 1393 // Only evict transactions from high rollers 1394 if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { 1395 spammers.Push(addr, int64(list.Len())) 1396 } 1397 } 1398 // Gradually drop transactions from offenders 1399 offenders := []common.Address{} 1400 for pending > pool.config.GlobalSlots && !spammers.Empty() { 1401 // Retrieve the next offender if not local address 1402 offender, _ := spammers.Pop() 1403 offenders = append(offenders, offender.(common.Address)) 1404 1405 // Equalize balances until all the same or below threshold 1406 if len(offenders) > 1 { 1407 // Calculate the equalization threshold for all current offenders 1408 threshold := pool.pending[offender.(common.Address)].Len() 1409 1410 // Iteratively reduce all offenders until below limit or threshold reached 1411 for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { 1412 for i := 0; i < len(offenders)-1; i++ { 1413 list := pool.pending[offenders[i]] 1414 1415 caps := list.Cap(list.Len() - 1) 1416 for _, tx := range caps { 1417 // Drop the transaction from the global pools too 1418 hash := tx.Hash() 1419 pool.all.Remove(hash) 1420 1421 // Update the account nonce to the dropped transaction 1422 pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) 1423 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1424 } 1425 pool.priced.Removed(len(caps)) 1426 pendingGauge.Dec(int64(len(caps))) 1427 if pool.locals.contains(offenders[i]) { 1428 localGauge.Dec(int64(len(caps))) 1429 } 1430 pending-- 1431 } 1432 } 1433 } 1434 } 1435 1436 // If still above threshold, reduce to limit or min allowance 1437 if pending > pool.config.GlobalSlots && len(offenders) > 0 { 1438 for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { 1439 for _, addr := range offenders { 1440 list := pool.pending[addr] 1441 1442 caps := list.Cap(list.Len() - 1) 1443 for _, tx := range caps { 1444 // Drop the transaction from the global pools too 1445 hash := tx.Hash() 1446 pool.all.Remove(hash) 1447 1448 // Update the account nonce to the dropped transaction 1449 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1450 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1451 } 1452 pool.priced.Removed(len(caps)) 1453 pendingGauge.Dec(int64(len(caps))) 1454 if pool.locals.contains(addr) { 1455 localGauge.Dec(int64(len(caps))) 1456 } 1457 pending-- 1458 } 1459 } 1460 } 1461 pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) 1462 } 1463 1464 // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit. 1465 func (pool *TxPool) truncateQueue() { 1466 queued := uint64(0) 1467 for _, list := range pool.queue { 1468 queued += uint64(list.Len()) 1469 } 1470 if queued <= pool.config.GlobalQueue { 1471 return 1472 } 1473 1474 // Sort all accounts with queued transactions by heartbeat 1475 addresses := make(addressesByHeartbeat, 0, len(pool.queue)) 1476 for addr := range pool.queue { 1477 if !pool.locals.contains(addr) { // don't drop locals 1478 addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) 1479 } 1480 } 1481 sort.Sort(sort.Reverse(addresses)) 1482 1483 // Drop transactions until the total is below the limit or only locals remain 1484 for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { 1485 addr := addresses[len(addresses)-1] 1486 list := pool.queue[addr.address] 1487 1488 addresses = addresses[:len(addresses)-1] 1489 1490 // Drop all transactions if they are less than the overflow 1491 if size := uint64(list.Len()); size <= drop { 1492 for _, tx := range list.Flatten() { 1493 pool.removeTx(tx.Hash(), true) 1494 } 1495 drop -= size 1496 queuedRateLimitMeter.Mark(int64(size)) 1497 continue 1498 } 1499 // Otherwise drop only last few transactions 1500 txs := list.Flatten() 1501 for i := len(txs) - 1; i >= 0 && drop > 0; i-- { 1502 pool.removeTx(txs[i].Hash(), true) 1503 drop-- 1504 queuedRateLimitMeter.Mark(1) 1505 } 1506 } 1507 } 1508 1509 // demoteUnexecutables removes invalid and processed transactions from the pools 1510 // executable/pending queue and any subsequent transactions that become unexecutable 1511 // are moved back into the future queue. 1512 // 1513 // Note: transactions are not marked as removed in the priced list because re-heaping 1514 // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful 1515 // to trigger a re-heap is this function 1516 func (pool *TxPool) demoteUnexecutables() { 1517 // Iterate over all accounts and demote any non-executable transactions 1518 for addr, list := range pool.pending { 1519 nonce := pool.currentState.GetNonce(addr) 1520 1521 // Drop all transactions that are deemed too old (low nonce) 1522 olds := list.Forward(nonce) 1523 for _, tx := range olds { 1524 hash := tx.Hash() 1525 pool.all.Remove(hash) 1526 log.Trace("Removed old pending transaction", "hash", hash) 1527 } 1528 // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later 1529 drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1530 for _, tx := range drops { 1531 hash := tx.Hash() 1532 log.Trace("Removed unpayable pending transaction", "hash", hash) 1533 pool.all.Remove(hash) 1534 } 1535 pendingNofundsMeter.Mark(int64(len(drops))) 1536 1537 for _, tx := range invalids { 1538 hash := tx.Hash() 1539 log.Trace("Demoting pending transaction", "hash", hash) 1540 1541 // Internal shuffle shouldn't touch the lookup set. 1542 pool.enqueueTx(hash, tx, false, false) 1543 } 1544 pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1545 if pool.locals.contains(addr) { 1546 localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1547 } 1548 // If there's a gap in front, alert (should never happen) and postpone all transactions 1549 if list.Len() > 0 && list.txs.Get(nonce) == nil { 1550 gapped := list.Cap(0) 1551 for _, tx := range gapped { 1552 hash := tx.Hash() 1553 log.Error("Demoting invalidated transaction", "hash", hash) 1554 1555 // Internal shuffle shouldn't touch the lookup set. 1556 pool.enqueueTx(hash, tx, false, false) 1557 } 1558 pendingGauge.Dec(int64(len(gapped))) 1559 } 1560 // Delete the entire pending entry if it became empty. 1561 if list.Empty() { 1562 delete(pool.pending, addr) 1563 } 1564 } 1565 } 1566 1567 // addressByHeartbeat is an account address tagged with its last activity timestamp. 1568 type addressByHeartbeat struct { 1569 address common.Address 1570 heartbeat time.Time 1571 } 1572 1573 type addressesByHeartbeat []addressByHeartbeat 1574 1575 func (a addressesByHeartbeat) Len() int { return len(a) } 1576 func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } 1577 func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 1578 1579 // accountSet is simply a set of addresses to check for existence, and a signer 1580 // capable of deriving addresses from transactions. 1581 type accountSet struct { 1582 accounts map[common.Address]struct{} 1583 signer types.Signer 1584 cache *[]common.Address 1585 } 1586 1587 // newAccountSet creates a new address set with an associated signer for sender 1588 // derivations. 1589 func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { 1590 as := &accountSet{ 1591 accounts: make(map[common.Address]struct{}, len(addrs)), 1592 signer: signer, 1593 } 1594 for _, addr := range addrs { 1595 as.add(addr) 1596 } 1597 return as 1598 } 1599 1600 // contains checks if a given address is contained within the set. 1601 func (as *accountSet) contains(addr common.Address) bool { 1602 _, exist := as.accounts[addr] 1603 return exist 1604 } 1605 1606 // containsTx checks if the sender of a given tx is within the set. If the sender 1607 // cannot be derived, this method returns false. 1608 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1609 if addr, err := types.Sender(as.signer, tx); err == nil { 1610 return as.contains(addr) 1611 } 1612 return false 1613 } 1614 1615 // add inserts a new address into the set to track. 1616 func (as *accountSet) add(addr common.Address) { 1617 as.accounts[addr] = struct{}{} 1618 as.cache = nil 1619 } 1620 1621 // addTx adds the sender of tx into the set. 1622 func (as *accountSet) addTx(tx *types.Transaction) { 1623 if addr, err := types.Sender(as.signer, tx); err == nil { 1624 as.add(addr) 1625 } 1626 } 1627 1628 // flatten returns the list of addresses within this set, also caching it for later 1629 // reuse. The returned slice should not be changed! 1630 func (as *accountSet) flatten() []common.Address { 1631 if as.cache == nil { 1632 accounts := make([]common.Address, 0, len(as.accounts)) 1633 for account := range as.accounts { 1634 accounts = append(accounts, account) 1635 } 1636 as.cache = &accounts 1637 } 1638 return *as.cache 1639 } 1640 1641 // merge adds all addresses from the 'other' set into 'as'. 1642 func (as *accountSet) merge(other *accountSet) { 1643 for addr := range other.accounts { 1644 as.accounts[addr] = struct{}{} 1645 } 1646 as.cache = nil 1647 } 1648 1649 // lookup is used internally by TxPool to track transactions while allowing 1650 // lookup without mutex contention. 1651 // 1652 // Note, although this type is properly protected against concurrent access, it 1653 // is **not** a type that should ever be mutated or even exposed outside of the 1654 // transaction pool, since its internal state is tightly coupled with the pools 1655 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1656 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1657 // TxPool.mu mutex. 1658 // 1659 // This lookup set combines the notion of "local transactions", which is useful 1660 // to build upper-level structure. 1661 type lookup struct { 1662 slots int 1663 lock sync.RWMutex 1664 locals map[common.Hash]*types.Transaction 1665 remotes map[common.Hash]*types.Transaction 1666 } 1667 1668 // newLookup returns a new lookup structure. 1669 func newLookup() *lookup { 1670 return &lookup{ 1671 locals: make(map[common.Hash]*types.Transaction), 1672 remotes: make(map[common.Hash]*types.Transaction), 1673 } 1674 } 1675 1676 // Range calls f on each key and value present in the map. The callback passed 1677 // should return the indicator whether the iteration needs to be continued. 1678 // Callers need to specify which set (or both) to be iterated. 1679 func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1680 t.lock.RLock() 1681 defer t.lock.RUnlock() 1682 1683 if local { 1684 for key, value := range t.locals { 1685 if !f(key, value, true) { 1686 return 1687 } 1688 } 1689 } 1690 if remote { 1691 for key, value := range t.remotes { 1692 if !f(key, value, false) { 1693 return 1694 } 1695 } 1696 } 1697 } 1698 1699 // Get returns a transaction if it exists in the lookup, or nil if not found. 1700 func (t *lookup) Get(hash common.Hash) *types.Transaction { 1701 t.lock.RLock() 1702 defer t.lock.RUnlock() 1703 1704 if tx := t.locals[hash]; tx != nil { 1705 return tx 1706 } 1707 return t.remotes[hash] 1708 } 1709 1710 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1711 func (t *lookup) GetLocal(hash common.Hash) *types.Transaction { 1712 t.lock.RLock() 1713 defer t.lock.RUnlock() 1714 1715 return t.locals[hash] 1716 } 1717 1718 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1719 func (t *lookup) GetRemote(hash common.Hash) *types.Transaction { 1720 t.lock.RLock() 1721 defer t.lock.RUnlock() 1722 1723 return t.remotes[hash] 1724 } 1725 1726 // Count returns the current number of transactions in the lookup. 1727 func (t *lookup) Count() int { 1728 t.lock.RLock() 1729 defer t.lock.RUnlock() 1730 1731 return len(t.locals) + len(t.remotes) 1732 } 1733 1734 // LocalCount returns the current number of local transactions in the lookup. 1735 func (t *lookup) LocalCount() int { 1736 t.lock.RLock() 1737 defer t.lock.RUnlock() 1738 1739 return len(t.locals) 1740 } 1741 1742 // RemoteCount returns the current number of remote transactions in the lookup. 1743 func (t *lookup) RemoteCount() int { 1744 t.lock.RLock() 1745 defer t.lock.RUnlock() 1746 1747 return len(t.remotes) 1748 } 1749 1750 // Slots returns the current number of slots used in the lookup. 1751 func (t *lookup) Slots() int { 1752 t.lock.RLock() 1753 defer t.lock.RUnlock() 1754 1755 return t.slots 1756 } 1757 1758 // Add adds a transaction to the lookup. 1759 func (t *lookup) Add(tx *types.Transaction, local bool) { 1760 t.lock.Lock() 1761 defer t.lock.Unlock() 1762 1763 t.slots += numSlots(tx) 1764 slotsGauge.Update(int64(t.slots)) 1765 1766 if local { 1767 t.locals[tx.Hash()] = tx 1768 } else { 1769 t.remotes[tx.Hash()] = tx 1770 } 1771 } 1772 1773 // Remove removes a transaction from the lookup. 1774 func (t *lookup) Remove(hash common.Hash) { 1775 t.lock.Lock() 1776 defer t.lock.Unlock() 1777 1778 tx, ok := t.locals[hash] 1779 if !ok { 1780 tx, ok = t.remotes[hash] 1781 } 1782 if !ok { 1783 log.Error("No transaction found to be deleted", "hash", hash) 1784 return 1785 } 1786 t.slots -= numSlots(tx) 1787 slotsGauge.Update(int64(t.slots)) 1788 1789 delete(t.locals, hash) 1790 delete(t.remotes, hash) 1791 } 1792 1793 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1794 // set. The assumption is held the locals set is thread-safe to be used. 1795 func (t *lookup) RemoteToLocals(locals *accountSet) int { 1796 t.lock.Lock() 1797 defer t.lock.Unlock() 1798 1799 var migrated int 1800 for hash, tx := range t.remotes { 1801 if locals.containsTx(tx) { 1802 t.locals[hash] = tx 1803 delete(t.remotes, hash) 1804 migrated += 1 1805 } 1806 } 1807 return migrated 1808 } 1809 1810 // RemotesBelowTip finds all remote transactions below the given tip threshold. 1811 func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions { 1812 found := make(types.Transactions, 0, 128) 1813 t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { 1814 if tx.GasTipCapIntCmp(threshold) < 0 { 1815 found = append(found, tx) 1816 } 1817 return true 1818 }, false, true) // Only iterate remotes 1819 return found 1820 } 1821 1822 // numSlots calculates the number of slots needed for a single transaction. 1823 func numSlots(tx *types.Transaction) int { 1824 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1825 }