github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/evmcore/tx_pool.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package evmcore 18 19 import ( 20 "errors" 21 "math" 22 "math/big" 23 "math/rand" 24 "sort" 25 "sync" 26 "time" 27 28 "github.com/unicornultrafoundation/go-u2u/common" 29 "github.com/unicornultrafoundation/go-u2u/common/prque" 30 "github.com/unicornultrafoundation/go-u2u/core/state" 31 "github.com/unicornultrafoundation/go-u2u/core/types" 32 notify "github.com/unicornultrafoundation/go-u2u/event" 33 "github.com/unicornultrafoundation/go-u2u/log" 34 "github.com/unicornultrafoundation/go-u2u/metrics" 35 "github.com/unicornultrafoundation/go-u2u/params" 36 37 "github.com/unicornultrafoundation/go-u2u/utils/signers/gsignercache" 38 "github.com/unicornultrafoundation/go-u2u/utils/txtime" 39 ) 40 41 const ( 42 // chainHeadChanSize is the size of channel listening to ChainHeadNotify. 43 chainHeadChanSize = 10 44 45 // txSlotSize is used to calculate how many data slots a single transaction 46 // takes up based on its size. The slots are used as DoS protection, ensuring 47 // that validating a new transaction remains a constant operation (in reality 48 // O(maxslots), where max slots are 4 currently). 49 txSlotSize = 32 * 1024 50 51 // txMaxSize is the maximum size a single transaction can have. This field has 52 // non-trivial consequences: larger transactions are significantly harder and 53 // more expensive to propagate; larger transactions also take more resources 54 // to validate whether they fit into the pool or not. 55 txMaxSize = 4 * txSlotSize // 128KB 56 ) 57 58 var ( 59 // ErrAlreadyKnown is returned if the transactions is already contained 60 // within the pool. 61 ErrAlreadyKnown = errors.New("already known") 62 63 // ErrInvalidSender is returned if the transaction contains an invalid signature. 64 ErrInvalidSender = errors.New("invalid sender") 65 66 // ErrUnderpriced is returned if a transaction's gas price is below the minimum 67 // configured for the transaction pool. 68 ErrUnderpriced = errors.New("transaction underpriced") 69 70 // ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet 71 // another remote transaction. 72 ErrTxPoolOverflow = errors.New("txpool is full") 73 74 // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced 75 // with a different one without the required price bump. 76 ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") 77 78 // ErrGasLimit is returned if a transaction's requested gas limit exceeds the 79 // maximum allowance of the current block. 80 ErrGasLimit = errors.New("exceeds block gas limit") 81 82 // ErrNegativeValue is a sanity error to ensure no one is able to specify a 83 // transaction with a negative value. 84 ErrNegativeValue = errors.New("negative value") 85 86 // ErrOversizedData is returned if the input data of a transaction is greater 87 // than some meaningful limit a user might use. This is not a consensus error 88 // making the transaction invalid, rather a DOS protection. 89 ErrOversizedData = errors.New("oversized data") 90 ) 91 92 var ( 93 evictionInterval = time.Minute // Time interval to check for evictable transactions 94 statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats 95 ) 96 97 var ( 98 // Metrics for the pending pool 99 pendingDiscardMeter = metrics.GetOrRegisterMeter("txpool/pending/discard", nil) 100 pendingReplaceMeter = metrics.GetOrRegisterMeter("txpool/pending/replace", nil) 101 pendingRateLimitMeter = metrics.GetOrRegisterMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting 102 pendingNofundsMeter = metrics.GetOrRegisterMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds 103 104 // Metrics for the queued pool 105 queuedDiscardMeter = metrics.GetOrRegisterMeter("txpool/queued/discard", nil) 106 queuedReplaceMeter = metrics.GetOrRegisterMeter("txpool/queued/replace", nil) 107 queuedRateLimitMeter = metrics.GetOrRegisterMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting 108 queuedNofundsMeter = metrics.GetOrRegisterMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds 109 queuedEvictionMeter = metrics.GetOrRegisterMeter("txpool/queued/eviction", nil) // Dropped due to lifetime 110 111 // General tx metrics 112 validTxMeter = metrics.GetOrRegisterMeter("txpool/valid", nil) 113 invalidTxMeter = metrics.GetOrRegisterMeter("txpool/invalid", nil) 114 underpricedTxMeter = metrics.GetOrRegisterMeter("txpool/underpriced", nil) 115 overflowedTxMeter = metrics.GetOrRegisterMeter("txpool/overflowed", nil) 116 117 pendingGauge = metrics.GetOrRegisterGauge("txpool/pending", nil) 118 queuedGauge = metrics.GetOrRegisterGauge("txpool/queued", nil) 119 localGauge = metrics.GetOrRegisterGauge("txpool/local", nil) 120 slotsGauge = metrics.GetOrRegisterGauge("txpool/slots", nil) 121 122 reheapTimer = metrics.GetOrRegisterTimer("txpool/reheap", nil) 123 ) 124 125 // TxStatus is the current status of a transaction as seen by the pool. 126 type TxStatus uint 127 128 const ( 129 TxStatusUnknown TxStatus = iota 130 TxStatusQueued 131 TxStatusPending 132 TxStatusIncluded 133 ) 134 135 // StateReader provides the state of blockchain and current gas limit to do 136 // some pre checks in tx pool and event subscribers. 137 type StateReader interface { 138 Config() *params.ChainConfig 139 CurrentBlock() *EvmBlock 140 GetBlock(hash common.Hash, number uint64) *EvmBlock 141 StateAt(root common.Hash) (*state.StateDB, error) 142 MinGasPrice() *big.Int 143 EffectiveMinTip() *big.Int 144 MaxGasLimit() uint64 145 SubscribeNewBlock(ch chan<- ChainHeadNotify) notify.Subscription 146 } 147 148 // TxPoolConfig are the configuration parameters of the transaction pool. 149 type TxPoolConfig struct { 150 Locals []common.Address // Addresses that should be treated by default as local 151 NoLocals bool // Whether local transaction handling should be disabled 152 Journal string // Journal of local transactions to survive node restarts 153 Rejournal time.Duration // Time interval to regenerate the local transaction journal 154 155 PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool 156 PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) 157 158 AccountSlots uint64 // Number of executable transaction slots guaranteed per account 159 GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts 160 AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account 161 GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts 162 163 Lifetime time.Duration // Maximum amount of time non-executable transaction are queued 164 } 165 166 // DefaultTxPoolConfig contains the default configurations for the transaction 167 // pool. 168 var DefaultTxPoolConfig = TxPoolConfig{ 169 Journal: "transactions.rlp", 170 Rejournal: time.Hour, 171 172 PriceLimit: 0, 173 PriceBump: 10, 174 175 AccountSlots: 32, 176 GlobalSlots: 2048 + 512, // urgent + floating queue capacity with 4:1 ratio 177 AccountQueue: 128, 178 GlobalQueue: 512, 179 180 Lifetime: 1 * time.Hour, 181 } 182 183 // sanitize checks the provided user configurations and changes anything that's 184 // unreasonable or unworkable. 185 func (config *TxPoolConfig) sanitize() TxPoolConfig { 186 conf := *config 187 if conf.Rejournal < time.Second { 188 log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) 189 conf.Rejournal = time.Second 190 } 191 if conf.PriceBump < 1 { 192 log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) 193 conf.PriceBump = DefaultTxPoolConfig.PriceBump 194 } 195 if conf.AccountSlots < 1 { 196 log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) 197 conf.AccountSlots = DefaultTxPoolConfig.AccountSlots 198 } 199 if conf.GlobalSlots < 1 { 200 log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) 201 conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots 202 } 203 if conf.AccountQueue < 1 { 204 log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) 205 conf.AccountQueue = DefaultTxPoolConfig.AccountQueue 206 } 207 if conf.GlobalQueue < 1 { 208 log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) 209 conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue 210 } 211 if conf.Lifetime < 1 { 212 log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) 213 conf.Lifetime = DefaultTxPoolConfig.Lifetime 214 } 215 return conf 216 } 217 218 // TxPool contains all currently known transactions. Transactions 219 // enter the pool when they are received from the network or submitted 220 // locally. They exit the pool when they are included in the blockchain. 221 // 222 // The pool separates processable transactions (which can be applied to the 223 // current state) and future transactions. Transactions move between those 224 // two states over time as they are received and processed. 225 type TxPool struct { 226 config TxPoolConfig 227 chainconfig *params.ChainConfig 228 chain StateReader 229 gasPrice *big.Int 230 txFeed notify.Feed 231 scope notify.SubscriptionScope 232 signer types.Signer 233 mu sync.RWMutex 234 235 istanbul bool // Fork indicator whether we are in the istanbul stage. 236 eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. 237 eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions. 238 239 currentState *state.StateDB // Current state in the blockchain head 240 pendingNonces *txNoncer // Pending state tracking virtual nonces 241 currentMaxGas uint64 // Current gas limit for transaction caps 242 243 locals *accountSet // Set of local transaction to exempt from eviction rules 244 journal *txJournal // Journal of local transaction to back up to disk 245 246 pending map[common.Address]*txList // All currently processable transactions 247 queue map[common.Address]*txList // Queued but non-processable transactions 248 beats map[common.Address]time.Time // Last heartbeat from each known account 249 all *txLookup // All transactions to allow lookups 250 priced *txPricedList // All transactions sorted by price 251 252 chainHeadCh chan ChainHeadNotify 253 chainHeadSub notify.Subscription 254 reqResetCh chan *txpoolResetRequest 255 reqPromoteCh chan *accountSet 256 queueTxEventCh chan *types.Transaction 257 reorgDoneCh chan chan struct{} 258 reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop 259 wg sync.WaitGroup // tracks loop, scheduleReorgLoop 260 } 261 262 type txpoolResetRequest struct { 263 oldHead, newHead *EvmHeader 264 } 265 266 // NewTxPool creates a new transaction pool to gather, sort and filter inbound 267 // transactions from the network. 268 func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain StateReader) *TxPool { 269 // Sanitize the input to ensure no vulnerable gas prices are set 270 config = (&config).sanitize() 271 272 // Create the transaction pool with its initial settings 273 pool := &TxPool{ 274 config: config, 275 chainconfig: chainconfig, 276 chain: chain, 277 signer: gsignercache.Wrap(types.LatestSignerForChainID(chainconfig.ChainID)), 278 pending: make(map[common.Address]*txList), 279 queue: make(map[common.Address]*txList), 280 beats: make(map[common.Address]time.Time), 281 all: newTxLookup(), 282 chainHeadCh: make(chan ChainHeadNotify, chainHeadChanSize), 283 reqResetCh: make(chan *txpoolResetRequest), 284 reqPromoteCh: make(chan *accountSet), 285 queueTxEventCh: make(chan *types.Transaction), 286 reorgDoneCh: make(chan chan struct{}), 287 reorgShutdownCh: make(chan struct{}), 288 gasPrice: new(big.Int).SetUint64(config.PriceLimit), 289 } 290 pool.locals = newAccountSet(pool.signer) 291 for _, addr := range config.Locals { 292 log.Info("Setting new local account", "address", addr) 293 pool.locals.add(addr) 294 } 295 pool.priced = newTxPricedList(pool.all) 296 pool.reset(nil, chain.CurrentBlock().Header()) 297 298 // Start the reorg loop early so it can handle requests generated during journal loading. 299 pool.wg.Add(1) 300 go pool.scheduleReorgLoop() 301 302 // If local transactions and journaling is enabled, load from disk 303 if !config.NoLocals && config.Journal != "" { 304 pool.journal = newTxJournal(config.Journal) 305 306 if err := pool.journal.load(pool.AddLocals); err != nil { 307 log.Warn("Failed to load transaction journal", "err", err) 308 } 309 if err := pool.journal.rotate(pool.local()); err != nil { 310 log.Warn("Failed to rotate transaction journal", "err", err) 311 } 312 } 313 314 // Subscribe events from blockchain and start the main event loop. 315 pool.chainHeadSub = pool.chain.SubscribeNewBlock(pool.chainHeadCh) 316 pool.wg.Add(1) 317 go pool.loop() 318 319 return pool 320 } 321 322 // loop is the transaction pool's main event loop, waiting for and reacting to 323 // outside blockchain events as well as for various reporting and transaction 324 // eviction events. 325 func (pool *TxPool) loop() { 326 defer pool.wg.Done() 327 328 var ( 329 prevPending, prevQueued, prevStales int 330 // Start the stats reporting and transaction eviction tickers 331 report = time.NewTicker(statsReportInterval) 332 evict = time.NewTicker(evictionInterval) 333 journal = time.NewTicker(pool.config.Rejournal) 334 // Track the previous head headers for transaction reorgs 335 head = pool.chain.CurrentBlock() 336 ) 337 defer report.Stop() 338 defer evict.Stop() 339 defer journal.Stop() 340 341 for { 342 select { 343 // Handle ChainHeadNotify 344 case ev := <-pool.chainHeadCh: 345 if ev.Block != nil { 346 pool.requestReset(head.Header(), ev.Block.Header()) 347 head = ev.Block 348 } 349 350 // System shutdown. 351 case <-pool.chainHeadSub.Err(): 352 close(pool.reorgShutdownCh) 353 return 354 355 // Handle stats reporting ticks 356 case <-report.C: 357 pool.mu.RLock() 358 pending, queued := pool.stats() 359 stales := pool.priced.stales 360 pool.mu.RUnlock() 361 362 if pending != prevPending || queued != prevQueued || stales != prevStales { 363 log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) 364 prevPending, prevQueued, prevStales = pending, queued, stales 365 } 366 367 // Handle inactive account transaction eviction 368 case <-evict.C: 369 pool.mu.Lock() 370 for addr := range pool.queue { 371 // Skip local transactions from the eviction mechanism 372 if pool.locals.contains(addr) { 373 continue 374 } 375 // Any non-locals old enough should be removed 376 if time.Since(pool.beats[addr]) > pool.config.Lifetime { 377 list := pool.queue[addr].Flatten() 378 for _, tx := range list { 379 pool.removeTx(tx.Hash(), true) 380 } 381 queuedEvictionMeter.Mark(int64(len(list))) 382 } 383 } 384 pool.mu.Unlock() 385 386 // Handle local transaction journal rotation 387 case <-journal.C: 388 if pool.journal != nil { 389 pool.mu.Lock() 390 if err := pool.journal.rotate(pool.local()); err != nil { 391 log.Warn("Failed to rotate local tx journal", "err", err) 392 } 393 pool.mu.Unlock() 394 } 395 } 396 } 397 } 398 399 // Stop terminates the transaction pool. 400 func (pool *TxPool) Stop() { 401 // Unsubscribe all subscriptions registered from txpool 402 pool.scope.Close() 403 404 // Unsubscribe subscriptions registered from blockchain 405 pool.chainHeadSub.Unsubscribe() 406 pool.wg.Wait() 407 408 if pool.journal != nil { 409 pool.journal.close() 410 } 411 log.Info("Transaction pool stopped") 412 } 413 414 // SubscribeNewTxsNotify registers a subscription of NewTxsNotify and 415 // starts sending event to the given channel. 416 func (pool *TxPool) SubscribeNewTxsNotify(ch chan<- NewTxsNotify) notify.Subscription { 417 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 418 } 419 420 // GasPrice returns the current gas price enforced by the transaction pool. 421 func (pool *TxPool) GasPrice() *big.Int { 422 pool.mu.RLock() 423 defer pool.mu.RUnlock() 424 425 return new(big.Int).Set(pool.gasPrice) 426 } 427 428 // SetGasPrice updates the minimum price required by the transaction pool for a 429 // new transaction, and drops all transactions below this threshold. 430 func (pool *TxPool) SetGasPrice(price *big.Int) { 431 pool.mu.Lock() 432 defer pool.mu.Unlock() 433 434 old := pool.gasPrice 435 pool.gasPrice = price 436 // if the min miner fee increased, remove transactions below the new threshold 437 if price.Cmp(old) > 0 { 438 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 439 drop := pool.all.RemotesBelowTip(price) 440 for _, tx := range drop { 441 pool.removeTx(tx.Hash(), false) 442 } 443 pool.priced.Removed(len(drop)) 444 } 445 446 log.Info("Transaction pool price threshold updated", "price", price) 447 } 448 449 // Nonce returns the next nonce of an account, with all transactions executable 450 // by the pool already applied on top. 451 func (pool *TxPool) Nonce(addr common.Address) uint64 { 452 pool.mu.RLock() 453 defer pool.mu.RUnlock() 454 455 return pool.pendingNonces.get(addr) 456 } 457 458 // Stats retrieves the current pool stats, namely the number of pending and the 459 // number of queued (non-executable) transactions. 460 func (pool *TxPool) Stats() (int, int) { 461 pool.mu.RLock() 462 defer pool.mu.RUnlock() 463 464 return pool.stats() 465 } 466 467 // Count returns the total number of transactions 468 func (pool *TxPool) Count() int { 469 pool.mu.RLock() 470 defer pool.mu.RUnlock() 471 472 return pool.all.Count() 473 } 474 475 // stats retrieves the current pool stats, namely the number of pending and the 476 // number of queued (non-executable) transactions. 477 func (pool *TxPool) stats() (int, int) { 478 pending := 0 479 for _, list := range pool.pending { 480 pending += list.Len() 481 } 482 queued := 0 483 for _, list := range pool.queue { 484 queued += list.Len() 485 } 486 return pending, queued 487 } 488 489 // Content retrieves the data content of the transaction pool, returning all the 490 // pending as well as queued transactions, grouped by account and sorted by nonce. 491 func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { 492 pool.mu.Lock() 493 defer pool.mu.Unlock() 494 495 pending := make(map[common.Address]types.Transactions) 496 for addr, list := range pool.pending { 497 pending[addr] = list.Flatten() 498 } 499 queued := make(map[common.Address]types.Transactions) 500 for addr, list := range pool.queue { 501 queued[addr] = list.Flatten() 502 } 503 return pending, queued 504 } 505 506 // ContentFrom retrieves the data content of the transaction pool, returning the 507 // pending as well as queued transactions of this address, grouped by nonce. 508 func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { 509 pool.mu.RLock() 510 defer pool.mu.RUnlock() 511 512 var pending types.Transactions 513 if list, ok := pool.pending[addr]; ok { 514 pending = list.Flatten() 515 } 516 var queued types.Transactions 517 if list, ok := pool.queue[addr]; ok { 518 queued = list.Flatten() 519 } 520 return pending, queued 521 } 522 523 // Pending retrieves all currently processable transactions, grouped by origin 524 // account and sorted by nonce. The returned transaction set is a copy and can be 525 // freely modified by calling code. 526 // 527 // The enforceTips parameter can be used to do an extra filtering on the pending 528 // transactions and only return those whose **effective** tip is large enough in 529 // the next pending execution environment. 530 func (pool *TxPool) Pending(enforceTips bool) (map[common.Address]types.Transactions, error) { 531 pool.mu.Lock() 532 defer pool.mu.Unlock() 533 534 pending := make(map[common.Address]types.Transactions) 535 for addr, list := range pool.pending { 536 txs := list.Flatten() 537 538 // If the miner requests tip enforcement, cap the lists now 539 if enforceTips && !pool.locals.contains(addr) { 540 for i, tx := range txs { 541 if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { 542 txs = txs[:i] 543 break 544 } 545 } 546 } 547 if len(txs) > 0 { 548 pending[addr] = txs 549 } 550 } 551 return pending, nil 552 } 553 554 func (pool *TxPool) SampleHashes(max int) []common.Hash { 555 return pool.all.SampleHashes(max) 556 } 557 558 // Locals retrieves the accounts currently considered local by the pool. 559 func (pool *TxPool) Locals() []common.Address { 560 pool.mu.Lock() 561 defer pool.mu.Unlock() 562 563 return pool.locals.flatten() 564 } 565 566 // local retrieves all currently known local transactions, grouped by origin 567 // account and sorted by nonce. The returned transaction set is a copy and can be 568 // freely modified by calling code. 569 func (pool *TxPool) local() map[common.Address]types.Transactions { 570 txs := make(map[common.Address]types.Transactions) 571 for addr := range pool.locals.accounts { 572 if pending := pool.pending[addr]; pending != nil { 573 txs[addr] = append(txs[addr], pending.Flatten()...) 574 } 575 if queued := pool.queue[addr]; queued != nil { 576 txs[addr] = append(txs[addr], queued.Flatten()...) 577 } 578 } 579 return txs 580 } 581 582 // validateTx checks whether a transaction is valid according to the consensus 583 // rules and adheres to some heuristic limits of the local node (price and size). 584 func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { 585 // Accept only legacy transactions until EIP-2718/2930 activates. 586 if !pool.eip2718 && tx.Type() != types.LegacyTxType { 587 return ErrTxTypeNotSupported 588 } 589 // Reject dynamic fee transactions until EIP-1559 activates. 590 if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { 591 return ErrTxTypeNotSupported 592 } 593 // Reject transactions over defined size to prevent DOS attacks 594 if uint64(tx.Size()) > txMaxSize { 595 return ErrOversizedData 596 } 597 // Transactions can't be negative. This may never happen using RLP decoded 598 // transactions but may occur if you create a transaction using the RPC. 599 if tx.Value().Sign() < 0 { 600 return ErrNegativeValue 601 } 602 // Ensure the transaction doesn't exceed the current block limit gas. 603 if pool.currentMaxGas < tx.Gas() { 604 return ErrGasLimit 605 } 606 // Sanity check for extremely large numbers 607 if tx.GasFeeCap().BitLen() > 256 { 608 return ErrFeeCapVeryHigh 609 } 610 if tx.GasTipCap().BitLen() > 256 { 611 return ErrTipVeryHigh 612 } 613 // Ensure gasFeeCap is greater than or equal to gasTipCap. 614 if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { 615 return ErrTipAboveFeeCap 616 } 617 // Make sure the transaction is signed properly. 618 from, err := types.Sender(pool.signer, tx) 619 if err != nil { 620 return ErrInvalidSender 621 } 622 // Drop non-local transactions under our own minimal accepted gas price or tip 623 local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network 624 if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { 625 return ErrUnderpriced 626 } 627 // Ensure U2U-specific hard bounds 628 if recommendedGasTip, minPrice := pool.chain.EffectiveMinTip(), pool.chain.MinGasPrice(); recommendedGasTip != nil && minPrice != nil { 629 if tx.GasTipCapIntCmp(recommendedGasTip) < 0 { 630 return ErrUnderpriced 631 } 632 if tx.GasFeeCapIntCmp(new(big.Int).Add(recommendedGasTip, minPrice)) < 0 { 633 return ErrUnderpriced 634 } 635 } 636 // Ensure the transaction adheres to nonce ordering 637 if pool.currentState.GetNonce(from) > tx.Nonce() { 638 return ErrNonceTooLow 639 } 640 // Transactor should have enough funds to cover the costs 641 // cost == V + GP * GL 642 if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { 643 return ErrInsufficientFunds 644 } 645 // Ensure the transaction has more gas than the basic tx fee. 646 intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil) 647 if err != nil { 648 return err 649 } 650 if tx.Gas() < intrGas { 651 return ErrIntrinsicGas 652 } 653 return nil 654 } 655 656 // add validates a transaction and inserts it into the non-executable queue for later 657 // pending promotion and execution. If the transaction is a replacement for an already 658 // pending or queued one, it overwrites the previous transaction if its price is higher. 659 // 660 // If a newly added transaction is marked as local, its sending account will be 661 // be added to the allowlist, preventing any associated transaction from being dropped 662 // out of the pool due to pricing constraints. 663 func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { 664 // If the transaction is already known, discard it 665 hash := tx.Hash() 666 if pool.all.Get(hash) != nil { 667 log.Trace("Discarding already known transaction", "hash", hash) 668 return false, ErrAlreadyKnown 669 } 670 // Make the local flag. If it's from local source or it's from the network but 671 // the sender is marked as local previously, treat it as the local transaction. 672 isLocal := local || pool.locals.containsTx(tx) 673 674 // If the transaction fails basic validation, discard it 675 if err := pool.validateTx(tx, isLocal); err != nil { 676 log.Trace("Discarding invalid transaction", "hash", hash, "err", err) 677 invalidTxMeter.Mark(1) 678 return false, err 679 } 680 // If the transaction pool is full, discard underpriced transactions 681 if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { 682 // If the new transaction is underpriced, don't accept it 683 if !isLocal && pool.priced.Underpriced(tx) { 684 log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 685 underpricedTxMeter.Mark(1) 686 return false, ErrUnderpriced 687 } 688 // New transaction is better than our worse ones, make room for it. 689 // If it's a local transaction, forcibly discard all available transactions. 690 // Otherwise if we can't make enough room for new one, abort the operation. 691 drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) 692 693 // Special case, we still can't make the room for the new remote one. 694 if !isLocal && !success { 695 log.Trace("Discarding overflown transaction", "hash", hash) 696 overflowedTxMeter.Mark(1) 697 return false, ErrTxPoolOverflow 698 } 699 // Kick out the underpriced remote transactions. 700 for _, tx := range drop { 701 log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 702 underpricedTxMeter.Mark(1) 703 pool.removeTx(tx.Hash(), false) 704 } 705 } 706 // Try to replace an existing transaction in the pending pool 707 from, _ := types.Sender(pool.signer, tx) // already validated 708 if list := pool.pending[from]; list != nil && list.Overlaps(tx) { 709 // Nonce already pending, check if required price bump is met 710 inserted, old := list.Add(tx, pool.config.PriceBump) 711 if !inserted { 712 pendingDiscardMeter.Mark(1) 713 return false, ErrReplaceUnderpriced 714 } 715 // New transaction is better, replace old one 716 if old != nil { 717 pool.all.Remove(old.Hash()) 718 pool.priced.Removed(1) 719 pendingReplaceMeter.Mark(1) 720 } 721 pool.all.Add(tx, isLocal) 722 pool.priced.Put(tx, isLocal) 723 pool.journalTx(from, tx) 724 pool.queueTxEvent(tx) 725 log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) 726 727 // Successful promotion, bump the heartbeat 728 pool.beats[from] = time.Now() 729 return old != nil, nil 730 } 731 // New transaction isn't replacing a pending one, push into queue 732 replaced, err = pool.enqueueTx(hash, tx, isLocal, true) 733 if err != nil { 734 return false, err 735 } 736 // Mark local addresses and journal local transactions 737 if local && !pool.locals.contains(from) { 738 log.Info("Setting new local account", "address", from) 739 pool.locals.add(from) 740 migratedAS := pool.all.RemoteToLocals(pool.locals) // Migrate the remotes if it's marked as local first time. 741 pool.priced.Removed(migratedAS) 742 localGauge.Inc(int64(migratedAS)) 743 } 744 if isLocal { 745 localGauge.Inc(1) 746 } 747 pool.journalTx(from, tx) 748 749 log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) 750 return replaced, nil 751 } 752 753 // enqueueTx inserts a new transaction into the non-executable transaction queue. 754 // 755 // Note, this method assumes the pool lock is held! 756 func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { 757 // Try to insert the transaction into the future queue 758 from, _ := types.Sender(pool.signer, tx) // already validated 759 if pool.queue[from] == nil { 760 pool.queue[from] = newTxList(false) 761 } 762 inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) 763 if !inserted { 764 // An older transaction was better, discard this 765 queuedDiscardMeter.Mark(1) 766 return false, ErrReplaceUnderpriced 767 } 768 // Discard any previous transaction and mark this 769 if old != nil { 770 pool.all.Remove(old.Hash()) 771 pool.priced.Removed(1) 772 queuedReplaceMeter.Mark(1) 773 } else { 774 // Nothing was replaced, bump the queued counter 775 queuedGauge.Inc(1) 776 } 777 // If the transaction isn't in lookup set but it's expected to be there, 778 // show the error log. 779 if pool.all.Get(hash) == nil && !addAll { 780 log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) 781 } 782 if addAll { 783 pool.all.Add(tx, local) 784 pool.priced.Put(tx, local) 785 } 786 // If we never record the heartbeat, do it right now. 787 if _, exist := pool.beats[from]; !exist { 788 pool.beats[from] = time.Now() 789 } 790 return old != nil, nil 791 } 792 793 // journalTx adds the specified transaction to the local disk journal if it is 794 // deemed to have been sent from a local account. 795 func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { 796 // Only journal if it's enabled and the transaction is local 797 if pool.journal == nil || !pool.locals.contains(from) { 798 return 799 } 800 if err := pool.journal.insert(tx); err != nil { 801 log.Warn("Failed to journal local transaction", "err", err) 802 } 803 } 804 805 // promoteTx adds a transaction to the pending (processable) list of transactions 806 // and returns whether it was inserted or an older was better. 807 // 808 // Note, this method assumes the pool lock is held! 809 func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { 810 // Try to insert the transaction into the pending queue 811 if pool.pending[addr] == nil { 812 pool.pending[addr] = newTxList(true) 813 } 814 list := pool.pending[addr] 815 816 inserted, old := list.Add(tx, pool.config.PriceBump) 817 if !inserted { 818 // An older transaction was better, discard this 819 pool.all.Remove(hash) 820 pool.priced.Removed(1) 821 pendingDiscardMeter.Mark(1) 822 return false 823 } 824 // Otherwise discard any previous transaction and mark this 825 if old != nil { 826 pool.all.Remove(old.Hash()) 827 pool.priced.Removed(1) 828 pendingReplaceMeter.Mark(1) 829 } else { 830 // Nothing was replaced, bump the pending counter 831 pendingGauge.Inc(1) 832 } 833 // Set the potentially new pending nonce and notify any subsystems of the new tx 834 pool.pendingNonces.set(addr, tx.Nonce()+1) 835 836 // Successful promotion, bump the heartbeat 837 pool.beats[addr] = time.Now() 838 return true 839 } 840 841 // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the 842 // senders as a local ones, ensuring they go around the local pricing constraints. 843 // 844 // This method is used to add transactions from the RPC API and performs synchronous pool 845 // reorganization and event propagation. 846 func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { 847 return pool.addTxs(txs, !pool.config.NoLocals, true) 848 } 849 850 // AddLocal enqueues a single local transaction into the pool if it is valid. This is 851 // a convenience wrapper aroundd AddLocals. 852 func (pool *TxPool) AddLocal(tx *types.Transaction) error { 853 errs := pool.AddLocals([]*types.Transaction{tx}) 854 return errs[0] 855 } 856 857 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the 858 // senders are not among the locally tracked ones, full pricing constraints will apply. 859 // 860 // This method is used to add transactions from the p2p network and does not wait for pool 861 // reorganization and internal event propagation. 862 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 863 return pool.addTxs(txs, false, false) 864 } 865 866 // This is like AddRemotes, but waits for pool reorganization. Tests use this method. 867 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 868 return pool.addTxs(txs, false, true) 869 } 870 871 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 872 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 873 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 874 return errs[0] 875 } 876 877 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 878 // wrapper around AddRemotes. 879 // 880 // Deprecated: use AddRemotes 881 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 882 errs := pool.AddRemotes([]*types.Transaction{tx}) 883 return errs[0] 884 } 885 886 // addTxs attempts to queue a batch of transactions if they are valid. 887 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { 888 arrivedAt := time.Now() 889 // Filter out known ones without obtaining the pool lock or recovering signatures 890 var ( 891 errs = make([]error, len(txs)) 892 news = make([]*types.Transaction, 0, len(txs)) 893 ) 894 for i, tx := range txs { 895 // If the transaction is known, pre-set the error slot 896 if pool.all.Get(tx.Hash()) != nil { 897 errs[i] = ErrAlreadyKnown 898 continue 899 } 900 // Exclude transactions with invalid signatures as soon as 901 // possible and cache senders in transactions before 902 // obtaining lock 903 _, err := types.Sender(pool.signer, tx) 904 if err != nil { 905 errs[i] = ErrInvalidSender 906 invalidTxMeter.Mark(1) 907 continue 908 } 909 // Accumulate all unknown transactions for deeper processing 910 news = append(news, tx) 911 } 912 if len(news) == 0 { 913 return errs 914 } 915 916 // Process all the new transaction and merge any errors into the original slice 917 pool.mu.Lock() 918 newErrs, dirtyAddrs := pool.addTxsLocked(news, local) 919 pool.mu.Unlock() 920 921 // memorize tx time of validated transactions 922 for i, tx := range news { 923 if newErrs[i] == nil { 924 txtime.Validated(tx.Hash(), arrivedAt) 925 } 926 } 927 928 var nilSlot = 0 929 for _, err := range newErrs { 930 for errs[nilSlot] != nil { 931 nilSlot++ 932 } 933 errs[nilSlot] = err 934 nilSlot++ 935 } 936 // Reorg the pool internals if needed and return 937 done := pool.requestPromoteExecutables(dirtyAddrs) 938 if sync { 939 <-done 940 } 941 return errs 942 } 943 944 // addTxsLocked attempts to queue a batch of transactions if they are valid. 945 // The transaction pool lock must be held. 946 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 947 dirty := newAccountSet(pool.signer) 948 errs := make([]error, len(txs)) 949 for i, tx := range txs { 950 replaced, err := pool.add(tx, local) 951 errs[i] = err 952 if err == nil && !replaced { 953 dirty.addTx(tx) 954 } 955 } 956 validTxMeter.Mark(int64(len(dirty.accounts))) 957 return errs, dirty 958 } 959 960 // Status returns the status (unknown/pending/queued) of a batch of transactions 961 // identified by their hashes. 962 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 963 status := make([]TxStatus, len(hashes)) 964 for i, hash := range hashes { 965 tx := pool.Get(hash) 966 if tx == nil { 967 continue 968 } 969 from, _ := types.Sender(pool.signer, tx) // already validated 970 pool.mu.RLock() 971 if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 972 status[i] = TxStatusPending 973 } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 974 status[i] = TxStatusQueued 975 } 976 // implicit else: the tx may have been included into a block between 977 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 978 pool.mu.RUnlock() 979 } 980 return status 981 } 982 983 // Get returns a transaction if it is contained in the pool and nil otherwise. 984 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 985 return pool.all.Get(hash) 986 } 987 988 // Has returns an indicator whether txpool has a transaction cached with the 989 // given hash. 990 func (pool *TxPool) Has(hash common.Hash) bool { 991 return pool.all.Get(hash) != nil 992 } 993 994 func (pool *TxPool) OnlyNotExisting(hashes []common.Hash) []common.Hash { 995 return pool.all.OnlyNotExisting(hashes) 996 } 997 998 // removeTx removes a single transaction from the queue, moving all subsequent 999 // transactions back to the future queue. 1000 func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { 1001 // Fetch the transaction we wish to delete 1002 tx := pool.all.Get(hash) 1003 if tx == nil { 1004 return 1005 } 1006 addr, _ := types.Sender(pool.signer, tx) // already validated during insertion 1007 1008 // Remove it from the list of known transactions 1009 pool.all.Remove(hash) 1010 if outofbound { 1011 pool.priced.Removed(1) 1012 } 1013 if pool.locals.contains(addr) { 1014 localGauge.Dec(1) 1015 } 1016 // Remove the transaction from the pending lists and reset the account nonce 1017 if pending := pool.pending[addr]; pending != nil { 1018 if removed, invalids := pending.Remove(tx); removed { 1019 // If no more pending transactions are left, remove the list 1020 if pending.Empty() { 1021 delete(pool.pending, addr) 1022 } 1023 // Postpone any invalidated transactions 1024 for _, tx := range invalids { 1025 // Internal shuffle shouldn't touch the lookup set. 1026 pool.enqueueTx(tx.Hash(), tx, false, false) 1027 } 1028 // Update the account nonce if needed 1029 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1030 // Reduce the pending counter 1031 pendingGauge.Dec(int64(1 + len(invalids))) 1032 return 1033 } 1034 } 1035 // Transaction is in the future queue 1036 if future := pool.queue[addr]; future != nil { 1037 if removed, _ := future.Remove(tx); removed { 1038 // Reduce the queued counter 1039 queuedGauge.Dec(1) 1040 } 1041 if future.Empty() { 1042 delete(pool.queue, addr) 1043 delete(pool.beats, addr) 1044 } 1045 } 1046 } 1047 1048 // requestReset requests a pool reset to the new head block. 1049 // The returned channel is closed when the reset has occurred. 1050 func (pool *TxPool) requestReset(oldHead *EvmHeader, newHead *EvmHeader) chan struct{} { 1051 select { 1052 case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: 1053 return <-pool.reorgDoneCh 1054 case <-pool.reorgShutdownCh: 1055 return pool.reorgShutdownCh 1056 } 1057 } 1058 1059 // requestPromoteExecutables requests transaction promotion checks for the given addresses. 1060 // The returned channel is closed when the promotion checks have occurred. 1061 func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { 1062 select { 1063 case pool.reqPromoteCh <- set: 1064 return <-pool.reorgDoneCh 1065 case <-pool.reorgShutdownCh: 1066 return pool.reorgShutdownCh 1067 } 1068 } 1069 1070 // queueTxEvent enqueues a transaction event to be sent in the next reorg run. 1071 func (pool *TxPool) queueTxEvent(tx *types.Transaction) { 1072 select { 1073 case pool.queueTxEventCh <- tx: 1074 case <-pool.reorgShutdownCh: 1075 } 1076 } 1077 1078 // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not 1079 // call those methods directly, but request them being run using requestReset and 1080 // requestPromoteExecutables instead. 1081 func (pool *TxPool) scheduleReorgLoop() { 1082 defer pool.wg.Done() 1083 1084 var ( 1085 curDone chan struct{} // non-nil while runReorg is active 1086 nextDone = make(chan struct{}) 1087 launchNextRun bool 1088 reset *txpoolResetRequest 1089 dirtyAccounts *accountSet 1090 queuedEvents = make(map[common.Address]*txSortedMap) 1091 ) 1092 for { 1093 // Launch next background reorg if needed 1094 if curDone == nil && launchNextRun { 1095 // Run the background reorg and announcements 1096 go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) 1097 1098 // Prepare everything for the next round of reorg 1099 curDone, nextDone = nextDone, make(chan struct{}) 1100 launchNextRun = false 1101 1102 reset, dirtyAccounts = nil, nil 1103 queuedEvents = make(map[common.Address]*txSortedMap) 1104 } 1105 1106 select { 1107 case req := <-pool.reqResetCh: 1108 // Reset request: update head if request is already pending. 1109 if reset == nil { 1110 reset = req 1111 } else { 1112 reset.newHead = req.newHead 1113 } 1114 launchNextRun = true 1115 pool.reorgDoneCh <- nextDone 1116 1117 case req := <-pool.reqPromoteCh: 1118 // Promote request: update address set if request is already pending. 1119 if dirtyAccounts == nil { 1120 dirtyAccounts = req 1121 } else { 1122 dirtyAccounts.merge(req) 1123 } 1124 launchNextRun = true 1125 pool.reorgDoneCh <- nextDone 1126 1127 case tx := <-pool.queueTxEventCh: 1128 // Queue up the event, but don't schedule a reorg. It's up to the caller to 1129 // request one later if they want the events sent. 1130 addr, _ := types.Sender(pool.signer, tx) 1131 if _, ok := queuedEvents[addr]; !ok { 1132 queuedEvents[addr] = newTxSortedMap() 1133 } 1134 queuedEvents[addr].Put(tx) 1135 1136 case <-curDone: 1137 curDone = nil 1138 1139 case <-pool.reorgShutdownCh: 1140 // Wait for current run to finish. 1141 if curDone != nil { 1142 <-curDone 1143 } 1144 close(nextDone) 1145 return 1146 } 1147 } 1148 } 1149 1150 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. 1151 func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { 1152 defer close(done) 1153 1154 var promoteAddrs []common.Address 1155 if dirtyAccounts != nil && reset == nil { 1156 // Only dirty accounts need to be promoted, unless we're resetting. 1157 // For resets, all addresses in the tx queue will be promoted and 1158 // the flatten operation can be avoided. 1159 promoteAddrs = dirtyAccounts.flatten() 1160 } 1161 pool.mu.Lock() 1162 if reset != nil { 1163 // Reset from the old head to the new, rescheduling any reorged transactions 1164 pool.reset(reset.oldHead, reset.newHead) 1165 1166 // Nonces were reset, discard any events that became stale 1167 for addr := range events { 1168 events[addr].Forward(pool.pendingNonces.get(addr)) 1169 if events[addr].Len() == 0 { 1170 delete(events, addr) 1171 } 1172 } 1173 // Reset needs promote for all addresses 1174 promoteAddrs = make([]common.Address, 0, len(pool.queue)) 1175 for addr := range pool.queue { 1176 promoteAddrs = append(promoteAddrs, addr) 1177 } 1178 } 1179 // Check for pending transactions for every account that sent new ones 1180 promoted := pool.promoteExecutables(promoteAddrs) 1181 1182 // If a new block appeared, validate the pool of pending transactions. This will 1183 // remove any transaction that has been included in the block or was invalidated 1184 // because of another transaction (e.g. higher gas price). 1185 if reset != nil { 1186 pool.demoteUnexecutables() 1187 if pool.chain.MinGasPrice() != nil { 1188 // U2U-specific base fee 1189 pool.priced.SetBaseFee(pool.chain.MinGasPrice()) 1190 } else { 1191 // for tests only 1192 if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { 1193 pendingBaseFee := calcBaseFee(pool.chainconfig, reset.newHead.EthHeader()) 1194 pool.priced.SetBaseFee(pendingBaseFee) 1195 } 1196 } 1197 } 1198 // Ensure pool.queue and pool.pending sizes stay within the configured limits. 1199 pool.truncatePending() 1200 pool.truncateQueue() 1201 1202 // Update all accounts to the latest known pending nonce 1203 for addr, list := range pool.pending { 1204 highestPending := list.LastElement() 1205 pool.pendingNonces.set(addr, highestPending.Nonce()+1) 1206 } 1207 pool.mu.Unlock() 1208 1209 // Notify subsystems for newly added transactions 1210 for _, tx := range promoted { 1211 addr, _ := types.Sender(pool.signer, tx) 1212 if _, ok := events[addr]; !ok { 1213 events[addr] = newTxSortedMap() 1214 } 1215 events[addr].Put(tx) 1216 } 1217 if len(events) > 0 { 1218 var txs []*types.Transaction 1219 for _, set := range events { 1220 txs = append(txs, set.Flatten()...) 1221 } 1222 pool.txFeed.Send(NewTxsNotify{txs}) 1223 } 1224 } 1225 1226 // reset retrieves the current state of the blockchain and ensures the content 1227 // of the transaction pool is valid with regard to the chain state. 1228 func (pool *TxPool) reset(oldHead, newHead *EvmHeader) { 1229 // update chain config (U2U-specific) 1230 if newConfig := pool.chain.Config(); newConfig != nil { 1231 pool.chainconfig = newConfig 1232 } 1233 1234 // If we're reorging an old state, reinject all dropped transactions 1235 var reinject types.Transactions 1236 1237 if oldHead != nil && oldHead.Hash != newHead.ParentHash { 1238 // If the reorg is too deep, avoid doing it (will happen during fast sync) 1239 oldNum := oldHead.Number.Uint64() 1240 newNum := newHead.Number.Uint64() 1241 1242 if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { 1243 log.Debug("Skipping deep transaction reorg", "depth", depth) 1244 } else { 1245 // Reorg seems shallow enough to pull in all transactions into memory 1246 var discarded, included types.Transactions 1247 var ( 1248 rem = pool.chain.GetBlock(oldHead.Hash, oldHead.Number.Uint64()) 1249 add = pool.chain.GetBlock(newHead.Hash, newHead.Number.Uint64()) 1250 ) 1251 if rem == nil { 1252 // This can happen if a setHead is performed, where we simply discard the old 1253 // head from the chain. 1254 // If that is the case, we don't have the lost transactions any more, and 1255 // there's nothing to add 1256 if newNum >= oldNum { 1257 // If we reorged to a same or higher number, then it's not a case of setHead 1258 log.Warn("Transaction pool reset with missing oldhead", 1259 "old", oldHead.Hash, "oldnum", oldNum, "new", newHead.Hash, "newnum", newNum) 1260 return 1261 } 1262 // If the reorg ended up on a lower number, it's indicative of setHead being the cause 1263 log.Debug("Skipping transaction reset caused by setHead", 1264 "old", oldHead.Hash, "oldnum", oldNum, "new", newHead.Hash, "newnum", newNum) 1265 // We still need to update the current state s.th. the lost transactions can be readded by the user 1266 } else { 1267 for rem.NumberU64() > add.NumberU64() { 1268 discarded = append(discarded, rem.Transactions...) 1269 if rem = pool.chain.GetBlock(rem.ParentHash, rem.NumberU64()-1); rem == nil { 1270 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash) 1271 return 1272 } 1273 } 1274 for add.NumberU64() > rem.NumberU64() { 1275 included = append(included, add.Transactions...) 1276 if add = pool.chain.GetBlock(add.ParentHash, add.NumberU64()-1); add == nil { 1277 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash) 1278 return 1279 } 1280 } 1281 for rem.Hash != add.Hash { 1282 discarded = append(discarded, rem.Transactions...) 1283 if rem = pool.chain.GetBlock(rem.ParentHash, rem.NumberU64()-1); rem == nil { 1284 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash) 1285 return 1286 } 1287 included = append(included, add.Transactions...) 1288 if add = pool.chain.GetBlock(add.ParentHash, add.NumberU64()-1); add == nil { 1289 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash) 1290 return 1291 } 1292 } 1293 reinject = types.TxDifference(discarded, included) 1294 } 1295 } 1296 } 1297 // Initialize the internal state to the current head 1298 if newHead == nil { 1299 newHead = pool.chain.CurrentBlock().Header() // Special case during testing 1300 } 1301 statedb, err := pool.chain.StateAt(newHead.Root) 1302 if err != nil && pool.currentState == nil { 1303 log.Debug("Failed to access EVM state", "block", newHead.Number, "root", newHead.Root, "err", err) 1304 statedb, err = pool.chain.StateAt(common.Hash{}) 1305 } 1306 if err != nil { 1307 log.Error("Failed to reset txpool state", "block", newHead.Number, "root", newHead.Root, "err", err) 1308 return 1309 } 1310 pool.currentState = statedb 1311 pool.pendingNonces = newTxNoncer(statedb) 1312 pool.currentMaxGas = pool.chain.MaxGasLimit() 1313 1314 // Inject any transactions discarded due to reorgs 1315 log.Debug("Reinjecting stale transactions", "count", len(reinject)) 1316 senderCacher.recover(pool.signer, reinject) 1317 pool.addTxsLocked(reinject, false) 1318 1319 // Update all fork indicator by next pending block number. 1320 next := new(big.Int).Add(newHead.Number, big.NewInt(1)) 1321 pool.istanbul = pool.chainconfig.IsIstanbul(next) 1322 pool.eip2718 = pool.chainconfig.IsBerlin(next) 1323 pool.eip1559 = pool.chainconfig.IsLondon(next) 1324 } 1325 1326 // promoteExecutables moves transactions that have become processable from the 1327 // future queue to the set of pending transactions. During this process, all 1328 // invalidated transactions (low nonce, low balance) are deleted. 1329 func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction { 1330 // Track the promoted transactions to broadcast them at once 1331 var promoted []*types.Transaction 1332 1333 // Iterate over all accounts and promote any executable transactions 1334 for _, addr := range accounts { 1335 list := pool.queue[addr] 1336 if list == nil { 1337 continue // Just in case someone calls with a non existing account 1338 } 1339 // Drop all transactions that are deemed too old (low nonce) 1340 forwards := list.Forward(pool.currentState.GetNonce(addr)) 1341 for _, tx := range forwards { 1342 hash := tx.Hash() 1343 pool.all.Remove(hash) 1344 } 1345 log.Trace("Removed old queued transactions", "count", len(forwards)) 1346 // Drop all transactions that are too costly (low balance or out of gas) 1347 drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1348 for _, tx := range drops { 1349 hash := tx.Hash() 1350 pool.all.Remove(hash) 1351 } 1352 log.Trace("Removed unpayable queued transactions", "count", len(drops)) 1353 queuedNofundsMeter.Mark(int64(len(drops))) 1354 1355 // Gather all executable transactions and promote them 1356 readies := list.Ready(pool.pendingNonces.get(addr)) 1357 for _, tx := range readies { 1358 hash := tx.Hash() 1359 if pool.promoteTx(addr, hash, tx) { 1360 promoted = append(promoted, tx) 1361 } 1362 } 1363 log.Trace("Promoted queued transactions", "count", len(promoted)) 1364 queuedGauge.Dec(int64(len(readies))) 1365 1366 // Drop all transactions over the allowed limit 1367 var caps types.Transactions 1368 if !pool.locals.contains(addr) { 1369 caps = list.Cap(int(pool.config.AccountQueue)) 1370 for _, tx := range caps { 1371 hash := tx.Hash() 1372 pool.all.Remove(hash) 1373 log.Trace("Removed cap-exceeding queued transaction", "hash", hash) 1374 } 1375 queuedRateLimitMeter.Mark(int64(len(caps))) 1376 } 1377 // Mark all the items dropped as removed 1378 pool.priced.Removed(len(forwards) + len(drops) + len(caps)) 1379 queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1380 if pool.locals.contains(addr) { 1381 localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1382 } 1383 // Delete the entire queue entry if it became empty. 1384 if list.Empty() { 1385 delete(pool.queue, addr) 1386 delete(pool.beats, addr) 1387 } 1388 } 1389 return promoted 1390 } 1391 1392 // truncatePending removes transactions from the pending queue if the pool is above the 1393 // pending limit. The algorithm tries to reduce transaction counts by an approximately 1394 // equal number for all for accounts with many pending transactions. 1395 func (pool *TxPool) truncatePending() { 1396 pending := uint64(0) 1397 for _, list := range pool.pending { 1398 pending += uint64(list.Len()) 1399 } 1400 if pending <= pool.config.GlobalSlots { 1401 return 1402 } 1403 1404 pendingBeforeCap := pending 1405 // Assemble a spam order to penalize large transactors first 1406 spammers := prque.New(nil) 1407 for addr, list := range pool.pending { 1408 // Only evict transactions from high rollers 1409 if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { 1410 spammers.Push(addr, int64(list.Len())) 1411 } 1412 } 1413 // Gradually drop transactions from offenders 1414 offenders := []common.Address{} 1415 for pending > pool.config.GlobalSlots && !spammers.Empty() { 1416 // Retrieve the next offender if not local address 1417 offender, _ := spammers.Pop() 1418 offenders = append(offenders, offender.(common.Address)) 1419 1420 // Equalize balances until all the same or below threshold 1421 if len(offenders) > 1 { 1422 // Calculate the equalization threshold for all current offenders 1423 threshold := pool.pending[offender.(common.Address)].Len() 1424 1425 // Iteratively reduce all offenders until below limit or threshold reached 1426 for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { 1427 for i := 0; i < len(offenders)-1; i++ { 1428 list := pool.pending[offenders[i]] 1429 1430 caps := list.Cap(list.Len() - 1) 1431 for _, tx := range caps { 1432 // Drop the transaction from the global pools too 1433 hash := tx.Hash() 1434 pool.all.Remove(hash) 1435 1436 // Update the account nonce to the dropped transaction 1437 pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) 1438 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1439 } 1440 pool.priced.Removed(len(caps)) 1441 pendingGauge.Dec(int64(len(caps))) 1442 if pool.locals.contains(offenders[i]) { 1443 localGauge.Dec(int64(len(caps))) 1444 } 1445 pending-- 1446 } 1447 } 1448 } 1449 } 1450 1451 // If still above threshold, reduce to limit or min allowance 1452 if pending > pool.config.GlobalSlots && len(offenders) > 0 { 1453 for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { 1454 for _, addr := range offenders { 1455 list := pool.pending[addr] 1456 1457 caps := list.Cap(list.Len() - 1) 1458 for _, tx := range caps { 1459 // Drop the transaction from the global pools too 1460 hash := tx.Hash() 1461 pool.all.Remove(hash) 1462 1463 // Update the account nonce to the dropped transaction 1464 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1465 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1466 } 1467 pool.priced.Removed(len(caps)) 1468 pendingGauge.Dec(int64(len(caps))) 1469 if pool.locals.contains(addr) { 1470 localGauge.Dec(int64(len(caps))) 1471 } 1472 pending-- 1473 } 1474 } 1475 } 1476 pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) 1477 } 1478 1479 // truncateQueue drops the oldes transactions in the queue if the pool is above the global queue limit. 1480 func (pool *TxPool) truncateQueue() { 1481 queued := uint64(0) 1482 for _, list := range pool.queue { 1483 queued += uint64(list.Len()) 1484 } 1485 if queued <= pool.config.GlobalQueue { 1486 return 1487 } 1488 1489 // Sort all accounts with queued transactions by heartbeat 1490 addresses := make(addressesByHeartbeat, 0, len(pool.queue)) 1491 for addr := range pool.queue { 1492 if !pool.locals.contains(addr) { // don't drop locals 1493 addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) 1494 } 1495 } 1496 sort.Sort(addresses) 1497 1498 // Drop transactions until the total is below the limit or only locals remain 1499 for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { 1500 addr := addresses[len(addresses)-1] 1501 list := pool.queue[addr.address] 1502 1503 addresses = addresses[:len(addresses)-1] 1504 1505 // Drop all transactions if they are less than the overflow 1506 if size := uint64(list.Len()); size <= drop { 1507 for _, tx := range list.Flatten() { 1508 pool.removeTx(tx.Hash(), true) 1509 } 1510 drop -= size 1511 queuedRateLimitMeter.Mark(int64(size)) 1512 continue 1513 } 1514 // Otherwise drop only last few transactions 1515 txs := list.Flatten() 1516 for i := len(txs) - 1; i >= 0 && drop > 0; i-- { 1517 pool.removeTx(txs[i].Hash(), true) 1518 drop-- 1519 queuedRateLimitMeter.Mark(1) 1520 } 1521 } 1522 } 1523 1524 // demoteUnexecutables removes invalid and processed transactions from the pools 1525 // executable/pending queue and any subsequent transactions that become unexecutable 1526 // are moved back into the future queue. 1527 // 1528 // Note: transactions are not marked as removed in the priced list because re-heaping 1529 // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful 1530 // to trigger a re-heap is this function 1531 func (pool *TxPool) demoteUnexecutables() { 1532 // Iterate over all accounts and demote any non-executable transactions 1533 for addr, list := range pool.pending { 1534 nonce := pool.currentState.GetNonce(addr) 1535 1536 // Drop all transactions that are deemed too old (low nonce) 1537 olds := list.Forward(nonce) 1538 for _, tx := range olds { 1539 hash := tx.Hash() 1540 pool.all.Remove(hash) 1541 log.Trace("Removed old pending transaction", "hash", hash) 1542 } 1543 // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later 1544 drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1545 for _, tx := range drops { 1546 hash := tx.Hash() 1547 log.Trace("Removed unpayable pending transaction", "hash", hash) 1548 pool.all.Remove(hash) 1549 } 1550 pendingNofundsMeter.Mark(int64(len(drops))) 1551 1552 for _, tx := range invalids { 1553 hash := tx.Hash() 1554 log.Trace("Demoting pending transaction", "hash", hash) 1555 1556 // Internal shuffle shouldn't touch the lookup set. 1557 pool.enqueueTx(hash, tx, false, false) 1558 } 1559 pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1560 if pool.locals.contains(addr) { 1561 localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1562 } else { 1563 pool.priced.Removed(len(olds) + len(drops)) // invalids are only moved into queue, keeps in priced 1564 } 1565 // If there's a gap in front, alert (should never happen) and postpone all transactions 1566 if list.Len() > 0 && list.txs.Get(nonce) == nil { 1567 gapped := list.Cap(0) 1568 for _, tx := range gapped { 1569 hash := tx.Hash() 1570 log.Error("Demoting invalidated transaction", "hash", hash) 1571 1572 // Internal shuffle shouldn't touch the lookup set. 1573 pool.enqueueTx(hash, tx, false, false) 1574 } 1575 pendingGauge.Dec(int64(len(gapped))) 1576 } 1577 // Delete the entire pending entry if it became empty. 1578 if list.Empty() { 1579 delete(pool.pending, addr) 1580 } 1581 } 1582 } 1583 1584 // addressByHeartbeat is an account address tagged with its last activity timestamp. 1585 type addressByHeartbeat struct { 1586 address common.Address 1587 heartbeat time.Time 1588 } 1589 1590 type addressesByHeartbeat []addressByHeartbeat 1591 1592 func (a addressesByHeartbeat) Len() int { return len(a) } 1593 func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } 1594 func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 1595 1596 // accountSet is simply a set of addresses to check for existence, and a signer 1597 // capable of deriving addresses from transactions. 1598 type accountSet struct { 1599 accounts map[common.Address]struct{} 1600 signer types.Signer 1601 cache *[]common.Address 1602 } 1603 1604 // newAccountSet creates a new address set with an associated signer for sender 1605 // derivations. 1606 func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { 1607 as := &accountSet{ 1608 accounts: make(map[common.Address]struct{}), 1609 signer: signer, 1610 } 1611 for _, addr := range addrs { 1612 as.add(addr) 1613 } 1614 return as 1615 } 1616 1617 // contains checks if a given address is contained within the set. 1618 func (as *accountSet) contains(addr common.Address) bool { 1619 _, exist := as.accounts[addr] 1620 return exist 1621 } 1622 1623 func (as *accountSet) empty() bool { 1624 return len(as.accounts) == 0 1625 } 1626 1627 // containsTx checks if the sender of a given tx is within the set. If the sender 1628 // cannot be derived, this method returns false. 1629 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1630 if addr, err := types.Sender(as.signer, tx); err == nil { 1631 return as.contains(addr) 1632 } 1633 return false 1634 } 1635 1636 // add inserts a new address into the set to track. 1637 func (as *accountSet) add(addr common.Address) { 1638 as.accounts[addr] = struct{}{} 1639 as.cache = nil 1640 } 1641 1642 // addTx adds the sender of tx into the set. 1643 func (as *accountSet) addTx(tx *types.Transaction) { 1644 if addr, err := types.Sender(as.signer, tx); err == nil { 1645 as.add(addr) 1646 } 1647 } 1648 1649 // flatten returns the list of addresses within this set, also caching it for later 1650 // reuse. The returned slice should not be changed! 1651 func (as *accountSet) flatten() []common.Address { 1652 if as.cache == nil { 1653 accounts := make([]common.Address, 0, len(as.accounts)) 1654 for account := range as.accounts { 1655 accounts = append(accounts, account) 1656 } 1657 as.cache = &accounts 1658 } 1659 return *as.cache 1660 } 1661 1662 // merge adds all addresses from the 'other' set into 'as'. 1663 func (as *accountSet) merge(other *accountSet) { 1664 for addr := range other.accounts { 1665 as.accounts[addr] = struct{}{} 1666 } 1667 as.cache = nil 1668 } 1669 1670 // txLookup is used internally by TxPool to track transactions while allowing 1671 // lookup without mutex contention. 1672 // 1673 // Note, although this type is properly protected against concurrent access, it 1674 // is **not** a type that should ever be mutated or even exposed outside of the 1675 // transaction pool, since its internal state is tightly coupled with the pools 1676 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1677 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1678 // TxPool.mu mutex. 1679 // 1680 // This lookup set combines the notion of "local transactions", which is useful 1681 // to build upper-level structure. 1682 type txLookup struct { 1683 slots int 1684 lock sync.RWMutex 1685 locals map[common.Hash]*types.Transaction 1686 remotes map[common.Hash]*types.Transaction 1687 } 1688 1689 // newTxLookup returns a new txLookup structure. 1690 func newTxLookup() *txLookup { 1691 return &txLookup{ 1692 locals: make(map[common.Hash]*types.Transaction), 1693 remotes: make(map[common.Hash]*types.Transaction), 1694 } 1695 } 1696 1697 func (t *txLookup) SampleHashes(max int) []common.Hash { 1698 t.lock.RLock() 1699 defer t.lock.RUnlock() 1700 res := make([]common.Hash, 0, max) 1701 skip := 0 1702 if len(t.locals)+len(t.remotes) > max { 1703 skip = rand.Intn(len(t.locals) + len(t.remotes) - max) 1704 } 1705 forEach := func(key common.Hash) bool { 1706 if len(res) >= max { 1707 return false 1708 } 1709 if skip > 0 { 1710 skip-- 1711 return true 1712 } 1713 res = append(res, key) 1714 return true 1715 } 1716 for key := range t.locals { 1717 if !forEach(key) { 1718 break 1719 } 1720 } 1721 for key := range t.remotes { 1722 if !forEach(key) { 1723 break 1724 } 1725 } 1726 return res 1727 } 1728 1729 // Range calls f on each key and value present in the map. The callback passed 1730 // should return the indicator whether the iteration needs to be continued. 1731 // Callers need to specify which set (or both) to be iterated. 1732 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1733 t.lock.RLock() 1734 defer t.lock.RUnlock() 1735 1736 if local { 1737 for key, value := range t.locals { 1738 if !f(key, value, true) { 1739 return 1740 } 1741 } 1742 } 1743 if remote { 1744 for key, value := range t.remotes { 1745 if !f(key, value, false) { 1746 return 1747 } 1748 } 1749 } 1750 } 1751 1752 // Get returns a transaction if it exists in the lookup, or nil if not found. 1753 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1754 t.lock.RLock() 1755 defer t.lock.RUnlock() 1756 1757 if tx := t.locals[hash]; tx != nil { 1758 return tx 1759 } 1760 return t.remotes[hash] 1761 } 1762 1763 // OnlyNotExisting filters hashes of unknown txs from a provided slice of tx hashes. 1764 func (t *txLookup) OnlyNotExisting(hashes []common.Hash) []common.Hash { 1765 t.lock.RLock() 1766 defer t.lock.RUnlock() 1767 notExisting := make([]common.Hash, 0, len(hashes)) 1768 for _, txid := range hashes { 1769 if t.locals[txid] == nil && t.remotes[txid] == nil { 1770 notExisting = append(notExisting, txid) 1771 } 1772 } 1773 return notExisting 1774 } 1775 1776 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1777 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { 1778 t.lock.RLock() 1779 defer t.lock.RUnlock() 1780 1781 return t.locals[hash] 1782 } 1783 1784 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1785 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { 1786 t.lock.RLock() 1787 defer t.lock.RUnlock() 1788 1789 return t.remotes[hash] 1790 } 1791 1792 // Count returns the current number of transactions in the lookup. 1793 func (t *txLookup) Count() int { 1794 t.lock.RLock() 1795 defer t.lock.RUnlock() 1796 1797 return len(t.locals) + len(t.remotes) 1798 } 1799 1800 // LocalCount returns the current number of local transactions in the lookup. 1801 func (t *txLookup) LocalCount() int { 1802 t.lock.RLock() 1803 defer t.lock.RUnlock() 1804 1805 return len(t.locals) 1806 } 1807 1808 // RemoteCount returns the current number of remote transactions in the lookup. 1809 func (t *txLookup) RemoteCount() int { 1810 t.lock.RLock() 1811 defer t.lock.RUnlock() 1812 1813 return len(t.remotes) 1814 } 1815 1816 // Slots returns the current number of slots used in the lookup. 1817 func (t *txLookup) Slots() int { 1818 t.lock.RLock() 1819 defer t.lock.RUnlock() 1820 1821 return t.slots 1822 } 1823 1824 // Add adds a transaction to the lookup. 1825 func (t *txLookup) Add(tx *types.Transaction, local bool) { 1826 t.lock.Lock() 1827 defer t.lock.Unlock() 1828 1829 t.slots += numSlots(tx) 1830 slotsGauge.Update(int64(t.slots)) 1831 1832 if local { 1833 t.locals[tx.Hash()] = tx 1834 } else { 1835 t.remotes[tx.Hash()] = tx 1836 } 1837 } 1838 1839 // Remove removes a transaction from the lookup. 1840 func (t *txLookup) Remove(hash common.Hash) { 1841 t.lock.Lock() 1842 defer t.lock.Unlock() 1843 1844 tx, ok := t.locals[hash] 1845 if !ok { 1846 tx, ok = t.remotes[hash] 1847 } 1848 if !ok { 1849 log.Error("No transaction found to be deleted", "hash", hash) 1850 return 1851 } 1852 t.slots -= numSlots(tx) 1853 slotsGauge.Update(int64(t.slots)) 1854 1855 delete(t.locals, hash) 1856 delete(t.remotes, hash) 1857 } 1858 1859 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1860 // set. The assumption is held the locals set is thread-safe to be used. 1861 func (t *txLookup) RemoteToLocals(locals *accountSet) int { 1862 t.lock.Lock() 1863 defer t.lock.Unlock() 1864 1865 var migrated int 1866 for hash, tx := range t.remotes { 1867 if locals.containsTx(tx) { 1868 t.locals[hash] = tx 1869 delete(t.remotes, hash) 1870 migrated += 1 1871 } 1872 } 1873 return migrated 1874 } 1875 1876 // RemotesBelowTip finds all remote transactions below the given gas price threshold. 1877 func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { 1878 found := make(types.Transactions, 0, 128) 1879 t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { 1880 if tx.GasTipCapIntCmp(threshold) < 0 { 1881 found = append(found, tx) 1882 } 1883 return true 1884 }, false, true) // Only iterate remotes 1885 return found 1886 } 1887 1888 // numSlots calculates the number of slots needed for a single transaction. 1889 func numSlots(tx *types.Transaction) int { 1890 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1891 }