github.com/dominant-strategies/go-quai@v0.28.2/core/tx_pool.go (about) 1 // Copyright 2014 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package core 18 19 import ( 20 "errors" 21 "fmt" 22 "math" 23 "math/big" 24 "sort" 25 "sync" 26 "time" 27 28 "github.com/dominant-strategies/go-quai/common" 29 "github.com/dominant-strategies/go-quai/common/prque" 30 "github.com/dominant-strategies/go-quai/consensus/misc" 31 "github.com/dominant-strategies/go-quai/core/state" 32 "github.com/dominant-strategies/go-quai/core/types" 33 "github.com/dominant-strategies/go-quai/event" 34 "github.com/dominant-strategies/go-quai/log" 35 "github.com/dominant-strategies/go-quai/metrics" 36 "github.com/dominant-strategies/go-quai/params" 37 orderedmap "github.com/wk8/go-ordered-map/v2" 38 ) 39 40 const ( 41 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. 42 chainHeadChanSize = 10 43 44 // txSlotSize is used to calculate how many data slots a single transaction 45 // takes up based on its size. The slots are used as DoS protection, ensuring 46 // that validating a new transaction remains a constant operation (in reality 47 // O(maxslots), where max slots are 4 currently). 48 txSlotSize = 32 * 1024 49 50 // txMaxSize is the maximum size a single transaction can have. This field has 51 // non-trivial consequences: larger transactions are significantly harder and 52 // more expensive to propagate; larger transactions also take more resources 53 // to validate whether they fit into the pool or not. 54 txMaxSize = 4 * txSlotSize // 128KB 55 56 // c_reorgCounterThreshold determines the frequency of the timing prints 57 // around important functions in txpool 58 c_reorgCounterThreshold = 200 59 ) 60 61 var ( 62 // ErrAlreadyKnown is returned if the transactions is already contained 63 // within the pool. 64 ErrAlreadyKnown = errors.New("already known") 65 66 // ErrInvalidSender is returned if the transaction contains an invalid signature. 67 ErrInvalidSender = errors.New("invalid sender") 68 69 // ErrUnderpriced is returned if a transaction's gas price is below the minimum 70 // configured for the transaction pool. 71 ErrUnderpriced = errors.New("transaction underpriced") 72 73 // ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet 74 // another remote transaction. 75 ErrTxPoolOverflow = errors.New("txpool is full") 76 77 // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced 78 // with a different one without the required price bump. 79 ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") 80 81 // ErrGasLimit is returned if a transaction's requested gas limit exceeds the 82 // maximum allowance of the current block. 83 errGasLimit = errors.New("exceeds block gas limit") 84 85 // ErrNegativeValue is a sanity error to ensure no one is able to specify a 86 // transaction with a negative value. 87 ErrNegativeValue = errors.New("negative value") 88 89 // ErrOversizedData is returned if the input data of a transaction is greater 90 // than some meaningful limit a user might use. This is not a consensus error 91 // making the transaction invalid, rather a DOS protection. 92 ErrOversizedData = errors.New("oversized data") 93 ) 94 95 var ( 96 evictionInterval = time.Minute // Time interval to check for evictable transactions 97 statsReportInterval = 1 * time.Minute // Time interval to report transaction pool stats 98 ) 99 100 var ( 101 // Metrics for the pending pool 102 pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) 103 pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) 104 pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting 105 pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds 106 107 // Metrics for the queued pool 108 queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) 109 queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) 110 queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting 111 queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds 112 queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime 113 114 // General tx metrics 115 knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) 116 validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) 117 invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) 118 underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) 119 overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) 120 121 pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) 122 queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) 123 localGauge = metrics.NewRegisteredGauge("txpool/local", nil) 124 slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) 125 126 reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) 127 ) 128 129 // TxStatus is the current status of a transaction as seen by the pool. 130 type TxStatus uint 131 132 const ( 133 TxStatusUnknown TxStatus = iota 134 TxStatusQueued 135 TxStatusPending 136 TxStatusIncluded 137 ) 138 139 // blockChain provides the state of blockchain and current gas limit to do 140 // some pre checks in tx pool and event subscribers. 141 type blockChain interface { 142 CurrentBlock() *types.Block 143 GetBlock(hash common.Hash, number uint64) *types.Block 144 StateAt(root common.Hash) (*state.StateDB, error) 145 146 SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription 147 } 148 149 // TxPoolConfig are the configuration parameters of the transaction pool. 150 type TxPoolConfig struct { 151 Locals []common.InternalAddress // Addresses that should be treated by default as local 152 NoLocals bool // Whether local transaction handling should be disabled 153 Journal string // Journal of local transactions to survive node restarts 154 Rejournal time.Duration // Time interval to regenerate the local transaction journal 155 156 PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool 157 PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) 158 159 AccountSlots uint64 // Number of executable transaction slots guaranteed per account 160 GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts 161 MaxSenders uint64 // Maximum number of senders in the senders cache 162 SendersChBuffer uint64 // Senders cache channel buffer size 163 AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account 164 GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts 165 166 Lifetime time.Duration // Maximum amount of time non-executable transaction are queued 167 } 168 169 // DefaultTxPoolConfig contains the default configurations for the transaction 170 // pool. 171 var DefaultTxPoolConfig = TxPoolConfig{ 172 Journal: "transactions.rlp", 173 Rejournal: time.Hour, 174 175 PriceLimit: 1, 176 PriceBump: 10, 177 178 AccountSlots: 1, 179 GlobalSlots: 9000 + 1024, // urgent + floating queue capacity with 4:1 ratio 180 MaxSenders: 100000, // 5 MB - at least 10 blocks worth of transactions in case of reorg or high production rate 181 SendersChBuffer: 1024, // at 500 TPS in zone, 2s buffer 182 AccountQueue: 1, 183 GlobalQueue: 2048, 184 185 Lifetime: 3 * time.Hour, 186 } 187 188 // sanitize checks the provided user configurations and changes anything that's 189 // unreasonable or unworkable. 190 func (config *TxPoolConfig) sanitize() TxPoolConfig { 191 conf := *config 192 if conf.Rejournal < time.Second { 193 log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) 194 conf.Rejournal = time.Second 195 } 196 if conf.PriceLimit < 1 { 197 log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) 198 conf.PriceLimit = DefaultTxPoolConfig.PriceLimit 199 } 200 if conf.PriceBump < 1 { 201 log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) 202 conf.PriceBump = DefaultTxPoolConfig.PriceBump 203 } 204 if conf.AccountSlots < 1 { 205 log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) 206 conf.AccountSlots = DefaultTxPoolConfig.AccountSlots 207 } 208 if conf.GlobalSlots < 1 { 209 log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) 210 conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots 211 } 212 if conf.AccountQueue < 1 { 213 log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) 214 conf.AccountQueue = DefaultTxPoolConfig.AccountQueue 215 } 216 if conf.GlobalQueue < 1 { 217 log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) 218 conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue 219 } 220 if conf.Lifetime < 1 { 221 log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) 222 conf.Lifetime = DefaultTxPoolConfig.Lifetime 223 } 224 return conf 225 } 226 227 // TxPool contains all currently known transactions. Transactions 228 // enter the pool when they are received from the network or submitted 229 // locally. They exit the pool when they are included in the blockchain. 230 // 231 // The pool separates processable transactions (which can be applied to the 232 // current state) and future transactions. Transactions move between those 233 // two states over time as they are received and processed. 234 type TxPool struct { 235 config TxPoolConfig 236 chainconfig *params.ChainConfig 237 chain blockChain 238 gasPrice *big.Int 239 txFeed event.Feed 240 scope event.SubscriptionScope 241 signer types.Signer 242 mu sync.RWMutex 243 244 currentState *state.StateDB // Current state in the blockchain head 245 pendingNonces *txNoncer // Pending state tracking virtual nonces 246 currentMaxGas uint64 // Current gas limit for transaction caps 247 248 locals *accountSet // Set of local transaction to exempt from eviction rules 249 journal *txJournal // Journal of local transaction to back up to disk 250 251 pending map[common.InternalAddress]*txList // All currently processable transactions 252 queue map[common.InternalAddress]*txList // Queued but non-processable transactions 253 beats map[common.InternalAddress]time.Time // Last heartbeat from each known account 254 all *txLookup // All transactions to allow lookups 255 priced *txPricedList // All transactions sorted by price 256 senders *orderedmap.OrderedMap[common.Hash, common.InternalAddress] // Tx hash to sender lookup cache (async populated) 257 sendersCh chan newSender // Channel for async senders cache goroutine 258 SendersMutex sync.RWMutex // Mutex for senders map 259 localTxsCount int // count of txs in last 1 min. Purely for logging purpose 260 remoteTxsCount int // count of txs in last 1 min. Purely for logging purpose 261 262 reOrgCounter int // keeps track of the number of times the runReorg is called, it is reset every c_reorgCounterThreshold times 263 264 chainHeadCh chan ChainHeadEvent 265 chainHeadSub event.Subscription 266 reqResetCh chan *txpoolResetRequest 267 reqPromoteCh chan *accountSet 268 queueTxEventCh chan *types.Transaction 269 reorgDoneCh chan chan struct{} 270 reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop 271 wg sync.WaitGroup // tracks loop, scheduleReorgLoop 272 } 273 274 type txpoolResetRequest struct { 275 oldHead, newHead *types.Header 276 } 277 278 type newSender struct { 279 hash common.Hash 280 sender common.InternalAddress 281 } 282 283 // NewTxPool creates a new transaction pool to gather, sort and filter inbound 284 // transactions from the network. 285 func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool { 286 // Sanitize the input to ensure no vulnerable gas prices are set 287 config = (&config).sanitize() 288 289 // Create the transaction pool with its initial settings 290 pool := &TxPool{ 291 config: config, 292 chainconfig: chainconfig, 293 chain: chain, 294 signer: types.LatestSigner(chainconfig), 295 pending: make(map[common.InternalAddress]*txList), 296 queue: make(map[common.InternalAddress]*txList), 297 beats: make(map[common.InternalAddress]time.Time), 298 senders: orderedmap.New[common.Hash, common.InternalAddress](), 299 sendersCh: make(chan newSender, config.SendersChBuffer), 300 all: newTxLookup(), 301 chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), 302 reqResetCh: make(chan *txpoolResetRequest), 303 reqPromoteCh: make(chan *accountSet), 304 queueTxEventCh: make(chan *types.Transaction), 305 reorgDoneCh: make(chan chan struct{}), 306 reorgShutdownCh: make(chan struct{}), 307 gasPrice: new(big.Int).SetUint64(config.PriceLimit), 308 localTxsCount: 0, 309 remoteTxsCount: 0, 310 reOrgCounter: 0, 311 } 312 pool.locals = newAccountSet(pool.signer) 313 for _, addr := range config.Locals { 314 log.Debug("Setting new local account", "address", addr) 315 pool.locals.add(addr) 316 } 317 pool.priced = newTxPricedList(pool.all) 318 pool.reset(nil, chain.CurrentBlock().Header()) 319 320 // Start the reorg loop early so it can handle requests generated during journal loading. 321 pool.wg.Add(1) 322 go pool.scheduleReorgLoop() 323 324 // If local transactions and journaling is enabled, load from disk 325 if !config.NoLocals && config.Journal != "" { 326 pool.journal = newTxJournal(config.Journal) 327 328 if err := pool.journal.load(pool.AddLocals); err != nil { 329 log.Warn("Failed to load transaction journal", "err", err) 330 } 331 if err := pool.journal.rotate(pool.local()); err != nil { 332 log.Warn("Failed to rotate transaction journal", "err", err) 333 } 334 } 335 336 // Subscribe events from blockchain and start the main event loop. 337 pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) 338 pool.wg.Add(1) 339 go pool.loop() 340 go pool.sendersGoroutine() 341 return pool 342 } 343 344 // loop is the transaction pool's main event loop, waiting for and reacting to 345 // outside blockchain events as well as for various reporting and transaction 346 // eviction events. 347 func (pool *TxPool) loop() { 348 defer pool.wg.Done() 349 350 var ( 351 // Start the stats reporting and transaction eviction tickers 352 report = time.NewTicker(statsReportInterval) 353 evict = time.NewTicker(evictionInterval) 354 journal = time.NewTicker(pool.config.Rejournal) 355 // Track the previous head headers for transaction reorgs 356 head = pool.chain.CurrentBlock() 357 ) 358 defer report.Stop() 359 defer evict.Stop() 360 defer journal.Stop() 361 362 for { 363 select { 364 // Handle ChainHeadEvent 365 case ev := <-pool.chainHeadCh: 366 if ev.Block != nil { 367 pool.requestReset(head.Header(), ev.Block.Header()) 368 head = ev.Block 369 } 370 371 // System shutdown. 372 case <-pool.chainHeadSub.Err(): 373 close(pool.reorgShutdownCh) 374 return 375 376 // Handle stats reporting ticks 377 case <-report.C: 378 pool.mu.RLock() 379 pending, queued := pool.stats() 380 stales := pool.priced.stales 381 log.Info("Added Transactions in last Min", "Local Txs", pool.localTxsCount, "Remote Txs", pool.remoteTxsCount) 382 pool.localTxsCount = 0 383 pool.remoteTxsCount = 0 384 pool.mu.RUnlock() 385 386 log.Info("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) 387 // Handle inactive account transaction eviction 388 case <-evict.C: 389 pool.mu.Lock() 390 for addr := range pool.queue { 391 // Skip local transactions from the eviction mechanism 392 if pool.locals.contains(addr) { 393 continue 394 } 395 // Any non-locals old enough should be removed 396 if time.Since(pool.beats[addr]) > pool.config.Lifetime { 397 list := pool.queue[addr].Flatten() 398 for _, tx := range list { 399 pool.removeTx(tx.Hash(), true) 400 } 401 queuedEvictionMeter.Mark(int64(len(list))) 402 } 403 } 404 pool.mu.Unlock() 405 406 // Handle local transaction journal rotation 407 case <-journal.C: 408 if pool.journal != nil { 409 pool.mu.Lock() 410 if err := pool.journal.rotate(pool.local()); err != nil { 411 log.Warn("Failed to rotate local tx journal", "err", err) 412 } 413 pool.mu.Unlock() 414 } 415 } 416 } 417 } 418 419 // Stop terminates the transaction pool. 420 func (pool *TxPool) Stop() { 421 // Unsubscribe all subscriptions registered from txpool 422 pool.scope.Close() 423 424 // Unsubscribe subscriptions registered from blockchain 425 pool.chainHeadSub.Unsubscribe() 426 pool.wg.Wait() 427 428 if pool.journal != nil { 429 pool.journal.close() 430 } 431 log.Info("Transaction pool stopped") 432 } 433 434 // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and 435 // starts sending event to the given channel. 436 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { 437 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 438 } 439 440 // GasPrice returns the current gas price enforced by the transaction pool. 441 func (pool *TxPool) GasPrice() *big.Int { 442 pool.mu.RLock() 443 defer pool.mu.RUnlock() 444 445 return new(big.Int).Set(pool.gasPrice) 446 } 447 448 // SetGasPrice updates the minimum price required by the transaction pool for a 449 // new transaction, and drops all transactions below this threshold. 450 func (pool *TxPool) SetGasPrice(price *big.Int) { 451 pool.mu.Lock() 452 defer pool.mu.Unlock() 453 454 old := pool.gasPrice 455 pool.gasPrice = price 456 // if the min miner fee increased, remove transactions below the new threshold 457 if price.Cmp(old) > 0 { 458 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 459 drop := pool.all.RemotesBelowTip(price) 460 for _, tx := range drop { 461 pool.removeTx(tx.Hash(), false) 462 } 463 pool.priced.Removed(len(drop)) 464 } 465 466 log.Info("Transaction pool price threshold updated", "price", price) 467 } 468 469 // Nonce returns the next nonce of an account, with all transactions executable 470 // by the pool already applied on top. 471 func (pool *TxPool) Nonce(addr common.InternalAddress) uint64 { 472 pool.mu.RLock() 473 defer pool.mu.RUnlock() 474 475 return pool.pendingNonces.get(addr) 476 } 477 478 // Stats retrieves the current pool stats, namely the number of pending and the 479 // number of queued (non-executable) transactions. 480 func (pool *TxPool) Stats() (int, int) { 481 pool.mu.RLock() 482 defer pool.mu.RUnlock() 483 484 return pool.stats() 485 } 486 487 // stats retrieves the current pool stats, namely the number of pending and the 488 // number of queued (non-executable) transactions. 489 func (pool *TxPool) stats() (int, int) { 490 pending := 0 491 for _, list := range pool.pending { 492 pending += list.Len() 493 } 494 queued := 0 495 for _, list := range pool.queue { 496 queued += list.Len() 497 } 498 return pending, queued 499 } 500 501 // Content retrieves the data content of the transaction pool, returning all the 502 // pending as well as queued transactions, grouped by account and sorted by nonce. 503 func (pool *TxPool) Content() (map[common.InternalAddress]types.Transactions, map[common.InternalAddress]types.Transactions) { 504 pool.mu.Lock() 505 defer pool.mu.Unlock() 506 507 pending := make(map[common.InternalAddress]types.Transactions) 508 for addr, list := range pool.pending { 509 pending[addr] = list.Flatten() 510 } 511 queued := make(map[common.InternalAddress]types.Transactions) 512 for addr, list := range pool.queue { 513 queued[addr] = list.Flatten() 514 } 515 return pending, queued 516 } 517 518 // ContentFrom retrieves the data content of the transaction pool, returning the 519 // pending as well as queued transactions of this address, grouped by nonce. 520 func (pool *TxPool) ContentFrom(addr common.InternalAddress) (types.Transactions, types.Transactions) { 521 pool.mu.RLock() 522 defer pool.mu.RUnlock() 523 524 var pending types.Transactions 525 if list, ok := pool.pending[addr]; ok { 526 pending = list.Flatten() 527 } 528 var queued types.Transactions 529 if list, ok := pool.queue[addr]; ok { 530 queued = list.Flatten() 531 } 532 return pending, queued 533 } 534 535 // Pending retrieves all currently processable transactions, grouped by origin 536 // account and sorted by nonce. The returned transaction set is a copy and can be 537 // freely modified by calling code. 538 // 539 // The enforceTips parameter can be used to do an extra filtering on the pending 540 // transactions and only return those whose **effective** tip is large enough in 541 // the next pending execution environment. 542 func (pool *TxPool) TxPoolPending(enforceTips bool, etxSet types.EtxSet) (map[common.AddressBytes]types.Transactions, error) { 543 pool.mu.RLock() 544 defer pool.mu.RUnlock() 545 546 pending := make(map[common.AddressBytes]types.Transactions) 547 for addr, list := range pool.pending { 548 txs := list.Flatten() 549 550 // If the miner requests tip enforcement, cap the lists now 551 if enforceTips && !pool.locals.contains(addr) { 552 for i, tx := range txs { 553 if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { 554 log.Debug("TX has incorrect or low miner tip", "tx", tx.Hash().String(), "gasTipCap", tx.GasTipCap().String(), "poolGasPrice", pool.gasPrice.String(), "baseFee", pool.priced.urgent.baseFee.String()) 555 txs = txs[:i] 556 break 557 } 558 } 559 } 560 if len(txs) > 0 { 561 pending[addr.Bytes20()] = txs 562 } 563 } 564 565 for _, entry := range etxSet { 566 addr := entry.ETX.ETXSender() 567 tx := entry.ETX 568 if tx.ETXSender().Location().Equal(common.NodeLocation) { // Sanity check 569 log.Error("ETX sender is in our location!", "tx", tx.Hash().String(), "sender", tx.ETXSender().String()) 570 continue // skip this tx 571 } 572 // If the miner requests tip enforcement, cap the lists now 573 if enforceTips && tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { 574 log.Debug("ETX has incorrect or low miner tip", "tx", tx.Hash().String(), "gasTipCap", tx.GasTipCap().String(), "poolGasPrice", pool.gasPrice.String(), "baseFee", pool.priced.urgent.baseFee.String()) 575 continue // skip this tx 576 } 577 pending[addr.Bytes20()] = append(pending[addr.Bytes20()], &tx) // ETXs do not have to be sorted by address but this way all TXs are in the same list 578 } 579 return pending, nil 580 } 581 582 // Locals retrieves the accounts currently considered local by the pool. 583 func (pool *TxPool) Locals() []common.InternalAddress { 584 pool.mu.Lock() 585 defer pool.mu.Unlock() 586 587 return pool.locals.flatten() 588 } 589 590 // local retrieves all currently known local transactions, grouped by origin 591 // account and sorted by nonce. The returned transaction set is a copy and can be 592 // freely modified by calling code. 593 func (pool *TxPool) local() map[common.InternalAddress]types.Transactions { 594 txs := make(map[common.InternalAddress]types.Transactions) 595 for addr := range pool.locals.accounts { 596 if pending := pool.pending[addr]; pending != nil { 597 txs[addr] = append(txs[addr], pending.Flatten()...) 598 } 599 if queued := pool.queue[addr]; queued != nil { 600 txs[addr] = append(txs[addr], queued.Flatten()...) 601 } 602 } 603 return txs 604 } 605 606 // validateTx checks whether a transaction is valid according to the consensus 607 // rules and adheres to some heuristic limits of the local node (price and size). 608 func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { 609 // Reject transactions over defined size to prevent DOS attacks 610 if uint64(tx.Size()) > txMaxSize { 611 return ErrOversizedData 612 } 613 // Transactions can't be negative. This may never happen using RLP decoded 614 // transactions but may occur if you create a transaction using the RPC. 615 if tx.Value().Sign() < 0 { 616 return ErrNegativeValue 617 } 618 // Ensure the transaction doesn't exceed the current block limit gas. 619 if pool.currentMaxGas < tx.Gas() { 620 return ErrGasLimit(tx.Gas(), pool.currentMaxGas) 621 } 622 // Sanity check for extremely large numbers 623 if tx.GasFeeCap().BitLen() > 256 { 624 return ErrFeeCapVeryHigh 625 } 626 if tx.GasTipCap().BitLen() > 256 { 627 return ErrTipVeryHigh 628 } 629 // Ensure gasFeeCap is greater than or equal to gasTipCap. 630 if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { 631 return ErrTipAboveFeeCap 632 } 633 var internal common.InternalAddress 634 addToCache := true 635 if sender := tx.From(); sender != nil { // Check tx cache first 636 var err error 637 internal, err = sender.InternalAddress() 638 if err != nil { 639 return err 640 } 641 } else if sender, found := pool.GetSender(tx.Hash()); found { 642 internal = sender 643 addToCache = false 644 } else { 645 // Make sure the transaction is signed properly. 646 from, err := types.Sender(pool.signer, tx) 647 if err != nil { 648 return ErrInvalidSender 649 } 650 internal, err = from.InternalAddress() 651 if err != nil { 652 return err 653 } 654 } 655 656 // Drop non-local transactions under our own minimal accepted gas price or tip 657 if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { 658 return ErrUnderpriced 659 } 660 // Ensure the transaction adheres to nonce ordering 661 if pool.currentState.GetNonce(internal) > tx.Nonce() { 662 return ErrNonceTooLow 663 } 664 // Transactor should have enough funds to cover the costs 665 // cost == V + GP * GL 666 if pool.currentState.GetBalance(internal).Cmp(tx.Cost()) < 0 { 667 return ErrInsufficientFunds 668 } 669 // Ensure the transaction has more gas than the basic tx fee. 670 intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil) 671 if err != nil { 672 return err 673 } 674 if tx.Gas() < intrGas { 675 log.Warn("tx has insufficient gas", "gas supplied", tx.Gas(), "gas needed", intrGas, "tx", tx) 676 return ErrIntrinsicGas 677 } 678 if len(pool.sendersCh) == int(pool.config.SendersChBuffer) { 679 log.Error("sendersCh is full, skipping until there is room") 680 } 681 if addToCache { 682 select { 683 case pool.sendersCh <- newSender{tx.Hash(), internal}: // Non-blocking 684 default: 685 log.Error("sendersCh is full, skipping until there is room") 686 } 687 } 688 689 return nil 690 } 691 692 // add validates a transaction and inserts it into the non-executable queue for later 693 // pending promotion and execution. If the transaction is a replacement for an already 694 // pending or queued one, it overwrites the previous transaction if its price is higher. 695 // 696 // If a newly added transaction is marked as local, its sending account will be 697 // be added to the allowlist, preventing any associated transaction from being dropped 698 // out of the pool due to pricing constraints. 699 func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { 700 // If the transaction is already known, discard it 701 hash := tx.Hash() 702 if pool.all.Get(hash) != nil { 703 log.Trace("Discarding already known transaction", "hash", hash) 704 knownTxMeter.Mark(1) 705 return false, ErrAlreadyKnown 706 } 707 // Make the local flag. If it's from local source or it's from the network but 708 // the sender is marked as local previously, treat it as the local transaction. 709 isLocal := local || pool.locals.containsTx(tx) 710 711 // If the transaction fails basic validation, discard it 712 if err := pool.validateTx(tx, isLocal); err != nil { 713 log.Trace("Discarding invalid transaction", "hash", hash, "err", err) 714 invalidTxMeter.Mark(1) 715 return false, err 716 } 717 // If the transaction pool is full, discard underpriced transactions 718 if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { 719 // If the new transaction is underpriced, don't accept it 720 if !isLocal && pool.priced.Underpriced(tx) { 721 log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 722 underpricedTxMeter.Mark(1) 723 return false, ErrUnderpriced 724 } 725 // New transaction is better than our worse ones, make room for it. 726 // If it's a local transaction, forcibly discard all available transactions. 727 // Otherwise if we can't make enough room for new one, abort the operation. 728 drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) 729 730 // Special case, we still can't make the room for the new remote one. 731 if !isLocal && !success { 732 log.Trace("Discarding overflown transaction", "hash", hash) 733 overflowedTxMeter.Mark(1) 734 return false, ErrTxPoolOverflow 735 } 736 // Kick out the underpriced remote transactions. 737 for _, tx := range drop { 738 log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 739 underpricedTxMeter.Mark(1) 740 pool.removeTx(tx.Hash(), false) 741 } 742 } 743 // Try to replace an existing transaction in the pending pool 744 from, _ := types.Sender(pool.signer, tx) // already validated 745 internal, err := from.InternalAddress() 746 if err != nil { 747 return false, err 748 } 749 if list := pool.pending[internal]; list != nil && list.Overlaps(tx) { 750 // Nonce already pending, check if required price bump is met 751 inserted, old := list.Add(tx, pool.config.PriceBump) 752 if !inserted { 753 pendingDiscardMeter.Mark(1) 754 return false, ErrReplaceUnderpriced 755 } 756 // New transaction is better, replace old one 757 if old != nil { 758 pool.all.Remove(old.Hash()) 759 pool.priced.Removed(1) 760 pendingReplaceMeter.Mark(1) 761 } 762 pool.all.Add(tx, isLocal) 763 pool.priced.Put(tx, isLocal) 764 pool.journalTx(internal, tx) 765 pool.queueTxEvent(tx) 766 log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) 767 768 // Successful promotion, bump the heartbeat 769 pool.beats[internal] = time.Now() 770 return old != nil, nil 771 } 772 // New transaction isn't replacing a pending one, push into queue 773 replaced, err = pool.enqueueTx(hash, tx, isLocal, true) 774 if err != nil { 775 return false, err 776 } 777 // Mark local addresses and journal local transactions 778 if local && !pool.locals.contains(internal) { 779 log.Info("Setting new local account", "address", from) 780 pool.locals.add(internal) 781 pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. 782 } 783 if isLocal { 784 localGauge.Inc(1) 785 } 786 pool.journalTx(internal, tx) 787 pool.queueTxEvent(tx) 788 log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) 789 return replaced, nil 790 } 791 792 // enqueueTx inserts a new transaction into the non-executable transaction queue. 793 // 794 // Note, this method assumes the pool lock is held! 795 func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { 796 // Try to insert the transaction into the future queue 797 from, _ := types.Sender(pool.signer, tx) // already validated 798 internal, err := from.InternalAddress() 799 if err != nil { 800 return false, err 801 } 802 if pool.queue[internal] == nil { 803 pool.queue[internal] = newTxList(false) 804 } 805 inserted, old := pool.queue[internal].Add(tx, pool.config.PriceBump) 806 if !inserted { 807 // An older transaction was better, discard this 808 queuedDiscardMeter.Mark(1) 809 return false, ErrReplaceUnderpriced 810 } 811 // Discard any previous transaction and mark this 812 if old != nil { 813 pool.all.Remove(old.Hash()) 814 pool.priced.Removed(1) 815 queuedReplaceMeter.Mark(1) 816 } else { 817 // Nothing was replaced, bump the queued counter 818 queuedGauge.Inc(1) 819 } 820 // If the transaction isn't in lookup set but it's expected to be there, 821 // show the error log. 822 if pool.all.Get(hash) == nil && !addAll { 823 log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) 824 } 825 if addAll { 826 pool.all.Add(tx, local) 827 pool.priced.Put(tx, local) 828 } 829 // If we never record the heartbeat, do it right now. 830 if _, exist := pool.beats[internal]; !exist { 831 pool.beats[internal] = time.Now() 832 } 833 return old != nil, nil 834 } 835 836 // journalTx adds the specified transaction to the local disk journal if it is 837 // deemed to have been sent from a local account. 838 func (pool *TxPool) journalTx(from common.InternalAddress, tx *types.Transaction) { 839 // Only journal if it's enabled and the transaction is local 840 if pool.journal == nil || !pool.locals.contains(from) { 841 return 842 } 843 if err := pool.journal.insert(tx); err != nil { 844 log.Warn("Failed to journal local transaction", "err", err) 845 } 846 } 847 848 // promoteTx adds a transaction to the pending (processable) list of transactions 849 // and returns whether it was inserted or an older was better. 850 // 851 // Note, this method assumes the pool lock is held! 852 func (pool *TxPool) promoteTx(addr common.InternalAddress, hash common.Hash, tx *types.Transaction) bool { 853 // Try to insert the transaction into the pending queue 854 if pool.pending[addr] == nil { 855 pool.pending[addr] = newTxList(true) 856 } 857 list := pool.pending[addr] 858 859 inserted, old := list.Add(tx, pool.config.PriceBump) 860 if !inserted { 861 // An older transaction was better, discard this 862 pool.all.Remove(hash) 863 pool.priced.Removed(1) 864 pendingDiscardMeter.Mark(1) 865 return false 866 } 867 // Otherwise discard any previous transaction and mark this 868 if old != nil { 869 pool.all.Remove(old.Hash()) 870 pool.priced.Removed(1) 871 pendingReplaceMeter.Mark(1) 872 } else { 873 // Nothing was replaced, bump the pending counter 874 pendingGauge.Inc(1) 875 } 876 // Set the potentially new pending nonce and notify any subsystems of the new tx 877 pool.pendingNonces.set(addr, tx.Nonce()+1) 878 879 // Successful promotion, bump the heartbeat 880 pool.beats[addr] = time.Now() 881 if list.Len()%100 == 0 { 882 log.Info("Another 100 txs added to list", "addr", addr, "len", list.Len()) 883 } 884 return true 885 } 886 887 // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the 888 // senders as a local ones, ensuring they go around the local pricing constraints. 889 // 890 // This method is used to add transactions from the RPC API and performs synchronous pool 891 // reorganization and event propagation. 892 func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { 893 return pool.addTxs(txs, !pool.config.NoLocals, true) 894 } 895 896 // AddLocal enqueues a single local transaction into the pool if it is valid. This is 897 // a convenience wrapper aroundd AddLocals. 898 func (pool *TxPool) AddLocal(tx *types.Transaction) error { 899 pool.localTxsCount += 1 900 errs := pool.AddLocals([]*types.Transaction{tx}) 901 return errs[0] 902 } 903 904 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the 905 // senders are not among the locally tracked ones, full pricing constraints will apply. 906 // 907 // This method is used to add transactions from the p2p network and does not wait for pool 908 // reorganization and internal event propagation. 909 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 910 pool.remoteTxsCount += len(txs) 911 return pool.addTxs(txs, false, false) 912 } 913 914 // This is like AddRemotes, but waits for pool reorganization. Tests use this method. 915 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 916 return pool.addTxs(txs, false, true) 917 } 918 919 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 920 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 921 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 922 return errs[0] 923 } 924 925 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 926 // wrapper around AddRemotes. 927 // 928 // Deprecated: use AddRemotes 929 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 930 errs := pool.AddRemotes([]*types.Transaction{tx}) 931 return errs[0] 932 } 933 934 // addTxs attempts to queue a batch of transactions if they are valid. 935 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { 936 // Filter out known ones without obtaining the pool lock or recovering signatures 937 var ( 938 errs = make([]error, len(txs)) 939 news = make([]*types.Transaction, 0, len(txs)) 940 ) 941 for i, tx := range txs { 942 // If the transaction is known, pre-set the error slot 943 if pool.all.Get(tx.Hash()) != nil { 944 errs[i] = ErrAlreadyKnown 945 knownTxMeter.Mark(1) 946 continue 947 } 948 // Exclude transactions with invalid signatures as soon as 949 // possible and cache senders in transactions before 950 // obtaining lock 951 if sender := tx.From(); sender != nil { 952 var err error 953 _, err = sender.InternalAddress() 954 if err != nil { 955 errs[i] = err 956 invalidTxMeter.Mark(1) 957 continue 958 } 959 } else if _, found := pool.GetSender(tx.Hash()); found { 960 // if the sender is cached in the tx or in the pool cache, we don't need to add it into the cache 961 } else { 962 from, err := types.Sender(pool.signer, tx) 963 if err != nil { 964 errs[i] = ErrInvalidSender 965 invalidTxMeter.Mark(1) 966 continue 967 } 968 _, err = from.InternalAddress() 969 if err != nil { 970 errs[i] = ErrInvalidSender 971 invalidTxMeter.Mark(1) 972 continue 973 } 974 } 975 976 // Accumulate all unknown transactions for deeper processing 977 news = append(news, tx) 978 } 979 if len(news) == 0 { 980 return errs 981 } 982 983 // Process all the new transaction and merge any errors into the original slice 984 pool.mu.Lock() 985 newErrs, dirtyAddrs := pool.addTxsLocked(news, local) 986 pool.mu.Unlock() 987 988 var nilSlot = 0 989 for _, err := range newErrs { 990 for errs[nilSlot] != nil { 991 nilSlot++ 992 } 993 errs[nilSlot] = err 994 nilSlot++ 995 } 996 // Reorg the pool internals if needed and return 997 done := pool.requestPromoteExecutables(dirtyAddrs) 998 if sync { 999 <-done 1000 } 1001 return errs 1002 } 1003 1004 // addTxsLocked attempts to queue a batch of transactions if they are valid. 1005 // The transaction pool lock must be held. 1006 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 1007 dirty := newAccountSet(pool.signer) 1008 errs := make([]error, len(txs)) 1009 for i, tx := range txs { 1010 replaced, err := pool.add(tx, local) 1011 errs[i] = err 1012 if err == nil && !replaced { 1013 dirty.addTx(tx) 1014 } 1015 } 1016 validTxMeter.Mark(int64(len(dirty.accounts))) 1017 return errs, dirty 1018 } 1019 1020 // Status returns the status (unknown/pending/queued) of a batch of transactions 1021 // identified by their hashes. 1022 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 1023 status := make([]TxStatus, len(hashes)) 1024 for i, hash := range hashes { 1025 tx := pool.Get(hash) 1026 if tx == nil { 1027 continue 1028 } 1029 from, _ := types.Sender(pool.signer, tx) // already validated 1030 internal, err := from.InternalAddress() 1031 if err != nil { 1032 continue 1033 } 1034 pool.mu.RLock() 1035 if txList := pool.pending[internal]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 1036 status[i] = TxStatusPending 1037 } else if txList := pool.queue[internal]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 1038 status[i] = TxStatusQueued 1039 } 1040 // implicit else: the tx may have been included into a block between 1041 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 1042 pool.mu.RUnlock() 1043 } 1044 return status 1045 } 1046 1047 // Get returns a transaction if it is contained in the pool and nil otherwise. 1048 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 1049 return pool.all.Get(hash) 1050 } 1051 1052 // Has returns an indicator whether txpool has a transaction cached with the 1053 // given hash. 1054 func (pool *TxPool) Has(hash common.Hash) bool { 1055 return pool.all.Get(hash) != nil 1056 } 1057 1058 // removeTx removes a single transaction from the queue, moving all subsequent 1059 // transactions back to the future queue. 1060 func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { 1061 // Fetch the transaction we wish to delete 1062 tx := pool.all.Get(hash) 1063 if tx == nil { 1064 return 1065 } 1066 addr, _ := types.Sender(pool.signer, tx) // already validated during insertion 1067 internal, err := addr.InternalAddress() 1068 if err != nil { 1069 return 1070 } 1071 // Remove it from the list of known transactions 1072 pool.all.Remove(hash) 1073 if outofbound { 1074 pool.priced.Removed(1) 1075 } 1076 if pool.locals.contains(internal) { 1077 localGauge.Dec(1) 1078 } 1079 // Remove the transaction from the pending lists and reset the account nonce 1080 if pending := pool.pending[internal]; pending != nil { 1081 if removed, invalids := pending.Remove(tx); removed { 1082 // If no more pending transactions are left, remove the list 1083 if pending.Empty() { 1084 delete(pool.pending, internal) 1085 } 1086 // Postpone any invalidated transactions 1087 for _, tx := range invalids { 1088 // Internal shuffle shouldn't touch the lookup set. 1089 pool.enqueueTx(tx.Hash(), tx, false, false) 1090 } 1091 // Update the account nonce if needed 1092 pool.pendingNonces.setIfLower(internal, tx.Nonce()) 1093 // Reduce the pending counter 1094 pendingGauge.Dec(int64(1 + len(invalids))) 1095 return 1096 } 1097 } 1098 // Transaction is in the future queue 1099 if future := pool.queue[internal]; future != nil { 1100 if removed, _ := future.Remove(tx); removed { 1101 // Reduce the queued counter 1102 queuedGauge.Dec(1) 1103 } 1104 if future.Empty() { 1105 delete(pool.queue, internal) 1106 delete(pool.beats, internal) 1107 } 1108 } 1109 } 1110 1111 // requestReset requests a pool reset to the new head block. 1112 // The returned channel is closed when the reset has occurred. 1113 func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { 1114 select { 1115 case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: 1116 return <-pool.reorgDoneCh 1117 case <-pool.reorgShutdownCh: 1118 return pool.reorgShutdownCh 1119 } 1120 } 1121 1122 // requestPromoteExecutables requests transaction promotion checks for the given addresses. 1123 // The returned channel is closed when the promotion checks have occurred. 1124 func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { 1125 select { 1126 case pool.reqPromoteCh <- set: 1127 return <-pool.reorgDoneCh 1128 case <-pool.reorgShutdownCh: 1129 return pool.reorgShutdownCh 1130 } 1131 } 1132 1133 // queueTxEvent enqueues a transaction event to be sent in the next reorg run. 1134 func (pool *TxPool) queueTxEvent(tx *types.Transaction) { 1135 select { 1136 case pool.queueTxEventCh <- tx: 1137 case <-pool.reorgShutdownCh: 1138 } 1139 } 1140 1141 // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not 1142 // call those methods directly, but request them being run using requestReset and 1143 // requestPromoteExecutables instead. 1144 func (pool *TxPool) scheduleReorgLoop() { 1145 defer pool.wg.Done() 1146 1147 var ( 1148 curDone chan struct{} // non-nil while runReorg is active 1149 nextDone = make(chan struct{}) 1150 launchNextRun bool 1151 reset *txpoolResetRequest 1152 dirtyAccounts *accountSet 1153 queuedEvents = make(map[common.InternalAddress]*txSortedMap) 1154 reorgCancelCh = make(chan struct{}) 1155 ) 1156 for { 1157 // Launch next background reorg if needed 1158 if curDone == nil && launchNextRun { 1159 // kill any currently running runReorg and launch the next one 1160 close(reorgCancelCh) 1161 reorgCancelCh = make(chan struct{}) 1162 // Run the background reorg and announcements 1163 go pool.runReorg(nextDone, reorgCancelCh, reset, dirtyAccounts, queuedEvents) 1164 1165 // Prepare everything for the next round of reorg 1166 curDone, nextDone = nextDone, make(chan struct{}) 1167 launchNextRun = false 1168 1169 reset, dirtyAccounts = nil, nil 1170 queuedEvents = make(map[common.InternalAddress]*txSortedMap) 1171 } 1172 1173 select { 1174 case req := <-pool.reqResetCh: 1175 // Reset request: update head if request is already pending. 1176 if reset == nil { 1177 reset = req 1178 } else { 1179 reset.newHead = req.newHead 1180 } 1181 launchNextRun = true 1182 pool.reorgDoneCh <- nextDone 1183 1184 case req := <-pool.reqPromoteCh: 1185 // Promote request: update address set if request is already pending. 1186 if dirtyAccounts == nil { 1187 dirtyAccounts = req 1188 } else { 1189 dirtyAccounts.merge(req) 1190 } 1191 launchNextRun = true 1192 pool.reorgDoneCh <- nextDone 1193 1194 case tx := <-pool.queueTxEventCh: 1195 // Queue up the event, but don't schedule a reorg. It's up to the caller to 1196 // request one later if they want the events sent. 1197 addr, _ := types.Sender(pool.signer, tx) 1198 internal, err := addr.InternalAddress() 1199 if err != nil { 1200 log.Debug("Failed to queue transaction", "err", err) 1201 continue 1202 } 1203 if _, ok := queuedEvents[internal]; !ok { 1204 queuedEvents[internal] = newTxSortedMap() 1205 } 1206 queuedEvents[internal].Put(tx) 1207 1208 case <-curDone: 1209 curDone = nil 1210 1211 case <-pool.reorgShutdownCh: 1212 // Wait for current run to finish. 1213 if curDone != nil { 1214 <-curDone 1215 } 1216 close(nextDone) 1217 return 1218 } 1219 } 1220 } 1221 1222 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. 1223 func (pool *TxPool) runReorg(done chan struct{}, cancel chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.InternalAddress]*txSortedMap) { 1224 defer close(done) 1225 1226 for { 1227 select { 1228 case <-cancel: 1229 return 1230 default: 1231 pool.reOrgCounter += 1 1232 var start time.Time 1233 if pool.reOrgCounter == c_reorgCounterThreshold { 1234 start = time.Now() 1235 } 1236 1237 var promoteAddrs []common.InternalAddress 1238 if dirtyAccounts != nil && reset == nil { 1239 // Only dirty accounts need to be promoted, unless we're resetting. 1240 // For resets, all addresses in the tx queue will be promoted and 1241 // the flatten operation can be avoided. 1242 promoteAddrs = dirtyAccounts.flatten() 1243 } 1244 pool.mu.Lock() 1245 if reset != nil { 1246 // Reset from the old head to the new, rescheduling any reorged transactions 1247 pool.reset(reset.oldHead, reset.newHead) 1248 1249 // Nonces were reset, discard any events that became stale 1250 for addr := range events { 1251 events[addr].Forward(pool.pendingNonces.get(addr)) 1252 if events[addr].Len() == 0 { 1253 delete(events, addr) 1254 } 1255 } 1256 // Reset needs promote for all addresses 1257 promoteAddrs = make([]common.InternalAddress, 0, len(pool.queue)) 1258 for addr := range pool.queue { 1259 promoteAddrs = append(promoteAddrs, addr) 1260 } 1261 } 1262 // Check for pending transactions for every account that sent new ones 1263 promoted := pool.promoteExecutables(promoteAddrs) 1264 1265 // If a new block appeared, validate the pool of pending transactions. This will 1266 // remove any transaction that has been included in the block or was invalidated 1267 // because of another transaction (e.g. higher gas price). 1268 if reset != nil { 1269 pool.demoteUnexecutables() 1270 if reset.newHead != nil { 1271 pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead) 1272 pool.priced.SetBaseFee(pendingBaseFee) 1273 } 1274 } 1275 // Ensure pool.queue and pool.pending sizes stay within the configured limits. 1276 pool.truncatePending() 1277 pool.truncateQueue() 1278 1279 // Update all accounts to the latest known pending nonce 1280 for addr, list := range pool.pending { 1281 highestPending := list.LastElement() 1282 pool.pendingNonces.set(addr, highestPending.Nonce()+1) 1283 } 1284 pool.mu.Unlock() 1285 1286 // Notify subsystems for newly added transactions 1287 for _, tx := range promoted { 1288 addr, _ := types.Sender(pool.signer, tx) 1289 internal, err := addr.InternalAddress() 1290 if err != nil { 1291 log.Debug("Failed to add transaction event", "err", err) 1292 continue 1293 } 1294 if _, ok := events[internal]; !ok { 1295 events[(internal)] = newTxSortedMap() 1296 } 1297 events[internal].Put(tx) 1298 } 1299 if len(events) > 0 { 1300 var txs []*types.Transaction 1301 for _, set := range events { 1302 txs = append(txs, set.Flatten()...) 1303 } 1304 pool.txFeed.Send(NewTxsEvent{txs}) 1305 } 1306 if pool.reOrgCounter == c_reorgCounterThreshold { 1307 log.Debug("Time taken to runReorg in txpool", "time", common.PrettyDuration(time.Since(start))) 1308 pool.reOrgCounter = 0 1309 } 1310 return 1311 } 1312 } 1313 } 1314 1315 // reset retrieves the current state of the blockchain and ensures the content 1316 // of the transaction pool is valid with regard to the chain state. 1317 func (pool *TxPool) reset(oldHead, newHead *types.Header) { 1318 var start time.Time 1319 if pool.reOrgCounter == c_reorgCounterThreshold { 1320 start = time.Now() 1321 } 1322 // If we're reorging an old state, reinject all dropped transactions 1323 var reinject types.Transactions 1324 1325 if oldHead != nil && oldHead.Hash() != newHead.ParentHash() { 1326 // If the reorg is too deep, avoid doing it (will happen during fast sync) 1327 oldNum := oldHead.Number().Uint64() 1328 newNum := newHead.Number().Uint64() 1329 1330 if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { 1331 log.Debug("Skipping deep transaction reorg", "depth", depth) 1332 } else { 1333 // Reorg seems shallow enough to pull in all transactions into memory 1334 var discarded, included types.Transactions 1335 var ( 1336 rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number().Uint64()) 1337 add = pool.chain.GetBlock(newHead.Hash(), newHead.Number().Uint64()) 1338 ) 1339 if rem == nil { 1340 // This can happen if a setHead is performed, where we simply discard the old 1341 // head from the chain. 1342 // If that is the case, we don't have the lost transactions any more, and 1343 // there's nothing to add 1344 if newNum >= oldNum { 1345 // If we reorged to a same or higher number, then it's not a case of setHead 1346 log.Warn("Transaction pool reset with missing oldhead", 1347 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1348 return 1349 } 1350 // If the reorg ended up on a lower number, it's indicative of setHead being the cause 1351 log.Debug("Skipping transaction reset caused by setHead", 1352 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1353 // We still need to update the current state s.th. the lost transactions can be readded by the user 1354 } else { 1355 if rem == nil || add == nil { 1356 log.Error("Unrooted chain seen by tx pool") 1357 return 1358 } 1359 for rem.NumberU64() > add.NumberU64() { 1360 discarded = append(discarded, rem.Transactions()...) 1361 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1362 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number(), "hash", oldHead.Hash()) 1363 return 1364 } 1365 } 1366 for add.NumberU64() > rem.NumberU64() { 1367 included = append(included, add.Transactions()...) 1368 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1369 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number(), "hash", newHead.Hash()) 1370 return 1371 } 1372 } 1373 for rem.Hash() != add.Hash() { 1374 discarded = append(discarded, rem.Transactions()...) 1375 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1376 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number(), "hash", oldHead.Hash()) 1377 return 1378 } 1379 included = append(included, add.Transactions()...) 1380 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1381 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number(), "hash", newHead.Hash()) 1382 return 1383 } 1384 } 1385 reinject = types.TxDifference(discarded, included) 1386 } 1387 } 1388 } 1389 // Initialize the internal state to the current head 1390 if newHead == nil { 1391 newHead = pool.chain.CurrentBlock().Header() // Special case during testing 1392 } 1393 statedb, err := pool.chain.StateAt(newHead.Root()) 1394 if err != nil { 1395 log.Error("Failed to reset txpool state", "err", err) 1396 return 1397 } 1398 pool.currentState = statedb 1399 pool.pendingNonces = newTxNoncer(statedb) 1400 pool.currentMaxGas = newHead.GasLimit() 1401 1402 // Inject any transactions discarded due to reorgs 1403 log.Debug("Reinjecting stale transactions", "count", len(reinject)) 1404 senderCacher.recover(pool.signer, reinject) 1405 pool.addTxsLocked(reinject, false) 1406 if pool.reOrgCounter == c_reorgCounterThreshold { 1407 log.Debug("Time taken to resetTxPool", "time", common.PrettyDuration(time.Since(start))) 1408 } 1409 } 1410 1411 // promoteExecutables moves transactions that have become processable from the 1412 // future queue to the set of pending transactions. During this process, all 1413 // invalidated transactions (low nonce, low balance) are deleted. 1414 func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*types.Transaction { 1415 var start time.Time 1416 if pool.reOrgCounter == c_reorgCounterThreshold { 1417 start = time.Now() 1418 } 1419 // Track the promoted transactions to broadcast them at once 1420 var promoted []*types.Transaction 1421 1422 // Iterate over all accounts and promote any executable transactions 1423 for _, addr := range accounts { 1424 list := pool.queue[addr] 1425 if list == nil { 1426 continue // Just in case someone calls with a non existing account 1427 } 1428 // Drop all transactions that are deemed too old (low nonce) 1429 forwards := list.Forward(pool.currentState.GetNonce(addr)) 1430 for _, tx := range forwards { 1431 hash := tx.Hash() 1432 pool.all.Remove(hash) 1433 } 1434 log.Trace("Removed old queued transactions", "count", len(forwards)) 1435 // Drop all transactions that are too costly (low balance or out of gas) 1436 drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1437 for _, tx := range drops { 1438 hash := tx.Hash() 1439 pool.all.Remove(hash) 1440 } 1441 log.Trace("Removed unpayable queued transactions", "count", len(drops)) 1442 queuedNofundsMeter.Mark(int64(len(drops))) 1443 1444 // Gather all executable transactions and promote them 1445 readies := list.Ready(pool.pendingNonces.get(addr)) 1446 for _, tx := range readies { 1447 hash := tx.Hash() 1448 if pool.promoteTx(addr, hash, tx) { 1449 promoted = append(promoted, tx) 1450 } 1451 } 1452 log.Trace("Promoted queued transactions", "count", len(promoted)) 1453 queuedGauge.Dec(int64(len(readies))) 1454 1455 // Drop all transactions over the allowed limit 1456 var caps types.Transactions 1457 if !pool.locals.contains(addr) { 1458 caps = list.Cap(int(pool.config.AccountQueue)) 1459 for _, tx := range caps { 1460 hash := tx.Hash() 1461 pool.all.Remove(hash) 1462 log.Trace("Removed cap-exceeding queued transaction", "hash", hash) 1463 } 1464 queuedRateLimitMeter.Mark(int64(len(caps))) 1465 } 1466 // Mark all the items dropped as removed 1467 pool.priced.Removed(len(forwards) + len(drops) + len(caps)) 1468 queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1469 if pool.locals.contains(addr) { 1470 localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1471 } 1472 // Delete the entire queue entry if it became empty. 1473 if list.Empty() { 1474 delete(pool.queue, addr) 1475 delete(pool.beats, addr) 1476 } 1477 } 1478 if pool.reOrgCounter == c_reorgCounterThreshold { 1479 log.Debug("Time taken to promoteExecutables", "time", common.PrettyDuration(time.Since(start))) 1480 } 1481 return promoted 1482 } 1483 1484 // truncatePending removes transactions from the pending queue if the pool is above the 1485 // pending limit. The algorithm tries to reduce transaction counts by an approximately 1486 // equal number for all for accounts with many pending transactions. 1487 func (pool *TxPool) truncatePending() { 1488 var start time.Time 1489 if pool.reOrgCounter == c_reorgCounterThreshold { 1490 start = time.Now() 1491 } 1492 pending := uint64(0) 1493 for _, list := range pool.pending { 1494 pending += uint64(list.Len()) 1495 } 1496 if pending <= pool.config.GlobalSlots { 1497 return 1498 } 1499 1500 pendingBeforeCap := pending 1501 // Assemble a spam order to penalize large transactors first 1502 spammers := prque.New(nil) 1503 for addr, list := range pool.pending { 1504 // Only evict transactions from high rollers 1505 if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { 1506 spammers.Push(addr, int64(list.Len())) 1507 } 1508 } 1509 // Gradually drop transactions from offenders 1510 offenders := []common.InternalAddress{} 1511 for pending > pool.config.GlobalSlots && !spammers.Empty() { 1512 // Retrieve the next offender if not local address 1513 offender, _ := spammers.Pop() 1514 offenders = append(offenders, offender.(common.InternalAddress)) 1515 1516 // Equalize balances until all the same or below threshold 1517 if len(offenders) > 1 { 1518 // Calculate the equalization threshold for all current offenders 1519 threshold := pool.pending[offender.(common.InternalAddress)].Len() 1520 1521 // Iteratively reduce all offenders until below limit or threshold reached 1522 for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { 1523 for i := 0; i < len(offenders)-1; i++ { 1524 list := pool.pending[offenders[i]] 1525 1526 caps := list.Cap(list.Len() - 1) 1527 for _, tx := range caps { 1528 // Drop the transaction from the global pools too 1529 hash := tx.Hash() 1530 pool.all.Remove(hash) 1531 1532 // Update the account nonce to the dropped transaction 1533 pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) 1534 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1535 } 1536 pool.priced.Removed(len(caps)) 1537 pendingGauge.Dec(int64(len(caps))) 1538 if pool.locals.contains(offenders[i]) { 1539 localGauge.Dec(int64(len(caps))) 1540 } 1541 pending-- 1542 } 1543 } 1544 } 1545 } 1546 1547 // If still above threshold, reduce to limit or min allowance 1548 if pending > pool.config.GlobalSlots && len(offenders) > 0 { 1549 for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { 1550 for _, addr := range offenders { 1551 list := pool.pending[addr] 1552 1553 caps := list.Cap(list.Len() - 1) 1554 for _, tx := range caps { 1555 // Drop the transaction from the global pools too 1556 hash := tx.Hash() 1557 pool.all.Remove(hash) 1558 1559 // Update the account nonce to the dropped transaction 1560 pool.pendingNonces.setIfLower(addr, tx.Nonce()) 1561 log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) 1562 } 1563 pool.priced.Removed(len(caps)) 1564 pendingGauge.Dec(int64(len(caps))) 1565 if pool.locals.contains(addr) { 1566 localGauge.Dec(int64(len(caps))) 1567 } 1568 pending-- 1569 } 1570 } 1571 } 1572 pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) 1573 if pool.reOrgCounter == c_reorgCounterThreshold { 1574 log.Debug("Time taken to truncatePending", "time", common.PrettyDuration(time.Since(start))) 1575 } 1576 } 1577 1578 // truncateQueue drops the oldes transactions in the queue if the pool is above the global queue limit. 1579 func (pool *TxPool) truncateQueue() { 1580 var start time.Time 1581 if pool.reOrgCounter == c_reorgCounterThreshold { 1582 start = time.Now() 1583 } 1584 queued := uint64(0) 1585 for _, list := range pool.queue { 1586 queued += uint64(list.Len()) 1587 } 1588 if queued <= pool.config.GlobalQueue { 1589 return 1590 } 1591 1592 // Sort all accounts with queued transactions by heartbeat 1593 addresses := make(addressesByHeartbeat, 0, len(pool.queue)) 1594 for addr := range pool.queue { 1595 if !pool.locals.contains(addr) { // don't drop locals 1596 addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) 1597 } 1598 } 1599 sort.Sort(addresses) 1600 1601 // Drop transactions until the total is below the limit or only locals remain 1602 for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { 1603 addr := addresses[len(addresses)-1] 1604 list := pool.queue[addr.address] 1605 1606 addresses = addresses[:len(addresses)-1] 1607 1608 // Drop all transactions if they are less than the overflow 1609 if size := uint64(list.Len()); size <= drop { 1610 for _, tx := range list.Flatten() { 1611 pool.removeTx(tx.Hash(), true) 1612 } 1613 drop -= size 1614 queuedRateLimitMeter.Mark(int64(size)) 1615 continue 1616 } 1617 // Otherwise drop only last few transactions 1618 txs := list.Flatten() 1619 for i := len(txs) - 1; i >= 0 && drop > 0; i-- { 1620 pool.removeTx(txs[i].Hash(), true) 1621 drop-- 1622 queuedRateLimitMeter.Mark(1) 1623 } 1624 } 1625 if pool.reOrgCounter == c_reorgCounterThreshold { 1626 log.Debug("Time taken to truncateQueue", "time", common.PrettyDuration(time.Since(start))) 1627 } 1628 } 1629 1630 // demoteUnexecutables removes invalid and processed transactions from the pools 1631 // executable/pending queue and any subsequent transactions that become unexecutable 1632 // are moved back into the future queue. 1633 // 1634 // Note: transactions are not marked as removed in the priced list because re-heaping 1635 // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful 1636 // to trigger a re-heap is this function 1637 func (pool *TxPool) demoteUnexecutables() { 1638 var start time.Time 1639 if pool.reOrgCounter == c_reorgCounterThreshold { 1640 start = time.Now() 1641 } 1642 // Iterate over all accounts and demote any non-executable transactions 1643 for addr, list := range pool.pending { 1644 nonce := pool.currentState.GetNonce(addr) 1645 1646 // Drop all transactions that are deemed too old (low nonce) 1647 olds := list.Forward(nonce) 1648 for _, tx := range olds { 1649 hash := tx.Hash() 1650 pool.all.Remove(hash) 1651 log.Debug("Removed old pending transaction", "hash", hash) 1652 } 1653 // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later 1654 drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1655 for _, tx := range drops { 1656 hash := tx.Hash() 1657 log.Trace("Removed unpayable pending transaction", "hash", hash) 1658 pool.all.Remove(hash) 1659 } 1660 pendingNofundsMeter.Mark(int64(len(drops))) 1661 1662 for _, tx := range invalids { 1663 hash := tx.Hash() 1664 log.Trace("Demoting pending transaction", "hash", hash) 1665 1666 // Internal shuffle shouldn't touch the lookup set. 1667 pool.enqueueTx(hash, tx, false, false) 1668 } 1669 pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1670 if pool.locals.contains(addr) { 1671 localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) 1672 } 1673 // If there's a gap in front, alert (should never happen) and postpone all transactions 1674 if list.Len() > 0 && list.txs.Get(nonce) == nil { 1675 gapped := list.Cap(0) 1676 log.Error("Demoting invalidated transactions", "count", len(gapped)) 1677 for _, tx := range gapped { 1678 hash := tx.Hash() 1679 1680 // Internal shuffle shouldn't touch the lookup set. 1681 pool.enqueueTx(hash, tx, false, false) 1682 } 1683 pendingGauge.Dec(int64(len(gapped))) 1684 } 1685 // Delete the entire pending entry if it became empty. 1686 if list.Empty() { 1687 delete(pool.pending, addr) 1688 } 1689 } 1690 if pool.reOrgCounter == c_reorgCounterThreshold { 1691 log.Debug("Time taken to demoteExecutables", "time", common.PrettyDuration(time.Since(start))) 1692 } 1693 } 1694 1695 // GetSender returns the sender of a stored transaction. 1696 func (pool *TxPool) GetSender(hash common.Hash) (common.InternalAddress, bool) { 1697 pool.SendersMutex.RLock() 1698 defer pool.SendersMutex.RUnlock() 1699 return pool.senders.Get(hash) 1700 } 1701 1702 // GetSenderThreadUnsafe returns the sender of a stored transaction. 1703 // It is not thread safe and should only be used when the pool senders mutex is locked. 1704 func (pool *TxPool) GetSenderThreadUnsafe(hash common.Hash) (common.InternalAddress, bool) { 1705 return pool.senders.Get(hash) 1706 } 1707 1708 // SetSender caches the sender of a transaction. 1709 func (pool *TxPool) SetSender(hash common.Hash, address common.InternalAddress) { 1710 pool.SendersMutex.Lock() 1711 defer pool.SendersMutex.Unlock() 1712 pool.senders.Set(hash, address) 1713 } 1714 1715 // sendersGoroutine asynchronously adds a new sender to the cache 1716 func (pool *TxPool) sendersGoroutine() { 1717 for { 1718 select { 1719 case <-pool.reorgShutdownCh: 1720 return 1721 case tx := <-pool.sendersCh: 1722 // Add transaction to sender cache 1723 pool.SendersMutex.Lock() // We could RLock here but it's unlikely to just be a read 1724 if _, ok := pool.senders.Get(tx.hash); !ok { 1725 pool.senders.Set(tx.hash, tx.sender) 1726 if pool.senders.Len() > int(pool.config.MaxSenders) { 1727 pool.senders.Delete(pool.senders.Oldest().Key) // FIFO 1728 } 1729 } else { 1730 log.Debug("Tx already seen in sender cache (reorg?)", "tx", tx.hash.String(), "sender", tx.sender.String()) 1731 } 1732 pool.SendersMutex.Unlock() 1733 } 1734 } 1735 } 1736 1737 // addressByHeartbeat is an account address tagged with its last activity timestamp. 1738 type addressByHeartbeat struct { 1739 address common.InternalAddress 1740 heartbeat time.Time 1741 } 1742 1743 type addressesByHeartbeat []addressByHeartbeat 1744 1745 func (a addressesByHeartbeat) Len() int { return len(a) } 1746 func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } 1747 func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 1748 1749 // accountSet is simply a set of addresses to check for existence, and a signer 1750 // capable of deriving addresses from transactions. 1751 type accountSet struct { 1752 accounts map[common.InternalAddress]struct{} 1753 signer types.Signer 1754 cache *[]common.InternalAddress 1755 } 1756 1757 // newAccountSet creates a new address set with an associated signer for sender 1758 // derivations. 1759 func newAccountSet(signer types.Signer, addrs ...common.InternalAddress) *accountSet { 1760 as := &accountSet{ 1761 accounts: make(map[common.InternalAddress]struct{}), 1762 signer: signer, 1763 } 1764 for _, addr := range addrs { 1765 as.add(addr) 1766 } 1767 return as 1768 } 1769 1770 // contains checks if a given address is contained within the set. 1771 func (as *accountSet) contains(addr common.InternalAddress) bool { 1772 _, exist := as.accounts[addr] 1773 return exist 1774 } 1775 1776 func (as *accountSet) empty() bool { 1777 return len(as.accounts) == 0 1778 } 1779 1780 // containsTx checks if the sender of a given tx is within the set. If the sender 1781 // cannot be derived, this method returns false. 1782 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1783 if addr, err := types.Sender(as.signer, tx); err == nil { 1784 internal, err := addr.InternalAddress() 1785 if err != nil { 1786 return false 1787 } 1788 return as.contains(internal) 1789 } 1790 return false 1791 } 1792 1793 // add inserts a new address into the set to track. 1794 func (as *accountSet) add(addr common.InternalAddress) { 1795 as.accounts[addr] = struct{}{} 1796 as.cache = nil 1797 } 1798 1799 // addTx adds the sender of tx into the set. 1800 func (as *accountSet) addTx(tx *types.Transaction) { 1801 if addr, err := types.Sender(as.signer, tx); err == nil { 1802 internal, err := addr.InternalAddress() 1803 if err != nil { 1804 log.Debug("Failed to add tx to account set", "err", err) 1805 return 1806 } 1807 as.add(internal) 1808 } 1809 } 1810 1811 // flatten returns the list of addresses within this set, also caching it for later 1812 // reuse. The returned slice should not be changed! 1813 func (as *accountSet) flatten() []common.InternalAddress { 1814 if as.cache == nil { 1815 accounts := make([]common.InternalAddress, 0, len(as.accounts)) 1816 for account := range as.accounts { 1817 accounts = append(accounts, account) 1818 } 1819 as.cache = &accounts 1820 } 1821 return *as.cache 1822 } 1823 1824 // merge adds all addresses from the 'other' set into 'as'. 1825 func (as *accountSet) merge(other *accountSet) { 1826 for addr := range other.accounts { 1827 as.accounts[addr] = struct{}{} 1828 } 1829 as.cache = nil 1830 } 1831 1832 // txLookup is used internally by TxPool to track transactions while allowing 1833 // lookup without mutex contention. 1834 // 1835 // Note, although this type is properly protected against concurrent access, it 1836 // is **not** a type that should ever be mutated or even exposed outside of the 1837 // transaction pool, since its internal state is tightly coupled with the pools 1838 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1839 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1840 // TxPool.mu mutex. 1841 // 1842 // This lookup set combines the notion of "local transactions", which is useful 1843 // to build upper-level structure. 1844 type txLookup struct { 1845 slots int 1846 lock sync.RWMutex 1847 locals map[common.Hash]*types.Transaction 1848 remotes map[common.Hash]*types.Transaction 1849 } 1850 1851 // newTxLookup returns a new txLookup structure. 1852 func newTxLookup() *txLookup { 1853 return &txLookup{ 1854 locals: make(map[common.Hash]*types.Transaction), 1855 remotes: make(map[common.Hash]*types.Transaction), 1856 } 1857 } 1858 1859 // Range calls f on each key and value present in the map. The callback passed 1860 // should return the indicator whether the iteration needs to be continued. 1861 // Callers need to specify which set (or both) to be iterated. 1862 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1863 t.lock.RLock() 1864 defer t.lock.RUnlock() 1865 1866 if local { 1867 for key, value := range t.locals { 1868 if !f(key, value, true) { 1869 return 1870 } 1871 } 1872 } 1873 if remote { 1874 for key, value := range t.remotes { 1875 if !f(key, value, false) { 1876 return 1877 } 1878 } 1879 } 1880 } 1881 1882 // Get returns a transaction if it exists in the lookup, or nil if not found. 1883 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1884 t.lock.RLock() 1885 defer t.lock.RUnlock() 1886 1887 if tx := t.locals[hash]; tx != nil { 1888 return tx 1889 } 1890 return t.remotes[hash] 1891 } 1892 1893 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1894 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { 1895 t.lock.RLock() 1896 defer t.lock.RUnlock() 1897 1898 return t.locals[hash] 1899 } 1900 1901 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1902 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { 1903 t.lock.RLock() 1904 defer t.lock.RUnlock() 1905 1906 return t.remotes[hash] 1907 } 1908 1909 // Count returns the current number of transactions in the lookup. 1910 func (t *txLookup) Count() int { 1911 t.lock.RLock() 1912 defer t.lock.RUnlock() 1913 1914 return len(t.locals) + len(t.remotes) 1915 } 1916 1917 // LocalCount returns the current number of local transactions in the lookup. 1918 func (t *txLookup) LocalCount() int { 1919 t.lock.RLock() 1920 defer t.lock.RUnlock() 1921 1922 return len(t.locals) 1923 } 1924 1925 // RemoteCount returns the current number of remote transactions in the lookup. 1926 func (t *txLookup) RemoteCount() int { 1927 t.lock.RLock() 1928 defer t.lock.RUnlock() 1929 1930 return len(t.remotes) 1931 } 1932 1933 // Slots returns the current number of slots used in the lookup. 1934 func (t *txLookup) Slots() int { 1935 t.lock.RLock() 1936 defer t.lock.RUnlock() 1937 1938 return t.slots 1939 } 1940 1941 // Add adds a transaction to the lookup. 1942 func (t *txLookup) Add(tx *types.Transaction, local bool) { 1943 t.lock.Lock() 1944 defer t.lock.Unlock() 1945 1946 t.slots += numSlots(tx) 1947 slotsGauge.Update(int64(t.slots)) 1948 1949 if local { 1950 t.locals[tx.Hash()] = tx 1951 } else { 1952 t.remotes[tx.Hash()] = tx 1953 } 1954 } 1955 1956 // Remove removes a transaction from the lookup. 1957 func (t *txLookup) Remove(hash common.Hash) { 1958 t.lock.Lock() 1959 defer t.lock.Unlock() 1960 1961 tx, ok := t.locals[hash] 1962 if !ok { 1963 tx, ok = t.remotes[hash] 1964 } 1965 if !ok { 1966 log.Error("No transaction found to be deleted", "hash", hash) 1967 return 1968 } 1969 t.slots -= numSlots(tx) 1970 slotsGauge.Update(int64(t.slots)) 1971 1972 delete(t.locals, hash) 1973 delete(t.remotes, hash) 1974 } 1975 1976 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1977 // set. The assumption is held the locals set is thread-safe to be used. 1978 func (t *txLookup) RemoteToLocals(locals *accountSet) int { 1979 t.lock.Lock() 1980 defer t.lock.Unlock() 1981 1982 var migrated int 1983 for hash, tx := range t.remotes { 1984 if locals.containsTx(tx) { 1985 t.locals[hash] = tx 1986 delete(t.remotes, hash) 1987 migrated += 1 1988 } 1989 } 1990 return migrated 1991 } 1992 1993 // RemotesBelowTip finds all remote transactions below the given tip threshold. 1994 func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { 1995 found := make(types.Transactions, 0, 128) 1996 t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { 1997 if tx.GasTipCapIntCmp(threshold) < 0 { 1998 found = append(found, tx) 1999 } 2000 return true 2001 }, false, true) // Only iterate remotes 2002 return found 2003 } 2004 2005 // numSlots calculates the number of slots needed for a single transaction. 2006 func numSlots(tx *types.Transaction) int { 2007 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 2008 } 2009 2010 func ErrGasLimit(txGas uint64, limit uint64) error { 2011 return fmt.Errorf(errGasLimit.Error()+", tx: %d, current limit: %d", txGas, limit) 2012 }