github.com/ubiq/go-ubiq/v6@v6.0.0/core/tx_pool.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ubiq/go-ubiq/v6/common"
	"github.com/ubiq/go-ubiq/v6/common/prque"
	"github.com/ubiq/go-ubiq/v6/consensus/misc"
	"github.com/ubiq/go-ubiq/v6/core/state"
	"github.com/ubiq/go-ubiq/v6/core/types"
	"github.com/ubiq/go-ubiq/v6/event"
	"github.com/ubiq/go-ubiq/v6/log"
	"github.com/ubiq/go-ubiq/v6/metrics"
	"github.com/ubiq/go-ubiq/v6/params"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * txSlotSize // 128KB
)

var (
	// ErrAlreadyKnown is returned if the transaction is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
	// txpool reorgs.
	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
	// reorgDurationTimer measures how long a txpool reorg takes.
	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
	// that this number is pretty low, since txpool reorgs happen very frequently.
	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)

	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
}

// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}
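
// exampleSanitizedConfig is an illustrative sketch, not part of the pool's API:
// the function name and the concrete numbers below are arbitrary assumptions. It
// shows the usual pattern of starting from DefaultTxPoolConfig, overriding a few
// fields and letting sanitize clamp anything unreasonable.
func exampleSanitizedConfig() TxPoolConfig {
	cfg := DefaultTxPoolConfig
	cfg.Rejournal = 0     // invalid: sanitize bumps this back up to one second
	cfg.PriceLimit = 20   // only accept transactions paying at least 20 wei
	cfg.AccountSlots = 32 // guarantee more executable slots per account
	return (&cfg).sanitize()
}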
// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transactions to exempt from eviction rules
	journal *txJournal  // Journal of local transactions to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)

	changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
}

type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}
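
// exampleStartPool is an illustrative sketch of the pool lifecycle; the function
// name and the run callback are hypothetical. It shows the expected pairing of
// NewTxPool (defined below) with Stop, which tears down the pool's goroutines
// and journal when the node shuts down.
func exampleStartPool(chainconfig *params.ChainConfig, chain blockChain, run func(*TxPool)) {
	pool := NewTxPool(DefaultTxPoolConfig, chainconfig, chain)
	defer pool.Stop()

	run(pool)
}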
// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.LatestSigner(chainconfig),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		initDoneCh:      make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	// Notify tests that the init phase is done
	close(pool.initDoneCh)
	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			pool.mu.RUnlock()
			stales := int(atomic.LoadInt64(&pool.priced.stales))

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}

// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return new(big.Int).Set(pool.gasPrice)
}

// SetGasPrice updates the minimum price required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *TxPool) SetGasPrice(price *big.Int) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	old := pool.gasPrice
	pool.gasPrice = price
	// if the min miner fee increased, remove transactions below the new threshold
	if price.Cmp(old) > 0 {
		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
		drop := pool.all.RemotesBelowTip(price)
		for _, tx := range drop {
			pool.removeTx(tx.Hash(), false)
		}
		pool.priced.Removed(len(drop))
	}

	log.Info("Transaction pool price threshold updated", "price", price)
}

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (pool *TxPool) Nonce(addr common.Address) uint64 {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.pendingNonces.get(addr)
}
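
// exampleWatchPool is an illustrative sketch of the subscription API; the function
// name, channel size and log line are arbitrary assumptions. It receives NewTxsEvent
// batches until the subscription is torn down (e.g. when the pool is stopped).
func exampleWatchPool(pool *TxPool) {
	ch := make(chan NewTxsEvent, 16)
	sub := pool.SubscribeNewTxsEvent(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			log.Debug("Observed promoted transactions", "count", len(ev.Txs), "floor", pool.GasPrice())
		case <-sub.Err():
			return
		}
	}
}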
// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) Stats() (int, int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.stats()
}

// stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) stats() (int, int) {
	pending := 0
	for _, list := range pool.pending {
		pending += list.Len()
	}
	queued := 0
	for _, list := range pool.queue {
		queued += list.Len()
	}
	return pending, queued
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address]types.Transactions)
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}

// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	var pending types.Transactions
	if list, ok := pool.pending[addr]; ok {
		pending = list.Flatten()
	}
	var queued types.Transactions
	if list, ok := pool.queue[addr]; ok {
		queued = list.Flatten()
	}
	return pending, queued
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
//
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		txs := list.Flatten()

		// If the miner requests tip enforcement, cap the lists now
		if enforceTips && !pool.locals.contains(addr) {
			for i, tx := range txs {
				if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
					txs = txs[:i]
					break
				}
			}
		}
		if len(txs) > 0 {
			pending[addr] = txs
		}
	}
	return pending
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}
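
// examplePendingForMining is an illustrative sketch (the function name is
// hypothetical) of how a block producer consumes Pending: with enforceTips set,
// remote transactions whose effective tip is below the pool's minimum are already
// filtered out, so the per-account lists can simply be flattened for assembly.
func examplePendingForMining(pool *TxPool) types.Transactions {
	var flat types.Transactions
	for _, txs := range pool.Pending(true) {
		flat = append(flat, txs...)
	}
	return flat
}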
// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Accept only legacy transactions until EIP-2718/2930 activates.
	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
		return ErrTxTypeNotSupported
	}
	// Reject dynamic fee transactions until EIP-1559 activates.
	if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
		return ErrTxTypeNotSupported
	}
	// Reject transactions over defined size to prevent DOS attacks
	if uint64(tx.Size()) > txMaxSize {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Sanity check for extremely large numbers
	if tx.GasFeeCap().BitLen() > 256 {
		return ErrFeeCapVeryHigh
	}
	if tx.GasTipCap().BitLen() > 256 {
		return ErrTipVeryHigh
	}
	// Ensure gasFeeCap is greater than or equal to gasTipCap.
	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
		return ErrTipAboveFeeCap
	}
	// Make sure the transaction is signed properly.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price or tip.
	pendingBaseFee := pool.priced.urgent.baseFee
	if !local && tx.EffectiveGasTipIntCmp(pool.gasPrice, pendingBaseFee) < 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// Make the local flag. If it's from a local source, or it's from the network but
	// the sender was previously marked as local, treat it as a local transaction.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// We're about to replace a transaction. The reorg does a more thorough
		// analysis of what to remove and how, but it runs async. We don't want to
		// do too many replacements between reorg-runs, so we cap the number of
		// replacements to 25% of the slots
		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
			throttleTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}

		// New transaction is better than our worst ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise if we can't make enough room for the new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case, we still can't make the room for the new remote one.
		if !isLocal && !success {
			log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}
		// Bump the counter of rejections-since-reorg
		pool.changesSinceReorg += len(drop)
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in the lookup set but it's expected to be there,
	// show the error log.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never recorded the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older one was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}

// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}

// addRemoteSync is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}
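
// exampleAddBatch is an illustrative sketch (the function name is hypothetical) of
// the batch-add contract: AddRemotes returns exactly one error slot per input
// transaction, so callers correlate results with inputs by index.
func exampleAddBatch(pool *TxPool, txs []*types.Transaction) int {
	accepted := 0
	for i, err := range pool.AddRemotes(txs) {
		switch err {
		case nil:
			accepted++
		case ErrAlreadyKnown:
			// Harmless duplicate, e.g. the same transaction announced by several peers.
		default:
			log.Trace("Rejected transaction", "hash", txs[i].Hash(), "err", err)
		}
	}
	return accepted
}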
// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}

// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = ErrAlreadyKnown
			knownTxMeter.Mark(1)
			continue
		}
		// Exclude transactions with invalid signatures as soon as
		// possible and cache senders in transactions before
		// obtaining lock
		_, err := types.Sender(pool.signer, tx)
		if err != nil {
			errs[i] = ErrInvalidSender
			invalidTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}

	// Process all the new transactions and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included into a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}
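
// exampleLookup is an illustrative sketch (the function name is hypothetical) of
// the read-only helpers: Has answers containment cheaply, Get returns the pooled
// transaction or nil, and Status reports whether the pool currently tracks the
// hash as pending or queued.
func exampleLookup(pool *TxPool, hash common.Hash) TxStatus {
	if !pool.Has(hash) {
		return TxStatusUnknown
	}
	return pool.Status([]common.Hash{hash})[0]
}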
// Has returns an indicator whether txpool has a transaction cached with the
// given hash.
func (pool *TxPool) Has(hash common.Hash) bool {
	return pool.all.Get(hash) != nil
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer func(t0 time.Time) {
		reorgDurationTimer.Update(time.Since(t0))
	}(time.Now())
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
			pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
			pool.priced.SetBaseFee(pendingBaseFee)
		}
		// Update all accounts to the latest known pending nonce
		nonces := make(map[common.Address]uint64, len(pool.pending))
		for addr, list := range pool.pending {
			highestPending := list.LastElement()
			nonces[addr] = highestPending.Nonce() + 1
		}
		pool.pendingNonces.setAll(nonces)
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
	pool.changesSinceReorg = 0 // Reset change counter
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state so that the lost transactions can be re-added by the user
			} else {
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicators by the next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
	pool.eip2718 = pool.chainconfig.IsBerlin(next)
	pool.eip1559 = pool.chainconfig.IsLondon(next)
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the pool's
// executable/pending queue; any subsequent transactions that become unexecutable
// are moved back into the future queue.
//
// Note: transactions are not marked as removed in the priced list because re-heaping
// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
// to trigger a re-heap in this function.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
1555                 pool.enqueueTx(hash, tx, false, false)
1556             }
1557             pendingGauge.Dec(int64(len(gapped)))
1558             // This might happen in a reorg, so log it to the metering
1559             blockReorgInvalidatedTx.Mark(int64(len(gapped)))
1560         }
1561         // Delete the entire pending entry if it became empty.
1562         if list.Empty() {
1563             delete(pool.pending, addr)
1564         }
1565     }
1566 }
1567 
1568 // addressByHeartbeat is an account address tagged with its last activity timestamp.
1569 type addressByHeartbeat struct {
1570     address   common.Address
1571     heartbeat time.Time
1572 }
1573 
1574 type addressesByHeartbeat []addressByHeartbeat
1575 
1576 func (a addressesByHeartbeat) Len() int           { return len(a) }
1577 func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
1578 func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
1579 
1580 // accountSet is simply a set of addresses to check for existence, and a signer
1581 // capable of deriving addresses from transactions.
1582 type accountSet struct {
1583     accounts map[common.Address]struct{}
1584     signer   types.Signer
1585     cache    *[]common.Address
1586 }
1587 
1588 // newAccountSet creates a new address set with an associated signer for sender
1589 // derivations.
1590 func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
1591     as := &accountSet{
1592         accounts: make(map[common.Address]struct{}),
1593         signer:   signer,
1594     }
1595     for _, addr := range addrs {
1596         as.add(addr)
1597     }
1598     return as
1599 }
1600 
1601 // contains checks if a given address is contained within the set.
1602 func (as *accountSet) contains(addr common.Address) bool {
1603     _, exist := as.accounts[addr]
1604     return exist
1605 }
1606 
1607 func (as *accountSet) empty() bool {
1608     return len(as.accounts) == 0
1609 }
1610 
1611 // containsTx checks if the sender of a given tx is within the set. If the sender
1612 // cannot be derived, this method returns false.
1613 func (as *accountSet) containsTx(tx *types.Transaction) bool {
1614     if addr, err := types.Sender(as.signer, tx); err == nil {
1615         return as.contains(addr)
1616     }
1617     return false
1618 }
1619 
1620 // add inserts a new address into the set to track.
1621 func (as *accountSet) add(addr common.Address) {
1622     as.accounts[addr] = struct{}{}
1623     as.cache = nil
1624 }
1625 
1626 // addTx adds the sender of tx into the set.
1627 func (as *accountSet) addTx(tx *types.Transaction) {
1628     if addr, err := types.Sender(as.signer, tx); err == nil {
1629         as.add(addr)
1630     }
1631 }
1632 
1633 // flatten returns the list of addresses within this set, also caching it for later
1634 // reuse. The returned slice should not be changed!
1635 func (as *accountSet) flatten() []common.Address {
1636     if as.cache == nil {
1637         accounts := make([]common.Address, 0, len(as.accounts))
1638         for account := range as.accounts {
1639             accounts = append(accounts, account)
1640         }
1641         as.cache = &accounts
1642     }
1643     return *as.cache
1644 }
1645 
1646 // merge adds all addresses from the 'other' set into 'as'.
1647 func (as *accountSet) merge(other *accountSet) {
1648     for addr := range other.accounts {
1649         as.accounts[addr] = struct{}{}
1650     }
1651     as.cache = nil
1652 }
1653 
1654 // txLookup is used internally by TxPool to track transactions while allowing
1655 // lookup without mutex contention.
1656 //
1657 // Note, although this type is properly protected against concurrent access, it
1658 // is **not** a type that should ever be mutated or even exposed outside of the
1659 // transaction pool, since its internal state is tightly coupled with the pool's
1660 // internal mechanisms. The sole purpose of the type is to permit out-of-bound
1661 // peeking into the pool in TxPool.Get without having to acquire the widely scoped
1662 // TxPool.mu mutex.
1663 //
1664 // This lookup set also tracks the notion of "local transactions", which is useful
1665 // when building upper-level structures.
1666 type txLookup struct {
1667     slots   int
1668     lock    sync.RWMutex
1669     locals  map[common.Hash]*types.Transaction
1670     remotes map[common.Hash]*types.Transaction
1671 }
1672 
1673 // newTxLookup returns a new txLookup structure.
1674 func newTxLookup() *txLookup {
1675     return &txLookup{
1676         locals:  make(map[common.Hash]*types.Transaction),
1677         remotes: make(map[common.Hash]*types.Transaction),
1678     }
1679 }
1680 
1681 // Range calls f on each key and value present in the map. The callback should
1682 // return false to terminate the iteration early. Callers need to specify which
1683 // set (or both) should be iterated.
1684 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
1685     t.lock.RLock()
1686     defer t.lock.RUnlock()
1687 
1688     if local {
1689         for key, value := range t.locals {
1690             if !f(key, value, true) {
1691                 return
1692             }
1693         }
1694     }
1695     if remote {
1696         for key, value := range t.remotes {
1697             if !f(key, value, false) {
1698                 return
1699             }
1700         }
1701     }
1702 }
1703 
1704 // Get returns a transaction if it exists in the lookup, or nil if not found.
1705 func (t *txLookup) Get(hash common.Hash) *types.Transaction {
1706     t.lock.RLock()
1707     defer t.lock.RUnlock()
1708 
1709     if tx := t.locals[hash]; tx != nil {
1710         return tx
1711     }
1712     return t.remotes[hash]
1713 }
1714 
1715 // GetLocal returns a transaction if it exists in the lookup, or nil if not found.
1716 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction {
1717     t.lock.RLock()
1718     defer t.lock.RUnlock()
1719 
1720     return t.locals[hash]
1721 }
1722 
1723 // GetRemote returns a transaction if it exists in the lookup, or nil if not found.
1724 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction {
1725     t.lock.RLock()
1726     defer t.lock.RUnlock()
1727 
1728     return t.remotes[hash]
1729 }
1730 
1731 // Count returns the current number of transactions in the lookup.
1732 func (t *txLookup) Count() int {
1733     t.lock.RLock()
1734     defer t.lock.RUnlock()
1735 
1736     return len(t.locals) + len(t.remotes)
1737 }
1738 
1739 // LocalCount returns the current number of local transactions in the lookup.
1740 func (t *txLookup) LocalCount() int {
1741     t.lock.RLock()
1742     defer t.lock.RUnlock()
1743 
1744     return len(t.locals)
1745 }
1746 
1747 // RemoteCount returns the current number of remote transactions in the lookup.
1748 func (t *txLookup) RemoteCount() int {
1749     t.lock.RLock()
1750     defer t.lock.RUnlock()
1751 
1752     return len(t.remotes)
1753 }
1754 
1755 // Slots returns the current number of slots used in the lookup.
1756 func (t *txLookup) Slots() int {
1757     t.lock.RLock()
1758     defer t.lock.RUnlock()
1759 
1760     return t.slots
1761 }
1762 
1763 // Add adds a transaction to the lookup.
1764 func (t *txLookup) Add(tx *types.Transaction, local bool) {
1765     t.lock.Lock()
1766     defer t.lock.Unlock()
1767 
1768     t.slots += numSlots(tx)
1769     slotsGauge.Update(int64(t.slots))
1770 
1771     if local {
1772         t.locals[tx.Hash()] = tx
1773     } else {
1774         t.remotes[tx.Hash()] = tx
1775     }
1776 }
1777 
1778 // Remove removes a transaction from the lookup.
1779 func (t *txLookup) Remove(hash common.Hash) {
1780     t.lock.Lock()
1781     defer t.lock.Unlock()
1782 
1783     tx, ok := t.locals[hash]
1784     if !ok {
1785         tx, ok = t.remotes[hash]
1786     }
1787     if !ok {
1788         log.Error("No transaction found to be deleted", "hash", hash)
1789         return
1790     }
1791     t.slots -= numSlots(tx)
1792     slotsGauge.Update(int64(t.slots))
1793 
1794     delete(t.locals, hash)
1795     delete(t.remotes, hash)
1796 }
1797 
1798 // RemoteToLocals migrates the transactions belonging to the given locals to the
1799 // locals set. The assumption is that the locals set is thread-safe.
1800 func (t *txLookup) RemoteToLocals(locals *accountSet) int {
1801     t.lock.Lock()
1802     defer t.lock.Unlock()
1803 
1804     var migrated int
1805     for hash, tx := range t.remotes {
1806         if locals.containsTx(tx) {
1807             t.locals[hash] = tx
1808             delete(t.remotes, hash)
1809             migrated += 1
1810         }
1811     }
1812     return migrated
1813 }
1814 
1815 // RemotesBelowTip finds all remote transactions below the given tip threshold.
1816 func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
1817     found := make(types.Transactions, 0, 128)
1818     t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
1819         if tx.GasTipCapIntCmp(threshold) < 0 {
1820             found = append(found, tx)
1821         }
1822         return true
1823     }, false, true) // Only iterate remotes
1824     return found
1825 }
1826 
1827 // numSlots calculates the number of slots needed for a single transaction.
1828 func numSlots(tx *types.Transaction) int {
1829     return int((tx.Size() + txSlotSize - 1) / txSlotSize)
1830 }
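
// Editorial sketch, not part of tx_pool.go: a much simplified take on the
// fairness idea behind truncatePending above. Accounts holding more than the
// per-account allowance lose one pending transaction per round, largest holder
// first, until the total fits the global cap. The constants and addresses are
// invented; the real pool works on nonce-ordered lists, a priority queue of
// offenders, and exemptions for local accounts.
package main

import (
    "fmt"
    "sort"
)

const (
    globalSlots  = 10 // stand-in for pool.config.GlobalSlots
    accountSlots = 3  // stand-in for pool.config.AccountSlots
)

func main() {
    pending := map[string]int{"0xaaaa": 8, "0xbbbb": 5, "0xcccc": 2}

    total := 0
    for _, n := range pending {
        total += n
    }
    for total > globalSlots {
        // Rank the current offenders, biggest holder first.
        offenders := make([]string, 0, len(pending))
        for addr, n := range pending {
            if n > accountSlots {
                offenders = append(offenders, addr)
            }
        }
        if len(offenders) == 0 {
            break // only well-behaved accounts left, nothing more to trim
        }
        sort.Slice(offenders, func(i, j int) bool { return pending[offenders[i]] > pending[offenders[j]] })

        // Trim one transaction from the largest offender this round.
        pending[offenders[0]]--
        total--
    }
    fmt.Println("after truncation:", pending, "total:", total)
}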
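
// Editorial sketch, not part of tx_pool.go: the last-activity ordering that
// truncateQueue relies on via addressesByHeartbeat. Plain strings stand in for
// common.Address and the timestamps are invented; after sort.Sort the stalest
// account sits first, and the pool then drains the slice from the tail while
// the queue is above its global limit.
package main

import (
    "fmt"
    "sort"
    "time"
)

type beat struct {
    address   string // stand-in for common.Address
    heartbeat time.Time
}

type beatsByHeartbeat []beat

func (a beatsByHeartbeat) Len() int           { return len(a) }
func (a beatsByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a beatsByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

func main() {
    now := time.Now()
    beats := beatsByHeartbeat{
        {"0xaaaa", now.Add(-1 * time.Minute)},
        {"0xbbbb", now.Add(-10 * time.Minute)},
        {"0xcccc", now.Add(-5 * time.Minute)},
    }
    sort.Sort(beats)
    for i, b := range beats {
        fmt.Printf("%d: %s (last active %v ago)\n", i, b.address, now.Sub(b.heartbeat).Round(time.Minute))
    }
}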
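
// Editorial sketch, not part of tx_pool.go: the RWMutex-guarded two-map pattern
// behind txLookup, reduced to string keys and values so it runs on its own.
// Reads take the shared lock, writes take the exclusive one, and Get prefers
// the locals map before falling back to remotes, as txLookup.Get does above.
package main

import (
    "fmt"
    "sync"
)

type lookup struct {
    lock    sync.RWMutex
    locals  map[string]string
    remotes map[string]string
}

func newLookup() *lookup {
    return &lookup{
        locals:  make(map[string]string),
        remotes: make(map[string]string),
    }
}

// Add stores the value in the local or remote map under the write lock.
func (l *lookup) Add(key, value string, local bool) {
    l.lock.Lock()
    defer l.lock.Unlock()
    if local {
        l.locals[key] = value
    } else {
        l.remotes[key] = value
    }
}

// Get checks locals first and falls back to remotes under the read lock.
func (l *lookup) Get(key string) (string, bool) {
    l.lock.RLock()
    defer l.lock.RUnlock()
    if v, ok := l.locals[key]; ok {
        return v, true
    }
    v, ok := l.remotes[key]
    return v, ok
}

func main() {
    l := newLookup()
    l.Add("0x01", "local tx", true)
    l.Add("0x02", "remote tx", false)
    if v, ok := l.Get("0x02"); ok {
        fmt.Println("found:", v)
    }
}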
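
// Editorial sketch, not part of tx_pool.go: the ceiling division used by
// numSlots to charge a transaction one slot per started txSlotSize bytes.
// The constant mirrors the one declared at the top of this file; the sample
// sizes are arbitrary.
package main

import "fmt"

const txSlotSize = 32 * 1024 // same value as the pool constant

// numSlots mirrors the pool's rounding-up division on a raw byte size.
func numSlots(size uint64) int {
    return int((size + txSlotSize - 1) / txSlotSize)
}

func main() {
    for _, size := range []uint64{1, 32 * 1024, 32*1024 + 1, 4 * 32 * 1024} {
        fmt.Printf("size=%6d bytes -> %d slot(s)\n", size, numSlots(size))
    }
    // Output:
    // size=     1 bytes -> 1 slot(s)
    // size= 32768 bytes -> 1 slot(s)
    // size= 32769 bytes -> 2 slot(s)
    // size=131072 bytes -> 4 slot(s)
}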