github.com/ethereum-optimism/optimism/l2geth@v0.0.0-20230612200230-50b04ade19e3/core/tx_pool.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sort"
	"sync"
	"time"

	"github.com/ethereum-optimism/optimism/l2geth/common"
	"github.com/ethereum-optimism/optimism/l2geth/common/prque"
	"github.com/ethereum-optimism/optimism/l2geth/core/state"
	"github.com/ethereum-optimism/optimism/l2geth/core/types"
	"github.com/ethereum-optimism/optimism/l2geth/event"
	"github.com/ethereum-optimism/optimism/l2geth/log"
	"github.com/ethereum-optimism/optimism/l2geth/metrics"
	"github.com/ethereum-optimism/optimism/l2geth/params"
	"github.com/ethereum-optimism/optimism/l2geth/rollup/rcfg"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 2 * txSlotSize // 64KB, don't bump without EIP-2464 support
)

var (
	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrNonceTooLow is returned if the nonce of a transaction is lower than the
	// one present in the local chain.
	ErrNonceTooLow = errors.New("nonce too low")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrInsufficientFunds is returned if the total cost of executing a transaction
	// is higher than the balance of the user's account.
	ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")

	// ErrIntrinsicGas is returned if the transaction is specified to use less gas
	// than required to start the invocation.
	ErrIntrinsicGas = errors.New("intrinsic gas too low")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DoS protection.
	ErrOversizedData = errors.New("oversized data")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

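// A minimal sketch of a test double satisfying blockChain (hedged: testChain
// and its fields are hypothetical and not part of this file; a real stub would
// need a populated state.StateDB behind it):
//
//	type testChain struct {
//		head     *types.Block
//		statedb  *state.StateDB
//		headFeed event.Feed
//	}
//
//	func (c *testChain) CurrentBlock() *types.Block                  { return c.head }
//	func (c *testChain) GetBlock(common.Hash, uint64) *types.Block   { return c.head }
//	func (c *testChain) StateAt(common.Hash) (*state.StateDB, error) { return c.statedb, nil }
//	func (c *testChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
//		return c.headFeed.Subscribe(ch)
//	}
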
// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
}

// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096,
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}

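// Usage sketch: sanitize repairs out-of-range values instead of failing, so a
// caller can tweak the defaults freely (illustrative only):
//
//	cfg := DefaultTxPoolConfig
//	cfg.PriceLimit = 0      // invalid: below the minimum of 1
//	cfg = (&cfg).sanitize() // logs a warning and restores PriceLimit to 1
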
// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex

	istanbul bool // Fork indicator whether we are in the istanbul stage.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transactions to exempt from eviction rules
	journal *txJournal  // Journal of local transactions to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
}

type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.NewEIP155Signer(chainconfig.ChainID),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transaction journaling is enabled, load the journal from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					for _, tx := range pool.queue[addr].Flatten() {
						pool.removeTx(tx.Hash(), true)
					}
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

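// Lifecycle sketch (hedged: chainCfg and chain stand for an existing
// *params.ChainConfig and a blockChain implementation):
//
//	pool := NewTxPool(DefaultTxPoolConfig, chainCfg, chain)
//	defer pool.Stop() // unsubscribes, waits for both loops, closes the journal
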
// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}

// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return new(big.Int).Set(pool.gasPrice)
}

// SetGasPrice updates the minimum price required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *TxPool) SetGasPrice(price *big.Int) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pool.gasPrice = price
	for _, tx := range pool.priced.Cap(price, pool.locals) {
		pool.removeTx(tx.Hash(), false)
	}
	log.Info("Transaction pool price threshold updated", "price", price)
}

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (pool *TxPool) Nonce(addr common.Address) uint64 {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.pendingNonces.get(addr)
}

// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) Stats() (int, int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.stats()
}

// stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) stats() (int, int) {
	pending := 0
	for _, list := range pool.pending {
		pending += list.Len()
	}
	queued := 0
	for _, list := range pool.queue {
		queued += list.Len()
	}
	return pending, queued
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address]types.Transactions)
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	return pending, nil
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

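// Consuming pool events (sketch): NewTxsEvent carries batches of newly
// promoted transactions, and the subscription should be released when done.
//
//	ch := make(chan NewTxsEvent, 16)
//	sub := pool.SubscribeNewTxsEvent(ch)
//	defer sub.Unsubscribe()
//	for ev := range ch {
//		log.Info("transactions promoted", "count", len(ev.Txs))
//	}
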
// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// ValidateTx validates a transaction without applying the pool's local flag.
func (pool *TxPool) ValidateTx(tx *types.Transaction) error {
	return pool.validateTx(tx, false)
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Reject transactions over the defined size to prevent DoS attacks
	if uint64(tx.Size()) > txMaxSize {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}

	// Ensure the transaction doesn't exceed the current block gas limit.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}

	// Make sure the transaction is signed properly
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price
	local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network
	if !local && pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if rcfg.UsingOVM {
		if pool.currentState.GetNonce(from) > tx.Nonce() {
			return ErrNonceTooLow
		} else if pool.currentState.GetNonce(from) < tx.Nonce() {
			return ErrNonceTooHigh
		}
	} else {
		if pool.currentState.GetNonce(from) > tx.Nonce() {
			return ErrNonceTooLow
		}
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if !rcfg.UsingOVM {
		// This check is done in SyncService.verifyFee
		if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
			return ErrInsufficientFunds
		}
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}

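// Under the OVM (rcfg.UsingOVM) nonces must match the state nonce exactly, so
// gapped transactions are rejected up front rather than queued. Expected
// outcomes as a sketch (txWithNonce is hypothetical; all other checks are
// assumed to pass):
//
//	// state nonce for the sender == 5
//	pool.validateTx(txWithNonce(4), false) // ErrNonceTooLow
//	pool.validateTx(txWithNonce(6), false) // ErrNonceTooHigh (OVM only)
//	pool.validateTx(txWithNonce(5), false) // nil
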
// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// whitelisted, preventing any associated transaction from being dropped out of the pool
// due to pricing constraints.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, fmt.Errorf("known transaction: %x", hash)
	}

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, local); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !local && pool.priced.Underpriced(tx, pool.locals) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// New transaction is better than our worst ones, make room for it
		drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals)
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx)
		pool.priced.Put(tx)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local {
		if !pool.locals.contains(from) {
			log.Info("Setting new local account", "address", from)
			pool.locals.add(from)
		}
	}
	if local || pool.locals.contains(from) {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older one was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)

		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)

		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Failsafe to work around direct pending inserts (tests)
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.beats[addr] = time.Now()
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

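// The returned error slice is index-aligned with the input batch (sketch):
//
//	errs := pool.AddLocals(txs)
//	for i, err := range errs {
//		if err != nil {
//			log.Warn("transaction rejected", "hash", txs[i].Hash(), "err", err)
//		}
//	}
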
// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}

// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}

// addRemoteSync is like AddRemotes with a single transaction, but waits for pool
// reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}

// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}

// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = fmt.Errorf("known transaction: %x", tx.Hash())
			knownTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}
	// Cache senders in transactions before obtaining lock (pool.signer is immutable)
	for _, tx := range news {
		types.Sender(pool.signer, tx)
	}
	// Process all the new transactions and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included into a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
				delete(pool.beats, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				pool.enqueueTx(tx.Hash(), tx)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

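// Status lookup sketch: results are index-aligned with the input hashes, and
// TxStatusUnknown also covers transactions mined between the lookup and the
// lock acquisition (h1 and h2 are hypothetical hashes):
//
//	statuses := pool.Status([]common.Hash{h1, h2})
//	// statuses[i] is TxStatusPending, TxStatusQueued, or TxStatusUnknown
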
// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them to be run via requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: if one is already pending, just update its new head.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: if one is already pending, merge the address sets.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

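// Request/done handshake sketch: the request helpers hand back the channel for
// the next scheduled run, which runReorg closes on completion, so callers can
// block until their request has actually been applied:
//
//	done := pool.requestReset(oldHead, newHead)
//	<-done // the reset (merged with any concurrent requests) has run
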
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil {
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = promoteAddrs[:0]
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	// Update all accounts to the latest known pending nonce
	for addr, list := range pool.pending {
		txs := list.Flatten() // Heavy but will be cached and is needed by the miner anyway
		pool.pendingNonces.set(addr, txs[len(txs)-1].Nonce()+1)
	}
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{Txs: txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add.
				if newNum < oldNum {
					// If the reorg ended up on a lower number, it's indicative of setHead being the cause
					log.Debug("Skipping transaction reset caused by setHead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				} else {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				}
				return
			}
			for rem.NumberU64() > add.NumberU64() {
				discarded = append(discarded, rem.Transactions()...)
				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
					return
				}
			}
			for add.NumberU64() > rem.NumberU64() {
				included = append(included, add.Transactions()...)
				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
					return
				}
			}
			for rem.Hash() != add.Hash() {
				discarded = append(discarded, rem.Transactions()...)
				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
					return
				}
				included = append(included, add.Transactions()...)
				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
					return
				}
			}
			reinject = types.TxDifference(discarded, included)
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicators for the next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non-existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old queued transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed unpayable queued transaction", "hash", hash)
		}
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				log.Trace("Promoting queued transaction", "hash", hash)
				promoted = append(promoted, tx)
			}
		}
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the pool's
// executable/pending queue; any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pool.priced.Removed(len(olds) + len(drops))
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)
			pool.enqueueTx(hash, tx)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)
				pool.enqueueTx(hash, tx)
			}
			pendingGauge.Dec(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
			delete(pool.beats, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		return as.contains(addr)
	}
	return false
}

// add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) {
	as.accounts[addr] = struct{}{}
	as.cache = nil
}

// addTx adds the sender of tx into the set.
func (as *accountSet) addTx(tx *types.Transaction) {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		as.add(addr)
	}
}

// flatten returns the list of addresses within this set, also caching it for later
// reuse. The returned slice should not be changed!
func (as *accountSet) flatten() []common.Address {
	if as.cache == nil {
		accounts := make([]common.Address, 0, len(as.accounts))
		for account := range as.accounts {
			accounts = append(accounts, account)
		}
		as.cache = &accounts
	}
	return *as.cache
}

// merge adds all addresses from the 'other' set into 'as'.
func (as *accountSet) merge(other *accountSet) {
	for addr := range other.accounts {
		as.accounts[addr] = struct{}{}
	}
	as.cache = nil
}

// txLookup is used internally by TxPool to track transactions while allowing lookup without
// mutex contention.
//
// Note, although this type is properly protected against concurrent access, it
// is **not** a type that should ever be mutated or even exposed outside of the
// transaction pool, since its internal state is tightly coupled with the pool's
// internal mechanisms. The sole purpose of the type is to permit out-of-bound
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
type txLookup struct {
	all   map[common.Hash]*types.Transaction
	slots int
	lock  sync.RWMutex
}

// newTxLookup returns a new txLookup structure.
func newTxLookup() *txLookup {
	return &txLookup{
		all: make(map[common.Hash]*types.Transaction),
	}
}

// Range calls f on each key and value present in the map.
func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	for key, value := range t.all {
		if !f(key, value) {
			break
		}
	}
}

// Get returns a transaction if it exists in the lookup, or nil if not found.
func (t *txLookup) Get(hash common.Hash) *types.Transaction {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.all[hash]
}

// Count returns the current number of items in the lookup.
func (t *txLookup) Count() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return len(t.all)
}

// Slots returns the current number of slots used in the lookup.
func (t *txLookup) Slots() int {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.slots
}

// Add adds a transaction to the lookup.
func (t *txLookup) Add(tx *types.Transaction) {
	t.lock.Lock()
	defer t.lock.Unlock()

	t.slots += numSlots(tx)
	slotsGauge.Update(int64(t.slots))

	t.all[tx.Hash()] = tx
}

// Remove removes a transaction from the lookup.
func (t *txLookup) Remove(hash common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	t.slots -= numSlots(t.all[hash])
	slotsGauge.Update(int64(t.slots))

	delete(t.all, hash)
}

// numSlots calculates the number of slots needed for a single transaction.
func numSlots(tx *types.Transaction) int {
	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}
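
// For example, a 40 KiB transaction occupies two slots:
//
//	numSlots(tx) // int((40960 + 32768 - 1) / 32768) == 2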