// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * txSlotSize // 128KB
)

var (
	// ErrAlreadyKnown is returned if the transaction is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
)
// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

// Possible transaction statuses. Note that Status() in this file only ever
// reports Unknown, Queued or Pending; whether anything assigns Included is
// not visible here — confirm against callers before relying on it.
const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
}
// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096,
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable. It operates on a copy, so the caller's config is
// never mutated; each out-of-range field is logged and reset to its default.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}

// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex // protects all mutable pool state below

	istanbul bool // Fork indicator whether we are in the istanbul stage.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
}

// txpoolResetRequest carries the old and new chain head handed to the reorg
// loop so it can rewind/forward the pool's view of the world.
type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.NewEIP155Signer(chainconfig.ChainID),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown: the head subscription erroring out is the signal
		// that the blockchain is going away, so tear down the reorg loop too.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			// Only log when something actually changed to keep logs quiet.
			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain; this makes
	// chainHeadSub.Err() fire, which shuts down loop and the reorg loop.
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}
405 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { 406 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 407 } 408 409 // GasPrice returns the current gas price enforced by the transaction pool. 410 func (pool *TxPool) GasPrice() *big.Int { 411 pool.mu.RLock() 412 defer pool.mu.RUnlock() 413 414 return new(big.Int).Set(pool.gasPrice) 415 } 416 417 // SetGasPrice updates the minimum price required by the transaction pool for a 418 // new transaction, and drops all transactions below this threshold. 419 func (pool *TxPool) SetGasPrice(price *big.Int) { 420 pool.mu.Lock() 421 defer pool.mu.Unlock() 422 423 pool.gasPrice = price 424 for _, tx := range pool.priced.Cap(price, pool.locals) { 425 pool.removeTx(tx.Hash(), false) 426 } 427 log.Info("Transaction pool price threshold updated", "price", price) 428 } 429 430 // Nonce returns the next nonce of an account, with all transactions executable 431 // by the pool already applied on top. 432 func (pool *TxPool) Nonce(addr common.Address) uint64 { 433 pool.mu.RLock() 434 defer pool.mu.RUnlock() 435 436 return pool.pendingNonces.get(addr) 437 } 438 439 // Stats retrieves the current pool stats, namely the number of pending and the 440 // number of queued (non-executable) transactions. 441 func (pool *TxPool) Stats() (int, int) { 442 pool.mu.RLock() 443 defer pool.mu.RUnlock() 444 445 return pool.stats() 446 } 447 448 // stats retrieves the current pool stats, namely the number of pending and the 449 // number of queued (non-executable) transactions. 450 func (pool *TxPool) stats() (int, int) { 451 pending := 0 452 for _, list := range pool.pending { 453 pending += list.Len() 454 } 455 queued := 0 456 for _, list := range pool.queue { 457 queued += list.Len() 458 } 459 return pending, queued 460 } 461 462 // Content retrieves the data content of the transaction pool, returning all the 463 // pending as well as queued transactions, grouped by account and sorted by nonce. 
// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
//
// NOTE(review): this read API takes the full write lock rather than RLock —
// presumably because Flatten mutates the list's internal cache; confirm in txList.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address]types.Transactions)
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	return pending, nil
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
// The order of the checks matters: it determines which error a caller sees when
// several apply, and the cheap structural checks run before signature recovery.
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Reject transactions over defined size to prevent DOS attacks
	if uint64(tx.Size()) > txMaxSize {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Make sure the transaction is signed properly
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price
	local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network
	if !local && tx.GasPriceIntCmp(pool.gasPrice) < 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}
// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// whitelisted, preventing any associated transaction from being dropped out of the pool
// due to pricing constraints.
//
// Note: the caller must hold the pool lock — this method mutates pending, queue,
// all, priced and beats without taking it.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, local); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !local && pool.priced.Underpriced(tx, pool.locals) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// New transaction is better than our worse ones, make room for it.
		// Discard enough slots to fit the incoming transaction's slot count.
		drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals)
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx)
		pool.priced.Put(tx)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local {
		if !pool.locals.contains(from) {
			log.Info("Setting new local account", "address", from)
			pool.locals.add(from)
		}
	}
	if local || pool.locals.contains(from) {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}
// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// Register the tx in the global lookup/priced index if not already there.
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	// If we never record the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Failsafe to work around direct pending inserts (tests)
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}
// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}

// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}

// addRemoteSync is like AddRemotes with a single transaction, but waits for pool
// reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}

// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}

// addTxs attempts to queue a batch of transactions if they are valid.
// The returned error slice is parallel to txs: errs[i] corresponds to txs[i].
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = ErrAlreadyKnown
			knownTxMeter.Mark(1)
			continue
		}
		// Exclude transactions with invalid signatures as soon as
		// possible and cache senders in transactions before
		// obtaining lock
		_, err := types.Sender(pool.signer, tx)
		if err != nil {
			errs[i] = ErrInvalidSender
			invalidTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}

	// Process all the new transaction and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	// newErrs is parallel to news; scatter each entry back into the first
	// still-empty slot of errs, which is the position of the matching input tx.
	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}
832 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 833 dirty := newAccountSet(pool.signer) 834 errs := make([]error, len(txs)) 835 for i, tx := range txs { 836 replaced, err := pool.add(tx, local) 837 errs[i] = err 838 if err == nil && !replaced { 839 dirty.addTx(tx) 840 } 841 } 842 validTxMeter.Mark(int64(len(dirty.accounts))) 843 return errs, dirty 844 } 845 846 // Status returns the status (unknown/pending/queued) of a batch of transactions 847 // identified by their hashes. 848 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 849 status := make([]TxStatus, len(hashes)) 850 for i, hash := range hashes { 851 tx := pool.Get(hash) 852 if tx == nil { 853 continue 854 } 855 from, _ := types.Sender(pool.signer, tx) // already validated 856 pool.mu.RLock() 857 if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 858 status[i] = TxStatusPending 859 } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 860 status[i] = TxStatusQueued 861 } 862 // implicit else: the tx may have been included into a block between 863 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 864 pool.mu.RUnlock() 865 } 866 return status 867 } 868 869 // Get returns a transaction if it is contained in the pool and nil otherwise. 870 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 871 return pool.all.Get(hash) 872 } 873 874 // Has returns an indicator whether txpool has a transaction cached with the 875 // given hash. 876 func (pool *TxPool) Has(hash common.Hash) bool { 877 return pool.all.Get(hash) != nil 878 } 879 880 // removeTx removes a single transaction from the queue, moving all subsequent 881 // transactions back to the future queue. 
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		// Only notify the price heap when the removal did not originate from it.
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions (gapped by this removal)
			for _, tx := range invalids {
				pool.enqueueTx(tx.Hash(), tx)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter (the removed tx plus all demoted ones)
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		// Pool is shutting down: the shutdown channel is closed, so any
		// waiter on the returned channel unblocks immediately.
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			// Hand the caller the done channel of the NEXT run, which will
			// include this request.
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses; this overwrites any dirty
		// set flattened above.
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	// Update all accounts to the latest known pending nonce
	for addr, list := range pool.pending {
		highestPending := list.LastElement()
		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
	}
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions (outside the pool lock)
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum < oldNum {
					// If the reorg ended up on a lower number, it's indicative of setHead being the cause
					log.Debug("Skipping transaction reset caused by setHead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				} else {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				}
				return
			}
			// Walk the longer side down until both chains are at the same height,
			// collecting the transactions that fall off (or come in) on the way.
			for rem.NumberU64() > add.NumberU64() {
				discarded = append(discarded, rem.Transactions()...)
				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
					return
				}
			}
			for add.NumberU64() > rem.NumberU64() {
				included = append(included, add.Transactions()...)
				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
					return
				}
			}
			// Same height: step both sides back in lockstep until the common ancestor.
			for rem.Hash() != add.Hash() {
				discarded = append(discarded, rem.Transactions()...)
				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
					return
				}
				included = append(included, add.Transactions()...)
				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
					return
				}
			}
			// Reinject only what was dropped and not re-included on the new chain.
			reinject = types.TxDifference(discarded, included)
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicator by next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		// NOTE: len(promoted) is cumulative across accounts at this point.
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders:
			// the size of the most recently popped (i.e. smallest so far) one.
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					// Cap drops exactly one transaction (the highest nonce).
					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					// One transaction was capped from this account.
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain.
	// Accounts are consumed from the back of the sorted slice, i.e. the one
	// with the most recent heartbeat first.
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the pools
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pool.priced.Removed(len(olds) + len(drops))
		pendingNofundsMeter.Mark(int64(len(drops)))

		// Transactions gapped by a drop are not removed, only demoted back
		// to the future queue.
		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)
			pool.enqueueTx(hash, tx)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)
				pool.enqueueTx(hash, tx)
			}
			pendingGauge.Dec(int64(len(gapped)))
			// This might happen in a reorg, so log it to the metering
			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

// addressesByHeartbeat implements sort.Interface, ordering addresses by
// ascending heartbeat (least recently active first).
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address // lazily built flat view, invalidated on mutation
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// empty reports whether the set contains no addresses.
func (as *accountSet) empty() bool {
	return len(as.accounts) == 0
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		return as.contains(addr)
	}
	return false
}

// add inserts a new address into the set to track.
1489 func (as *accountSet) add(addr common.Address) { 1490 as.accounts[addr] = struct{}{} 1491 as.cache = nil 1492 } 1493 1494 // addTx adds the sender of tx into the set. 1495 func (as *accountSet) addTx(tx *types.Transaction) { 1496 if addr, err := types.Sender(as.signer, tx); err == nil { 1497 as.add(addr) 1498 } 1499 } 1500 1501 // flatten returns the list of addresses within this set, also caching it for later 1502 // reuse. The returned slice should not be changed! 1503 func (as *accountSet) flatten() []common.Address { 1504 if as.cache == nil { 1505 accounts := make([]common.Address, 0, len(as.accounts)) 1506 for account := range as.accounts { 1507 accounts = append(accounts, account) 1508 } 1509 as.cache = &accounts 1510 } 1511 return *as.cache 1512 } 1513 1514 // merge adds all addresses from the 'other' set into 'as'. 1515 func (as *accountSet) merge(other *accountSet) { 1516 for addr := range other.accounts { 1517 as.accounts[addr] = struct{}{} 1518 } 1519 as.cache = nil 1520 } 1521 1522 // txLookup is used internally by TxPool to track transactions while allowing lookup without 1523 // mutex contention. 1524 // 1525 // Note, although this type is properly protected against concurrent access, it 1526 // is **not** a type that should ever be mutated or even exposed outside of the 1527 // transaction pool, since its internal state is tightly coupled with the pools 1528 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1529 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1530 // TxPool.mu mutex. 1531 type txLookup struct { 1532 all map[common.Hash]*types.Transaction 1533 slots int 1534 lock sync.RWMutex 1535 } 1536 1537 // newTxLookup returns a new txLookup structure. 1538 func newTxLookup() *txLookup { 1539 return &txLookup{ 1540 all: make(map[common.Hash]*types.Transaction), 1541 } 1542 } 1543 1544 // Range calls f on each key and value present in the map. 
1545 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) { 1546 t.lock.RLock() 1547 defer t.lock.RUnlock() 1548 1549 for key, value := range t.all { 1550 if !f(key, value) { 1551 break 1552 } 1553 } 1554 } 1555 1556 // Get returns a transaction if it exists in the lookup, or nil if not found. 1557 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1558 t.lock.RLock() 1559 defer t.lock.RUnlock() 1560 1561 return t.all[hash] 1562 } 1563 1564 // Count returns the current number of items in the lookup. 1565 func (t *txLookup) Count() int { 1566 t.lock.RLock() 1567 defer t.lock.RUnlock() 1568 1569 return len(t.all) 1570 } 1571 1572 // Slots returns the current number of slots used in the lookup. 1573 func (t *txLookup) Slots() int { 1574 t.lock.RLock() 1575 defer t.lock.RUnlock() 1576 1577 return t.slots 1578 } 1579 1580 // Add adds a transaction to the lookup. 1581 func (t *txLookup) Add(tx *types.Transaction) { 1582 t.lock.Lock() 1583 defer t.lock.Unlock() 1584 1585 t.slots += numSlots(tx) 1586 slotsGauge.Update(int64(t.slots)) 1587 1588 t.all[tx.Hash()] = tx 1589 } 1590 1591 // Remove removes a transaction from the lookup. 1592 func (t *txLookup) Remove(hash common.Hash) { 1593 t.lock.Lock() 1594 defer t.lock.Unlock() 1595 1596 t.slots -= numSlots(t.all[hash]) 1597 slotsGauge.Update(int64(t.slots)) 1598 1599 delete(t.all, hash) 1600 } 1601 1602 // numSlots calculates the number of slots needed for a single transaction. 1603 func numSlots(tx *types.Transaction) int { 1604 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1605 }