// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"time"

	"github.com/phillinzzz/newBsc/common"
	"github.com/phillinzzz/newBsc/common/prque"
	"github.com/phillinzzz/newBsc/core/state"
	"github.com/phillinzzz/newBsc/core/types"
	"github.com/phillinzzz/newBsc/event"
	"github.com/phillinzzz/newBsc/log"
	"github.com/phillinzzz/newBsc/metrics"
	"github.com/phillinzzz/newBsc/params"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * txSlotSize // 128KB
)

var (
	// ErrAlreadyKnown is returned if the transaction is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
}

// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096,
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable. It operates on (and returns) a copy, so the
// caller's original configuration is never mutated.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	// A Lifetime below one nanosecond would evict everything immediately.
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}

// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex // Guards the mutable pool state below

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
}

// txpoolResetRequest carries the old and new heads across which the pool
// state must be reset (sent to scheduleReorgLoop via reqResetCh).
type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.LatestSigner(chainconfig),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk.
	// Journal loading goes through AddLocals, which is why scheduleReorgLoop
	// must already be running at this point.
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown: the head subscription erroring out is the signal
		// to stop, propagated to scheduleReorgLoop via reorgShutdownCh.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain; this makes
	// chainHeadSub.Err() fire, which shuts down loop and scheduleReorgLoop.
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}

// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	// Return a copy so callers cannot mutate the pool's threshold.
	return new(big.Int).Set(pool.gasPrice)
}

// SetGasPrice updates the minimum price required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *TxPool) SetGasPrice(price *big.Int) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	// NOTE(review): the caller's *big.Int is retained as-is (no defensive
	// copy) — callers must not mutate it after this call.
	pool.gasPrice = price
	for _, tx := range pool.priced.Cap(price) {
		pool.removeTx(tx.Hash(), false)
	}
	log.Info("Transaction pool price threshold updated", "price", price)
}

// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (pool *TxPool) Nonce(addr common.Address) uint64 {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.pendingNonces.get(addr)
}

// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) Stats() (int, int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	return pool.stats()
}

// stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
// The caller must hold at least a read lock on pool.mu.
func (pool *TxPool) stats() (int, int) {
	pending := 0
	for _, list := range pool.pending {
		pending += len(list.txs.items)
	}
	queued := 0
	for _, list := range pool.queue {
		queued += len(list.txs.items)
	}
	return pending, queued
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address]types.Transactions)
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	return pending, nil
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Accept only legacy transactions until EIP-2718/2930 activates.
	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
		return ErrTxTypeNotSupported
	}
	// Reject transactions over defined size to prevent DOS attacks
	if uint64(tx.Size()) > txMaxSize {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Make sure the transaction is signed properly.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price
	if !local && tx.GasPriceIntCmp(pool.gasPrice) < 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// whitelisted, preventing any associated transaction from being dropped out of the pool
// due to pricing constraints.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		//log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// Make the local flag. If it's from local source or it's from the network but
	// the sender is marked as local previously, treat it as the local transaction.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		//log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Count()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			//log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// New transaction is better than our worst ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise if we can't make enough room for new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case, we still can't make the room for the new remote one.
		if !isLocal && !success {
			//log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			//log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		//log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		//log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	//log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in lookup set but it's expected to be there,
	// show the error log. (addAll == false is only used for internal shuffles
	// of transactions that are already tracked.)
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never record the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid.
If the 773 // senders are not among the locally tracked ones, full pricing constraints will apply. 774 // 775 // This method is used to add transactions from the p2p network and does not wait for pool 776 // reorganization and internal event propagation. 777 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 778 return pool.addTxs(txs, false, false) 779 } 780 781 // This is like AddRemotes, but waits for pool reorganization. Tests use this method. 782 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 783 return pool.addTxs(txs, false, true) 784 } 785 786 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 787 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 788 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 789 return errs[0] 790 } 791 792 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 793 // wrapper around AddRemotes. 794 // 795 // Deprecated: use AddRemotes 796 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 797 errs := pool.AddRemotes([]*types.Transaction{tx}) 798 return errs[0] 799 } 800 801 // addTxs attempts to queue a batch of transactions if they are valid. 
802 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { 803 // Filter out known ones without obtaining the pool lock or recovering signatures 804 var ( 805 errs = make([]error, len(txs)) 806 news = make([]*types.Transaction, 0, len(txs)) 807 ) 808 for i, tx := range txs { 809 // If the transaction is known, pre-set the error slot 810 if pool.all.Get(tx.Hash()) != nil { 811 errs[i] = ErrAlreadyKnown 812 knownTxMeter.Mark(1) 813 continue 814 } 815 // Exclude transactions with invalid signatures as soon as 816 // possible and cache senders in transactions before 817 // obtaining lock 818 _, err := types.Sender(pool.signer, tx) 819 if err != nil { 820 errs[i] = ErrInvalidSender 821 invalidTxMeter.Mark(1) 822 continue 823 } 824 // Accumulate all unknown transactions for deeper processing 825 news = append(news, tx) 826 } 827 if len(news) == 0 { 828 return errs 829 } 830 831 // Process all the new transaction and merge any errors into the original slice 832 pool.mu.Lock() 833 newErrs, dirtyAddrs := pool.addTxsLocked(news, local) 834 pool.mu.Unlock() 835 836 var nilSlot = 0 837 for _, err := range newErrs { 838 for errs[nilSlot] != nil { 839 nilSlot++ 840 } 841 errs[nilSlot] = err 842 nilSlot++ 843 } 844 // Reorg the pool internals if needed and return 845 done := pool.requestPromoteExecutables(dirtyAddrs) 846 if sync { 847 <-done 848 } 849 return errs 850 } 851 852 // addTxsLocked attempts to queue a batch of transactions if they are valid. 853 // The transaction pool lock must be held. 
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		// Only brand-new insertions dirty the sender's account; a replacement
		// keeps the same nonce slot occupied, so no promotion check is needed.
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		// The lock is taken per hash (not once for the whole batch) to keep
		// contention with the pool's write path short.
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included into a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}

// Has returns an indicator whether txpool has a transaction cached with the
// given hash.
func (pool *TxPool) Has(hash common.Hash) bool {
	return pool.all.Get(hash) != nil
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions (higher nonces that lost
			// their executable predecessor) back into the future queue.
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		// The scheduler replies with the done channel of the reorg run that
		// will service this request.
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		// The scheduler replies with the done channel of the reorg run that
		// will service this request.
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed. At most one runReorg is
		// active at a time; further requests accumulate into reset,
		// dirtyAccounts and queuedEvents until the current run finishes.
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if len(events[addr].items) == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	// Update all accounts to the latest known pending nonce
	for addr, list := range pool.pending {
		highestPending := list.LastElement()
		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
	}
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions. Done outside the pool
	// lock so slow subscribers cannot block the pool.
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state s.th. the lost transactions can be readded by the user
			} else {
				// Walk both chains back to the common ancestor, collecting the
				// transactions of each discarded (old) and included (new) block.
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Transactions dropped by the reorg but not re-included on the
				// new chain are the ones to reinject.
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicator by next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
	pool.eip2718 = pool.chainconfig.IsBerlin(next)
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit (locals are exempt from
		// the per-account queue cap)
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(len(list.txs.items))
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(len(list.txs.items)) > pool.config.AccountSlots {
			spammers.Push(addr, int64(len(list.txs.items)))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := len(pool.pending[offender.(common.Address)].txs.items)

			// Iteratively reduce all offenders until below limit or threshold reached.
			// Each pass trims one transaction (Cap to len-1) off every prior offender.
			for pending > pool.config.GlobalSlots && len(pool.pending[offenders[len(offenders)-2]].txs.items) > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(len(list.txs.items) - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(len(pool.pending[offenders[len(offenders)-1]].txs.items)) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(len(list.txs.items) - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(len(list.txs.items))
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain.
	// Accounts are consumed from the end of the sorted slice, i.e. the one with
	// the stalest heartbeat is evicted first.
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(len(list.txs.items)); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the pools
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pool.priced.Removed(len(olds) + len(drops))
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if len(list.txs.items) > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			pendingGauge.Dec(int64(len(gapped)))
			// This might happen in a reorg, so log it to the metering
			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

// addressesByHeartbeat implements sort.Interface, ordering addresses by their
// last activity time (oldest first).
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address // lazily built by flatten, invalidated on mutation
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// empty reports whether the set contains no addresses.
func (as *accountSet) empty() bool {
	return len(as.accounts) == 0
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
1510 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1511 if addr, err := types.Sender(as.signer, tx); err == nil { 1512 return as.contains(addr) 1513 } 1514 return false 1515 } 1516 1517 // add inserts a new address into the set to track. 1518 func (as *accountSet) add(addr common.Address) { 1519 as.accounts[addr] = struct{}{} 1520 as.cache = nil 1521 } 1522 1523 // addTx adds the sender of tx into the set. 1524 func (as *accountSet) addTx(tx *types.Transaction) { 1525 if addr, err := types.Sender(as.signer, tx); err == nil { 1526 as.add(addr) 1527 } 1528 } 1529 1530 // flatten returns the list of addresses within this set, also caching it for later 1531 // reuse. The returned slice should not be changed! 1532 func (as *accountSet) flatten() []common.Address { 1533 if as.cache == nil { 1534 accounts := make([]common.Address, 0, len(as.accounts)) 1535 for account := range as.accounts { 1536 accounts = append(accounts, account) 1537 } 1538 as.cache = &accounts 1539 } 1540 return *as.cache 1541 } 1542 1543 // merge adds all addresses from the 'other' set into 'as'. 1544 func (as *accountSet) merge(other *accountSet) { 1545 for addr := range other.accounts { 1546 as.accounts[addr] = struct{}{} 1547 } 1548 as.cache = nil 1549 } 1550 1551 // txLookup is used internally by TxPool to track transactions while allowing 1552 // lookup without mutex contention. 1553 // 1554 // Note, although this type is properly protected against concurrent access, it 1555 // is **not** a type that should ever be mutated or even exposed outside of the 1556 // transaction pool, since its internal state is tightly coupled with the pools 1557 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1558 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1559 // TxPool.mu mutex. 1560 // 1561 // This lookup set combines the notion of "local transactions", which is useful 1562 // to build upper-level structure. 
1563 type txLookup struct { 1564 slots int 1565 lock sync.RWMutex 1566 locals map[common.Hash]*types.Transaction 1567 remotes map[common.Hash]*types.Transaction 1568 } 1569 1570 // newTxLookup returns a new txLookup structure. 1571 func newTxLookup() *txLookup { 1572 return &txLookup{ 1573 locals: make(map[common.Hash]*types.Transaction), 1574 remotes: make(map[common.Hash]*types.Transaction), 1575 } 1576 } 1577 1578 // Range calls f on each key and value present in the map. The callback passed 1579 // should return the indicator whether the iteration needs to be continued. 1580 // Callers need to specify which set (or both) to be iterated. 1581 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1582 t.lock.RLock() 1583 defer t.lock.RUnlock() 1584 1585 if local { 1586 for key, value := range t.locals { 1587 if !f(key, value, true) { 1588 return 1589 } 1590 } 1591 } 1592 if remote { 1593 for key, value := range t.remotes { 1594 if !f(key, value, false) { 1595 return 1596 } 1597 } 1598 } 1599 } 1600 1601 // Get returns a transaction if it exists in the lookup, or nil if not found. 1602 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1603 t.lock.RLock() 1604 defer t.lock.RUnlock() 1605 1606 if tx := t.locals[hash]; tx != nil { 1607 return tx 1608 } 1609 return t.remotes[hash] 1610 } 1611 1612 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1613 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { 1614 t.lock.RLock() 1615 defer t.lock.RUnlock() 1616 1617 return t.locals[hash] 1618 } 1619 1620 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1621 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { 1622 t.lock.RLock() 1623 defer t.lock.RUnlock() 1624 1625 return t.remotes[hash] 1626 } 1627 1628 // Count returns the current number of transactions in the lookup. 
1629 func (t *txLookup) Count() int { 1630 t.lock.RLock() 1631 defer t.lock.RUnlock() 1632 1633 return len(t.locals) + len(t.remotes) 1634 } 1635 1636 // LocalCount returns the current number of local transactions in the lookup. 1637 func (t *txLookup) LocalCount() int { 1638 t.lock.RLock() 1639 defer t.lock.RUnlock() 1640 1641 return len(t.locals) 1642 } 1643 1644 // RemoteCount returns the current number of remote transactions in the lookup. 1645 func (t *txLookup) RemoteCount() int { 1646 t.lock.RLock() 1647 defer t.lock.RUnlock() 1648 1649 return len(t.remotes) 1650 } 1651 1652 // Slots returns the current number of slots used in the lookup. 1653 func (t *txLookup) Slots() int { 1654 t.lock.RLock() 1655 defer t.lock.RUnlock() 1656 1657 return t.slots 1658 } 1659 1660 // Add adds a transaction to the lookup. 1661 func (t *txLookup) Add(tx *types.Transaction, local bool) { 1662 t.lock.Lock() 1663 defer t.lock.Unlock() 1664 1665 t.slots += numSlots(tx) 1666 slotsGauge.Update(int64(t.slots)) 1667 1668 if local { 1669 t.locals[tx.Hash()] = tx 1670 } else { 1671 t.remotes[tx.Hash()] = tx 1672 } 1673 } 1674 1675 // Remove removes a transaction from the lookup. 1676 func (t *txLookup) Remove(hash common.Hash) { 1677 t.lock.Lock() 1678 defer t.lock.Unlock() 1679 1680 tx, ok := t.locals[hash] 1681 if !ok { 1682 tx, ok = t.remotes[hash] 1683 } 1684 if !ok { 1685 log.Error("No transaction found to be deleted", "hash", hash) 1686 return 1687 } 1688 t.slots -= numSlots(tx) 1689 slotsGauge.Update(int64(t.slots)) 1690 1691 delete(t.locals, hash) 1692 delete(t.remotes, hash) 1693 } 1694 1695 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1696 // set. The assumption is held the locals set is thread-safe to be used. 
1697 func (t *txLookup) RemoteToLocals(locals *accountSet) int { 1698 t.lock.Lock() 1699 defer t.lock.Unlock() 1700 1701 var migrated int 1702 for hash, tx := range t.remotes { 1703 if locals.containsTx(tx) { 1704 t.locals[hash] = tx 1705 delete(t.remotes, hash) 1706 migrated += 1 1707 } 1708 } 1709 return migrated 1710 } 1711 1712 // numSlots calculates the number of slots needed for a single transaction. 1713 func numSlots(tx *types.Transaction) int { 1714 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1715 }