// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"time"

	"github.com/kisexp/xdchain/common"
	"github.com/kisexp/xdchain/common/prque"
	"github.com/kisexp/xdchain/core/mps"
	"github.com/kisexp/xdchain/core/state"
	"github.com/kisexp/xdchain/core/types"
	"github.com/kisexp/xdchain/event"
	"github.com/kisexp/xdchain/log"
	"github.com/kisexp/xdchain/metrics"
	"github.com/kisexp/xdchain/params"
	pcore "github.com/kisexp/xdchain/permission/core"
	"github.com/kisexp/xdchain/private"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	// txMaxSize = 4 * txSlotSize // 128KB
	// Quorum - value above is not used. instead, ChainConfig.TransactionSizeLimit is used
)

var (
	// ErrAlreadyKnown is returned if the transactions is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")

	// ErrInvalidGasPrice is returned when a transaction on a Quorum chain carries
	// a non-zero gas price (validateTx requires gas price to be exactly zero when
	// chainconfig.IsQuorum is set).
	ErrInvalidGasPrice = errors.New("Gas price not 0")

	// ErrEtherValueUnsupported is returned if a transaction specifies an Ether Value
	// for a private Quorum transaction.
	ErrEtherValueUnsupported = errors.New("ether value is not supported for private transactions")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

// Possible TxStatus values, in increasing order of progress through the pool.
const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	// StateAt additionally returns the private-state repository (Quorum MPS support).
	StateAt(root common.Hash) (*state.StateDB, mps.PrivateStateRepository, error)

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals   []common.Address // Addresses that should be treated by default as local
	NoLocals bool             // Whether local transaction handling should be disabled
	Journal  string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration   // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued

	// Quorum
	TransactionSizeLimit uint64 // Maximum size allowed for valid transaction (in KB)
	MaxCodeSize          uint64 // Maximum size allowed of contract code that can be deployed (in KB)
}

// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096,
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,

	// Quorum
	TransactionSizeLimit: 64,
	MaxCodeSize:          24,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable. It returns a corrected copy; the receiver is not
// modified. Every out-of-range field is logged and replaced by the matching
// DefaultTxPoolConfig value (or time.Second for Rejournal).
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}

// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex // protects all mutable pool state below

	istanbul bool // Fork indicator whether we are in the istanbul stage.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
}

// txpoolResetRequest carries the old/new heads of a chain reorg to the
// reorg scheduling loop.
type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.NewEIP155Signer(chainconfig.ChainID),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	// Initialize internal state from the current chain head before accepting txs.
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events. It runs until the chain-head subscription errors out
// (typically at shutdown), at which point it closes reorgShutdownCh to stop
// scheduleReorgLoop as well.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			// Only log when something actually changed to keep the log quiet.
			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool. It unsubscribes all event
// subscriptions, waits for the background goroutines to exit, and closes
// the local transaction journal (if any).
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
423 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { 424 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 425 } 426 427 // GasPrice returns the current gas price enforced by the transaction pool. 428 func (pool *TxPool) GasPrice() *big.Int { 429 pool.mu.RLock() 430 defer pool.mu.RUnlock() 431 432 return new(big.Int).Set(pool.gasPrice) 433 } 434 435 // SetGasPrice updates the minimum price required by the transaction pool for a 436 // new transaction, and drops all transactions below this threshold. 437 func (pool *TxPool) SetGasPrice(price *big.Int) { 438 pool.mu.Lock() 439 defer pool.mu.Unlock() 440 441 pool.gasPrice = price 442 for _, tx := range pool.priced.Cap(price, pool.locals) { 443 pool.removeTx(tx.Hash(), false) 444 } 445 log.Info("Transaction pool price threshold updated", "price", price) 446 } 447 448 // Nonce returns the next nonce of an account, with all transactions executable 449 // by the pool already applied on top. 450 func (pool *TxPool) Nonce(addr common.Address) uint64 { 451 pool.mu.RLock() 452 defer pool.mu.RUnlock() 453 454 return pool.pendingNonces.get(addr) 455 } 456 457 // Stats retrieves the current pool stats, namely the number of pending and the 458 // number of queued (non-executable) transactions. 459 func (pool *TxPool) Stats() (int, int) { 460 pool.mu.RLock() 461 defer pool.mu.RUnlock() 462 463 return pool.stats() 464 } 465 466 // stats retrieves the current pool stats, namely the number of pending and the 467 // number of queued (non-executable) transactions. 468 func (pool *TxPool) stats() (int, int) { 469 pending := 0 470 for _, list := range pool.pending { 471 pending += list.Len() 472 } 473 queued := 0 474 for _, list := range pool.queue { 475 queued += list.Len() 476 } 477 return pending, queued 478 } 479 480 // Content retrieves the data content of the transaction pool, returning all the 481 // pending as well as queued transactions, grouped by account and sorted by nonce. 
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	queued := make(map[common.Address]types.Transactions)
	for addr, list := range pool.queue {
		queued[addr] = list.Flatten()
	}
	return pending, queued
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	pending := make(map[common.Address]types.Transactions)
	for addr, list := range pool.pending {
		pending[addr] = list.Flatten()
	}
	return pending, nil
}

// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	return pool.locals.flatten()
}

// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code. Callers must hold pool.mu.
func (pool *TxPool) local() map[common.Address]types.Transactions {
	txs := make(map[common.Address]types.Transactions)
	for addr := range pool.locals.accounts {
		if pending := pool.pending[addr]; pending != nil {
			txs[addr] = append(txs[addr], pending.Flatten()...)
		}
		if queued := pool.queue[addr]; queued != nil {
			txs[addr] = append(txs[addr], queued.Flatten()...)
		}
	}
	return txs
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Quorum
	// Size limit comes from the chain config; fall back to the pool default (64KB)
	// when the chain config leaves it unset.
	sizeLimit := pool.chainconfig.TransactionSizeLimit
	if sizeLimit == 0 {
		sizeLimit = DefaultTxPoolConfig.TransactionSizeLimit
	}
	// Reject transactions over 64KB (or manually set limit) to prevent DOS attacks
	if float64(tx.Size()) > float64(sizeLimit*1024) {
		return ErrOversizedData
	}
	// /Quorum

	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Make sure the transaction is signed properly
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	if pool.chainconfig.IsQuorum {
		// Quorum
		// A privacy marker transaction wraps an inner private transaction;
		// recursively validate the inner one when it can be fetched.
		if tx.IsPrivacyMarker() {
			innerTx, _, _, _ := private.FetchPrivateTransaction(tx.Data())
			if innerTx != nil {
				if err := pool.validateTx(innerTx, local); err != nil {
					return err
				}
			}
		}

		// Gas price must be zero for Quorum transaction
		if tx.GasPriceIntCmp(common.Big0) != 0 {
			return ErrInvalidGasPrice
		}
		// Ether value is not currently supported on private transactions
		// NOTE(review): this also rejects private transactions with empty data
		// (len(tx.Data()) == 0) under the same error — confirm that empty-payload
		// private transactions are intentionally treated as unsupported.
		if tx.IsPrivate() && (len(tx.Data()) == 0 || tx.Value().Sign() != 0) {
			return ErrEtherValueUnsupported
		}
		// Quorum - check if the sender account is authorized to perform the transaction
		if err := pcore.CheckAccountPermission(tx.From(), tx.To(), tx.Value(), tx.Data(), tx.Gas(), tx.GasPrice()); err != nil {
			return err
		}
	} else {
		// Drop non-local transactions under our own minimal accepted gas price
		local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network
		if !local && tx.GasPriceIntCmp(pool.gasPrice) < 0 {
			return ErrUnderpriced
		}
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// whitelisted, preventing any associated transaction from being dropped out of the pool
// due to pricing constraints.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, local); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it.
		// Quorum chains skip the price check entirely (gas price is always zero).
		if !pool.chainconfig.IsQuorum && !local && pool.priced.Underpriced(tx, pool.locals) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// New transaction is better than our worse ones, make room for it
		drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals)
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx)
		pool.priced.Put(tx)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local {
		if !pool.locals.contains(from) {
			log.Info("Setting new local account", "address", from)
			pool.locals.add(from)
		}
	}
	if local || pool.locals.contains(from) {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// Failsafe: the tx is normally not yet in the lookup at this point, but
	// guard against direct inserts before registering it globally.
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	// If we never record the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Failsafe to work around direct pending inserts (tests)
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as a local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	// Transactions are only treated as local when local handling is enabled.
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
795 func (pool *TxPool) AddLocal(tx *types.Transaction) error { 796 errs := pool.AddLocals([]*types.Transaction{tx}) 797 return errs[0] 798 } 799 800 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the 801 // senders are not among the locally tracked ones, full pricing constraints will apply. 802 // 803 // This method is used to add transactions from the p2p network and does not wait for pool 804 // reorganization and internal event propagation. 805 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 806 return pool.addTxs(txs, false, false) 807 } 808 809 // This is like AddRemotes, but waits for pool reorganization. Tests use this method. 810 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 811 return pool.addTxs(txs, false, true) 812 } 813 814 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 815 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 816 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 817 return errs[0] 818 } 819 820 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 821 // wrapper around AddRemotes. 822 // 823 // Deprecated: use AddRemotes 824 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 825 errs := pool.AddRemotes([]*types.Transaction{tx}) 826 return errs[0] 827 } 828 829 // addTxs attempts to queue a batch of transactions if they are valid. 
830 func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { 831 // Filter out known ones without obtaining the pool lock or recovering signatures 832 var ( 833 errs = make([]error, len(txs)) 834 news = make([]*types.Transaction, 0, len(txs)) 835 ) 836 for i, tx := range txs { 837 // If the transaction is known, pre-set the error slot 838 if pool.all.Get(tx.Hash()) != nil { 839 errs[i] = ErrAlreadyKnown 840 knownTxMeter.Mark(1) 841 continue 842 } 843 // Exclude transactions with invalid signatures as soon as 844 // possible and cache senders in transactions before 845 // obtaining lock 846 _, err := types.Sender(pool.signer, tx) 847 if err != nil { 848 errs[i] = ErrInvalidSender 849 invalidTxMeter.Mark(1) 850 continue 851 } 852 // Accumulate all unknown transactions for deeper processing 853 news = append(news, tx) 854 } 855 if len(news) == 0 { 856 return errs 857 } 858 859 // Process all the new transaction and merge any errors into the original slice 860 pool.mu.Lock() 861 newErrs, dirtyAddrs := pool.addTxsLocked(news, local) 862 pool.mu.Unlock() 863 864 var nilSlot = 0 865 for _, err := range newErrs { 866 for errs[nilSlot] != nil { 867 nilSlot++ 868 } 869 errs[nilSlot] = err 870 nilSlot++ 871 } 872 // Reorg the pool internals if needed and return 873 done := pool.requestPromoteExecutables(dirtyAddrs) 874 if sync { 875 <-done 876 } 877 return errs 878 } 879 880 // addTxsLocked attempts to queue a batch of transactions if they are valid. 881 // The transaction pool lock must be held. 
882 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 883 dirty := newAccountSet(pool.signer) 884 errs := make([]error, len(txs)) 885 for i, tx := range txs { 886 replaced, err := pool.add(tx, local) 887 errs[i] = err 888 if err == nil && !replaced { 889 dirty.addTx(tx) 890 } 891 } 892 validTxMeter.Mark(int64(len(dirty.accounts))) 893 return errs, dirty 894 } 895 896 // Status returns the status (unknown/pending/queued) of a batch of transactions 897 // identified by their hashes. 898 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 899 status := make([]TxStatus, len(hashes)) 900 for i, hash := range hashes { 901 tx := pool.Get(hash) 902 if tx == nil { 903 continue 904 } 905 from, _ := types.Sender(pool.signer, tx) // already validated 906 pool.mu.RLock() 907 if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 908 status[i] = TxStatusPending 909 } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 910 status[i] = TxStatusQueued 911 } 912 // implicit else: the tx may have been included into a block between 913 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 914 pool.mu.RUnlock() 915 } 916 return status 917 } 918 919 // Get returns a transaction if it is contained in the pool and nil otherwise. 920 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 921 return pool.all.Get(hash) 922 } 923 924 // Has returns an indicator whether txpool has a transaction cached with the 925 // given hash. 926 func (pool *TxPool) Has(hash common.Hash) bool { 927 return pool.all.Get(hash) != nil 928 } 929 930 // removeTx removes a single transaction from the queue, moving all subsequent 931 // transactions back to the future queue. 
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		// Out-of-bound removals also need the price heap accounting updated.
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				pool.enqueueTx(tx.Hash(), tx)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
991 // The returned channel is closed when the promotion checks have occurred. 992 func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { 993 select { 994 case pool.reqPromoteCh <- set: 995 return <-pool.reorgDoneCh 996 case <-pool.reorgShutdownCh: 997 return pool.reorgShutdownCh 998 } 999 } 1000 1001 // queueTxEvent enqueues a transaction event to be sent in the next reorg run. 1002 func (pool *TxPool) queueTxEvent(tx *types.Transaction) { 1003 select { 1004 case pool.queueTxEventCh <- tx: 1005 case <-pool.reorgShutdownCh: 1006 } 1007 } 1008 1009 // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not 1010 // call those methods directly, but request them being run using requestReset and 1011 // requestPromoteExecutables instead. 1012 func (pool *TxPool) scheduleReorgLoop() { 1013 defer pool.wg.Done() 1014 1015 var ( 1016 curDone chan struct{} // non-nil while runReorg is active 1017 nextDone = make(chan struct{}) 1018 launchNextRun bool 1019 reset *txpoolResetRequest 1020 dirtyAccounts *accountSet 1021 queuedEvents = make(map[common.Address]*txSortedMap) 1022 ) 1023 for { 1024 // Launch next background reorg if needed 1025 if curDone == nil && launchNextRun { 1026 // Run the background reorg and announcements 1027 go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) 1028 1029 // Prepare everything for the next round of reorg 1030 curDone, nextDone = nextDone, make(chan struct{}) 1031 launchNextRun = false 1032 1033 reset, dirtyAccounts = nil, nil 1034 queuedEvents = make(map[common.Address]*txSortedMap) 1035 } 1036 1037 select { 1038 case req := <-pool.reqResetCh: 1039 // Reset request: update head if request is already pending. 1040 if reset == nil { 1041 reset = req 1042 } else { 1043 reset.newHead = req.newHead 1044 } 1045 launchNextRun = true 1046 pool.reorgDoneCh <- nextDone 1047 1048 case req := <-pool.reqPromoteCh: 1049 // Promote request: update address set if request is already pending. 
1050 if dirtyAccounts == nil { 1051 dirtyAccounts = req 1052 } else { 1053 dirtyAccounts.merge(req) 1054 } 1055 launchNextRun = true 1056 pool.reorgDoneCh <- nextDone 1057 1058 case tx := <-pool.queueTxEventCh: 1059 // Queue up the event, but don't schedule a reorg. It's up to the caller to 1060 // request one later if they want the events sent. 1061 addr, _ := types.Sender(pool.signer, tx) 1062 if _, ok := queuedEvents[addr]; !ok { 1063 queuedEvents[addr] = newTxSortedMap() 1064 } 1065 queuedEvents[addr].Put(tx) 1066 1067 case <-curDone: 1068 curDone = nil 1069 1070 case <-pool.reorgShutdownCh: 1071 // Wait for current run to finish. 1072 if curDone != nil { 1073 <-curDone 1074 } 1075 close(nextDone) 1076 return 1077 } 1078 } 1079 } 1080 1081 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. 1082 func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { 1083 defer close(done) 1084 1085 var promoteAddrs []common.Address 1086 if dirtyAccounts != nil && reset == nil { 1087 // Only dirty accounts need to be promoted, unless we're resetting. 1088 // For resets, all addresses in the tx queue will be promoted and 1089 // the flatten operation can be avoided. 
1090 promoteAddrs = dirtyAccounts.flatten() 1091 } 1092 pool.mu.Lock() 1093 if reset != nil { 1094 // Reset from the old head to the new, rescheduling any reorged transactions 1095 pool.reset(reset.oldHead, reset.newHead) 1096 1097 // Nonces were reset, discard any events that became stale 1098 for addr := range events { 1099 events[addr].Forward(pool.pendingNonces.get(addr)) 1100 if events[addr].Len() == 0 { 1101 delete(events, addr) 1102 } 1103 } 1104 // Reset needs promote for all addresses 1105 promoteAddrs = make([]common.Address, 0, len(pool.queue)) 1106 for addr := range pool.queue { 1107 promoteAddrs = append(promoteAddrs, addr) 1108 } 1109 } 1110 // Check for pending transactions for every account that sent new ones 1111 promoted := pool.promoteExecutables(promoteAddrs) 1112 1113 // If a new block appeared, validate the pool of pending transactions. This will 1114 // remove any transaction that has been included in the block or was invalidated 1115 // because of another transaction (e.g. higher gas price). 1116 if reset != nil { 1117 pool.demoteUnexecutables() 1118 } 1119 // Ensure pool.queue and pool.pending sizes stay within the configured limits. 1120 pool.truncatePending() 1121 pool.truncateQueue() 1122 1123 // Update all accounts to the latest known pending nonce 1124 for addr, list := range pool.pending { 1125 highestPending := list.LastElement() 1126 pool.pendingNonces.set(addr, highestPending.Nonce()+1) 1127 } 1128 pool.mu.Unlock() 1129 1130 // Notify subsystems for newly added transactions 1131 for _, tx := range promoted { 1132 addr, _ := types.Sender(pool.signer, tx) 1133 if _, ok := events[addr]; !ok { 1134 events[addr] = newTxSortedMap() 1135 } 1136 events[addr].Put(tx) 1137 } 1138 if len(events) > 0 { 1139 var txs []*types.Transaction 1140 for _, set := range events { 1141 txs = append(txs, set.Flatten()...) 
1142 } 1143 pool.txFeed.Send(NewTxsEvent{txs}) 1144 } 1145 } 1146 1147 // reset retrieves the current state of the blockchain and ensures the content 1148 // of the transaction pool is valid with regard to the chain state. 1149 func (pool *TxPool) reset(oldHead, newHead *types.Header) { 1150 // If we're reorging an old state, reinject all dropped transactions 1151 var reinject types.Transactions 1152 1153 if oldHead != nil && oldHead.Hash() != newHead.ParentHash { 1154 // If the reorg is too deep, avoid doing it (will happen during fast sync) 1155 oldNum := oldHead.Number.Uint64() 1156 newNum := newHead.Number.Uint64() 1157 1158 if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { 1159 log.Debug("Skipping deep transaction reorg", "depth", depth) 1160 } else { 1161 // Reorg seems shallow enough to pull in all transactions into memory 1162 var discarded, included types.Transactions 1163 var ( 1164 rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) 1165 add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) 1166 ) 1167 if rem == nil { 1168 // This can happen if a setHead is performed, where we simply discard the old 1169 // head from the chain. 1170 // If that is the case, we don't have the lost transactions any more, and 1171 // there's nothing to add 1172 if newNum < oldNum { 1173 // If the reorg ended up on a lower number, it's indicative of setHead being the cause 1174 log.Debug("Skipping transaction reset caused by setHead", 1175 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1176 } else { 1177 // If we reorged to a same or higher number, then it's not a case of setHead 1178 log.Warn("Transaction pool reset with missing oldhead", 1179 "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) 1180 } 1181 return 1182 } 1183 for rem.NumberU64() > add.NumberU64() { 1184 discarded = append(discarded, rem.Transactions()...) 
1185 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1186 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) 1187 return 1188 } 1189 } 1190 for add.NumberU64() > rem.NumberU64() { 1191 included = append(included, add.Transactions()...) 1192 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1193 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) 1194 return 1195 } 1196 } 1197 for rem.Hash() != add.Hash() { 1198 discarded = append(discarded, rem.Transactions()...) 1199 if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { 1200 log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) 1201 return 1202 } 1203 included = append(included, add.Transactions()...) 1204 if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { 1205 log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) 1206 return 1207 } 1208 } 1209 reinject = types.TxDifference(discarded, included) 1210 } 1211 } 1212 // Initialize the internal state to the current head 1213 if newHead == nil { 1214 newHead = pool.chain.CurrentBlock().Header() // Special case during testing 1215 } 1216 statedb, _, err := pool.chain.StateAt(newHead.Root) 1217 if err != nil { 1218 log.Error("Failed to reset txpool state", "err", err) 1219 return 1220 } 1221 pool.currentState = statedb 1222 pool.pendingNonces = newTxNoncer(statedb) 1223 pool.currentMaxGas = newHead.GasLimit 1224 1225 // Inject any transactions discarded due to reorgs 1226 log.Debug("Reinjecting stale transactions", "count", len(reinject)) 1227 senderCacher.recover(pool.signer, reinject) 1228 pool.addTxsLocked(reinject, false) 1229 1230 // Update all fork indicator by next pending block number. 
1231 next := new(big.Int).Add(newHead.Number, big.NewInt(1)) 1232 pool.istanbul = pool.chainconfig.IsIstanbul(next) 1233 } 1234 1235 // promoteExecutables moves transactions that have become processable from the 1236 // future queue to the set of pending transactions. During this process, all 1237 // invalidated transactions (low nonce, low balance) are deleted. 1238 func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction { 1239 isQuorum := pool.chainconfig.IsQuorum 1240 // Init delayed since tx pool could have been started before any state sync 1241 if isQuorum && pool.pendingNonces == nil { 1242 pool.reset(nil, nil) 1243 } 1244 // Track the promoted transactions to broadcast them at once 1245 var promoted []*types.Transaction 1246 1247 // Iterate over all accounts and promote any executable transactions 1248 for _, addr := range accounts { 1249 list := pool.queue[addr] 1250 if list == nil { 1251 continue // Just in case someone calls with a non existing account 1252 } 1253 // Drop all transactions that are deemed too old (low nonce) 1254 forwards := list.Forward(pool.currentState.GetNonce(addr)) 1255 for _, tx := range forwards { 1256 hash := tx.Hash() 1257 pool.all.Remove(hash) 1258 } 1259 var drops types.Transactions 1260 if !isQuorum { 1261 log.Trace("Removed old queued transactions", "count", len(forwards)) 1262 // Drop all transactions that are too costly (low balance or out of gas) 1263 drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) 1264 for _, tx := range drops { 1265 hash := tx.Hash() 1266 pool.all.Remove(hash) 1267 } 1268 log.Trace("Removed unpayable queued transactions", "count", len(drops)) 1269 queuedNofundsMeter.Mark(int64(len(drops))) 1270 } 1271 1272 // Gather all executable transactions and promote them 1273 readies := list.Ready(pool.pendingNonces.get(addr)) 1274 for _, tx := range readies { 1275 hash := tx.Hash() 1276 log.Trace("Promoting queued transaction", "hash", hash) 1277 if 
pool.promoteTx(addr, hash, tx) { 1278 promoted = append(promoted, tx) 1279 } 1280 } 1281 log.Trace("Promoted queued transactions", "count", len(promoted)) 1282 queuedGauge.Dec(int64(len(readies))) 1283 1284 // Drop all transactions over the allowed limit 1285 var caps types.Transactions 1286 if !pool.locals.contains(addr) { 1287 caps = list.Cap(int(pool.config.AccountQueue)) 1288 for _, tx := range caps { 1289 hash := tx.Hash() 1290 pool.all.Remove(hash) 1291 log.Trace("Removed cap-exceeding queued transaction", "hash", hash) 1292 } 1293 queuedRateLimitMeter.Mark(int64(len(caps))) 1294 } 1295 // Mark all the items dropped as removed 1296 pool.priced.Removed(len(forwards) + len(drops) + len(caps)) 1297 queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1298 if pool.locals.contains(addr) { 1299 localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) 1300 } 1301 // Delete the entire queue entry if it became empty. 1302 if list.Empty() { 1303 delete(pool.queue, addr) 1304 delete(pool.beats, addr) 1305 } 1306 } 1307 return promoted 1308 } 1309 1310 // truncatePending removes transactions from the pending queue if the pool is above the 1311 // pending limit. The algorithm tries to reduce transaction counts by an approximately 1312 // equal number for all for accounts with many pending transactions. 
func (pool *TxPool) truncatePending() {
	// Count the pending transactions across all accounts; nothing to do while
	// the pool is within the configured global slot limit.
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					// Trim one transaction off this offender (Cap keeps Len-1 entries).
					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				// Trim one transaction off each remaining offender per round.
				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
1398 func (pool *TxPool) truncateQueue() { 1399 queued := uint64(0) 1400 for _, list := range pool.queue { 1401 queued += uint64(list.Len()) 1402 } 1403 if queued <= pool.config.GlobalQueue { 1404 return 1405 } 1406 1407 // Sort all accounts with queued transactions by heartbeat 1408 addresses := make(addressesByHeartbeat, 0, len(pool.queue)) 1409 for addr := range pool.queue { 1410 if !pool.locals.contains(addr) { // don't drop locals 1411 addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) 1412 } 1413 } 1414 sort.Sort(addresses) 1415 1416 // Drop transactions until the total is below the limit or only locals remain 1417 for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { 1418 addr := addresses[len(addresses)-1] 1419 list := pool.queue[addr.address] 1420 1421 addresses = addresses[:len(addresses)-1] 1422 1423 // Drop all transactions if they are less than the overflow 1424 if size := uint64(list.Len()); size <= drop { 1425 for _, tx := range list.Flatten() { 1426 pool.removeTx(tx.Hash(), true) 1427 } 1428 drop -= size 1429 queuedRateLimitMeter.Mark(int64(size)) 1430 continue 1431 } 1432 // Otherwise drop only last few transactions 1433 txs := list.Flatten() 1434 for i := len(txs) - 1; i >= 0 && drop > 0; i-- { 1435 pool.removeTx(txs[i].Hash(), true) 1436 drop-- 1437 queuedRateLimitMeter.Mark(1) 1438 } 1439 } 1440 } 1441 1442 // demoteUnexecutables removes invalid and processed transactions from the pools 1443 // executable/pending queue and any subsequent transactions that become unexecutable 1444 // are moved back into the future queue. 
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pool.priced.Removed(len(olds) + len(drops))
		pendingNofundsMeter.Mark(int64(len(drops)))

		// Push the invalidated transactions back into the future queue for later.
		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)
			pool.enqueueTx(hash, tx)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)
				pool.enqueueTx(hash, tx)
			}
			pendingGauge.Dec(int64(len(gapped)))
			// This might happen in a reorg, so log it to the metering
			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

// addressesByHeartbeat implements sort.Interface over heartbeat-tagged
// addresses, ordering them by the time of last activity (earliest first).
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address // lazily built by flatten; invalidated on mutation
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// empty reports whether the set contains no addresses.
func (as *accountSet) empty() bool {
	return len(as.accounts) == 0
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
	if addr, err := types.Sender(as.signer, tx); err == nil {
		return as.contains(addr)
	}
	return false
}

// add inserts a new address into the set to track.
1548 func (as *accountSet) add(addr common.Address) { 1549 as.accounts[addr] = struct{}{} 1550 as.cache = nil 1551 } 1552 1553 // addTx adds the sender of tx into the set. 1554 func (as *accountSet) addTx(tx *types.Transaction) { 1555 if addr, err := types.Sender(as.signer, tx); err == nil { 1556 as.add(addr) 1557 } 1558 } 1559 1560 // flatten returns the list of addresses within this set, also caching it for later 1561 // reuse. The returned slice should not be changed! 1562 func (as *accountSet) flatten() []common.Address { 1563 if as.cache == nil { 1564 accounts := make([]common.Address, 0, len(as.accounts)) 1565 for account := range as.accounts { 1566 accounts = append(accounts, account) 1567 } 1568 as.cache = &accounts 1569 } 1570 return *as.cache 1571 } 1572 1573 // merge adds all addresses from the 'other' set into 'as'. 1574 func (as *accountSet) merge(other *accountSet) { 1575 for addr := range other.accounts { 1576 as.accounts[addr] = struct{}{} 1577 } 1578 as.cache = nil 1579 } 1580 1581 // txLookup is used internally by TxPool to track transactions while allowing lookup without 1582 // mutex contention. 1583 // 1584 // Note, although this type is properly protected against concurrent access, it 1585 // is **not** a type that should ever be mutated or even exposed outside of the 1586 // transaction pool, since its internal state is tightly coupled with the pools 1587 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1588 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1589 // TxPool.mu mutex. 1590 type txLookup struct { 1591 all map[common.Hash]*types.Transaction 1592 slots int 1593 lock sync.RWMutex 1594 } 1595 1596 // newTxLookup returns a new txLookup structure. 1597 func newTxLookup() *txLookup { 1598 return &txLookup{ 1599 all: make(map[common.Hash]*types.Transaction), 1600 } 1601 } 1602 1603 // Range calls f on each key and value present in the map. 
1604 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) { 1605 t.lock.RLock() 1606 defer t.lock.RUnlock() 1607 1608 for key, value := range t.all { 1609 if !f(key, value) { 1610 break 1611 } 1612 } 1613 } 1614 1615 // Get returns a transaction if it exists in the lookup, or nil if not found. 1616 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1617 t.lock.RLock() 1618 defer t.lock.RUnlock() 1619 1620 return t.all[hash] 1621 } 1622 1623 // Count returns the current number of items in the lookup. 1624 func (t *txLookup) Count() int { 1625 t.lock.RLock() 1626 defer t.lock.RUnlock() 1627 1628 return len(t.all) 1629 } 1630 1631 // Slots returns the current number of slots used in the lookup. 1632 func (t *txLookup) Slots() int { 1633 t.lock.RLock() 1634 defer t.lock.RUnlock() 1635 1636 return t.slots 1637 } 1638 1639 // Add adds a transaction to the lookup. 1640 func (t *txLookup) Add(tx *types.Transaction) { 1641 t.lock.Lock() 1642 defer t.lock.Unlock() 1643 1644 t.slots += numSlots(tx) 1645 slotsGauge.Update(int64(t.slots)) 1646 1647 t.all[tx.Hash()] = tx 1648 } 1649 1650 // Remove removes a transaction from the lookup. 1651 func (t *txLookup) Remove(hash common.Hash) { 1652 t.lock.Lock() 1653 defer t.lock.Unlock() 1654 1655 t.slots -= numSlots(t.all[hash]) 1656 slotsGauge.Update(int64(t.slots)) 1657 1658 delete(t.all, hash) 1659 } 1660 1661 // helper function to return chainHeadChannel size 1662 func GetChainHeadChannleSize() int { 1663 return chainHeadChanSize 1664 } 1665 1666 // numSlots calculates the number of slots needed for a single transaction. 1667 func numSlots(tx *types.Transaction) int { 1668 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1669 }