// Copyright 2021 The adkgo Authors
// This file is part of the adkgo library.
//
// The adkgo library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The adkgo library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"math"
	"math/big"
	"sort"
	"sync"
	"time"
	"bytes"
	"github.com/aidoskuneen/adk-node/common"
	"github.com/aidoskuneen/adk-node/common/prque"
	"github.com/aidoskuneen/adk-node/consensus/misc"
	"github.com/aidoskuneen/adk-node/core/state"
	"github.com/aidoskuneen/adk-node/core/types"
	"github.com/aidoskuneen/adk-node/event"
	"github.com/aidoskuneen/adk-node/log"
	"github.com/aidoskuneen/adk-node/metrics"
	"github.com/aidoskuneen/adk-node/params"
	"github.com/AidosKuneen/gadk"
	"crypto/sha1"
	"strings"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * 4 * txSlotSize // 512 KB
)

var (
	// ErrAlreadyKnown is returned if the transaction is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)

var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)

var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)

	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)

// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
	TxStatusIncluded
)

// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	CurrentBlock() *types.Block
	GetBlock(hash common.Hash, number uint64) *types.Block
	StateAt(root common.Hash) (*state.StateDB, error)

	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}

// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
}

// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}

// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable. It operates on (and returns) a copy, so the
// caller's config value is never mutated; every invalid field is replaced by
// the corresponding DefaultTxPoolConfig value (or a sane floor) with a warning.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
	conf := *config
	if conf.Rejournal < time.Second {
		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
		conf.Rejournal = time.Second
	}
	if conf.PriceLimit < 1 {
		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
		conf.PriceBump = DefaultTxPoolConfig.PriceBump
	}
	if conf.AccountSlots < 1 {
		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
	}
	if conf.AccountQueue < 1 {
		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
	}
	if conf.GlobalQueue < 1 {
		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
	}
	if conf.Lifetime < 1 {
		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
		conf.Lifetime = DefaultTxPoolConfig.Lifetime
	}
	return conf
}

// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
//
// All mutable state below is guarded by mu unless a field's own comment says
// otherwise; the reorg/reset channels are serviced by scheduleReorgLoop.
type TxPool struct {
	config      TxPoolConfig
	chainconfig *params.ChainConfig
	chain       blockChain
	gasPrice    *big.Int
	txFeed      event.Feed
	scope       event.SubscriptionScope
	signer      types.Signer
	mu          sync.RWMutex

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
	eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent
	chainHeadSub    event.Subscription
	reqResetCh      chan *txpoolResetRequest
	reqPromoteCh    chan *accountSet
	queueTxEventCh  chan *types.Transaction
	reorgDoneCh     chan chan struct{}
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
}

// txpoolResetRequest carries the old and new chain heads for a pool reset.
type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}

// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.LatestSigner(chainconfig),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	// Align the pool's internal state with the current chain head before
	// accepting any transactions.
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		// Journal load/rotate failures are non-fatal: the pool still works,
		// it just starts without the persisted local transactions.
		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}

// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent: schedule a reset from the previously seen
		// head to the new one, then remember the new head.
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown: the head subscription erroring out is the signal
		// that the blockchain is going away; tell scheduleReorgLoop to stop.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks; only log when something changed.
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}

// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain; this makes
	// chainHeadSub.Err() fire, which shuts down loop and scheduleReorgLoop,
	// so the Wait below terminates.
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
418 func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { 419 return pool.scope.Track(pool.txFeed.Subscribe(ch)) 420 } 421 422 // GasPrice returns the current gas price enforced by the transaction pool. 423 func (pool *TxPool) GasPrice() *big.Int { 424 pool.mu.RLock() 425 defer pool.mu.RUnlock() 426 427 return new(big.Int).Set(pool.gasPrice) 428 } 429 430 // SetGasPrice updates the minimum price required by the transaction pool for a 431 // new transaction, and drops all transactions below this threshold. 432 func (pool *TxPool) SetGasPrice(price *big.Int) { 433 pool.mu.Lock() 434 defer pool.mu.Unlock() 435 436 old := pool.gasPrice 437 pool.gasPrice = price 438 // if the min miner fee increased, remove transactions below the new threshold 439 if price.Cmp(old) > 0 { 440 // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead 441 drop := pool.all.RemotesBelowTip(price) 442 for _, tx := range drop { 443 pool.removeTx(tx.Hash(), false) 444 } 445 pool.priced.Removed(len(drop)) 446 } 447 448 log.Info("Transaction pool price threshold updated", "price", price) 449 } 450 451 // Nonce returns the next nonce of an account, with all transactions executable 452 // by the pool already applied on top. 453 func (pool *TxPool) Nonce(addr common.Address) uint64 { 454 pool.mu.RLock() 455 defer pool.mu.RUnlock() 456 457 return pool.pendingNonces.get(addr) 458 } 459 460 // Stats retrieves the current pool stats, namely the number of pending and the 461 // number of queued (non-executable) transactions. 462 func (pool *TxPool) Stats() (int, int) { 463 pool.mu.RLock() 464 defer pool.mu.RUnlock() 465 466 return pool.stats() 467 } 468 469 // stats retrieves the current pool stats, namely the number of pending and the 470 // number of queued (non-executable) transactions. 
471 func (pool *TxPool) stats() (int, int) { 472 pending := 0 473 for _, list := range pool.pending { 474 pending += list.Len() 475 } 476 queued := 0 477 for _, list := range pool.queue { 478 queued += list.Len() 479 } 480 return pending, queued 481 } 482 483 // Content retrieves the data content of the transaction pool, returning all the 484 // pending as well as queued transactions, grouped by account and sorted by nonce. 485 func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { 486 pool.mu.Lock() 487 defer pool.mu.Unlock() 488 489 pending := make(map[common.Address]types.Transactions) 490 for addr, list := range pool.pending { 491 pending[addr] = list.Flatten() 492 } 493 queued := make(map[common.Address]types.Transactions) 494 for addr, list := range pool.queue { 495 queued[addr] = list.Flatten() 496 } 497 return pending, queued 498 } 499 500 // ContentFrom retrieves the data content of the transaction pool, returning the 501 // pending as well as queued transactions of this address, grouped by nonce. 502 func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { 503 pool.mu.RLock() 504 defer pool.mu.RUnlock() 505 506 var pending types.Transactions 507 if list, ok := pool.pending[addr]; ok { 508 pending = list.Flatten() 509 } 510 var queued types.Transactions 511 if list, ok := pool.queue[addr]; ok { 512 queued = list.Flatten() 513 } 514 return pending, queued 515 } 516 517 // Pending retrieves all currently processable transactions, grouped by origin 518 // account and sorted by nonce. The returned transaction set is a copy and can be 519 // freely modified by calling code. 520 // 521 // The enforceTips parameter can be used to do an extra filtering on the pending 522 // transactions and only return those whose **effective** tip is large enough in 523 // the next pending execution environment. 
524 func (pool *TxPool) Pending(enforceTips bool) (map[common.Address]types.Transactions, error) { 525 pool.mu.Lock() 526 defer pool.mu.Unlock() 527 528 pending := make(map[common.Address]types.Transactions) 529 for addr, list := range pool.pending { 530 txs := list.Flatten() 531 532 // If the miner requests tip enforcement, cap the lists now 533 if enforceTips && !pool.locals.contains(addr) { 534 for i, tx := range txs { 535 if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { 536 txs = txs[:i] 537 break 538 } 539 } 540 } 541 if len(txs) > 0 { 542 pending[addr] = txs 543 } 544 } 545 return pending, nil 546 } 547 548 // Locals retrieves the accounts currently considered local by the pool. 549 func (pool *TxPool) Locals() []common.Address { 550 pool.mu.Lock() 551 defer pool.mu.Unlock() 552 553 return pool.locals.flatten() 554 } 555 556 // local retrieves all currently known local transactions, grouped by origin 557 // account and sorted by nonce. The returned transaction set is a copy and can be 558 // freely modified by calling code. 559 func (pool *TxPool) local() map[common.Address]types.Transactions { 560 txs := make(map[common.Address]types.Transactions) 561 for addr := range pool.locals.accounts { 562 if pending := pool.pending[addr]; pending != nil { 563 txs[addr] = append(txs[addr], pending.Flatten()...) 564 } 565 if queued := pool.queue[addr]; queued != nil { 566 txs[addr] = append(txs[addr], queued.Flatten()...) 567 } 568 } 569 return txs 570 } 571 572 // validateTx checks whether a transaction is valid according to the consensus 573 // rules and adheres to some heuristic limits of the local node (price and size). 574 func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { 575 // Accept only legacy transactions until EIP-2718/2930 activates. 576 if !pool.eip2718 && tx.Type() != types.LegacyTxType { 577 return ErrTxTypeNotSupported 578 } 579 // Reject dynamic fee transactions until EIP-1559 activates. 
580 if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { 581 return ErrTxTypeNotSupported 582 } 583 // Reject transactions over defined size to prevent DOS attacks 584 if uint64(tx.Size()) > txMaxSize { 585 return ErrOversizedData 586 } 587 // Transactions can't be negative. This may never happen using RLP decoded 588 // transactions but may occur if you create a transaction using the RPC. 589 if tx.Value().Sign() < 0 { 590 return ErrNegativeValue 591 } 592 // Ensure the transaction doesn't exceed the current block limit gas. 593 if pool.currentMaxGas < tx.Gas() { 594 return ErrGasLimit 595 } 596 // Sanity check for extremely large numbers 597 if tx.GasFeeCap().BitLen() > 256 { 598 return ErrFeeCapVeryHigh 599 } 600 if tx.GasTipCap().BitLen() > 256 { 601 return ErrTipVeryHigh 602 } 603 // Ensure gasFeeCap is greater than or equal to gasTipCap. 604 if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { 605 return ErrTipAboveFeeCap 606 } 607 // Make sure the transaction is signed properly. 608 from, err := types.Sender(pool.signer, tx) 609 if err != nil { 610 return ErrInvalidSender 611 } 612 // Drop non-local transactions under our own minimal accepted gas price or tip 613 if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { 614 return ErrUnderpriced 615 } 616 // Ensure the transaction adheres to nonce ordering 617 if pool.currentState.GetNonce(from) > tx.Nonce() { 618 return ErrNonceTooLow 619 } 620 // Transactor should have enough funds to cover the costs 621 // cost == V + GP * GL 622 if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { 623 return ErrInsufficientFunds 624 } 625 // Ensure the transaction has more gas than the basic tx fee. 
626 intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) 627 if err != nil { 628 return err 629 } 630 if tx.Gas() < intrGas { 631 return ErrIntrinsicGas 632 } 633 634 //ADKGO ensure that the minimum GAS PRICE for contract creation is sufficient (adkphoenix) 635 // 636 // Note: 2021/11 Not needed any more as we use GAS recycle instead 637 // 638 // if (tx.To() == nil) { // contract creation 639 // minGasPrice := new(big.Int).SetUint64(params.TxContractCreationMinGasPrice) 640 // if ( tx.GasPrice().Cmp(minGasPrice) < 0 && bytes.Compare(from.Bytes(),common.HexToAddress(params.TxContractCreationGenesisAccount).Bytes()) != 0){ 641 // return ErrInsufficientGasPriceForContractCreation 642 // } 643 // } 644 645 //ADKGO only allow MESH transactions executed via PoW API to be "free" 646 647 // TxADKMeshContract 648 649 minGasPrice := new(big.Int).SetUint64(params.TxADKMinGasPrice) 650 651 // genesis account allowed to use 0 price 652 if (bytes.Compare(from.Bytes(),common.HexToAddress(params.TxContractCreationGenesisAccount).Bytes()) == 0){ 653 return nil // OK 654 } 655 656 // Transactions allowed to use 0 price GAS if target is the ADK Migration Contract 657 if (tx.To() != nil) && (tx.Data() != nil) { 658 // "7fa5e242": "PostTransactions(string)", 659 postTransactionsFunctionHash := []byte("\x7f\xa5\xe2\x42") 660 if ( ( bytes.Compare(tx.To().Bytes(),common.HexToAddress(params.TxAGSClaimContract).Bytes()) == 0 ) && 661 bytes.Compare(from.Bytes(),common.HexToAddress(params.TxAPIAccount).Bytes()) == 0 && 662 len(tx.Data()) > 4 && 663 bytes.Compare(postTransactionsFunctionHash,tx.Data()[0:4]) == 0 ){ 664 // OK, someone called the PostTransaction function using the generic APIAccount 665 // now check if PoW is done. 
666 data := tx.Data()[68:] // note: function only has one string param, and string is all non-0 bytes (so we ignore trailing 0s) 667 dataLen := 0 668 validchars := "ABCDEFGHIJKLMNOPQRSTUVWXYZ9" 669 allNines := true 670 for idx := 0; idx < len(data); idx++{ 671 if data[idx] != 0 && strings.Contains(validchars,string(data[idx:idx+1])) { 672 dataLen++ 673 if (string(data[idx:idx+1])!="9") { 674 allNines = false 675 } 676 } else { 677 break 678 } 679 } 680 if allNines || dataLen == 0 || dataLen % 2673 != 0 { 681 return ErrTransactionInvalid 682 } 683 data = data[0:dataLen] 684 // ok by now we know we have a valid tryte. so now letch check for each if the PoW has been Done 685 for tidx := 0; tidx < len(data); tidx += 2673 { 686 transactionData := data[tidx:tidx+2673] 687 transactionDataTrytes,err:=gadk.ToTrytes(string(transactionData)) 688 if err != nil || !strings.HasSuffix(string(transactionDataTrytes.Hash()), "99999"){ // 15 trits difficulty 689 return ErrInsufficientPoW 690 } 691 } 692 // and finally check that we ahvent seen this exact transaction recently, again to prevent spam 693 // otherwise raise ErrTransactionRecent 694 695 // first clear expired ones 696 currTime := time.Now().Unix() // time in seconds since ep 697 for dt , unixtime := range seen_trytes { 698 if (unixtime < currTime - 60 ){ // allow same transaction again after 1 min 699 delete(seen_trytes, dt) 700 } 701 } 702 currTime = time.Now().Unix() // time in seconds since ep 703 if (seen_trytes[string(data)]==0){ 704 seen_trytes[string(data)] = currTime 705 return nil // all transactions OK 706 } else { 707 return ErrTransactionRecent 708 } 709 } 710 } 711 712 // Transactions ALSO allowed to use 0 price GAS if PoW has been done 713 zro := big.NewInt(0) 714 if ( tx.GasPrice().Cmp(zro) == 0 ){ // trying to send with 0 GAS, so check if PoW is done 715 //tx.Hash().Bytes() 716 PoWHash := Sha256Bytes2Bytes(tx.Hash().Bytes()) 717 // 718 if (PoWHash[0]==0 && 719 PoWHash[1]==0 && 720 PoWHash[2] < 64 ){ 721 
return nil //OK 722 } else { 723 return ErrInsufficientPoW // NOT enough PoW 724 } 725 } 726 727 // otherwise minimum gas price is enforced 728 if ( tx.GasPrice().Cmp(minGasPrice) < 0 ) { 729 return ErrInsufficientGasPrice 730 } 731 732 return nil 733 } 734 735 func Sha256Bytes2Bytes(data []byte)([]byte){ 736 h := sha1.New() 737 h.Write(data) 738 return h.Sum(nil) 739 } 740 741 var seen_trytes map[string]int64 = make(map[string]int64) 742 743 // add validates a transaction and inserts it into the non-executable queue for later 744 // pending promotion and execution. If the transaction is a replacement for an already 745 // pending or queued one, it overwrites the previous transaction if its price is higher. 746 // 747 // If a newly added transaction is marked as local, its sending account will be 748 // be added to the allowlist, preventing any associated transaction from being dropped 749 // out of the pool due to pricing constraints. 750 func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { 751 // If the transaction is already known, discard it 752 hash := tx.Hash() 753 if pool.all.Get(hash) != nil { 754 log.Trace("Discarding already known transaction", "hash", hash) 755 knownTxMeter.Mark(1) 756 return false, ErrAlreadyKnown 757 } 758 // Make the local flag. If it's from local source or it's from the network but 759 // the sender is marked as local previously, treat it as the local transaction. 
760 isLocal := local || pool.locals.containsTx(tx) 761 762 // If the transaction fails basic validation, discard it 763 if err := pool.validateTx(tx, isLocal); err != nil { 764 log.Trace("Discarding invalid transaction", "hash", hash, "err", err) 765 invalidTxMeter.Mark(1) 766 return false, err 767 } 768 // If the transaction pool is full, discard underpriced transactions 769 if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { 770 // If the new transaction is underpriced, don't accept it 771 if !isLocal && pool.priced.Underpriced(tx) { 772 log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 773 underpricedTxMeter.Mark(1) 774 return false, ErrUnderpriced 775 } 776 // New transaction is better than our worse ones, make room for it. 777 // If it's a local transaction, forcibly discard all available transactions. 778 // Otherwise if we can't make enough room for new one, abort the operation. 779 drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) 780 781 // Special case, we still can't make the room for the new remote one. 782 if !isLocal && !success { 783 log.Trace("Discarding overflown transaction", "hash", hash) 784 overflowedTxMeter.Mark(1) 785 return false, ErrTxPoolOverflow 786 } 787 // Kick out the underpriced remote transactions. 
788 for _, tx := range drop { 789 log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) 790 underpricedTxMeter.Mark(1) 791 pool.removeTx(tx.Hash(), false) 792 } 793 } 794 // Try to replace an existing transaction in the pending pool 795 from, _ := types.Sender(pool.signer, tx) // already validated 796 if list := pool.pending[from]; list != nil && list.Overlaps(tx) { 797 // Nonce already pending, check if required price bump is met 798 inserted, old := list.Add(tx, pool.config.PriceBump) 799 if !inserted { 800 pendingDiscardMeter.Mark(1) 801 return false, ErrReplaceUnderpriced 802 } 803 // New transaction is better, replace old one 804 if old != nil { 805 pool.all.Remove(old.Hash()) 806 pool.priced.Removed(1) 807 pendingReplaceMeter.Mark(1) 808 } 809 pool.all.Add(tx, isLocal) 810 pool.priced.Put(tx, isLocal) 811 pool.journalTx(from, tx) 812 pool.queueTxEvent(tx) 813 log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) 814 815 // Successful promotion, bump the heartbeat 816 pool.beats[from] = time.Now() 817 return old != nil, nil 818 } 819 // New transaction isn't replacing a pending one, push into queue 820 replaced, err = pool.enqueueTx(hash, tx, isLocal, true) 821 if err != nil { 822 return false, err 823 } 824 // Mark local addresses and journal local transactions 825 if local && !pool.locals.contains(from) { 826 log.Info("Setting new local account", "address", from) 827 pool.locals.add(from) 828 pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. 829 } 830 if isLocal { 831 localGauge.Inc(1) 832 } 833 pool.journalTx(from, tx) 834 835 log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) 836 return replaced, nil 837 } 838 839 // enqueueTx inserts a new transaction into the non-executable transaction queue. 
//
// If addAll is true the transaction is also registered in the global lookup
// set (pool.all) and the price-ordered index (pool.priced); internal shuffles
// pass false to leave those structures untouched. The first return value
// reports whether an older transaction with the same nonce was replaced.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// If the transaction isn't in lookup set but it's expected to be there,
	// show the error log. An internal shuffle (addAll == false) must only ever
	// move transactions that are already tracked in pool.all.
	if pool.all.Get(hash) == nil && !addAll {
		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
	}
	if addAll {
		pool.all.Add(tx, local)
		pool.priced.Put(tx, local)
	}
	// If we never record the heartbeat, do it right now.
	if _, exist := pool.beats[from]; !exist {
		pool.beats[from] = time.Now()
	}
	return old != nil, nil
}

// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
	// Only journal if it's enabled and the transaction is local
	if pool.journal == nil || !pool.locals.contains(from) {
		return
	}
	// Journal failures are non-fatal: log and carry on.
	if err := pool.journal.insert(tx); err != nil {
		log.Warn("Failed to journal local transaction", "err", err)
	}
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this one and drop it from
		// the lookup/price indexes as well.
		pool.all.Remove(hash)
		pool.priced.Removed(1)
		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	// Successful promotion, bump the heartbeat
	pool.beats[addr] = time.Now()
	return true
}

// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as a local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
932 func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { 933 return pool.addTxs(txs, !pool.config.NoLocals, true) 934 } 935 936 // AddLocal enqueues a single local transaction into the pool if it is valid. This is 937 // a convenience wrapper aroundd AddLocals. 938 func (pool *TxPool) AddLocal(tx *types.Transaction) error { 939 errs := pool.AddLocals([]*types.Transaction{tx}) 940 return errs[0] 941 } 942 943 // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the 944 // senders are not among the locally tracked ones, full pricing constraints will apply. 945 // 946 // This method is used to add transactions from the p2p network and does not wait for pool 947 // reorganization and internal event propagation. 948 func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { 949 return pool.addTxs(txs, false, false) 950 } 951 952 // This is like AddRemotes, but waits for pool reorganization. Tests use this method. 953 func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { 954 return pool.addTxs(txs, false, true) 955 } 956 957 // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 958 func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { 959 errs := pool.AddRemotesSync([]*types.Transaction{tx}) 960 return errs[0] 961 } 962 963 // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience 964 // wrapper around AddRemotes. 965 // 966 // Deprecated: use AddRemotes 967 func (pool *TxPool) AddRemote(tx *types.Transaction) error { 968 errs := pool.AddRemotes([]*types.Transaction{tx}) 969 return errs[0] 970 } 971 972 // addTxs attempts to queue a batch of transactions if they are valid. 
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = ErrAlreadyKnown
			knownTxMeter.Mark(1)
			continue
		}
		// Exclude transactions with invalid signatures as soon as
		// possible and cache senders in transactions before
		// obtaining lock
		_, err := types.Sender(pool.signer, tx)
		if err != nil {
			errs[i] = ErrInvalidSender
			invalidTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}

	// Process all the new transaction and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	// newErrs is parallel to news, not txs: scatter each result into the next
	// error slot that was not already filled by the pre-filter above.
	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
1025 func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { 1026 dirty := newAccountSet(pool.signer) 1027 errs := make([]error, len(txs)) 1028 for i, tx := range txs { 1029 replaced, err := pool.add(tx, local) 1030 errs[i] = err 1031 if err == nil && !replaced { 1032 dirty.addTx(tx) 1033 } 1034 } 1035 validTxMeter.Mark(int64(len(dirty.accounts))) 1036 return errs, dirty 1037 } 1038 1039 // Status returns the status (unknown/pending/queued) of a batch of transactions 1040 // identified by their hashes. 1041 func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { 1042 status := make([]TxStatus, len(hashes)) 1043 for i, hash := range hashes { 1044 tx := pool.Get(hash) 1045 if tx == nil { 1046 continue 1047 } 1048 from, _ := types.Sender(pool.signer, tx) // already validated 1049 pool.mu.RLock() 1050 if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 1051 status[i] = TxStatusPending 1052 } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { 1053 status[i] = TxStatusQueued 1054 } 1055 // implicit else: the tx may have been included into a block between 1056 // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct 1057 pool.mu.RUnlock() 1058 } 1059 return status 1060 } 1061 1062 // Get returns a transaction if it is contained in the pool and nil otherwise. 1063 func (pool *TxPool) Get(hash common.Hash) *types.Transaction { 1064 return pool.all.Get(hash) 1065 } 1066 1067 // Has returns an indicator whether txpool has a transaction cached with the 1068 // given hash. 1069 func (pool *TxPool) Has(hash common.Hash) bool { 1070 return pool.all.Get(hash) != nil 1071 } 1072 1073 // removeTx removes a single transaction from the queue, moving all subsequent 1074 // transactions back to the future queue. 
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	// Only out-of-bound removals (initiated here, not by the price index)
	// need to notify the price index explicitly.
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		// scheduleReorgLoop replies with the done channel of the run that
		// will service this request.
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		// Pool is shutting down; the shutdown channel is already closed, so
		// returning it keeps the "closed when done" contract for callers.
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
		// Drop the event silently on shutdown.
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			// Hand the requester the done channel of the run that will
			// service this request.
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}

// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
		// After London, recompute the base fee the next pending block will
		// carry and re-anchor the price index on it.
		if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
			pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
			pool.priced.SetBaseFee(pendingBaseFee)
		}
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	// Update all accounts to the latest known pending nonce
	for addr, list := range pool.pending {
		highestPending := list.LastElement()
		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
	}
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}

// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state s.th. the lost transactions can be readded by the user
			} else {
				// Walk both chains back to the common ancestor: transactions on
				// the old branch are discarded, those on the new one included.
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Only reinject transactions dropped from the old branch that
				// are not also present on the new one.
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicator by next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
	pool.eip2718 = pool.chainconfig.IsBerlin(next)
	pool.eip1559 = pool.chainconfig.IsLondon(next)
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all for accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders:
			// the newest (smallest) offender's pending count.
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					// Cap removes exactly one transaction here (the highest nonce).
					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain.
	// Accounts are consumed from the tail of the ascending sort, i.e. the most
	// recently active remote accounts are evicted first.
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}

// demoteUnexecutables removes invalid and processed transactions from the pools
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
//
// Note: transactions are not marked as removed in the priced list because re-heaping
// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
// to trigger a re-heap in this function
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pendingNofundsMeter.Mark(int64(len(drops)))

		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			pendingGauge.Dec(int64(len(gapped)))
			// This might happen in a reorg, so log it to the metering
			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}

// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time
}

// addressesByHeartbeat implements sort.Interface, ordering accounts by
// ascending heartbeat (least recently active first).
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address // lazily built, invalidated on mutation
}

// newAccountSet creates a new address set with an associated signer for sender
// derivations.
func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
	as := &accountSet{
		accounts: make(map[common.Address]struct{}),
		signer:   signer,
	}
	for _, addr := range addrs {
		as.add(addr)
	}
	return as
}

// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
	_, exist := as.accounts[addr]
	return exist
}

// empty reports whether the set contains no addresses.
func (as *accountSet) empty() bool {
	return len(as.accounts) == 0
}

// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
1689 func (as *accountSet) containsTx(tx *types.Transaction) bool { 1690 if addr, err := types.Sender(as.signer, tx); err == nil { 1691 return as.contains(addr) 1692 } 1693 return false 1694 } 1695 1696 // add inserts a new address into the set to track. 1697 func (as *accountSet) add(addr common.Address) { 1698 as.accounts[addr] = struct{}{} 1699 as.cache = nil 1700 } 1701 1702 // addTx adds the sender of tx into the set. 1703 func (as *accountSet) addTx(tx *types.Transaction) { 1704 if addr, err := types.Sender(as.signer, tx); err == nil { 1705 as.add(addr) 1706 } 1707 } 1708 1709 // flatten returns the list of addresses within this set, also caching it for later 1710 // reuse. The returned slice should not be changed! 1711 func (as *accountSet) flatten() []common.Address { 1712 if as.cache == nil { 1713 accounts := make([]common.Address, 0, len(as.accounts)) 1714 for account := range as.accounts { 1715 accounts = append(accounts, account) 1716 } 1717 as.cache = &accounts 1718 } 1719 return *as.cache 1720 } 1721 1722 // merge adds all addresses from the 'other' set into 'as'. 1723 func (as *accountSet) merge(other *accountSet) { 1724 for addr := range other.accounts { 1725 as.accounts[addr] = struct{}{} 1726 } 1727 as.cache = nil 1728 } 1729 1730 // txLookup is used internally by TxPool to track transactions while allowing 1731 // lookup without mutex contention. 1732 // 1733 // Note, although this type is properly protected against concurrent access, it 1734 // is **not** a type that should ever be mutated or even exposed outside of the 1735 // transaction pool, since its internal state is tightly coupled with the pools 1736 // internal mechanisms. The sole purpose of the type is to permit out-of-bound 1737 // peeking into the pool in TxPool.Get without having to acquire the widely scoped 1738 // TxPool.mu mutex. 1739 // 1740 // This lookup set combines the notion of "local transactions", which is useful 1741 // to build upper-level structure. 
1742 type txLookup struct { 1743 slots int 1744 lock sync.RWMutex 1745 locals map[common.Hash]*types.Transaction 1746 remotes map[common.Hash]*types.Transaction 1747 } 1748 1749 // newTxLookup returns a new txLookup structure. 1750 func newTxLookup() *txLookup { 1751 return &txLookup{ 1752 locals: make(map[common.Hash]*types.Transaction), 1753 remotes: make(map[common.Hash]*types.Transaction), 1754 } 1755 } 1756 1757 // Range calls f on each key and value present in the map. The callback passed 1758 // should return the indicator whether the iteration needs to be continued. 1759 // Callers need to specify which set (or both) to be iterated. 1760 func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { 1761 t.lock.RLock() 1762 defer t.lock.RUnlock() 1763 1764 if local { 1765 for key, value := range t.locals { 1766 if !f(key, value, true) { 1767 return 1768 } 1769 } 1770 } 1771 if remote { 1772 for key, value := range t.remotes { 1773 if !f(key, value, false) { 1774 return 1775 } 1776 } 1777 } 1778 } 1779 1780 // Get returns a transaction if it exists in the lookup, or nil if not found. 1781 func (t *txLookup) Get(hash common.Hash) *types.Transaction { 1782 t.lock.RLock() 1783 defer t.lock.RUnlock() 1784 1785 if tx := t.locals[hash]; tx != nil { 1786 return tx 1787 } 1788 return t.remotes[hash] 1789 } 1790 1791 // GetLocal returns a transaction if it exists in the lookup, or nil if not found. 1792 func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { 1793 t.lock.RLock() 1794 defer t.lock.RUnlock() 1795 1796 return t.locals[hash] 1797 } 1798 1799 // GetRemote returns a transaction if it exists in the lookup, or nil if not found. 1800 func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { 1801 t.lock.RLock() 1802 defer t.lock.RUnlock() 1803 1804 return t.remotes[hash] 1805 } 1806 1807 // Count returns the current number of transactions in the lookup. 
1808 func (t *txLookup) Count() int { 1809 t.lock.RLock() 1810 defer t.lock.RUnlock() 1811 1812 return len(t.locals) + len(t.remotes) 1813 } 1814 1815 // LocalCount returns the current number of local transactions in the lookup. 1816 func (t *txLookup) LocalCount() int { 1817 t.lock.RLock() 1818 defer t.lock.RUnlock() 1819 1820 return len(t.locals) 1821 } 1822 1823 // RemoteCount returns the current number of remote transactions in the lookup. 1824 func (t *txLookup) RemoteCount() int { 1825 t.lock.RLock() 1826 defer t.lock.RUnlock() 1827 1828 return len(t.remotes) 1829 } 1830 1831 // Slots returns the current number of slots used in the lookup. 1832 func (t *txLookup) Slots() int { 1833 t.lock.RLock() 1834 defer t.lock.RUnlock() 1835 1836 return t.slots 1837 } 1838 1839 // Add adds a transaction to the lookup. 1840 func (t *txLookup) Add(tx *types.Transaction, local bool) { 1841 t.lock.Lock() 1842 defer t.lock.Unlock() 1843 1844 t.slots += numSlots(tx) 1845 slotsGauge.Update(int64(t.slots)) 1846 1847 if local { 1848 t.locals[tx.Hash()] = tx 1849 } else { 1850 t.remotes[tx.Hash()] = tx 1851 } 1852 } 1853 1854 // Remove removes a transaction from the lookup. 1855 func (t *txLookup) Remove(hash common.Hash) { 1856 t.lock.Lock() 1857 defer t.lock.Unlock() 1858 1859 tx, ok := t.locals[hash] 1860 if !ok { 1861 tx, ok = t.remotes[hash] 1862 } 1863 if !ok { 1864 log.Error("No transaction found to be deleted", "hash", hash) 1865 return 1866 } 1867 t.slots -= numSlots(tx) 1868 slotsGauge.Update(int64(t.slots)) 1869 1870 delete(t.locals, hash) 1871 delete(t.remotes, hash) 1872 } 1873 1874 // RemoteToLocals migrates the transactions belongs to the given locals to locals 1875 // set. The assumption is held the locals set is thread-safe to be used. 
1876 func (t *txLookup) RemoteToLocals(locals *accountSet) int { 1877 t.lock.Lock() 1878 defer t.lock.Unlock() 1879 1880 var migrated int 1881 for hash, tx := range t.remotes { 1882 if locals.containsTx(tx) { 1883 t.locals[hash] = tx 1884 delete(t.remotes, hash) 1885 migrated += 1 1886 } 1887 } 1888 return migrated 1889 } 1890 1891 // RemotesBelowTip finds all remote transactions below the given tip threshold. 1892 func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { 1893 found := make(types.Transactions, 0, 128) 1894 t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { 1895 if tx.GasTipCapIntCmp(threshold) < 0 { 1896 found = append(found, tx) 1897 } 1898 return true 1899 }, false, true) // Only iterate remotes 1900 return found 1901 } 1902 1903 // numSlots calculates the number of slots needed for a single transaction. 1904 func numSlots(tx *types.Transaction) int { 1905 return int((tx.Size() + txSlotSize - 1) / txSlotSize) 1906 }