github.com/amazechain/amc@v0.1.3/internal/txspool/txs_pool.go

// Copyright 2022 The AmazeChain Authors
// This file is part of the AmazeChain library.
//
// The AmazeChain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The AmazeChain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the AmazeChain library. If not, see <http://www.gnu.org/licenses/>.

package txspool

import (
    "context"
    "fmt"
    "github.com/amazechain/amc/contracts/deposit"
    "github.com/amazechain/amc/internal"
    "github.com/amazechain/amc/internal/consensus/misc"
    "github.com/amazechain/amc/internal/metrics/prometheus"
    "github.com/amazechain/amc/params"
    "github.com/holiman/uint256"
    "github.com/ledgerwatch/erigon-lib/kv"
    "math"
    "math/big"
    "sort"
    "sync"
    "time"
    "unsafe"

    "github.com/amazechain/amc/common"
    "github.com/amazechain/amc/common/block"
    "github.com/amazechain/amc/common/prque"
    "github.com/amazechain/amc/common/transaction"
    "github.com/amazechain/amc/common/types"
    "github.com/amazechain/amc/log"
    event "github.com/amazechain/amc/modules/event/v2"
)

const (

    // txSlotSize is used to calculate how many data slots a single transaction
    // takes up based on its size. The slots are used as DoS protection, ensuring
    // that validating a new transaction remains a constant operation (in reality
    // O(maxslots), where max slots are 4 currently).
    txSlotSize = 32 * 1024
    // txMaxSize is the maximum size a single transaction can have. This field has
    // non-trivial consequences: larger transactions are significantly harder and
    // more expensive to propagate; larger transactions also take more resources
    // to validate whether they fit into the pool or not.
    txMaxSize = 4 * txSlotSize // 128KB
)
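
// Illustrative sketch (not part of the original file): numSlots, defined elsewhere
// in this package, is assumed to round a transaction's encoded size up to whole
// txSlotSize units, so a transaction can never occupy more than
// txMaxSize/txSlotSize = 4 slots:
//
//    func exampleNumSlots(encodedSize uint64) uint64 { // hypothetical helper, for illustration only
//        return (encodedSize + txSlotSize - 1) / txSlotSize // 1B..32KiB -> 1 slot, 128KiB -> 4 slots
//    }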

var (
    ErrAlreadyKnown       = fmt.Errorf("already known")
    ErrInvalidSender      = fmt.Errorf("invalid sender")
    ErrOversizedData      = fmt.Errorf("oversized data")
    ErrNegativeValue      = fmt.Errorf("negative value")
    ErrGasLimit           = fmt.Errorf("exceeds block gas limit")
    ErrUnderpriced        = fmt.Errorf("transaction underpriced")
    ErrTxPoolOverflow     = fmt.Errorf("txpool is full")
    ErrReplaceUnderpriced = fmt.Errorf("replacement transaction underpriced")

    ErrFeeCapVeryHigh = fmt.Errorf("max fee per gas higher than 2^256-1")

    ErrNonceTooLow  = fmt.Errorf("nonce too low")
    ErrNonceTooHigh = fmt.Errorf("nonce too high")

    ErrInsufficientFunds = fmt.Errorf("insufficient funds for gas * price + value")

    // ErrTipAboveFeeCap is a sanity error to ensure no one is able to specify a
    // transaction with a tip higher than the total fee cap.
    ErrTipAboveFeeCap = fmt.Errorf("max priority fee per gas higher than max fee per gas")

    pendingGauge = prometheus.GetOrCreateCounter("txpool_pending", true)
    queuedGauge  = prometheus.GetOrCreateCounter("txpool_queued", true)
    localGauge   = prometheus.GetOrCreateCounter("txpool_local", true)
)

type txspoolResetRequest struct {
    oldBlock, newBlock block.IBlock
}

type TxsPoolConfig struct {
    Locals   []types.Address
    NoLocals bool

    PriceLimit uint64
    PriceBump  uint64

    AccountSlots uint64
    GlobalSlots  uint64
    AccountQueue uint64
    GlobalQueue  uint64

    Lifetime time.Duration
}

// DefaultTxPoolConfig is the default transaction pool configuration.
var DefaultTxPoolConfig = TxsPoolConfig{

    PriceLimit: 1,
    PriceBump:  10,

    AccountSlots: 16,
    GlobalSlots:  4096 + 1024,
    AccountQueue: 64,
    GlobalQueue:  1024,

    Lifetime: 3 * time.Hour,
}

type TxsPool struct {
    config      TxsPoolConfig
    chainconfig *params.ChainConfig

    bc common.IBlockChain

    //currentState *state.IntraBlockState
    currentState  ReadState
    pendingNonces *txNoncer
    currentMaxGas uint64

    ctx    context.Context
    cancel context.CancelFunc
    wg     sync.WaitGroup
    mu     sync.RWMutex // lock

    istanbul bool // Fork indicator whether we are in the istanbul stage.
    eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.
    eip1559  bool // Fork indicator whether we are using EIP-1559 type transactions.
    shanghai bool // Fork indicator whether we are in the Shanghai stage.

    locals   *accountSet
    pending  map[types.Address]*txsList
    queue    map[types.Address]*txsList
    beats    map[types.Address]time.Time
    all      *txLookup
    priced   *txPricedList
    gasPrice *uint256.Int

    // channel
    reqResetCh      chan *txspoolResetRequest
    reqPromoteCh    chan *accountSet
    queueTxEventCh  chan *transaction.Transaction
    reorgDoneCh     chan chan struct{}
    reorgShutdownCh chan struct{}

    changesSinceReorg int

    isRun uint32

    deposit *deposit.Deposit
}

func NewTxsPool(ctx context.Context, bc common.IBlockChain, depositContract *deposit.Deposit) (common.ITxsPool, error) {

    c, cancel := context.WithCancel(ctx)
    // for test
    //log.Init(nil)
    pool := &TxsPool{
        chainconfig: bc.Config(),
        config:      DefaultTxPoolConfig,
        ctx:         c,
        cancel:      cancel,

        bc:      bc,
        deposit: depositContract,
        // todo
        //currentMaxGas: bc.CurrentBlock().GasLimit(),
        //
        locals: newAccountSet(),

        pending: make(map[types.Address]*txsList),
        queue:   make(map[types.Address]*txsList),
        beats:   make(map[types.Address]time.Time),
        all:     newTxLookup(),

        //chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize),
        reqResetCh:      make(chan *txspoolResetRequest),
        reqPromoteCh:    make(chan *accountSet),
        queueTxEventCh:  make(chan *transaction.Transaction),
        reorgDoneCh:     make(chan chan struct{}),
        reorgShutdownCh: make(chan struct{}),
        gasPrice:        uint256.NewInt(DefaultTxPoolConfig.PriceLimit),
    }

    //
    pool.currentState = StateClient(ctx, bc.DB())
    pool.pendingNonces = newTxNoncer(pool.currentState)

    pool.priced = newTxPricedList(pool.all)
    pool.reset(nil, bc.CurrentBlock())

    pool.wg.Add(1)
    go pool.scheduleLoop()

    pool.wg.Add(1)
    go pool.blockChangeLoop()

    //todo for test
    //pool.wg.Add(1)
    //go pool.ethFetchTxPoolLoop()

    //pool.wg.Add(1)
    //go pool.ethImportTxPoolLoop()

    //pool.wg.Add(1)
    //go pool.ethTxPoolCheckLoop()

    return pool, nil
}

// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxsPool) promoteTx(addr types.Address, hash types.Hash, tx *transaction.Transaction) bool {
    // Try to insert the transaction into the pending queue
    if pool.pending[addr] == nil {
        pool.pending[addr] = newTxsList(true)
    }
    list := pool.pending[addr]

    inserted, old := list.Add(tx, pool.config.PriceBump)
    if !inserted {
        // An older transaction was better, discard this
        pool.all.Remove(hash)
        pool.priced.Removed(1)
        return false
    }
    // Otherwise discard any previous transaction and mark this
    if old != nil {
        hash := old.Hash()
        pool.all.Remove(hash)
        pool.priced.Removed(1)
    } else {
        // Nothing was replaced, bump the pending counter
        pendingGauge.Inc()
    }
    // Set the potentially new pending nonce and notify any subsystems of the new tx
    pool.pendingNonces.set(addr, tx.Nonce()+1)

    // Successful promotion, bump the heartbeat
    pool.beats[addr] = time.Now()
    return true
}

// AddLocals enqueues a batch of transactions that originate from this node.
func (pool *TxsPool) AddLocals(txs []*transaction.Transaction) []error {
    return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single transaction that originates from this node.
func (pool *TxsPool) AddLocal(tx *transaction.Transaction) error {
    errs := pool.AddLocals([]*transaction.Transaction{tx})
    return errs[0]
}

// AddRemotes enqueues a batch of transactions received from the network.
func (pool *TxsPool) AddRemotes(txs []*transaction.Transaction) []error {
    return pool.addTxs(txs, false, false)
}

// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxsPool) addTxs(txs []*transaction.Transaction, local, sync bool) []error {
    // Filter out known ones without obtaining the pool lock or recovering signatures
    var (
        errs = make([]error, len(txs))
        news = make([]*transaction.Transaction, 0, len(txs))
    )
    for i, tx := range txs {
        // If the transaction is known, pre-set the error slot
        hash := tx.Hash()
        if pool.all.Get(hash) != nil {
            errs[i] = ErrAlreadyKnown
            //knownTxMeter.Mark(1)
            continue
        }
        if !pool.validateSender(tx) {
            errs[i] = ErrInvalidSender
            continue
        }
        // Accumulate all unknown transactions for deeper processing
        news = append(news, tx)
    }
    if len(news) == 0 {
        return errs
    }

    // Process all the new transactions and merge any errors into the original slice
    pool.mu.Lock()
    newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
    pool.mu.Unlock()

    var nilSlot = 0
    for _, err := range newErrs {
        for errs[nilSlot] != nil {
            nilSlot++
        }
        errs[nilSlot] = err
        nilSlot++
    }
    if local {
        var localTxs = make([]*transaction.Transaction, 0, len(txs))
        for i, err := range errs {
            if err == nil {
                localTxs = append(localTxs, txs[i])
            }
        }
        //log.Infof("event new local txs : %v", localTxs)
        event.GlobalEvent.Send(common.NewLocalTxsEvent{Txs: localTxs})
    }
    // Reorg the pool internals if needed and return
    done := pool.requestPromoteExecutables(dirtyAddrs)
    if sync {
        <-done
    }
    return errs
}
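
// Illustrative usage sketch (the tx/remoteTxs variables and error handling are
// hypothetical, not part of this package): transactions created on this node go
// through AddLocal and are exempt from price-based eviction unless NoLocals is
// set, while peer-received transactions go through AddRemotes:
//
//    if err := pool.AddLocal(tx); err != nil {
//        log.Errorf("local tx rejected: %v", err)
//    }
//    for _, err := range pool.AddRemotes(remoteTxs) {
//        if err != nil && err != ErrAlreadyKnown {
//            log.Debugf("remote tx rejected: %v", err)
//        }
//    }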

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxsPool) addTxsLocked(txs []*transaction.Transaction, local bool) ([]error, *accountSet) {
    dirty := newAccountSet()
    errs := make([]error, len(txs))
    for i, tx := range txs {
        replaced, err := pool.add(tx, local)
        errs[i] = err
        if err == nil && !replaced {
            dirty.addTx(tx)
        }
    }
    //validTxMeter.Mark(int64(len(dirty.accounts)))
    return errs, dirty
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxsPool) removeTx(hash types.Hash, outofbound bool) {
    // Fetch the transaction we wish to delete
    tx := pool.all.Get(hash)
    if tx == nil {
        return
    }

    addr := *tx.From() // already verified

    // Remove it from the list of known transactions
    pool.all.Remove(hash)
    if outofbound {
        pool.priced.Removed(1)
    }
    if pool.locals.contains(addr) {
        localGauge.Dec()
    }
    // Remove the transaction from the pending lists and reset the account nonce
    if pending := pool.pending[addr]; pending != nil {
        if removed, invalids := pending.Remove(tx); removed {
            // If no more pending transactions are left, remove the list
            if pending.Empty() {
                delete(pool.pending, addr)
            }
            // Postpone any invalidated transactions
            for _, tx := range invalids {
                // Internal shuffle shouldn't touch the lookup set.
                hash := tx.Hash()
                pool.enqueueTx(hash, tx, false, false)
            }
            // Update the account nonce if needed
            pool.pendingNonces.setIfLower(addr, tx.Nonce())
            // Reduce the pending counter
            pendingGauge.Add(-(1 + len(invalids)))
            return
        }
    }
    // Transaction is in the future queue
    if future := pool.queue[addr]; future != nil {
        if removed, _ := future.Remove(tx); removed {
            // Reduce the queued counter
            queuedGauge.Dec()
        }
        if future.Empty() {
            delete(pool.queue, addr)
            delete(pool.beats, addr)
        }
    }
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
func (pool *TxsPool) add(tx *transaction.Transaction, local bool) (replaced bool, err error) {
    // if exist
    hash := tx.Hash()
    gasPrice := tx.GasPrice()

    if pool.all.Get(hash) != nil {
        log.Debug("Discarding already known transaction", "hash", hash)
        return false, ErrAlreadyKnown
    }

    // Make the local flag. If it's from local source or it's from the network but
    // the sender is marked as local previously, treat it as the local transaction.
    isLocal := local || pool.locals.containsTx(tx)

    // If the transaction fails basic validation, discard it
    if err := pool.validateTx(tx, isLocal); err != nil {
        //log.Debug("Discarding invalid transaction", "hash", hash, "err", err)
        return false, err
    }
    // If the transaction pool is full, discard underpriced transactions
    if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
        // If the new transaction is underpriced, don't accept it
        if !isLocal && pool.priced.Underpriced(tx) {
            log.Debug("Discarding underpriced transaction", "hash", hash, "gasTipCap", gasPrice, "gasFeeCap", gasPrice)
            return false, ErrUnderpriced
        }
        // We're about to replace a transaction. The reorg does a more thorough
        // analysis of what to remove and how, but it runs async. We don't want to
        // do too many replacements between reorg-runs, so we cap the number of
        // replacements to 25% of the slots
        if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
            return false, ErrTxPoolOverflow
        }

        // New transaction is better than our worse ones, make room for it.
        // If it's a local transaction, forcibly discard all available transactions.
        // Otherwise if we can't make enough room for new one, abort the operation.
        drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

        // Special case, we still can't make the room for the new remote one.
        if !isLocal && !success {
            log.Debug("Discarding overflown transaction", "hash", hash)
            return false, ErrTxPoolOverflow
        }
        // Bump the counter of rejections-since-reorg
        pool.changesSinceReorg += len(drop)
        // Kick out the underpriced remote transactions.
        for _, tx := range drop {
            log.Debug("Discarding freshly underpriced transaction", "hash", hash, "gasTipCap", gasPrice, "gasFeeCap", gasPrice)
            hash := tx.Hash()
            pool.removeTx(hash, false)
        }
    }
    // Try to replace an existing transaction in the pending pool
    from := *tx.From()
    if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
        // Nonce already pending, check if required price bump is met
        inserted, old := list.Add(tx, pool.config.PriceBump)
        if !inserted {
            return false, ErrReplaceUnderpriced
        }
        // New transaction is better, replace old one
        if old != nil {
            hash := old.Hash()
            pool.all.Remove(hash)
            pool.priced.Removed(1)
        }
        pool.all.Add(tx, isLocal)
        pool.priced.Put(tx, isLocal)
        pool.queueTxEvent(tx)
        log.Debug("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To)

        // Successful promotion, bump the heartbeat
        pool.beats[from] = time.Now()
        return old != nil, nil
    }
    // New transaction isn't replacing a pending one, push into queue
    replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
    if err != nil {
        return false, err
    }
    // Mark local addresses and journal local transactions
    if local && !pool.locals.contains(from) {
        log.Infof("Setting new local account address %v", from)
        pool.locals.add(from)
        pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
    }
    if isLocal {
        localGauge.Inc()
    }

    //log.Debug("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To)
    return replaced, nil
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxsPool) enqueueTx(hash types.Hash, tx *transaction.Transaction, local bool, addAll bool) (bool, error) {
    // Try to insert the transaction into the future queue
    from := *tx.From() // already validated
    if pool.queue[from] == nil {
        pool.queue[from] = newTxsList(false)
    }
    inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
    if !inserted {
        // An older transaction was better, discard this
        return false, ErrReplaceUnderpriced
    }
    // Discard any previous transaction and mark this
    if old != nil {
        hash := old.Hash()
        pool.all.Remove(hash)
        pool.priced.Removed(1)
    } else {
        queuedGauge.Inc()
    }

    // If the transaction isn't in lookup set but it's expected to be there,
    // show the error log.
    if pool.all.Get(hash) == nil && !addAll {
        log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
    }
    if addAll {
        pool.all.Add(tx, local)
        pool.priced.Put(tx, local)
    }
    // If we never record the heartbeat, do it right now.
    if _, exist := pool.beats[from]; !exist {
        pool.beats[from] = time.Now()
    }
    return old != nil, nil
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxsPool) validateTx(tx *transaction.Transaction, local bool) error {
    // Accept only legacy transactions until EIP-2718/2930 activates.
    // todo
    if !pool.eip2718 && tx.Type() != transaction.LegacyTxType {
        return internal.ErrTxTypeNotSupported
    }
    // Reject dynamic fee transactions until EIP-1559 activates.
    if !pool.eip1559 && tx.Type() == transaction.DynamicFeeTxType {
        return internal.ErrTxTypeNotSupported
    }
    // Reject transactions over defined size to prevent DOS attacks
    //if tx.Size() > txMaxSize {
    //	return ErrOversizedData
    //}

    gasPrice := tx.GasPrice()
    addr := *tx.From()

    if uint64(unsafe.Sizeof(tx)) > txMaxSize {
        return ErrOversizedData
    }
    // Transactions can't be negative. This may never happen using RLP decoded
    // transactions but may occur if you create a transaction using the RPC.
    if tx.Value().Sign() < 0 {
        return ErrNegativeValue
    }

    // Ensure the transaction doesn't exceed the current block limit gas.
    if pool.currentMaxGas < tx.Gas() {
        return ErrGasLimit
    }

    // Sanity check for extremely large numbers
    if gasPrice.BitLen() > 256 {
        return ErrFeeCapVeryHigh
    }
    if tx.GasTipCap().BitLen() > 256 {
        return internal.ErrTipVeryHigh
    }
    // Ensure gasFeeCap is greater than or equal to gasTipCap.
    if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
        return ErrTipAboveFeeCap
    }
    // Make sure the transaction is signed properly.

    // Drop non-local transactions under our own minimal accepted gas price or tip
    if !local && gasPrice.Cmp(pool.gasPrice) < 0 {
        return ErrUnderpriced
    }
    // Ensure the transaction adheres to nonce ordering
    if pool.currentState.GetNonce(addr) > tx.Nonce() {
        return ErrNonceTooLow
    }
    // Transactor should have enough funds to cover the costs
    // cost == V + GP * GL
    if pool.currentState.GetBalance(addr).Cmp(tx.Cost()) < 0 {
        return ErrInsufficientFunds
    }

    // Ensure the transaction has more gas than the basic tx fee.
    intrGas, err := internal.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.shanghai)
    if err != nil {
        return err
    }
    if tx.Gas() < intrGas {
        return internal.ErrIntrinsicGas
    }

    if pool.deposit != nil {
        var depositInfo *deposit.Info
        _ = pool.bc.DB().View(pool.ctx, func(tx kv.Tx) error {
            depositInfo = deposit.GetDepositInfo(tx, addr)
            return nil
        })

        if depositInfo != nil {
            if pool.deposit.IsDepositAction(tx) {
                return internal.ErrAlreadyDeposited
            }
        }
    }
    return nil
}

// validateSender verifies the transaction sender. TODO: sender validation is not
// implemented yet, so every sender is currently accepted.
func (pool *TxsPool) validateSender(tx *transaction.Transaction) bool {

    return true
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxsPool) requestReset(oldBlock block.IBlock, newBlock block.IBlock) <-chan struct{} {
    select {
    case pool.reqResetCh <- &txspoolResetRequest{oldBlock, newBlock}:
        return <-pool.reorgDoneCh
    //case <-pool.reorgShutdownCh:
    //	return pool.reorgShutdownCh
    case <-pool.ctx.Done():
        return pool.ctx.Done()
    }
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxsPool) requestPromoteExecutables(set *accountSet) <-chan struct{} {
    select {
    case pool.reqPromoteCh <- set:
        return <-pool.reorgDoneCh
    case <-pool.ctx.Done():
        return pool.ctx.Done()
    }
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxsPool) queueTxEvent(tx *transaction.Transaction) {
    select {
    case pool.queueTxEventCh <- tx:
    //case <-pool.reorgShutdownCh:
    case <-pool.ctx.Done():
    }
}

func (pool *TxsPool) reset(oldBlock, newBlock block.IBlock) {
    // If we're reorging an old state, reinject all dropped transactions
    var reinject []*transaction.Transaction

    if oldBlock != nil && oldBlock.Header().Hash().String() != newBlock.ParentHash().String() {
        // If the reorg is too deep, avoid doing it (will happen during fast sync)
        oldNum := oldBlock.Number64()
        newNum := newBlock.Number64()

        if depth := uint64(math.Abs(float64(oldNum.Uint64()) - float64(newNum.Uint64()))); depth > 64 {
            log.Debug("Skipping deep transaction reorg", "depth", depth)
        } else {
            // Reorg seems shallow enough to pull in all transactions into memory
            var discarded, included []*transaction.Transaction
            var (
                rem = oldBlock
                add = newBlock
            )
            if rem == nil {
                // This can happen if a setHead is performed, where we simply discard the old
                // head from the chain.
                // If that is the case, we don't have the lost transactions any more, and
                // there's nothing to add
                if newNum.Cmp(oldNum) >= 0 {
                    // If we reorged to a same or higher number, then it's not a case of setHead
                    log.Warn("Transaction pool reset with missing oldhead",
                        "old", oldBlock.Header().Hash(), "oldnum", oldNum, "new", newBlock.Hash(), "newnum", newNum)
                    return
                }
                // If the reorg ended up on a lower number, it's indicative of setHead being the cause
                log.Debug("Skipping transaction reset caused by setHead",
                    "old", oldBlock.Header().Hash(), "oldnum", oldNum, "new", newBlock.Hash(), "newnum", newNum)
                // We still need to update the current state so that the lost transactions can be re-added by the user
            } else {
                for rem.Number64().Cmp(add.Number64()) == 1 {
                    discarded = append(discarded, rem.Body().Transactions()...)
                    if rem, _ = pool.bc.GetBlockByHash(rem.ParentHash()); rem == nil {
                        log.Error("Unrooted old chain seen by tx pool", "block", oldBlock.Number64(), "hash", oldBlock.Hash())
                        return
                    }
                }
                for add.Number64().Cmp(rem.Number64()) == 1 {
                    included = append(included, add.Body().Transactions()...)
                    if add, _ = pool.bc.GetBlockByHash(add.ParentHash()); add == nil {
                        log.Error("Unrooted new chain seen by tx pool", "block", newBlock.Number64(), "hash", newBlock.Hash())
                        return
                    }
                }
                for rem.Hash() != add.Hash() {
                    discarded = append(discarded, rem.Body().Transactions()...)
                    if rem, _ = pool.bc.GetBlockByHash(rem.ParentHash()); rem == nil {
                        log.Error("Unrooted old chain seen by tx pool", "block", oldBlock.Number64(), "hash", oldBlock.Header().Hash())
                        return
                    }
                    included = append(included, add.Body().Transactions()...)
                    if add, _ = pool.bc.GetBlockByHash(add.ParentHash()); add == nil {
                        log.Error("Unrooted new chain seen by tx pool", "block", newBlock.Number64(), "hash", newBlock.Hash())
                        return
                    }
                }
                log.Debugf("start to reset txs pool: discarded %d, included %d, old number: %d, new number: %d",
                    len(discarded), len(included), oldNum.Uint64(), newNum.Uint64())
                reinject = pool.txDifference(discarded, included)
            }
        }
    }
    // Initialize the internal state to the current head
    if newBlock == nil {
        newBlock = pool.bc.CurrentBlock() // Special case during testing
    }

    //if err := pool.ResetState(newBlock.Header().Hash()); nil != err {
    //	log.Errorf("reset current state failed, %v", err)
    //	return
    //}
    // pool.pendingNonces = newTxNoncer(pool.currentState) //newTxNoncer(statedb)
    pool.currentMaxGas = newBlock.GasLimit()

    // Inject any transactions discarded due to reorgs
    if len(reinject) > 0 {
        log.Debugf("Reinjecting stale transactions count %d", len(reinject))
    }

    //senderCacher.recover(pool.signer, reinject)
    pool.addTxsLocked(reinject, false)

    // Update all fork indicators by the next pending block number.
    next := new(big.Int).Add(newBlock.Number64().ToBig(), big.NewInt(1))
    pool.istanbul = pool.chainconfig.IsIstanbul(next.Uint64())
    pool.eip2718 = pool.chainconfig.IsBerlin(next.Uint64())
    pool.eip1559 = pool.chainconfig.IsLondon(next.Uint64())
}
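
// Illustrative note (sketch only, not part of the original reset logic): the
// reinject set computed above is the plain set difference "discarded minus
// included", so a transaction is only re-queued when the new chain does not
// already contain it. With hypothetical transactions a, b and c:
//
//    discarded := []*transaction.Transaction{a, b} // txs on the old branch
//    included := []*transaction.Transaction{b, c}  // txs on the new branch
//    reinject := pool.txDifference(discarded, included) // -> [a]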

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxsPool) promoteExecutables(accounts []types.Address) []*transaction.Transaction {
    // Track the promoted transactions to broadcast them at once
    var promoted []*transaction.Transaction

    // Iterate over all accounts and promote any executable transactions
    for _, addr := range accounts {
        list := pool.queue[addr]
        if list == nil {
            continue // Just in case someone calls with a non existing account
        }
        // Drop all transactions that are deemed too old (low nonce)
        forwards := list.Forward(pool.currentState.GetNonce(addr))
        for _, tx := range forwards {
            hash := tx.Hash()
            pool.all.Remove(hash)
        }
        //log.Debug("Removed old queued transactions", "count", len(forwards))
        // Drop all transactions that are too costly (low balance or out of gas)
        //todo remove GetBalance check
        drops, _ := list.Filter(*pool.currentState.GetBalance(addr), pool.currentMaxGas)
        for _, tx := range drops {
            hash := tx.Hash()
            pool.all.Remove(hash)
        }
        //log.Debug("Removed unpayable queued transactions", "count", len(drops))

        // Gather all executable transactions and promote them
        readies := list.Ready(pool.pendingNonces.get(addr))
        for _, tx := range readies {
            hash := tx.Hash()
            if pool.promoteTx(addr, hash, tx) {
                promoted = append(promoted, tx)
            }
        }
        //log.Debug("Promoted queued transactions", "count", len(promoted))
        queuedGauge.Add(-(len(readies)))

        // Drop all transactions over the allowed limit
        var caps []*transaction.Transaction
        if !pool.locals.contains(addr) {
            caps = list.Cap(int(pool.config.AccountQueue))
            for _, tx := range caps {
                hash := tx.Hash()
                pool.all.Remove(hash)
                //log.Debug("Removed cap-exceeding queued transaction", "hash", hash)
            }
        }
        // Mark all the items dropped as removed
        //todo pool.priced.Removed(len(forwards) + len(drops) + len(caps))
        pool.priced.Removed(len(caps))
        queuedGauge.Add(-(len(forwards) + len(drops) + len(caps)))
        if pool.locals.contains(addr) {
            localGauge.Add(-(len(forwards) + len(drops) + len(caps)))
        }
        // Delete the entire queue entry if it became empty.
        if list.Empty() {
            delete(pool.queue, addr)
            delete(pool.beats, addr)
        }
    }
    return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxsPool) truncatePending() {
    pending := uint64(0)
    for _, list := range pool.pending {
        pending += uint64(list.Len())
    }
    if pending <= pool.config.GlobalSlots {
        return
    }

    // Assemble a spam order to penalize large transactors first
    spammers := prque.New(nil)
    for addr, list := range pool.pending {
        // Only evict transactions from high rollers
        if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
            spammers.Push(addr, int64(list.Len()))
        }
    }
    // Gradually drop transactions from offenders
    offenders := []types.Address{}
    for pending > pool.config.GlobalSlots && !spammers.Empty() {
        // Retrieve the next offender if not local address
        offender, _ := spammers.Pop()
        offenders = append(offenders, offender.(types.Address))

        // Equalize balances until all the same or below threshold
        if len(offenders) > 1 {
            // Calculate the equalization threshold for all current offenders
            threshold := pool.pending[offender.(types.Address)].Len()

            // Iteratively reduce all offenders until below limit or threshold reached
            for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
                for i := 0; i < len(offenders)-1; i++ {
                    list := pool.pending[offenders[i]]

                    caps := list.Cap(list.Len() - 1)
                    for _, tx := range caps {
                        // Drop the transaction from the global pools too
                        hash := tx.Hash()
                        pool.all.Remove(hash)

                        // Update the account nonce to the dropped transaction
                        pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
                        log.Debug("Removed fairness-exceeding pending transaction", "hash", hash)
                    }
                    pool.priced.Removed(len(caps))
                    pendingGauge.Add(-len(caps))
                    if pool.locals.contains(offenders[i]) {
                        localGauge.Add(-(len(caps)))
                    }
                    pending--
                }
            }
        }
    }

    // If still above threshold, reduce to limit or min allowance
    if pending > pool.config.GlobalSlots && len(offenders) > 0 {
        for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
            for _, addr := range offenders {
                list := pool.pending[addr]

                caps := list.Cap(list.Len() - 1)
                for _, tx := range caps {
                    // Drop the transaction from the global pools too
                    hash := tx.Hash()
                    pool.all.Remove(hash)

                    // Update the account nonce to the dropped transaction
                    pool.pendingNonces.setIfLower(addr, tx.Nonce())
                    log.Debug("Removed fairness-exceeding pending transaction", "hash", hash)
                }
                pool.priced.Removed(len(caps))
                pendingGauge.Add(-len(caps))
                if pool.locals.contains(addr) {
                    localGauge.Add(-len(caps))
                }
                pending--
            }
        }
    }
}
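
// Illustrative note (hypothetical counts, sketch only): with three non-local
// offenders holding 6, 5 and 4 pending transactions, the spam-order loop above
// first levels the larger lists down to each newly popped offender's size
// (6,5,4 -> 5,5,4 -> 4,4,4); the final pass then keeps capping every offender by
// one transaction per round, but only while the smallest offender still holds
// more than AccountSlots transactions and the pool remains above GlobalSlots.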

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxsPool) truncateQueue() {
    queued := uint64(0)
    for _, list := range pool.queue {
        queued += uint64(list.Len())
    }
    if queued <= pool.config.GlobalQueue {
        return
    }

    // Sort all accounts with queued transactions by heartbeat
    addresses := make(addressesByHeartbeat, 0, len(pool.queue))
    for addr := range pool.queue {
        if !pool.locals.contains(addr) { // don't drop locals
            addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
        }
    }
    sort.Sort(addresses)

    // Drop transactions until the total is below the limit or only locals remain
    for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
        addr := addresses[len(addresses)-1]
        list := pool.queue[addr.address]

        addresses = addresses[:len(addresses)-1]

        // Drop all transactions if they are less than the overflow
        if size := uint64(list.Len()); size <= drop {
            for _, tx := range list.Flatten() {
                hash := tx.Hash()
                pool.removeTx(hash, true)
            }
            drop -= size
            continue
        }
        // Otherwise drop only last few transactions
        txs := list.Flatten()
        for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
            hash := txs[i].Hash()
            pool.removeTx(hash, true)
            drop--
        }
    }
}

// demoteUnexecutables removes invalid and processed transactions from the pool's
// executable/pending queue, and any subsequent transactions that become unexecutable
// are moved back into the future queue.
//
// Note: transactions are not marked as removed in the priced list because re-heaping
// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
// to trigger a re-heap in this function.
func (pool *TxsPool) demoteUnexecutables() {
    // Iterate over all accounts and demote any non-executable transactions
    for addr, list := range pool.pending {
        nonce := pool.currentState.GetNonce(addr)

        // Drop all transactions that are deemed too old (low nonce)
        olds := list.Forward(nonce)
        for _, tx := range olds {
            hash := tx.Hash()
            pool.all.Remove(hash)
            //log.Debug("Removed old pending transaction", "hash", hash)
        }
        // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
        drops, invalids := list.Filter(*pool.currentState.GetBalance(addr), pool.currentMaxGas)
        for _, tx := range drops {
            hash := tx.Hash()
            //log.Debug("Removed unpayable pending transaction", "hash", hash)
            pool.all.Remove(hash)
        }

        for _, tx := range invalids {
            hash := tx.Hash()
            //log.Debug("Demoting pending transaction", "hash", hash)

            // Internal shuffle shouldn't touch the lookup set.
            pool.enqueueTx(hash, tx, false, false)
        }
        pendingGauge.Add(-(len(olds) + len(drops) + len(invalids)))
        if pool.locals.contains(addr) {
            localGauge.Add(-(len(olds) + len(drops) + len(invalids)))
        }
        // If there's a gap in front, alert (should never happen) and postpone all transactions
        if list.Len() > 0 && list.txs.Get(nonce) == nil {
            gapped := list.Cap(0)
            for _, tx := range gapped {
                hash := tx.Hash()
                log.Error("Demoting invalidated transaction", "hash", hash)

                // Internal shuffle shouldn't touch the lookup set.
                pool.enqueueTx(hash, tx, false, false)
            }
            // This might happen in a reorg, so log it to the metering
            pendingGauge.Add(-(len(gapped)))
        }
        // Delete the entire pending entry if it became empty.
        if list.Empty() {
            delete(pool.pending, addr)
        }
    }
}

// runReorg runs reset and promoteExecutables on behalf of scheduleLoop.
func (pool *TxsPool) runReorg(done chan struct{}, reset *txspoolResetRequest, dirtyAccounts *accountSet, events map[types.Address]*txsSortedMap) {
    defer close(done)

    //go_logger.Logger.Sugar().Infof("start reset txs pool %v", reset)
    var promoteAddrs []types.Address
    if dirtyAccounts != nil && reset == nil {
        // Only dirty accounts need to be promoted, unless we're resetting.
        // For resets, all addresses in the tx queue will be promoted and
        // the flatten operation can be avoided.
        promoteAddrs = dirtyAccounts.flatten()
    }
    pool.mu.Lock()
    if reset != nil {
        // Reset from the old head to the new, rescheduling any reorged transactions
        pool.reset(reset.oldBlock, reset.newBlock)

        // Nonces were reset, discard any events that became stale
        for addr := range events {
            events[addr].Forward(pool.pendingNonces.get(addr))
            if events[addr].Len() == 0 {
                delete(events, addr)
            }
        }
        // Reset needs promote for all addresses
        promoteAddrs = make([]types.Address, 0, len(pool.queue))
        for addr := range pool.queue {
            promoteAddrs = append(promoteAddrs, addr)
        }
    }
    // Check for pending transactions for every account that sent new ones
    promoted := pool.promoteExecutables(promoteAddrs)

    // If a new block appeared, validate the pool of pending transactions. This will
    // remove any transaction that has been included in the block or was invalidated
    // because of another transaction (e.g. higher gas price).
    if reset != nil {
        pool.demoteUnexecutables()

        if reset.newBlock != nil && pool.chainconfig.IsLondon(reset.newBlock.Number64().Uint64()+1) {
            pendingBaseFee, _ := uint256.FromBig(misc.CalcBaseFee(pool.chainconfig, reset.newBlock.Header().(*block.Header)))
            pool.priced.SetBaseFee(pendingBaseFee)
        }
        // Update all accounts to the latest known pending nonce
        nonces := make(map[types.Address]uint64, len(pool.pending))
        for addr, list := range pool.pending {
            highestPending := list.LastElement()
            nonces[addr] = highestPending.Nonce() + 1
        }
        pool.pendingNonces.setAll(nonces)
    }
    // Ensure pool.queue and pool.pending sizes stay within the configured limits.
    pool.truncatePending()
    pool.truncateQueue()

    pool.changesSinceReorg = 0 // Reset change counter
    pool.mu.Unlock()

    // Notify subsystems for newly added transactions
    for _, tx := range promoted {
        addr := *tx.From()
        if _, ok := events[addr]; !ok {
            events[addr] = newTxSortedMap()
        }
        events[addr].Put(tx)
    }
    // todo
    if len(events) > 0 {
        var txs []*transaction.Transaction
        for _, set := range events {
            txs = append(txs, set.Flatten()...)
        }
        event.GlobalEvent.Send(common.NewTxsEvent{Txs: txs})
    }
}

// scheduleLoop schedules pool reorg runs, batching concurrent reset and promotion
// requests into a single background runReorg call.
func (pool *TxsPool) scheduleLoop() {
    defer pool.wg.Done()

    var (
        curDone       chan struct{} // non-nil while runReorg is active
        nextDone      = make(chan struct{})
        launchNextRun bool
        reset         *txspoolResetRequest
        dirtyAccounts *accountSet
        queuedEvents  = make(map[types.Address]*txsSortedMap)
    )

    for {
        // Launch next background reorg if needed
        if curDone == nil && launchNextRun {
            // Run the background reorg and announcements
            go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

            curDone, nextDone = nextDone, make(chan struct{})
            launchNextRun = false

            reset, dirtyAccounts = nil, nil
            queuedEvents = make(map[types.Address]*txsSortedMap)
        }

        select {
        case req := <-pool.reqResetCh:
            // Reset request: update head if request is already pending.
            if reset == nil {
                reset = req
            } else {
                reset.newBlock = req.newBlock
            }
            launchNextRun = true
            pool.reorgDoneCh <- nextDone

        case req := <-pool.reqPromoteCh:
            // Promote request: update address set if request is already pending.
            if dirtyAccounts == nil {
                dirtyAccounts = req
            } else {
                dirtyAccounts.merge(req)
            }
            launchNextRun = true
            pool.reorgDoneCh <- nextDone

        case tx := <-pool.queueTxEventCh:
            // Queue up the event, but don't schedule a reorg. It's up to the caller to
            // request one later if they want the events sent.
            addr := *tx.From()
            if _, ok := queuedEvents[addr]; !ok {
                queuedEvents[addr] = newTxSortedMap()
            }
            queuedEvents[addr].Put(tx)

        case <-curDone:
            curDone = nil

        //case <-pool.reorgShutdownCh:
        //	// Wait for current run to finish.
        //	if curDone != nil {
        //		<-curDone
        //	}
        //	close(nextDone)
        //	return
        case <-pool.ctx.Done():
            // Wait for current run to finish.
            if curDone != nil {
                <-curDone
            }
            close(nextDone)
            return
        }

    }
}

// blockChangeLoop watches for newly inserted chain head blocks and requests a
// pool reset for each one.
func (pool *TxsPool) blockChangeLoop() {
    defer pool.wg.Done()

    highestBlockCh := make(chan common.ChainHighestBlock)
    defer close(highestBlockCh)
    highestSub := event.GlobalEvent.Subscribe(highestBlockCh)
    defer highestSub.Unsubscribe()

    oldBlock := pool.bc.CurrentBlock()

    for {
        select {
        case <-highestSub.Err():
            return
        case <-pool.ctx.Done():
            return
        case highestBlock, ok := <-highestBlockCh:
            if ok && highestBlock.Inserted {
                pool.requestReset(oldBlock, pool.bc.CurrentBlock())
                oldBlock = pool.bc.CurrentBlock()
            }
        }
    }
}
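
// Illustrative lifecycle sketch (ctx, bc and depositContract come from the caller;
// error handling is elided and it is assumed that common.ITxsPool exposes Stop):
//
//    p, err := NewTxsPool(ctx, bc, depositContract)
//    if err != nil {
//        return err
//    }
//    defer p.Stop() // cancels the pool context and waits for scheduleLoop and blockChangeLoop to exit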

// Stop terminates the transaction pool.
func (pool *TxsPool) Stop() error {
    pool.cancel()
    pool.wg.Wait()
    log.Info("Transaction pool stopped")
    return nil
}

// txDifference returns the transactions contained in a but not in b.
func (pool *TxsPool) txDifference(a, b []*transaction.Transaction) []*transaction.Transaction {
    keep := make([]*transaction.Transaction, 0, len(a))

    remove := make(map[types.Hash]struct{})
    for _, tx := range b {
        hash := tx.Hash()
        remove[hash] = struct{}{}
    }

    for _, tx := range a {
        hash := tx.Hash()
        if _, ok := remove[hash]; !ok {
            keep = append(keep, tx)
        }
    }

    return keep
}

// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
//
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
func (pool *TxsPool) Pending(enforceTips bool) map[types.Address][]*transaction.Transaction {
    pool.mu.Lock()
    defer pool.mu.Unlock()

    pending := make(map[types.Address][]*transaction.Transaction)
    for addr, list := range pool.pending {
        txs := list.Flatten()

        // If the miner requests tip enforcement, cap the lists now
        if enforceTips && !pool.locals.contains(addr) {
            for i, tx := range txs {
                if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
                    txs = txs[:i]
                    break
                }
            }
        }
        if len(txs) > 0 {
            pending[addr] = txs
        }
    }
    return pending
}
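
// Illustrative usage sketch (a hypothetical block producer): asking for pending
// transactions with enforceTips=true filters out non-local transactions whose
// effective tip, under the pool's current base fee, falls below pool.gasPrice:
//
//    pending := pool.Pending(true)
//    for addr, txs := range pending {
//        log.Debugf("account %v has %d executable txs, first nonce %d", addr, len(txs), txs[0].Nonce())
//    }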

// Has reports whether the pool contains a transaction with the given hash.
func (pool *TxsPool) Has(hash types.Hash) bool {
    return pool.all.Get(hash) != nil
}

// GetTransaction returns all currently pending transactions.
func (pool *TxsPool) GetTransaction() (txs []*transaction.Transaction, err error) {
    //
    pending := pool.Pending(false)
    heads := make([]*transaction.Transaction, 0, len(pending))
    for _, accTxs := range pending {
        //heads = append(heads, accTxs[0])
        heads = append(heads, accTxs...)
    }
    return heads, nil
}

// GetTx returns the transaction with the given hash, or nil if it is not in the pool.
func (pool *TxsPool) GetTx(hash types.Hash) *transaction.Transaction {
    return pool.all.Get(hash)
}

// Content returns the pending and queued transactions, grouped by account.
func (pool *TxsPool) Content() (map[types.Address][]*transaction.Transaction, map[types.Address][]*transaction.Transaction) {
    pool.mu.Lock()
    defer pool.mu.Unlock()

    pending := make(map[types.Address][]*transaction.Transaction)
    for addr, list := range pool.pending {
        pending[addr] = list.Flatten()
    }
    queued := make(map[types.Address][]*transaction.Transaction)
    for addr, list := range pool.queue {
        queued[addr] = list.Flatten()
    }
    return pending, queued
}

func (pool *TxsPool) Nonce(addr types.Address) uint64 {
    pool.mu.RLock()
    defer pool.mu.RUnlock()

    return pool.pendingNonces.get(addr)
}

// StatsPrint logs the current pending and queued pool statistics.
func (pool *TxsPool) StatsPrint() {
    pool.mu.RLock()
    defer pool.mu.RUnlock()

    pendingAddresses, pendingTxs, queuedAddresses, queuedTxs := pool.Stats()

    log.Debugf("txs pool: pendingAddresses count: %d pendingTxs count: %d", pendingAddresses, pendingTxs)
    log.Debugf("txs pool: queuedAddresses count: %d queuedTxs count: %d", queuedAddresses, queuedTxs)
}

func (pool *TxsPool) Stats() (int, int, int, int) {
    pendingTxs := 0
    pendingAddresses := len(pool.pending)
    for _, list := range pool.pending {
        pendingTxs += list.Len()
    }
    queuedTxs := 0
    queuedAddresses := len(pool.queue)
    for _, list := range pool.queue {
        queuedTxs += list.Len()
    }
    return pendingAddresses, pendingTxs, queuedAddresses, queuedTxs
}

func (pool *TxsPool) ResetState(blockHash types.Hash) error {
    //if pool.currentState != nil {
    //	reader := pool.currentState.GetStateReader()
    //	if reader != nil {
    //		if hreader, ok := reader.(*state.HistoryStateReader); ok {
    //			hreader.Rollback()
    //		}
    //	}
    //}
    //
    //tx, err := pool.bc.DB().BeginRo(pool.ctx)
    //if nil != err {
    //	return err
    //}
    //blockNr := rawdb.ReadHeaderNumber(tx, blockHash)
    //if nil == blockNr {
    //	return fmt.Errorf("invalid block hash")
    //}
    //stateReader := state.NewStateHistoryReader(tx, tx, *blockNr+1)
    //pool.currentState = state.New(stateReader)
    return nil
}