github.com/core-coin/go-core/v2@v2.1.9/core/tx_pool.go (about)

     1  // Copyright 2014 by the Authors
     2  // This file is part of the go-core library.
     3  //
     4  // The go-core library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-core library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package core
    18  
    19  import (
    20  	"errors"
    21  	"math"
    22  	"math/big"
    23  	"sort"
    24  	"sync"
    25  	"time"
    26  
    27  	"github.com/core-coin/go-core/v2/common"
    28  	"github.com/core-coin/go-core/v2/common/prque"
    29  	"github.com/core-coin/go-core/v2/core/state"
    30  	"github.com/core-coin/go-core/v2/core/types"
    31  	"github.com/core-coin/go-core/v2/event"
    32  	"github.com/core-coin/go-core/v2/log"
    33  	"github.com/core-coin/go-core/v2/metrics"
    34  	"github.com/core-coin/go-core/v2/params"
    35  )
    36  
    37  const (
    38  	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    39  	chainHeadChanSize = 10
    40  
    41  	// txSlotSize is used to calculate how many data slots a single transaction
    42  	// takes up based on its size. The slots are used as DoS protection, ensuring
    43  	// that validating a new transaction remains a constant operation (in reality
    44  	// O(maxslots), where max slots are 4 currently).
    45  	txSlotSize = 32 * 1024
    46  
    47  	// txMaxSize is the maximum size a single transaction can have. This field has
    48  	// non-trivial consequences: larger transactions are significantly harder and
    49  	// more expensive to propagate; larger transactions also take more resources
    50  	// to validate whether they fit into the pool or not.
    51  	txMaxSize = 4 * txSlotSize // 128KB
    52  )
    53  
    54  var (
    55  	// ErrAlreadyKnown is returned if the transactions is already contained
    56  	// within the pool.
    57  	ErrAlreadyKnown = errors.New("already known")
    58  
    59  	// ErrInvalidRecipientOrSig is returned if the transaction contains an invalid signature.
    60  	ErrInvalidRecipientOrSig = errors.New("invalid signature or recipient")
    61  
    62  	// ErrUnderpriced is returned if a transaction's energy price is below the minimum
    63  	// configured for the transaction pool.
    64  	ErrUnderpriced = errors.New("transaction underpriced")
    65  
    66  	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
    67  	// with a different one without the required price bump.
    68  	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
    69  
    70  	// ErrEnergyLimit is returned if a transaction's requested energy limit exceeds the
    71  	// maximum allowance of the current block.
    72  	ErrEnergyLimit = errors.New("exceeds block energy limit")
    73  
    74  	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
    75  	// transaction with a negative value.
    76  	ErrNegativeValue = errors.New("negative value")
    77  
    78  	// ErrOversizedData is returned if the input data of a transaction is greater
    79  	// than some meaningful limit a user might use. This is not a consensus error
    80  	// making the transaction invalid, rather a DOS protection.
    81  	ErrOversizedData = errors.New("oversized data")
    82  )
    83  
    84  var (
    85  	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
    86  	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
    87  )
    88  
    89  var (
    90  	// Metrics for the pending pool
    91  	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
    92  	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
    93  	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
    94  	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
    95  
    96  	// Metrics for the queued pool
    97  	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
    98  	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
    99  	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
   100  	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
   101  	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
   102  
   103  	// General tx metrics
   104  	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
   105  	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
   106  	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
   107  	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
   108  
   109  	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
   110  	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
   111  	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
   112  	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
   113  )
   114  
   115  // TxStatus is the current status of a transaction as seen by the pool.
   116  type TxStatus uint
   117  
   118  const (
   119  	TxStatusUnknown TxStatus = iota
   120  	TxStatusQueued
   121  	TxStatusPending
   122  	TxStatusIncluded
   123  )
   124  
   125  // blockChain provides the state of blockchain and current energy limit to do
   126  // some pre checks in tx pool and event subscribers.
   127  type blockChain interface {
   128  	CurrentBlock() *types.Block
   129  	GetBlock(hash common.Hash, number uint64) *types.Block
   130  	StateAt(root common.Hash) (*state.StateDB, error)
   131  
   132  	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
   133  }
   134  
   135  // TxPoolConfig are the configuration parameters of the transaction pool.
   136  type TxPoolConfig struct {
   137  	Locals    []common.Address // Addresses that should be treated by default as local
   138  	NoLocals  bool             // Whether local transaction handling should be disabled
   139  	Journal   string           // Journal of local transactions to survive node restarts
   140  	Rejournal time.Duration    // Time interval to regenerate the local transaction journal
   141  
   142  	PriceLimit uint64 // Minimum energy price to enforce for acceptance into the pool
   143  	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
   144  
   145  	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
   146  	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
   147  	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
   148  	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
   149  
   150  	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
   151  }
   152  
   153  // DefaultTxPoolConfig contains the default configurations for the transaction
   154  // pool.
   155  var DefaultTxPoolConfig = TxPoolConfig{
   156  	Journal:   "transactions.rlp",
   157  	Rejournal: time.Hour,
   158  
   159  	PriceLimit: 1,
   160  	PriceBump:  10,
   161  
   162  	AccountSlots: 16,
   163  	GlobalSlots:  4096,
   164  	AccountQueue: 64,
   165  	GlobalQueue:  1024,
   166  
   167  	Lifetime: 3 * time.Hour,
   168  }
   169  
   170  // sanitize checks the provided user configurations and changes anything that's
   171  // unreasonable or unworkable.
   172  func (config *TxPoolConfig) sanitize() TxPoolConfig {
   173  	conf := *config
   174  	if conf.Rejournal < time.Second {
   175  		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
   176  		conf.Rejournal = time.Second
   177  	}
   178  	if conf.PriceLimit < 1 {
   179  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
   180  		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
   181  	}
   182  	if conf.PriceBump < 1 {
   183  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
   184  		conf.PriceBump = DefaultTxPoolConfig.PriceBump
   185  	}
   186  	if conf.AccountSlots < 1 {
   187  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
   188  		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
   189  	}
   190  	if conf.GlobalSlots < 1 {
   191  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
   192  		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
   193  	}
   194  	if conf.AccountQueue < 1 {
   195  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
   196  		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
   197  	}
   198  	if conf.GlobalQueue < 1 {
   199  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
   200  		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
   201  	}
   202  	if conf.Lifetime < 1 {
   203  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
   204  		conf.Lifetime = DefaultTxPoolConfig.Lifetime
   205  	}
   206  	return conf
   207  }
   208  
   209  // TxPool contains all currently known transactions. Transactions
   210  // enter the pool when they are received from the network or submitted
   211  // locally. They exit the pool when they are included in the blockchain.
   212  //
   213  // The pool separates processable transactions (which can be applied to the
   214  // current state) and future transactions. Transactions move between those
   215  // two states over time as they are received and processed.
   216  type TxPool struct {
   217  	config      TxPoolConfig
   218  	chainconfig *params.ChainConfig
   219  	chain       blockChain
   220  	energyPrice *big.Int
   221  	txFeed      event.Feed
   222  	scope       event.SubscriptionScope
   223  	signer      types.Signer
   224  	mu          sync.RWMutex
   225  
   226  	currentState     *state.StateDB // Current state in the blockchain head
   227  	pendingNonces    *txNoncer      // Pending state tracking virtual nonces
   228  	currentMaxEnergy uint64         // Current energy limit for transaction caps
   229  
   230  	locals  *accountSet // Set of local transaction to exempt from eviction rules
   231  	journal *txJournal  // Journal of local transaction to back up to disk
   232  
   233  	pending map[common.Address]*txList   // All currently processable transactions
   234  	queue   map[common.Address]*txList   // Queued but non-processable transactions
   235  	beats   map[common.Address]time.Time // Last heartbeat from each known account
   236  	all     *txLookup                    // All transactions to allow lookups
   237  	priced  *txPricedList                // All transactions sorted by price
   238  
   239  	chainHeadCh     chan ChainHeadEvent
   240  	chainHeadSub    event.Subscription
   241  	reqResetCh      chan *txpoolResetRequest
   242  	reqPromoteCh    chan *accountSet
   243  	queueTxEventCh  chan *types.Transaction
   244  	reorgDoneCh     chan chan struct{}
   245  	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
   246  	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
   247  }
   248  
   249  type txpoolResetRequest struct {
   250  	oldHead, newHead *types.Header
   251  }
   252  
   253  // NewTxPool creates a new transaction pool to gather, sort and filter inbound
   254  // transactions from the network.
   255  func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
   256  	// Sanitize the input to ensure no vulnerable energy prices are set
   257  	config = (&config).sanitize()
   258  
   259  	// Create the transaction pool with its initial settings
   260  	pool := &TxPool{
   261  		config:          config,
   262  		chainconfig:     chainconfig,
   263  		chain:           chain,
   264  		signer:          types.NewNucleusSigner(chainconfig.NetworkID),
   265  		pending:         make(map[common.Address]*txList),
   266  		queue:           make(map[common.Address]*txList),
   267  		beats:           make(map[common.Address]time.Time),
   268  		all:             newTxLookup(),
   269  		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
   270  		reqResetCh:      make(chan *txpoolResetRequest),
   271  		reqPromoteCh:    make(chan *accountSet),
   272  		queueTxEventCh:  make(chan *types.Transaction),
   273  		reorgDoneCh:     make(chan chan struct{}),
   274  		reorgShutdownCh: make(chan struct{}),
   275  		energyPrice:     new(big.Int).SetUint64(config.PriceLimit),
   276  	}
   277  	pool.locals = newAccountSet(pool.signer)
   278  	for _, addr := range config.Locals {
   279  		log.Info("Setting new local account", "address", addr)
   280  		pool.locals.add(addr)
   281  	}
   282  	pool.priced = newTxPricedList(pool.all)
   283  	pool.reset(nil, chain.CurrentBlock().Header())
   284  
   285  	// Start the reorg loop early so it can handle requests generated during journal loading.
   286  	pool.wg.Add(1)
   287  	go pool.scheduleReorgLoop()
   288  
   289  	// If local transactions and journaling is enabled, load from disk
   290  	if !config.NoLocals && config.Journal != "" {
   291  		pool.journal = newTxJournal(config.Journal)
   292  
   293  		if err := pool.journal.load(pool.AddLocals); err != nil {
   294  			log.Warn("Failed to load transaction journal", "err", err)
   295  		}
   296  		if err := pool.journal.rotate(pool.local()); err != nil {
   297  			log.Warn("Failed to rotate transaction journal", "err", err)
   298  		}
   299  	}
   300  
   301  	// Subscribe events from blockchain and start the main event loop.
   302  	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
   303  	pool.wg.Add(1)
   304  	go pool.loop()
   305  
   306  	return pool
   307  }
   308  
   309  // loop is the transaction pool's main event loop, waiting for and reacting to
   310  // outside blockchain events as well as for various reporting and transaction
   311  // eviction events.
   312  func (pool *TxPool) loop() {
   313  	defer pool.wg.Done()
   314  
   315  	var (
   316  		prevPending, prevQueued, prevStales int
   317  		// Start the stats reporting and transaction eviction tickers
   318  		report  = time.NewTicker(statsReportInterval)
   319  		evict   = time.NewTicker(evictionInterval)
   320  		journal = time.NewTicker(pool.config.Rejournal)
   321  		// Track the previous head headers for transaction reorgs
   322  		head = pool.chain.CurrentBlock()
   323  	)
   324  	defer report.Stop()
   325  	defer evict.Stop()
   326  	defer journal.Stop()
   327  
   328  	for {
   329  		select {
   330  		// Handle ChainHeadEvent
   331  		case ev := <-pool.chainHeadCh:
   332  			if ev.Block != nil {
   333  				pool.requestReset(head.Header(), ev.Block.Header())
   334  				head = ev.Block
   335  			}
   336  
   337  		// System shutdown.
   338  		case <-pool.chainHeadSub.Err():
   339  			close(pool.reorgShutdownCh)
   340  			return
   341  
   342  		// Handle stats reporting ticks
   343  		case <-report.C:
   344  			pool.mu.RLock()
   345  			pending, queued := pool.stats()
   346  			stales := pool.priced.stales
   347  			pool.mu.RUnlock()
   348  
   349  			if pending != prevPending || queued != prevQueued || stales != prevStales {
   350  				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
   351  				prevPending, prevQueued, prevStales = pending, queued, stales
   352  			}
   353  
   354  		// Handle inactive account transaction eviction
   355  		case <-evict.C:
   356  			pool.mu.Lock()
   357  			for addr := range pool.queue {
   358  				// Skip local transactions from the eviction mechanism
   359  				if pool.locals.contains(addr) {
   360  					continue
   361  				}
   362  				// Any non-locals old enough should be removed
   363  				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
   364  					list := pool.queue[addr].Flatten()
   365  					for _, tx := range list {
   366  						pool.removeTx(tx.Hash(), true)
   367  					}
   368  					queuedEvictionMeter.Mark(int64(len(list)))
   369  				}
   370  			}
   371  			pool.mu.Unlock()
   372  
   373  		// Handle local transaction journal rotation
   374  		case <-journal.C:
   375  			if pool.journal != nil {
   376  				pool.mu.Lock()
   377  				if err := pool.journal.rotate(pool.local()); err != nil {
   378  					log.Warn("Failed to rotate local tx journal", "err", err)
   379  				}
   380  				pool.mu.Unlock()
   381  			}
   382  		}
   383  	}
   384  }
   385  
   386  // Stop terminates the transaction pool.
   387  func (pool *TxPool) Stop() {
   388  	// Unsubscribe all subscriptions registered from txpool
   389  	pool.scope.Close()
   390  
   391  	// Unsubscribe subscriptions registered from blockchain
   392  	pool.chainHeadSub.Unsubscribe()
   393  	pool.wg.Wait()
   394  
   395  	if pool.journal != nil {
   396  		pool.journal.close()
   397  	}
   398  	log.Info("Transaction pool stopped")
   399  }
   400  
   401  // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
   402  // starts sending event to the given channel.
   403  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
   404  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   405  }
   406  
   407  // EnergyPrice returns the current energy price enforced by the transaction pool.
   408  func (pool *TxPool) EnergyPrice() *big.Int {
   409  	pool.mu.RLock()
   410  	defer pool.mu.RUnlock()
   411  
   412  	return new(big.Int).Set(pool.energyPrice)
   413  }
   414  
   415  // SetEnergyPrice updates the minimum price required by the transaction pool for a
   416  // new transaction, and drops all transactions below this threshold.
   417  func (pool *TxPool) SetEnergyPrice(price *big.Int) {
   418  	pool.mu.Lock()
   419  	defer pool.mu.Unlock()
   420  
   421  	pool.energyPrice = price
   422  	for _, tx := range pool.priced.Cap(price, pool.locals) {
   423  		pool.removeTx(tx.Hash(), false)
   424  	}
   425  	log.Info("Transaction pool price threshold updated", "price", price)
   426  }
   427  
   428  // Nonce returns the next nonce of an account, with all transactions executable
   429  // by the pool already applied on top.
   430  func (pool *TxPool) Nonce(addr common.Address) uint64 {
   431  	pool.mu.RLock()
   432  	defer pool.mu.RUnlock()
   433  
   434  	return pool.pendingNonces.get(addr)
   435  }
   436  
   437  // Stats retrieves the current pool stats, namely the number of pending and the
   438  // number of queued (non-executable) transactions.
   439  func (pool *TxPool) Stats() (int, int) {
   440  	pool.mu.RLock()
   441  	defer pool.mu.RUnlock()
   442  
   443  	return pool.stats()
   444  }
   445  
   446  // stats retrieves the current pool stats, namely the number of pending and the
   447  // number of queued (non-executable) transactions.
   448  func (pool *TxPool) stats() (int, int) {
   449  	pending := 0
   450  	for _, list := range pool.pending {
   451  		pending += list.Len()
   452  	}
   453  	queued := 0
   454  	for _, list := range pool.queue {
   455  		queued += list.Len()
   456  	}
   457  	return pending, queued
   458  }
   459  
   460  // Content retrieves the data content of the transaction pool, returning all the
   461  // pending as well as queued transactions, grouped by account and sorted by nonce.
   462  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   463  	pool.mu.Lock()
   464  	defer pool.mu.Unlock()
   465  
   466  	pending := make(map[common.Address]types.Transactions)
   467  	for addr, list := range pool.pending {
   468  		pending[addr] = list.Flatten()
   469  	}
   470  	queued := make(map[common.Address]types.Transactions)
   471  	for addr, list := range pool.queue {
   472  		queued[addr] = list.Flatten()
   473  	}
   474  	return pending, queued
   475  }
   476  
   477  // Pending retrieves all currently processable transactions, grouped by origin
   478  // account and sorted by nonce. The returned transaction set is a copy and can be
   479  // freely modified by calling code.
   480  func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
   481  	pool.mu.Lock()
   482  	defer pool.mu.Unlock()
   483  
   484  	pending := make(map[common.Address]types.Transactions)
   485  	for addr, list := range pool.pending {
   486  		pending[addr] = list.Flatten()
   487  	}
   488  	return pending, nil
   489  }
   490  
   491  // Locals retrieves the accounts currently considered local by the pool.
   492  func (pool *TxPool) Locals() []common.Address {
   493  	pool.mu.Lock()
   494  	defer pool.mu.Unlock()
   495  
   496  	return pool.locals.flatten()
   497  }
   498  
   499  // local retrieves all currently known local transactions, grouped by origin
   500  // account and sorted by nonce. The returned transaction set is a copy and can be
   501  // freely modified by calling code.
   502  func (pool *TxPool) local() map[common.Address]types.Transactions {
   503  	txs := make(map[common.Address]types.Transactions)
   504  	for addr := range pool.locals.accounts {
   505  		if pending := pool.pending[addr]; pending != nil {
   506  			txs[addr] = append(txs[addr], pending.Flatten()...)
   507  		}
   508  		if queued := pool.queue[addr]; queued != nil {
   509  			txs[addr] = append(txs[addr], queued.Flatten()...)
   510  		}
   511  	}
   512  	return txs
   513  }
   514  
   515  // validateTx checks whether a transaction is valid according to the consensus
   516  // rules and adheres to some heuristic limits of the local node (price and size).
   517  func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
   518  	// Reject transactions over defined size to prevent DOS attacks
   519  	if uint64(tx.Size()) > txMaxSize {
   520  		return ErrOversizedData
   521  	}
   522  	// Transactions can't be negative. This may never happen using RLP decoded
   523  	// transactions but may occur if you create a transaction using the RPC.
   524  	if tx.Value().Sign() < 0 {
   525  		return ErrNegativeValue
   526  	}
   527  	// Ensure the transaction doesn't exceed the current block limit energy.
   528  	if pool.currentMaxEnergy < tx.Energy() {
   529  		return ErrEnergyLimit
   530  	}
   531  	// Make sure the transaction is signed properly
   532  	from, err := types.Sender(pool.signer, tx)
   533  	if err != nil {
   534  		return ErrInvalidRecipientOrSig
   535  	}
   536  	// Drop non-local transactions under our own minimal accepted energy price
   537  	local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network
   538  	if !local && tx.EnergyPriceIntCmp(pool.energyPrice) < 0 {
   539  		return ErrUnderpriced
   540  	}
   541  	// Ensure the transaction adheres to nonce ordering
   542  	if pool.currentState.GetNonce(from) > tx.Nonce() {
   543  		return ErrNonceTooLow
   544  	}
   545  	// Transactor should have enough funds to cover the costs
   546  	// cost == V + GP * GL
   547  	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
   548  		return ErrInsufficientFunds
   549  	}
   550  	// Ensure the transaction has more energy than the basic tx fee.
   551  	intrEnergy, err := IntrinsicEnergy(tx.Data(), tx.To() == nil)
   552  	if err != nil {
   553  		return err
   554  	}
   555  	if tx.Energy() < intrEnergy {
   556  		return ErrIntrinsicEnergy
   557  	}
   558  	return nil
   559  }
   560  
   561  // add validates a transaction and inserts it into the non-executable queue for later
   562  // pending promotion and execution. If the transaction is a replacement for an already
   563  // pending or queued one, it overwrites the previous transaction if its price is higher.
   564  //
   565  // If a newly added transaction is marked as local, its sending account will be
   566  // whitelisted, preventing any associated transaction from being dropped out of the pool
   567  // due to pricing constraints.
   568  func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
   569  	// If the transaction is already known, discard it
   570  	hash := tx.Hash()
   571  	if pool.all.Get(hash) != nil {
   572  		log.Trace("Discarding already known transaction", "hash", hash)
   573  		knownTxMeter.Mark(1)
   574  		return false, ErrAlreadyKnown
   575  	}
   576  	// If the transaction fails basic validation, discard it
   577  	if err := pool.validateTx(tx, local); err != nil {
   578  		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
   579  		invalidTxMeter.Mark(1)
   580  		return false, err
   581  	}
   582  	// If the transaction pool is full, discard underpriced transactions
   583  	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
   584  		// If the new transaction is underpriced, don't accept it
   585  		if !local && pool.priced.Underpriced(tx, pool.locals) {
   586  			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.EnergyPrice())
   587  			underpricedTxMeter.Mark(1)
   588  			return false, ErrUnderpriced
   589  		}
   590  		// New transaction is better than our worse ones, make room for it
   591  		drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals)
   592  		for _, tx := range drop {
   593  			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.EnergyPrice())
   594  			underpricedTxMeter.Mark(1)
   595  			pool.removeTx(tx.Hash(), false)
   596  		}
   597  	}
   598  	// Try to replace an existing transaction in the pending pool
   599  	from, err := types.Sender(pool.signer, tx) // already validated
   600  	if err != nil {
   601  		return false, err
   602  	}
   603  	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
   604  		// Nonce already pending, check if required price bump is met
   605  		inserted, old := list.Add(tx, pool.config.PriceBump)
   606  		if !inserted {
   607  			pendingDiscardMeter.Mark(1)
   608  			return false, ErrReplaceUnderpriced
   609  		}
   610  		// New transaction is better, replace old one
   611  		if old != nil {
   612  			pool.all.Remove(old.Hash())
   613  			pool.priced.Removed(1)
   614  			pendingReplaceMeter.Mark(1)
   615  		}
   616  		pool.all.Add(tx)
   617  		pool.priced.Put(tx)
   618  		pool.journalTx(from, tx)
   619  		pool.queueTxEvent(tx)
   620  		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
   621  
   622  		// Successful promotion, bump the heartbeat
   623  		pool.beats[from] = time.Now()
   624  		return old != nil, nil
   625  	}
   626  	// New transaction isn't replacing a pending one, push into queue
   627  	replaced, err = pool.enqueueTx(hash, tx)
   628  	if err != nil {
   629  		return false, err
   630  	}
   631  	// Mark local addresses and journal local transactions
   632  	if local {
   633  		if !pool.locals.contains(from) {
   634  			log.Info("Setting new local account", "address", from)
   635  			pool.locals.add(from)
   636  		}
   637  	}
   638  	if local || pool.locals.contains(from) {
   639  		localGauge.Inc(1)
   640  	}
   641  	pool.journalTx(from, tx)
   642  
   643  	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
   644  	return replaced, nil
   645  }
   646  
   647  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   648  //
   649  // Note, this method assumes the pool lock is held!
   650  func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
   651  	// Try to insert the transaction into the future queue
   652  	from, err := types.Sender(pool.signer, tx) // already validated
   653  	if err != nil {
   654  		return false, err
   655  	}
   656  	if pool.queue[from] == nil {
   657  		pool.queue[from] = newTxList(false)
   658  	}
   659  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
   660  	if !inserted {
   661  		// An older transaction was better, discard this
   662  		queuedDiscardMeter.Mark(1)
   663  		return false, ErrReplaceUnderpriced
   664  	}
   665  	// Discard any previous transaction and mark this
   666  	if old != nil {
   667  		pool.all.Remove(old.Hash())
   668  		pool.priced.Removed(1)
   669  		queuedReplaceMeter.Mark(1)
   670  	} else {
   671  		// Nothing was replaced, bump the queued counter
   672  		queuedGauge.Inc(1)
   673  	}
   674  	if pool.all.Get(hash) == nil {
   675  		pool.all.Add(tx)
   676  		pool.priced.Put(tx)
   677  	}
   678  	// If we never record the heartbeat, do it right now.
   679  	if _, exist := pool.beats[from]; !exist {
   680  		pool.beats[from] = time.Now()
   681  	}
   682  	return old != nil, nil
   683  }
   684  
   685  // journalTx adds the specified transaction to the local disk journal if it is
   686  // deemed to have been sent from a local account.
   687  func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
   688  	// Only journal if it's enabled and the transaction is local
   689  	if pool.journal == nil || !pool.locals.contains(from) {
   690  		return
   691  	}
   692  	if err := pool.journal.insert(tx); err != nil {
   693  		log.Warn("Failed to journal local transaction", "err", err)
   694  	}
   695  }
   696  
   697  // promoteTx adds a transaction to the pending (processable) list of transactions
   698  // and returns whether it was inserted or an older was better.
   699  //
   700  // Note, this method assumes the pool lock is held!
   701  func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
   702  	// Try to insert the transaction into the pending queue
   703  	if pool.pending[addr] == nil {
   704  		pool.pending[addr] = newTxList(true)
   705  	}
   706  	list := pool.pending[addr]
   707  
   708  	inserted, old := list.Add(tx, pool.config.PriceBump)
   709  	if !inserted {
   710  		// An older transaction was better, discard this
   711  		pool.all.Remove(hash)
   712  		pool.priced.Removed(1)
   713  		pendingDiscardMeter.Mark(1)
   714  		return false
   715  	}
   716  	// Otherwise discard any previous transaction and mark this
   717  	if old != nil {
   718  		pool.all.Remove(old.Hash())
   719  		pool.priced.Removed(1)
   720  		pendingReplaceMeter.Mark(1)
   721  	} else {
   722  		// Nothing was replaced, bump the pending counter
   723  		pendingGauge.Inc(1)
   724  	}
   725  	// Failsafe to work around direct pending inserts (tests)
   726  	if pool.all.Get(hash) == nil {
   727  		pool.all.Add(tx)
   728  		pool.priced.Put(tx)
   729  	}
   730  	// Set the potentially new pending nonce and notify any subsystems of the new tx
   731  	pool.pendingNonces.set(addr, tx.Nonce()+1)
   732  
   733  	// Successful promotion, bump the heartbeat
   734  	pool.beats[addr] = time.Now()
   735  	return true
   736  }
   737  
   738  // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
   739  // senders as a local ones, ensuring they go around the local pricing constraints.
   740  //
   741  // This method is used to add transactions from the RPC API and performs synchronous pool
   742  // reorganization and event propagation.
   743  func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
   744  	return pool.addTxs(txs, !pool.config.NoLocals, true)
   745  }
   746  
   747  // AddLocal enqueues a single local transaction into the pool if it is valid. This is
   748  // a convenience wrapper aroundd AddLocals.
   749  func (pool *TxPool) AddLocal(tx *types.Transaction) error {
   750  	errs := pool.AddLocals([]*types.Transaction{tx})
   751  	return errs[0]
   752  }
   753  
   754  // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
   755  // senders are not among the locally tracked ones, full pricing constraints will apply.
   756  //
   757  // This method is used to add transactions from the p2p network and does not wait for pool
   758  // reorganization and internal event propagation.
   759  func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
   760  	return pool.addTxs(txs, false, false)
   761  }
   762  
   763  // This is like AddRemotes, but waits for pool reorganization. Tests use this method.
   764  func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
   765  	return pool.addTxs(txs, false, true)
   766  }
   767  
   768  // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
   769  func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
   770  	errs := pool.AddRemotesSync([]*types.Transaction{tx})
   771  	return errs[0]
   772  }
   773  
   774  // addTxs attempts to queue a batch of transactions if they are valid.
   775  func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
   776  	// Filter out known ones without obtaining the pool lock or recovering signatures
   777  	var (
   778  		errs = make([]error, len(txs))
   779  		news = make([]*types.Transaction, 0, len(txs))
   780  	)
   781  	for i, tx := range txs {
   782  		// If the transaction is known, pre-set the error slot
   783  		if pool.all.Get(tx.Hash()) != nil {
   784  			errs[i] = ErrAlreadyKnown
   785  			knownTxMeter.Mark(1)
   786  			continue
   787  		}
   788  		// Exclude transactions with invalid signatures as soon as
   789  		// possible and cache senders in transactions before
   790  		// obtaining lock
   791  		_, err := types.Sender(pool.signer, tx)
   792  		if err != nil {
   793  			errs[i] = ErrInvalidRecipientOrSig
   794  			invalidTxMeter.Mark(1)
   795  			continue
   796  		}
   797  		// Accumulate all unknown transactions for deeper processing
   798  		news = append(news, tx)
   799  	}
   800  	if len(news) == 0 {
   801  		return errs
   802  	}
   803  
   804  	// Process all the new transaction and merge any errors into the original slice
   805  	pool.mu.Lock()
   806  	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
   807  	pool.mu.Unlock()
   808  
   809  	var nilSlot = 0
   810  	for _, err := range newErrs {
   811  		for errs[nilSlot] != nil {
   812  			nilSlot++
   813  		}
   814  		errs[nilSlot] = err
   815  		nilSlot++
   816  	}
   817  	// Reorg the pool internals if needed and return
   818  	done := pool.requestPromoteExecutables(dirtyAddrs)
   819  	if sync {
   820  		<-done
   821  	}
   822  	return errs
   823  }
   824  
   825  // addTxsLocked attempts to queue a batch of transactions if they are valid.
   826  // The transaction pool lock must be held.
   827  func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
   828  	dirty := newAccountSet(pool.signer)
   829  	errs := make([]error, len(txs))
   830  	for i, tx := range txs {
   831  		replaced, err := pool.add(tx, local)
   832  		errs[i] = err
   833  		if err == nil && !replaced {
   834  			dirty.addTx(tx)
   835  		}
   836  	}
   837  	validTxMeter.Mark(int64(len(dirty.accounts)))
   838  	return errs, dirty
   839  }
   840  
   841  // Status returns the status (unknown/pending/queued) of a batch of transactions
   842  // identified by their hashes.
   843  func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
   844  	status := make([]TxStatus, len(hashes))
   845  	for i, hash := range hashes {
   846  		tx := pool.Get(hash)
   847  		if tx == nil {
   848  			continue
   849  		}
   850  		from, _ := types.Sender(pool.signer, tx) // already validated
   851  		pool.mu.RLock()
   852  		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
   853  			status[i] = TxStatusPending
   854  		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
   855  			status[i] = TxStatusQueued
   856  		}
   857  		// implicit else: the tx may have been included into a block between
   858  		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
   859  		pool.mu.RUnlock()
   860  	}
   861  	return status
   862  }
   863  
   864  // Get returns a transaction if it is contained in the pool and nil otherwise.
   865  func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
   866  	return pool.all.Get(hash)
   867  }
   868  
   869  // Has returns an indicator whether txpool has a transaction cached with the
   870  // given hash.
   871  func (pool *TxPool) Has(hash common.Hash) bool {
   872  	return pool.all.Get(hash) != nil
   873  }
   874  
   875  // removeTx removes a single transaction from the queue, moving all subsequent
   876  // transactions back to the future queue.
   877  func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
   878  	// Fetch the transaction we wish to delete
   879  	tx := pool.all.Get(hash)
   880  	if tx == nil {
   881  		return
   882  	}
   883  	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
   884  
   885  	// Remove it from the list of known transactions
   886  	pool.all.Remove(hash)
   887  	if outofbound {
   888  		pool.priced.Removed(1)
   889  	}
   890  	if pool.locals.contains(addr) {
   891  		localGauge.Dec(1)
   892  	}
   893  	// Remove the transaction from the pending lists and reset the account nonce
   894  	if pending := pool.pending[addr]; pending != nil {
   895  		if removed, invalids := pending.Remove(tx); removed {
   896  			// If no more pending transactions are left, remove the list
   897  			if pending.Empty() {
   898  				delete(pool.pending, addr)
   899  			}
   900  			// Postpone any invalidated transactions
   901  			for _, tx := range invalids {
   902  				pool.enqueueTx(tx.Hash(), tx)
   903  			}
   904  			// Update the account nonce if needed
   905  			pool.pendingNonces.setIfLower(addr, tx.Nonce())
   906  			// Reduce the pending counter
   907  			pendingGauge.Dec(int64(1 + len(invalids)))
   908  			return
   909  		}
   910  	}
   911  	// Transaction is in the future queue
   912  	if future := pool.queue[addr]; future != nil {
   913  		if removed, _ := future.Remove(tx); removed {
   914  			// Reduce the queued counter
   915  			queuedGauge.Dec(1)
   916  		}
   917  		if future.Empty() {
   918  			delete(pool.queue, addr)
   919  			delete(pool.beats, addr)
   920  		}
   921  	}
   922  }
   923  
   924  // requestPromoteExecutables requests a pool reset to the new head block.
   925  // The returned channel is closed when the reset has occurred.
   926  func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
   927  	select {
   928  	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
   929  		return <-pool.reorgDoneCh
   930  	case <-pool.reorgShutdownCh:
   931  		return pool.reorgShutdownCh
   932  	}
   933  }
   934  
   935  // requestPromoteExecutables requests transaction promotion checks for the given addresses.
   936  // The returned channel is closed when the promotion checks have occurred.
   937  func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
   938  	select {
   939  	case pool.reqPromoteCh <- set:
   940  		return <-pool.reorgDoneCh
   941  	case <-pool.reorgShutdownCh:
   942  		return pool.reorgShutdownCh
   943  	}
   944  }
   945  
   946  // queueTxEvent enqueues a transaction event to be sent in the next reorg run.
   947  func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
   948  	select {
   949  	case pool.queueTxEventCh <- tx:
   950  	case <-pool.reorgShutdownCh:
   951  	}
   952  }
   953  
   954  // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
   955  // call those methods directly, but request them being run using requestReset and
   956  // requestPromoteExecutables instead.
   957  func (pool *TxPool) scheduleReorgLoop() {
   958  	defer pool.wg.Done()
   959  
   960  	var (
   961  		curDone       chan struct{} // non-nil while runReorg is active
   962  		nextDone      = make(chan struct{})
   963  		launchNextRun bool
   964  		reset         *txpoolResetRequest
   965  		dirtyAccounts *accountSet
   966  		queuedEvents  = make(map[common.Address]*txSortedMap)
   967  	)
   968  	for {
   969  		// Launch next background reorg if needed
   970  		if curDone == nil && launchNextRun {
   971  			// Run the background reorg and announcements
   972  			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
   973  
   974  			// Prepare everything for the next round of reorg
   975  			curDone, nextDone = nextDone, make(chan struct{})
   976  			launchNextRun = false
   977  
   978  			reset, dirtyAccounts = nil, nil
   979  			queuedEvents = make(map[common.Address]*txSortedMap)
   980  		}
   981  
   982  		select {
   983  		case req := <-pool.reqResetCh:
   984  			// Reset request: update head if request is already pending.
   985  			if reset == nil {
   986  				reset = req
   987  			} else {
   988  				reset.newHead = req.newHead
   989  			}
   990  			launchNextRun = true
   991  			pool.reorgDoneCh <- nextDone
   992  
   993  		case req := <-pool.reqPromoteCh:
   994  			// Promote request: update address set if request is already pending.
   995  			if dirtyAccounts == nil {
   996  				dirtyAccounts = req
   997  			} else {
   998  				dirtyAccounts.merge(req)
   999  			}
  1000  			launchNextRun = true
  1001  			pool.reorgDoneCh <- nextDone
  1002  
  1003  		case tx := <-pool.queueTxEventCh:
  1004  			// Queue up the event, but don't schedule a reorg. It's up to the caller to
  1005  			// request one later if they want the events sent.
  1006  			addr, _ := types.Sender(pool.signer, tx)
  1007  			if _, ok := queuedEvents[addr]; !ok {
  1008  				queuedEvents[addr] = newTxSortedMap()
  1009  			}
  1010  			queuedEvents[addr].Put(tx)
  1011  
  1012  		case <-curDone:
  1013  			curDone = nil
  1014  
  1015  		case <-pool.reorgShutdownCh:
  1016  			// Wait for current run to finish.
  1017  			if curDone != nil {
  1018  				<-curDone
  1019  			}
  1020  			close(nextDone)
  1021  			return
  1022  		}
  1023  	}
  1024  }
  1025  
  1026  // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
  1027  func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
  1028  	defer close(done)
  1029  
  1030  	var promoteAddrs []common.Address
  1031  	if dirtyAccounts != nil && reset == nil {
  1032  		// Only dirty accounts need to be promoted, unless we're resetting.
  1033  		// For resets, all addresses in the tx queue will be promoted and
  1034  		// the flatten operation can be avoided.
  1035  		promoteAddrs = dirtyAccounts.flatten()
  1036  	}
  1037  	pool.mu.Lock()
  1038  	if reset != nil {
  1039  		// Reset from the old head to the new, rescheduling any reorged transactions
  1040  		pool.reset(reset.oldHead, reset.newHead)
  1041  
  1042  		// Nonces were reset, discard any events that became stale
  1043  		for addr := range events {
  1044  			events[addr].Forward(pool.pendingNonces.get(addr))
  1045  			if events[addr].Len() == 0 {
  1046  				delete(events, addr)
  1047  			}
  1048  		}
  1049  		// Reset needs promote for all addresses
  1050  		promoteAddrs = make([]common.Address, 0, len(pool.queue))
  1051  		for addr := range pool.queue {
  1052  			promoteAddrs = append(promoteAddrs, addr)
  1053  		}
  1054  	}
  1055  	// Check for pending transactions for every account that sent new ones
  1056  	promoted := pool.promoteExecutables(promoteAddrs)
  1057  
  1058  	// If a new block appeared, validate the pool of pending transactions. This will
  1059  	// remove any transaction that has been included in the block or was invalidated
  1060  	// because of another transaction (e.g. higher energy price).
  1061  	if reset != nil {
  1062  		pool.demoteUnexecutables()
  1063  	}
  1064  	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
  1065  	pool.truncatePending()
  1066  	pool.truncateQueue()
  1067  
  1068  	// Update all accounts to the latest known pending nonce
  1069  	for addr, list := range pool.pending {
  1070  		highestPending := list.LastElement()
  1071  		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
  1072  	}
  1073  	pool.mu.Unlock()
  1074  
  1075  	// Notify subsystems for newly added transactions
  1076  	for _, tx := range promoted {
  1077  		addr, _ := types.Sender(pool.signer, tx)
  1078  		if _, ok := events[addr]; !ok {
  1079  			events[addr] = newTxSortedMap()
  1080  		}
  1081  		events[addr].Put(tx)
  1082  	}
  1083  	if len(events) > 0 {
  1084  		var txs []*types.Transaction
  1085  		for _, set := range events {
  1086  			txs = append(txs, set.Flatten()...)
  1087  		}
  1088  		pool.txFeed.Send(NewTxsEvent{txs})
  1089  	}
  1090  }
  1091  
  1092  // reset retrieves the current state of the blockchain and ensures the content
  1093  // of the transaction pool is valid with regard to the chain state.
  1094  func (pool *TxPool) reset(oldHead, newHead *types.Header) {
  1095  	// If we're reorging an old state, reinject all dropped transactions
  1096  	var reinject types.Transactions
  1097  
  1098  	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
  1099  		// If the reorg is too deep, avoid doing it (will happen during fast sync)
  1100  		oldNum := oldHead.Number.Uint64()
  1101  		newNum := newHead.Number.Uint64()
  1102  
  1103  		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
  1104  			log.Debug("Skipping deep transaction reorg", "depth", depth)
  1105  		} else {
  1106  			// Reorg seems shallow enough to pull in all transactions into memory
  1107  			var discarded, included types.Transactions
  1108  			var (
  1109  				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
  1110  				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
  1111  			)
  1112  			if rem == nil {
  1113  				// This can happen if a setHead is performed, where we simply discard the old
  1114  				// head from the chain.
  1115  				// If that is the case, we don't have the lost transactions any more, and
  1116  				// there's nothing to add
  1117  				if newNum < oldNum {
  1118  					// If the reorg ended up on a lower number, it's indicative of setHead being the cause
  1119  					log.Debug("Skipping transaction reset caused by setHead",
  1120  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1121  				} else {
  1122  					// If we reorged to a same or higher number, then it's not a case of setHead
  1123  					log.Warn("Transaction pool reset with missing oldhead",
  1124  						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
  1125  				}
  1126  				return
  1127  			}
  1128  			for rem.NumberU64() > add.NumberU64() {
  1129  				discarded = append(discarded, rem.Transactions()...)
  1130  				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1131  					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1132  					return
  1133  				}
  1134  			}
  1135  			for add.NumberU64() > rem.NumberU64() {
  1136  				included = append(included, add.Transactions()...)
  1137  				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1138  					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1139  					return
  1140  				}
  1141  			}
  1142  			for rem.Hash() != add.Hash() {
  1143  				discarded = append(discarded, rem.Transactions()...)
  1144  				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
  1145  					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
  1146  					return
  1147  				}
  1148  				included = append(included, add.Transactions()...)
  1149  				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
  1150  					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
  1151  					return
  1152  				}
  1153  			}
  1154  			reinject = types.TxDifference(discarded, included)
  1155  		}
  1156  	}
  1157  	// Initialize the internal state to the current head
  1158  	if newHead == nil {
  1159  		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
  1160  	}
  1161  	statedb, err := pool.chain.StateAt(newHead.Root)
  1162  	if err != nil {
  1163  		log.Error("Failed to reset txpool state", "err", err)
  1164  		return
  1165  	}
  1166  	pool.currentState = statedb
  1167  	pool.pendingNonces = newTxNoncer(statedb)
  1168  	pool.currentMaxEnergy = newHead.EnergyLimit
  1169  
  1170  	// Inject any transactions discarded due to reorgs
  1171  	log.Debug("Reinjecting stale transactions", "count", len(reinject))
  1172  	senderCacher.recover(pool.signer, reinject)
  1173  	pool.addTxsLocked(reinject, false)
  1174  }
  1175  
  1176  // promoteExecutables moves transactions that have become processable from the
  1177  // future queue to the set of pending transactions. During this process, all
  1178  // invalidated transactions (low nonce, low balance) are deleted.
  1179  func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
  1180  	// Track the promoted transactions to broadcast them at once
  1181  	var promoted []*types.Transaction
  1182  
  1183  	// Iterate over all accounts and promote any executable transactions
  1184  	for _, addr := range accounts {
  1185  		list := pool.queue[addr]
  1186  		if list == nil {
  1187  			continue // Just in case someone calls with a non existing account
  1188  		}
  1189  		// Drop all transactions that are deemed too old (low nonce)
  1190  		forwards := list.Forward(pool.currentState.GetNonce(addr))
  1191  		for _, tx := range forwards {
  1192  			hash := tx.Hash()
  1193  			pool.all.Remove(hash)
  1194  		}
  1195  		log.Trace("Removed old queued transactions", "count", len(forwards))
  1196  		// Drop all transactions that are too costly (low balance or out of energy)
  1197  		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxEnergy)
  1198  		for _, tx := range drops {
  1199  			hash := tx.Hash()
  1200  			pool.all.Remove(hash)
  1201  		}
  1202  		log.Trace("Removed unpayable queued transactions", "count", len(drops))
  1203  		queuedNofundsMeter.Mark(int64(len(drops)))
  1204  
  1205  		// Gather all executable transactions and promote them
  1206  		readies := list.Ready(pool.pendingNonces.get(addr))
  1207  		for _, tx := range readies {
  1208  			hash := tx.Hash()
  1209  			if pool.promoteTx(addr, hash, tx) {
  1210  				promoted = append(promoted, tx)
  1211  			}
  1212  		}
  1213  		log.Trace("Promoted queued transactions", "count", len(promoted))
  1214  		queuedGauge.Dec(int64(len(readies)))
  1215  
  1216  		// Drop all transactions over the allowed limit
  1217  		var caps types.Transactions
  1218  		if !pool.locals.contains(addr) {
  1219  			caps = list.Cap(int(pool.config.AccountQueue))
  1220  			for _, tx := range caps {
  1221  				hash := tx.Hash()
  1222  				pool.all.Remove(hash)
  1223  				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
  1224  			}
  1225  			queuedRateLimitMeter.Mark(int64(len(caps)))
  1226  		}
  1227  		// Mark all the items dropped as removed
  1228  		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
  1229  		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1230  		if pool.locals.contains(addr) {
  1231  			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
  1232  		}
  1233  		// Delete the entire queue entry if it became empty.
  1234  		if list.Empty() {
  1235  			delete(pool.queue, addr)
  1236  			delete(pool.beats, addr)
  1237  		}
  1238  	}
  1239  	return promoted
  1240  }
  1241  
  1242  // truncatePending removes transactions from the pending queue if the pool is above the
  1243  // pending limit. The algorithm tries to reduce transaction counts by an approximately
  1244  // equal number for all for accounts with many pending transactions.
  1245  func (pool *TxPool) truncatePending() {
  1246  	pending := uint64(0)
  1247  	for _, list := range pool.pending {
  1248  		pending += uint64(list.Len())
  1249  	}
  1250  	if pending <= pool.config.GlobalSlots {
  1251  		return
  1252  	}
  1253  
  1254  	pendingBeforeCap := pending
  1255  	// Assemble a spam order to penalize large transactors first
  1256  	spammers := prque.New(nil)
  1257  	for addr, list := range pool.pending {
  1258  		// Only evict transactions from high rollers
  1259  		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
  1260  			spammers.Push(addr, int64(list.Len()))
  1261  		}
  1262  	}
  1263  	// Gradually drop transactions from offenders
  1264  	offenders := []common.Address{}
  1265  	for pending > pool.config.GlobalSlots && !spammers.Empty() {
  1266  		// Retrieve the next offender if not local address
  1267  		offender, _ := spammers.Pop()
  1268  		offenders = append(offenders, offender.(common.Address))
  1269  
  1270  		// Equalize balances until all the same or below threshold
  1271  		if len(offenders) > 1 {
  1272  			// Calculate the equalization threshold for all current offenders
  1273  			threshold := pool.pending[offender.(common.Address)].Len()
  1274  
  1275  			// Iteratively reduce all offenders until below limit or threshold reached
  1276  			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
  1277  				for i := 0; i < len(offenders)-1; i++ {
  1278  					list := pool.pending[offenders[i]]
  1279  
  1280  					caps := list.Cap(list.Len() - 1)
  1281  					for _, tx := range caps {
  1282  						// Drop the transaction from the global pools too
  1283  						hash := tx.Hash()
  1284  						pool.all.Remove(hash)
  1285  
  1286  						// Update the account nonce to the dropped transaction
  1287  						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
  1288  						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1289  					}
  1290  					pool.priced.Removed(len(caps))
  1291  					pendingGauge.Dec(int64(len(caps)))
  1292  					if pool.locals.contains(offenders[i]) {
  1293  						localGauge.Dec(int64(len(caps)))
  1294  					}
  1295  					pending--
  1296  				}
  1297  			}
  1298  		}
  1299  	}
  1300  
  1301  	// If still above threshold, reduce to limit or min allowance
  1302  	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
  1303  		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
  1304  			for _, addr := range offenders {
  1305  				list := pool.pending[addr]
  1306  
  1307  				caps := list.Cap(list.Len() - 1)
  1308  				for _, tx := range caps {
  1309  					// Drop the transaction from the global pools too
  1310  					hash := tx.Hash()
  1311  					pool.all.Remove(hash)
  1312  
  1313  					// Update the account nonce to the dropped transaction
  1314  					pool.pendingNonces.setIfLower(addr, tx.Nonce())
  1315  					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
  1316  				}
  1317  				pool.priced.Removed(len(caps))
  1318  				pendingGauge.Dec(int64(len(caps)))
  1319  				if pool.locals.contains(addr) {
  1320  					localGauge.Dec(int64(len(caps)))
  1321  				}
  1322  				pending--
  1323  			}
  1324  		}
  1325  	}
  1326  	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
  1327  }
  1328  
  1329  // truncateQueue drops the oldes transactions in the queue if the pool is above the global queue limit.
  1330  func (pool *TxPool) truncateQueue() {
  1331  	queued := uint64(0)
  1332  	for _, list := range pool.queue {
  1333  		queued += uint64(list.Len())
  1334  	}
  1335  	if queued <= pool.config.GlobalQueue {
  1336  		return
  1337  	}
  1338  
  1339  	// Sort all accounts with queued transactions by heartbeat
  1340  	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
  1341  	for addr := range pool.queue {
  1342  		if !pool.locals.contains(addr) { // don't drop locals
  1343  			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
  1344  		}
  1345  	}
  1346  	sort.Sort(addresses)
  1347  
  1348  	// Drop transactions until the total is below the limit or only locals remain
  1349  	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
  1350  		addr := addresses[len(addresses)-1]
  1351  		list := pool.queue[addr.address]
  1352  
  1353  		addresses = addresses[:len(addresses)-1]
  1354  
  1355  		// Drop all transactions if they are less than the overflow
  1356  		if size := uint64(list.Len()); size <= drop {
  1357  			for _, tx := range list.Flatten() {
  1358  				pool.removeTx(tx.Hash(), true)
  1359  			}
  1360  			drop -= size
  1361  			queuedRateLimitMeter.Mark(int64(size))
  1362  			continue
  1363  		}
  1364  		// Otherwise drop only last few transactions
  1365  		txs := list.Flatten()
  1366  		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
  1367  			pool.removeTx(txs[i].Hash(), true)
  1368  			drop--
  1369  			queuedRateLimitMeter.Mark(1)
  1370  		}
  1371  	}
  1372  }
  1373  
  1374  // demoteUnexecutables removes invalid and processed transactions from the pools
  1375  // executable/pending queue and any subsequent transactions that become unexecutable
  1376  // are moved back into the future queue.
  1377  func (pool *TxPool) demoteUnexecutables() {
  1378  	// Iterate over all accounts and demote any non-executable transactions
  1379  	for addr, list := range pool.pending {
  1380  		nonce := pool.currentState.GetNonce(addr)
  1381  
  1382  		// Drop all transactions that are deemed too old (low nonce)
  1383  		olds := list.Forward(nonce)
  1384  		for _, tx := range olds {
  1385  			hash := tx.Hash()
  1386  			pool.all.Remove(hash)
  1387  			log.Trace("Removed old pending transaction", "hash", hash)
  1388  		}
  1389  		// Drop all transactions that are too costly (low balance or out of energy), and queue any invalids back for later
  1390  		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxEnergy)
  1391  		for _, tx := range drops {
  1392  			hash := tx.Hash()
  1393  			log.Trace("Removed unpayable pending transaction", "hash", hash)
  1394  			pool.all.Remove(hash)
  1395  		}
  1396  		pool.priced.Removed(len(olds) + len(drops))
  1397  		pendingNofundsMeter.Mark(int64(len(drops)))
  1398  
  1399  		for _, tx := range invalids {
  1400  			hash := tx.Hash()
  1401  			log.Trace("Demoting pending transaction", "hash", hash)
  1402  			pool.enqueueTx(hash, tx)
  1403  		}
  1404  		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1405  		if pool.locals.contains(addr) {
  1406  			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
  1407  		}
  1408  		// If there's a gap in front, alert (should never happen) and postpone all transactions
  1409  		if list.Len() > 0 && list.txs.Get(nonce) == nil {
  1410  			gapped := list.Cap(0)
  1411  			for _, tx := range gapped {
  1412  				hash := tx.Hash()
  1413  				log.Error("Demoting invalidated transaction", "hash", hash)
  1414  				pool.enqueueTx(hash, tx)
  1415  			}
  1416  			pendingGauge.Dec(int64(len(gapped)))
  1417  			// This might happen in a reorg, so log it to the metering
  1418  			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
  1419  		}
  1420  		// Delete the entire pending entry if it became empty.
  1421  		if list.Empty() {
  1422  			delete(pool.pending, addr)
  1423  		}
  1424  	}
  1425  }
  1426  
  1427  // addressByHeartbeat is an account address tagged with its last activity timestamp.
  1428  type addressByHeartbeat struct {
  1429  	address   common.Address
  1430  	heartbeat time.Time
  1431  }
  1432  
  1433  type addressesByHeartbeat []addressByHeartbeat
  1434  
  1435  func (a addressesByHeartbeat) Len() int           { return len(a) }
  1436  func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  1437  func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
  1438  
  1439  // accountSet is simply a set of addresses to check for existence, and a signer
  1440  // capable of deriving addresses from transactions.
  1441  type accountSet struct {
  1442  	accounts map[common.Address]struct{}
  1443  	signer   types.Signer
  1444  	cache    *[]common.Address
  1445  }
  1446  
  1447  // newAccountSet creates a new address set with an associated signer for sender
  1448  // derivations.
  1449  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  1450  	as := &accountSet{
  1451  		accounts: make(map[common.Address]struct{}),
  1452  		signer:   signer,
  1453  	}
  1454  	for _, addr := range addrs {
  1455  		as.add(addr)
  1456  	}
  1457  	return as
  1458  }
  1459  
  1460  // contains checks if a given address is contained within the set.
  1461  func (as *accountSet) contains(addr common.Address) bool {
  1462  	_, exist := as.accounts[addr]
  1463  	return exist
  1464  }
  1465  
  1466  func (as *accountSet) empty() bool {
  1467  	return len(as.accounts) == 0
  1468  }
  1469  
  1470  // containsTx checks if the sender of a given tx is within the set. If the sender
  1471  // cannot be derived, this method returns false.
  1472  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  1473  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1474  		return as.contains(addr)
  1475  	}
  1476  	return false
  1477  }
  1478  
  1479  // add inserts a new address into the set to track.
  1480  func (as *accountSet) add(addr common.Address) {
  1481  	as.accounts[addr] = struct{}{}
  1482  	as.cache = nil
  1483  }
  1484  
  1485  // addTx adds the sender of tx into the set.
  1486  func (as *accountSet) addTx(tx *types.Transaction) {
  1487  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1488  		as.add(addr)
  1489  	}
  1490  }
  1491  
  1492  // flatten returns the list of addresses within this set, also caching it for later
  1493  // reuse. The returned slice should not be changed!
  1494  func (as *accountSet) flatten() []common.Address {
  1495  	if as.cache == nil {
  1496  		accounts := make([]common.Address, 0, len(as.accounts))
  1497  		for account := range as.accounts {
  1498  			accounts = append(accounts, account)
  1499  		}
  1500  		as.cache = &accounts
  1501  	}
  1502  	return *as.cache
  1503  }
  1504  
  1505  // merge adds all addresses from the 'other' set into 'as'.
  1506  func (as *accountSet) merge(other *accountSet) {
  1507  	for addr := range other.accounts {
  1508  		as.accounts[addr] = struct{}{}
  1509  	}
  1510  	as.cache = nil
  1511  }
  1512  
  1513  // txLookup is used internally by TxPool to track transactions while allowing lookup without
  1514  // mutex contention.
  1515  //
  1516  // Note, although this type is properly protected against concurrent access, it
  1517  // is **not** a type that should ever be mutated or even exposed outside of the
  1518  // transaction pool, since its internal state is tightly coupled with the pools
  1519  // internal mechanisms. The sole purpose of the type is to permit out-of-bound
  1520  // peeking into the pool in TxPool.Get without having to acquire the widely scoped
  1521  // TxPool.mu mutex.
  1522  type txLookup struct {
  1523  	all   map[common.Hash]*types.Transaction
  1524  	slots int
  1525  	lock  sync.RWMutex
  1526  }
  1527  
  1528  // newTxLookup returns a new txLookup structure.
  1529  func newTxLookup() *txLookup {
  1530  	return &txLookup{
  1531  		all: make(map[common.Hash]*types.Transaction),
  1532  	}
  1533  }
  1534  
  1535  // Range calls f on each key and value present in the map.
  1536  func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
  1537  	t.lock.RLock()
  1538  	defer t.lock.RUnlock()
  1539  
  1540  	for key, value := range t.all {
  1541  		if !f(key, value) {
  1542  			break
  1543  		}
  1544  	}
  1545  }
  1546  
  1547  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1548  func (t *txLookup) Get(hash common.Hash) *types.Transaction {
  1549  	t.lock.RLock()
  1550  	defer t.lock.RUnlock()
  1551  
  1552  	return t.all[hash]
  1553  }
  1554  
  1555  // Count returns the current number of items in the lookup.
  1556  func (t *txLookup) Count() int {
  1557  	t.lock.RLock()
  1558  	defer t.lock.RUnlock()
  1559  
  1560  	return len(t.all)
  1561  }
  1562  
  1563  // Slots returns the current number of slots used in the lookup.
  1564  func (t *txLookup) Slots() int {
  1565  	t.lock.RLock()
  1566  	defer t.lock.RUnlock()
  1567  
  1568  	return t.slots
  1569  }
  1570  
  1571  // Add adds a transaction to the lookup.
  1572  func (t *txLookup) Add(tx *types.Transaction) {
  1573  	t.lock.Lock()
  1574  	defer t.lock.Unlock()
  1575  
  1576  	t.slots += numSlots(tx)
  1577  	slotsGauge.Update(int64(t.slots))
  1578  
  1579  	t.all[tx.Hash()] = tx
  1580  }
  1581  
  1582  // Remove removes a transaction from the lookup.
  1583  func (t *txLookup) Remove(hash common.Hash) {
  1584  	t.lock.Lock()
  1585  	defer t.lock.Unlock()
  1586  
  1587  	t.slots -= numSlots(t.all[hash])
  1588  	slotsGauge.Update(int64(t.slots))
  1589  
  1590  	delete(t.all, hash)
  1591  }
  1592  
  1593  // numSlots calculates the number of slots needed for a single transaction.
  1594  func numSlots(tx *types.Transaction) int {
  1595  	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
  1596  }