github.com/Steality/go-ethereum@v1.9.7/core/tx_pool.go (about)

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package core
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"math"
    23  	"math/big"
    24  	"sort"
    25  	"sync"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum/common"
    29  	"github.com/ethereum/go-ethereum/common/prque"
    30  	"github.com/ethereum/go-ethereum/core/state"
    31  	"github.com/ethereum/go-ethereum/core/types"
    32  	"github.com/ethereum/go-ethereum/event"
    33  	"github.com/ethereum/go-ethereum/log"
    34  	"github.com/ethereum/go-ethereum/metrics"
    35  	"github.com/ethereum/go-ethereum/params"
    36  )
    37  
const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	// The channel is buffered so that slow pool resets don't immediately
	// block the producer feeding head announcements.
	chainHeadChanSize = 10
)
    42  
var (
	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrNonceTooLow is returned if the nonce of a transaction is lower than the
	// one present in the local chain.
	ErrNonceTooLow = errors.New("nonce too low")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool. Local transactions are exempt from
	// this check.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump (config.PriceBump).
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrInsufficientFunds is returned if the total cost of executing a transaction
	// is higher than the balance of the user's account.
	ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")

	// ErrIntrinsicGas is returned if the transaction is specified to use less gas
	// than required to start the invocation.
	ErrIntrinsicGas = errors.New("intrinsic gas too low")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure noone is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)
    80  
var (
	// Declared as variables rather than constants, presumably so they can be
	// overridden (e.g. by tests) — TODO confirm.
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)
    85  
var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil) // Dropped because an existing transaction was better
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil) // Replaced by a higher-priced transaction with the same nonce
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil) // Dropped because an existing transaction was better
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil) // Replaced by a higher-priced transaction with the same nonce
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)       // Duplicate submissions of an already pooled transaction
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)     // Transactions failing validateTx
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) // Transactions rejected or evicted on price

	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) // Current number of executable transactions
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)  // Current number of non-executable transactions
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)   // Current number of transactions from local accounts
)
   109  
// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown  TxStatus = iota // transaction is not known to the pool
	TxStatusQueued                   // transaction is in the non-executable queue
	TxStatusPending                  // transaction is executable, awaiting inclusion
	TxStatusIncluded                 // transaction is no longer pooled (presumably mined — confirm at lookup site)
)
   119  
// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers. It is satisfied by
// *core.BlockChain in production and by lightweight mocks in tests.
type blockChain interface {
	// CurrentBlock retrieves the current head block of the chain.
	CurrentBlock() *types.Block
	// GetBlock retrieves a specific block by hash and number.
	GetBlock(hash common.Hash, number uint64) *types.Block
	// StateAt returns the state database rooted at the given state root.
	StateAt(root common.Hash) (*state.StateDB, error)

	// SubscribeChainHeadEvent registers a listener for new head blocks.
	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}
   129  
// TxPoolConfig are the configuration parameters of the transaction pool.
// Invalid values are replaced with defaults by sanitize before use.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
}
   147  
// DefaultTxPoolConfig contains the default configurations for the transaction
// pool. sanitize falls back to these values whenever a user-provided setting
// is out of range.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10, // percent

	AccountSlots: 16,
	GlobalSlots:  4096,
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
}
   164  
   165  // sanitize checks the provided user configurations and changes anything that's
   166  // unreasonable or unworkable.
   167  func (config *TxPoolConfig) sanitize() TxPoolConfig {
   168  	conf := *config
   169  	if conf.Rejournal < time.Second {
   170  		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
   171  		conf.Rejournal = time.Second
   172  	}
   173  	if conf.PriceLimit < 1 {
   174  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
   175  		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
   176  	}
   177  	if conf.PriceBump < 1 {
   178  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
   179  		conf.PriceBump = DefaultTxPoolConfig.PriceBump
   180  	}
   181  	if conf.AccountSlots < 1 {
   182  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
   183  		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
   184  	}
   185  	if conf.GlobalSlots < 1 {
   186  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
   187  		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
   188  	}
   189  	if conf.AccountQueue < 1 {
   190  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
   191  		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
   192  	}
   193  	if conf.GlobalQueue < 1 {
   194  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
   195  		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
   196  	}
   197  	if conf.Lifetime < 1 {
   198  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
   199  		conf.Lifetime = DefaultTxPoolConfig.Lifetime
   200  	}
   201  	return conf
   202  }
   203  
// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config      TxPoolConfig        // Pool configuration (sanitized copy, see NewTxPool)
	chainconfig *params.ChainConfig // Chain parameters (ChainID feeds the signer)
	chain       blockChain          // Source of head blocks and state
	gasPrice    *big.Int            // Minimum gas price enforced for non-local transactions
	txFeed      event.Feed          // Feed announcing NewTxsEvent to subscribers
	scope       event.SubscriptionScope // Tracks subscriptions handed out; closed in Stop
	signer      types.Signer        // Signer used to recover transaction senders
	mu          sync.RWMutex        // Protects all mutable pool state below

	istanbul bool // Fork indicator whether we are in the istanbul stage.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	// The channels below are serviced by scheduleReorgLoop (defined elsewhere
	// in this file) and by loop.
	chainHeadCh     chan ChainHeadEvent      // New chain head notifications
	chainHeadSub    event.Subscription       // Subscription backing chainHeadCh
	reqResetCh      chan *txpoolResetRequest // Requests a pool reset between two heads
	reqPromoteCh    chan *accountSet         // Requests promotion checks for a set of accounts
	queueTxEventCh  chan *types.Transaction  // Queues a tx event for the next reorg run
	reorgDoneCh     chan chan struct{}       // Acknowledgement channels for completed reorgs
	reorgShutdownCh chan struct{}            // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup           // tracks loop, scheduleReorgLoop
}
   245  
// txpoolResetRequest asks for the pool to be reset from oldHead to newHead.
// It is delivered over TxPool.reqResetCh.
type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}
   249  
// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
//
// The startup sequence is order-sensitive: the pool state is reset to the
// current head, the reorg loop is started (so journal loading can enqueue
// work), the journal is loaded/rotated, and only then is the head event
// subscription opened and the main loop started.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.NewEIP155Signer(chainconfig.ChainID),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	// Accounts configured as local are exempt from pricing/eviction rules.
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		// Rewrite the journal immediately so stale entries are dropped.
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}
   305  
// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events. It runs until the chain head subscription errors out
// (node shutdown), at which point it closes reorgShutdownCh and returns.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report  = time.NewTicker(statsReportInterval)
		evict   = time.NewTicker(evictionInterval)
		journal = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent: request an async reset from the old head
		// to the new one and remember the new head for the next event.
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks; only log when something changed.
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough (no heartbeat within Lifetime)
				// should be removed, queue contents included.
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					for _, tx := range pool.queue[addr].Flatten() {
						pool.removeTx(tx.Hash(), true)
					}
				}
			}
			pool.mu.Unlock()

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}
   380  
   381  // Stop terminates the transaction pool.
   382  func (pool *TxPool) Stop() {
   383  	// Unsubscribe all subscriptions registered from txpool
   384  	pool.scope.Close()
   385  
   386  	// Unsubscribe subscriptions registered from blockchain
   387  	pool.chainHeadSub.Unsubscribe()
   388  	pool.wg.Wait()
   389  
   390  	if pool.journal != nil {
   391  		pool.journal.close()
   392  	}
   393  	log.Info("Transaction pool stopped")
   394  }
   395  
   396  // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
   397  // starts sending event to the given channel.
   398  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
   399  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   400  }
   401  
   402  // GasPrice returns the current gas price enforced by the transaction pool.
   403  func (pool *TxPool) GasPrice() *big.Int {
   404  	pool.mu.RLock()
   405  	defer pool.mu.RUnlock()
   406  
   407  	return new(big.Int).Set(pool.gasPrice)
   408  }
   409  
   410  // SetGasPrice updates the minimum price required by the transaction pool for a
   411  // new transaction, and drops all transactions below this threshold.
   412  func (pool *TxPool) SetGasPrice(price *big.Int) {
   413  	pool.mu.Lock()
   414  	defer pool.mu.Unlock()
   415  
   416  	pool.gasPrice = price
   417  	for _, tx := range pool.priced.Cap(price, pool.locals) {
   418  		pool.removeTx(tx.Hash(), false)
   419  	}
   420  	log.Info("Transaction pool price threshold updated", "price", price)
   421  }
   422  
   423  // Nonce returns the next nonce of an account, with all transactions executable
   424  // by the pool already applied on top.
   425  func (pool *TxPool) Nonce(addr common.Address) uint64 {
   426  	pool.mu.RLock()
   427  	defer pool.mu.RUnlock()
   428  
   429  	return pool.pendingNonces.get(addr)
   430  }
   431  
   432  // Stats retrieves the current pool stats, namely the number of pending and the
   433  // number of queued (non-executable) transactions.
   434  func (pool *TxPool) Stats() (int, int) {
   435  	pool.mu.RLock()
   436  	defer pool.mu.RUnlock()
   437  
   438  	return pool.stats()
   439  }
   440  
   441  // stats retrieves the current pool stats, namely the number of pending and the
   442  // number of queued (non-executable) transactions.
   443  func (pool *TxPool) stats() (int, int) {
   444  	pending := 0
   445  	for _, list := range pool.pending {
   446  		pending += list.Len()
   447  	}
   448  	queued := 0
   449  	for _, list := range pool.queue {
   450  		queued += list.Len()
   451  	}
   452  	return pending, queued
   453  }
   454  
   455  // Content retrieves the data content of the transaction pool, returning all the
   456  // pending as well as queued transactions, grouped by account and sorted by nonce.
   457  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   458  	pool.mu.Lock()
   459  	defer pool.mu.Unlock()
   460  
   461  	pending := make(map[common.Address]types.Transactions)
   462  	for addr, list := range pool.pending {
   463  		pending[addr] = list.Flatten()
   464  	}
   465  	queued := make(map[common.Address]types.Transactions)
   466  	for addr, list := range pool.queue {
   467  		queued[addr] = list.Flatten()
   468  	}
   469  	return pending, queued
   470  }
   471  
   472  // Pending retrieves all currently processable transactions, grouped by origin
   473  // account and sorted by nonce. The returned transaction set is a copy and can be
   474  // freely modified by calling code.
   475  func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
   476  	pool.mu.Lock()
   477  	defer pool.mu.Unlock()
   478  
   479  	pending := make(map[common.Address]types.Transactions)
   480  	for addr, list := range pool.pending {
   481  		pending[addr] = list.Flatten()
   482  	}
   483  	return pending, nil
   484  }
   485  
   486  // Locals retrieves the accounts currently considered local by the pool.
   487  func (pool *TxPool) Locals() []common.Address {
   488  	pool.mu.Lock()
   489  	defer pool.mu.Unlock()
   490  
   491  	return pool.locals.flatten()
   492  }
   493  
   494  // local retrieves all currently known local transactions, grouped by origin
   495  // account and sorted by nonce. The returned transaction set is a copy and can be
   496  // freely modified by calling code.
   497  func (pool *TxPool) local() map[common.Address]types.Transactions {
   498  	txs := make(map[common.Address]types.Transactions)
   499  	for addr := range pool.locals.accounts {
   500  		if pending := pool.pending[addr]; pending != nil {
   501  			txs[addr] = append(txs[addr], pending.Flatten()...)
   502  		}
   503  		if queued := pool.queue[addr]; queued != nil {
   504  			txs[addr] = append(txs[addr], queued.Flatten()...)
   505  		}
   506  	}
   507  	return txs
   508  }
   509  
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
//
// The local flag marks the transaction as submitted by this node; local
// transactions (and transactions whose sender is already tracked as local)
// skip only the minimum gas price check — every other rule still applies.
// The check order is significant: the first failing rule determines the
// returned error.
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Heuristic limit, reject transactions over 32KB to prevent DOS attacks
	if tx.Size() > 32*1024 {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Make sure the transaction is signed properly; the concrete recovery
	// error is deliberately masked behind ErrInvalidSender.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price
	local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network
	if !local && pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}
   555  
// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// whitelisted, preventing any associated transaction from being dropped out of the pool
// due to pricing constraints.
//
// The returned replaced flag reports whether an existing transaction with the
// same nonce was superseded. Note, this method assumes the pool lock is held!
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, fmt.Errorf("known transaction: %x", hash)
	}
	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, local); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !local && pool.priced.Underpriced(tx, pool.locals) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// New transaction is better than our worse ones, make room for it.
		// The -1 leaves exactly one free slot for the incoming transaction.
		drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx)
		pool.priced.Put(tx)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local {
		if !pool.locals.contains(from) {
			log.Info("Setting new local account", "address", from)
			pool.locals.add(from)
		}
	}
	if local || pool.locals.contains(from) {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}
   635  
// enqueueTx inserts a new transaction into the non-executable transaction queue.
// It returns whether an older transaction with the same nonce was replaced.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
	// Try to insert the transaction into the future queue
	from, _ := types.Sender(pool.signer, tx) // already validated
	if pool.queue[from] == nil {
		pool.queue[from] = newTxList(false)
	}
	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this
		queuedDiscardMeter.Mark(1)
		return false, ErrReplaceUnderpriced
	}
	// Discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)
		queuedReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the queued counter
		queuedGauge.Inc(1)
	}
	// Register the transaction in the global lookup unless it's already there
	// (it may be, for direct inserts in tests).
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	return old != nil, nil
}
   666  
   667  // journalTx adds the specified transaction to the local disk journal if it is
   668  // deemed to have been sent from a local account.
   669  func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
   670  	// Only journal if it's enabled and the transaction is local
   671  	if pool.journal == nil || !pool.locals.contains(from) {
   672  		return
   673  	}
   674  	if err := pool.journal.insert(tx); err != nil {
   675  		log.Warn("Failed to journal local transaction", "err", err)
   676  	}
   677  }
   678  
// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// On successful insertion it refreshes the account's heartbeat (protecting it
// from queue eviction) and advances the account's virtual pending nonce.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
	// Try to insert the transaction into the pending queue
	if pool.pending[addr] == nil {
		pool.pending[addr] = newTxList(true)
	}
	list := pool.pending[addr]

	inserted, old := list.Add(tx, pool.config.PriceBump)
	if !inserted {
		// An older transaction was better, discard this one entirely
		// (it is also dropped from the global lookup and priced list).
		pool.all.Remove(hash)
		pool.priced.Removed(1)

		pendingDiscardMeter.Mark(1)
		return false
	}
	// Otherwise discard any previous transaction and mark this
	if old != nil {
		pool.all.Remove(old.Hash())
		pool.priced.Removed(1)

		pendingReplaceMeter.Mark(1)
	} else {
		// Nothing was replaced, bump the pending counter
		pendingGauge.Inc(1)
	}
	// Failsafe to work around direct pending inserts (tests)
	if pool.all.Get(hash) == nil {
		pool.all.Add(tx)
		pool.priced.Put(tx)
	}
	// Set the potentially new pending nonce and notify any subsystems of the new tx
	pool.beats[addr] = time.Now()
	pool.pendingNonces.set(addr, tx.Nonce()+1)

	return true
}
   720  
// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}
   729  
// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}
   736  
// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
// The returned error slice is index-aligned with txs.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}
   745  
// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}
   750  
// addRemoteSync is like AddRemotes with a single transaction, but waits for pool
// reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}
   756  
// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes. The returned error corresponds to the given transaction.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}
   765  
// addTxs attempts to queue a batch of transactions if they are valid.
//
// The returned error slice is index-aligned with txs: a nil entry means the
// corresponding transaction was accepted for deeper processing. When sync is
// true, the call blocks until the resulting pool reorganization has finished.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = fmt.Errorf("known transaction: %x", tx.Hash())
			knownTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}
	// Cache senders in transactions before obtaining lock (pool.signer is immutable)
	for _, tx := range news {
		types.Sender(pool.signer, tx)
	}
	// Process all the new transaction and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	// Scatter the new errors back into the still-nil slots of errs; the nil
	// slots correspond, in order, to the transactions passed to addTxsLocked.
	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}
   809  
   810  // addTxsLocked attempts to queue a batch of transactions if they are valid.
   811  // The transaction pool lock must be held.
   812  func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
   813  	dirty := newAccountSet(pool.signer)
   814  	errs := make([]error, len(txs))
   815  	for i, tx := range txs {
   816  		replaced, err := pool.add(tx, local)
   817  		errs[i] = err
   818  		if err == nil && !replaced {
   819  			dirty.addTx(tx)
   820  		}
   821  	}
   822  	validTxMeter.Mark(int64(len(dirty.accounts)))
   823  	return errs, dirty
   824  }
   825  
// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes. Hashes not found in the pool keep the zero value
// TxStatusUnknown in the result slice.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		// Lock per hash (not around the whole loop) to keep contention low
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included into a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}
   848  
// Get returns a transaction if it is contained in the pool and nil otherwise.
// It only touches the lock-free lookup, so it is safe without the pool mutex.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}
   853  
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
//
// outofbound signals whether the price-sorted index still tracks the tx and
// therefore needs its removal counter bumped.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
				delete(pool.beats, addr)
			}
			// Postpone any invalidated transactions (nonce gaps created by the
			// removal) back into the future queue
			for _, tx := range invalids {
				pool.enqueueTx(tx.Hash(), tx)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
		}
	}
}
   902  
// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		// Pool is shutting down; hand back the shutdown channel so the caller
		// does not block forever waiting for a reset that will never run.
		return pool.reorgShutdownCh
	}
}
   913  
// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		// Pool is shutting down; hand back the shutdown channel so the caller
		// does not block forever waiting for a promotion that will never run.
		return pool.reorgShutdownCh
	}
}
   924  
// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
// During shutdown the event is silently dropped.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}
   932  
// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
//
// Only one runReorg is active at a time; requests arriving while one is in
// flight are accumulated (reset heads merged, dirty account sets merged) and
// served by the next run.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			// Hand the requester the done channel of the run that will serve it
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			// The active reorg finished; a queued-up one may launch next loop
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}
  1004  
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
// It closes done when finished and broadcasts the accumulated transaction
// events (plus any newly promoted ones) outside the pool lock.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil {
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if events[addr].Len() == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = promoteAddrs[:0]
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	// Update all accounts to the latest known pending nonce
	for addr, list := range pool.pending {
		txs := list.Flatten() // Heavy but will be cached and is needed by the miner anyway
		pool.pendingNonces.set(addr, txs[len(txs)-1].Nonce()+1)
	}
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions; done after releasing the
	// lock so slow subscribers cannot stall the pool
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}
  1066  
// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
//
// When oldHead is not the parent of newHead (a chain reorg), transactions that
// were only in the old chain segment are re-injected into the pool.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum < oldNum {
					// If the reorg ended up on a lower number, it's indicative of setHead being the cause
					log.Debug("Skipping transaction reset caused by setHead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				} else {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				}
				return
			}
			// Walk the deeper chain up to the height of the shallower one,
			// collecting transactions along the way
			for rem.NumberU64() > add.NumberU64() {
				discarded = append(discarded, rem.Transactions()...)
				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
					return
				}
			}
			for add.NumberU64() > rem.NumberU64() {
				included = append(included, add.Transactions()...)
				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
					return
				}
			}
			// Both cursors are now at equal height; step them back in lockstep
			// until the common ancestor is found
			for rem.Hash() != add.Hash() {
				discarded = append(discarded, rem.Transactions()...)
				if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
					log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
					return
				}
				included = append(included, add.Transactions()...)
				if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
					log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
					return
				}
			}
			// Reinject only what the old chain had and the new one lacks
			reinject = types.TxDifference(discarded, included)
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicator by next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
}
  1154  
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
// It returns the transactions that were actually promoted so callers can
// broadcast them.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old queued transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed unpayable queued transaction", "hash", hash)
		}
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				log.Trace("Promoting queued transaction", "hash", hash)
				promoted = append(promoted, tx)
			}
		}
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit; locals are exempt from
		// the per-account queue cap
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
		}
	}
	return promoted
}
  1219  
// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all for accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders:
			// the size of the most recently popped (smallest so far) offender
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				// Cap every previously collected offender by one transaction
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}
  1306  
// truncateQueue drops the oldest transactions in the queue if the pool is above the
// global queue limit. Local accounts are never truncated.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(list.Len())
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain,
	// evicting the account with the stalest heartbeat first
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(list.Len()); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}
  1351  
// demoteUnexecutables removes invalid and processed transactions from the pools
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		pool.priced.Removed(len(olds) + len(drops))
		pendingNofundsMeter.Mark(int64(len(drops)))

		// Transactions invalidated by the drops above are demoted back into
		// the future queue rather than removed outright
		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)
			pool.enqueueTx(hash, tx)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if list.Len() > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)
				pool.enqueueTx(hash, tx)
			}
			pendingGauge.Dec(int64(len(gapped)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
			delete(pool.beats, addr)
		}
	}
}
  1403  
// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address
	heartbeat time.Time // last pool activity (see pool.beats)
}
  1409  
// addressesByHeartbeat implements sort.Interface, ordering accounts from the
// oldest heartbeat to the most recent.
type addressesByHeartbeat []addressByHeartbeat

func (a addressesByHeartbeat) Len() int           { return len(a) }
func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
  1415  
// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{}
	signer   types.Signer
	cache    *[]common.Address // memoized result of flatten, nil when stale
}
  1423  
  1424  // newAccountSet creates a new address set with an associated signer for sender
  1425  // derivations.
  1426  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  1427  	as := &accountSet{
  1428  		accounts: make(map[common.Address]struct{}),
  1429  		signer:   signer,
  1430  	}
  1431  	for _, addr := range addrs {
  1432  		as.add(addr)
  1433  	}
  1434  	return as
  1435  }
  1436  
  1437  // contains checks if a given address is contained within the set.
  1438  func (as *accountSet) contains(addr common.Address) bool {
  1439  	_, exist := as.accounts[addr]
  1440  	return exist
  1441  }
  1442  
  1443  // containsTx checks if the sender of a given tx is within the set. If the sender
  1444  // cannot be derived, this method returns false.
  1445  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  1446  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1447  		return as.contains(addr)
  1448  	}
  1449  	return false
  1450  }
  1451  
// add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) {
	as.accounts[addr] = struct{}{}
	as.cache = nil // invalidate the memoized flatten result
}
  1457  
  1458  // addTx adds the sender of tx into the set.
  1459  func (as *accountSet) addTx(tx *types.Transaction) {
  1460  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1461  		as.add(addr)
  1462  	}
  1463  }
  1464  
  1465  // flatten returns the list of addresses within this set, also caching it for later
  1466  // reuse. The returned slice should not be changed!
  1467  func (as *accountSet) flatten() []common.Address {
  1468  	if as.cache == nil {
  1469  		accounts := make([]common.Address, 0, len(as.accounts))
  1470  		for account := range as.accounts {
  1471  			accounts = append(accounts, account)
  1472  		}
  1473  		as.cache = &accounts
  1474  	}
  1475  	return *as.cache
  1476  }
  1477  
// merge adds all addresses from the 'other' set into 'as'.
func (as *accountSet) merge(other *accountSet) {
	for addr := range other.accounts {
		as.accounts[addr] = struct{}{}
	}
	as.cache = nil // invalidate the memoized flatten result
}
  1485  
// txLookup is used internally by TxPool to track transactions while allowing lookup without
// mutex contention.
//
// Note, although this type is properly protected against concurrent access, it
// is **not** a type that should ever be mutated or even exposed outside of the
// transaction pool, since its internal state is tightly coupled with the pools
// internal mechanisms. The sole purpose of the type is to permit out-of-bound
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
type txLookup struct {
	all  map[common.Hash]*types.Transaction // all tracked transactions, keyed by hash
	lock sync.RWMutex                       // guards all
}
  1499  
// newTxLookup returns a new txLookup structure with an empty, ready-to-use map.
func newTxLookup() *txLookup {
	return &txLookup{
		all: make(map[common.Hash]*types.Transaction),
	}
}
  1506  
  1507  // Range calls f on each key and value present in the map.
  1508  func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
  1509  	t.lock.RLock()
  1510  	defer t.lock.RUnlock()
  1511  
  1512  	for key, value := range t.all {
  1513  		if !f(key, value) {
  1514  			break
  1515  		}
  1516  	}
  1517  }
  1518  
  1519  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1520  func (t *txLookup) Get(hash common.Hash) *types.Transaction {
  1521  	t.lock.RLock()
  1522  	defer t.lock.RUnlock()
  1523  
  1524  	return t.all[hash]
  1525  }
  1526  
  1527  // Count returns the current number of items in the lookup.
  1528  func (t *txLookup) Count() int {
  1529  	t.lock.RLock()
  1530  	defer t.lock.RUnlock()
  1531  
  1532  	return len(t.all)
  1533  }
  1534  
  1535  // Add adds a transaction to the lookup.
  1536  func (t *txLookup) Add(tx *types.Transaction) {
  1537  	t.lock.Lock()
  1538  	defer t.lock.Unlock()
  1539  
  1540  	t.all[tx.Hash()] = tx
  1541  }
  1542  
  1543  // Remove removes a transaction from the lookup.
  1544  func (t *txLookup) Remove(hash common.Hash) {
  1545  	t.lock.Lock()
  1546  	defer t.lock.Unlock()
  1547  
  1548  	delete(t.all, hash)
  1549  }