github.com/fff-chain/go-fff@v0.0.0-20220726032732-1c84420b8a99/core/tx_pool.go (about)

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package core
    18  
    19  import (
    20  	"errors"
    21  	"math"
    22  	"math/big"
    23  	"sort"
    24  	"sync"
    25  	"time"
    26  
    27  	"github.com/fff-chain/go-fff/common"
    28  	"github.com/fff-chain/go-fff/common/prque"
    29  	"github.com/fff-chain/go-fff/core/state"
    30  	"github.com/fff-chain/go-fff/core/types"
    31  	"github.com/fff-chain/go-fff/event"
    32  	"github.com/fff-chain/go-fff/log"
    33  	"github.com/fff-chain/go-fff/metrics"
    34  	"github.com/fff-chain/go-fff/params"
    35  )
    36  
const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// txSlotSize is used to calculate how many data slots a single transaction
	// takes up based on its size. The slots are used as DoS protection, ensuring
	// that validating a new transaction remains a constant operation (in reality
	// O(maxslots), where max slots are 4 currently).
	txSlotSize = 32 * 1024

	// txMaxSize is the maximum size a single transaction can have. This field has
	// non-trivial consequences: larger transactions are significantly harder and
	// more expensive to propagate; larger transactions also take more resources
	// to validate whether they fit into the pool or not.
	txMaxSize = 4 * txSlotSize // 128KB

	// txReannoMaxNum is the maximum number of transactions a reannounce action
	// can include (enforced by the reannounce ticker in the pool's main loop).
	txReannoMaxNum = 1024
)
    56  
var (
	// ErrAlreadyKnown is returned if the transactions is already contained
	// within the pool.
	ErrAlreadyKnown = errors.New("already known")

	// ErrInvalidSender is returned if the transaction contains an invalid signature.
	ErrInvalidSender = errors.New("invalid sender")

	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
	// configured for the transaction pool.
	ErrUnderpriced = errors.New("transaction underpriced")

	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
	// another remote transaction.
	ErrTxPoolOverflow = errors.New("txpool is full")

	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
	// with a different one without the required price bump.
	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")

	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
	// maximum allowance of the current block.
	ErrGasLimit = errors.New("exceeds block gas limit")

	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
	// transaction with a negative value.
	ErrNegativeValue = errors.New("negative value")

	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DOS protection.
	ErrOversizedData = errors.New("oversized data")
)
    90  
// Background ticker intervals for the pool's main event loop. These are
// variables (not constants) so tests can shorten them.
var (
	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
	reannounceInterval  = time.Minute     // Time interval to check for reannounce transactions
)
    96  
var (
	// Metrics for the pending pool
	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)   // Dropped as replacement without the required price bump
	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)   // Replaced by a better transaction with the same nonce
	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds

	// Metrics for the queued pool
	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)   // Dropped as replacement without the required price bump
	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)   // Replaced by a better transaction with the same nonce
	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime

	// General tx metrics
	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)       // Duplicate submissions
	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)       // Accepted transactions
	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)     // Failed validateTx
	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) // Rejected or evicted on price
	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)  // Rejected because the pool could not make room

	// Current pool sizes, updated as transactions move between sub-pools.
	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
)
   123  
// TxStatus is the current status of a transaction as seen by the pool.
type TxStatus uint

const (
	TxStatusUnknown  TxStatus = iota // Not tracked by the pool
	TxStatusQueued                   // In the non-executable (future) queue
	TxStatusPending                  // Executable, awaiting inclusion
	TxStatusIncluded                 // Already mined into a block
)
   133  
// blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers.
type blockChain interface {
	// CurrentBlock returns the current head block of the chain.
	CurrentBlock() *types.Block
	// GetBlock retrieves a block by hash and number.
	GetBlock(hash common.Hash, number uint64) *types.Block
	// StateAt returns the state database at the given state root.
	StateAt(root common.Hash) (*state.StateDB, error)

	// SubscribeChainHeadEvent subscribes the pool to new-head notifications.
	SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}
   143  
// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
	Locals    []common.Address // Addresses that should be treated by default as local
	NoLocals  bool             // Whether local transaction handling should be disabled
	Journal   string           // Journal of local transactions to survive node restarts
	Rejournal time.Duration    // Time interval to regenerate the local transaction journal

	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)

	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts

	Lifetime       time.Duration // Maximum amount of time non-executable transactions are queued
	ReannounceTime time.Duration // Duration for announcing local pending transactions again
}
   162  
// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
	Journal:   "transactions.rlp",
	Rejournal: time.Hour,

	PriceLimit: 1,
	PriceBump:  10,

	AccountSlots: 16,
	GlobalSlots:  4096,
	AccountQueue: 64,
	GlobalQueue:  1024,

	Lifetime: 3 * time.Hour,
	// ~10 years: effectively disables reannouncement unless overridden.
	ReannounceTime: 10 * 365 * 24 * time.Hour,
}
   180  
   181  // sanitize checks the provided user configurations and changes anything that's
   182  // unreasonable or unworkable.
   183  func (config *TxPoolConfig) sanitize() TxPoolConfig {
   184  	conf := *config
   185  	if conf.Rejournal < time.Second {
   186  		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
   187  		conf.Rejournal = time.Second
   188  	}
   189  	if conf.PriceLimit < 1 {
   190  		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
   191  		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
   192  	}
   193  	if conf.PriceBump < 1 {
   194  		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
   195  		conf.PriceBump = DefaultTxPoolConfig.PriceBump
   196  	}
   197  	if conf.AccountSlots < 1 {
   198  		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
   199  		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
   200  	}
   201  	if conf.GlobalSlots < 1 {
   202  		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
   203  		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
   204  	}
   205  	if conf.AccountQueue < 1 {
   206  		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
   207  		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
   208  	}
   209  	if conf.GlobalQueue < 1 {
   210  		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
   211  		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
   212  	}
   213  	if conf.Lifetime < 1 {
   214  		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
   215  		conf.Lifetime = DefaultTxPoolConfig.Lifetime
   216  	}
   217  	if conf.ReannounceTime < time.Minute {
   218  		log.Warn("Sanitizing invalid txpool reannounce time", "provided", conf.ReannounceTime, "updated", time.Minute)
   219  		conf.ReannounceTime = time.Minute
   220  	}
   221  	return conf
   222  }
   223  
// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
//
// The pool separates processable transactions (which can be applied to the
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
	config       TxPoolConfig         // Pool configuration (already sanitized)
	chainconfig  *params.ChainConfig  // Chain rules used to derive signer and fork flags
	chain        blockChain           // Source of head blocks and state
	gasPrice     *big.Int             // Minimum gas price enforced for acceptance
	txFeed       event.Feed           // Feed for NewTxsEvent notifications
	reannoTxFeed event.Feed           // Event feed for announcing transactions again
	scope        event.SubscriptionScope
	signer       types.Signer         // Signer used to recover transaction senders
	mu           sync.RWMutex         // Guards all mutable pool state below

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are using EIP-2718 type transactions.

	currentState  *state.StateDB // Current state in the blockchain head
	pendingNonces *txNoncer      // Pending state tracking virtual nonces
	currentMaxGas uint64         // Current gas limit for transaction caps

	locals  *accountSet // Set of local transaction to exempt from eviction rules
	journal *txJournal  // Journal of local transaction to back up to disk

	pending map[common.Address]*txList   // All currently processable transactions
	queue   map[common.Address]*txList   // Queued but non-processable transactions
	beats   map[common.Address]time.Time // Last heartbeat from each known account
	all     *txLookup                    // All transactions to allow lookups
	priced  *txPricedList                // All transactions sorted by price

	chainHeadCh     chan ChainHeadEvent     // Buffered channel fed by the chain-head subscription
	chainHeadSub    event.Subscription      // Subscription delivering new chain heads
	reqResetCh      chan *txpoolResetRequest // Requests a pool reset on a new head
	reqPromoteCh    chan *accountSet         // Requests promotion checks for accounts
	queueTxEventCh  chan *types.Transaction  // Queues a tx event for the next reorg run
	reorgDoneCh     chan chan struct{}       // Acknowledgement channel for reorg requests
	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
}

// txpoolResetRequest carries the old and new heads across which the pool
// should be reset.
type txpoolResetRequest struct {
	oldHead, newHead *types.Header
}
   271  
// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
//
// It sanitizes the configuration, restores the pool's view of the current
// head, optionally reloads journaled local transactions from disk, and starts
// the two background goroutines (scheduleReorgLoop and loop).
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
	// Sanitize the input to ensure no vulnerable gas prices are set
	config = (&config).sanitize()

	// Create the transaction pool with its initial settings
	pool := &TxPool{
		config:          config,
		chainconfig:     chainconfig,
		chain:           chain,
		signer:          types.LatestSigner(chainconfig),
		pending:         make(map[common.Address]*txList),
		queue:           make(map[common.Address]*txList),
		beats:           make(map[common.Address]time.Time),
		all:             newTxLookup(),
		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
		reqResetCh:      make(chan *txpoolResetRequest),
		reqPromoteCh:    make(chan *accountSet),
		queueTxEventCh:  make(chan *types.Transaction),
		reorgDoneCh:     make(chan chan struct{}),
		reorgShutdownCh: make(chan struct{}),
		gasPrice:        new(big.Int).SetUint64(config.PriceLimit),
	}
	// Seed the local account whitelist from the configuration.
	pool.locals = newAccountSet(pool.signer)
	for _, addr := range config.Locals {
		log.Info("Setting new local account", "address", addr)
		pool.locals.add(addr)
	}
	pool.priced = newTxPricedList(pool.all)
	// Align the pool's internal state with the current chain head.
	pool.reset(nil, chain.CurrentBlock().Header())

	// Start the reorg loop early so it can handle requests generated during journal loading.
	pool.wg.Add(1)
	go pool.scheduleReorgLoop()

	// If local transactions and journaling is enabled, load from disk
	if !config.NoLocals && config.Journal != "" {
		pool.journal = newTxJournal(config.Journal)

		if err := pool.journal.load(pool.AddLocals); err != nil {
			log.Warn("Failed to load transaction journal", "err", err)
		}
		// Rewrite the journal immediately so it only contains still-valid txs.
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}

	// Subscribe events from blockchain and start the main event loop.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	pool.wg.Add(1)
	go pool.loop()

	return pool
}
   327  
// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
	defer pool.wg.Done()

	var (
		// Previously reported values, used to suppress duplicate status logs.
		prevPending, prevQueued, prevStales int
		// Start the stats reporting and transaction eviction tickers
		report     = time.NewTicker(statsReportInterval)
		evict      = time.NewTicker(evictionInterval)
		reannounce = time.NewTicker(reannounceInterval)
		journal    = time.NewTicker(pool.config.Rejournal)
		// Track the previous head headers for transaction reorgs
		head = pool.chain.CurrentBlock()
	)
	defer report.Stop()
	defer evict.Stop()
	defer reannounce.Stop()
	defer journal.Stop()

	for {
		select {
		// Handle ChainHeadEvent: schedule a reset from the old head to the new.
		case ev := <-pool.chainHeadCh:
			if ev.Block != nil {
				pool.requestReset(head.Header(), ev.Block.Header())
				head = ev.Block
			}

		// System shutdown. The subscription erroring out is the shutdown
		// signal; closing reorgShutdownCh lets scheduleReorgLoop exit too.
		case <-pool.chainHeadSub.Err():
			close(pool.reorgShutdownCh)
			return

		// Handle stats reporting ticks; only log when something changed.
		case <-report.C:
			pool.mu.RLock()
			pending, queued := pool.stats()
			stales := pool.priced.stales
			pool.mu.RUnlock()

			if pending != prevPending || queued != prevQueued || stales != prevStales {
				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
				prevPending, prevQueued, prevStales = pending, queued, stales
			}

		// Handle inactive account transaction eviction
		case <-evict.C:
			pool.mu.Lock()
			for addr := range pool.queue {
				// Skip local transactions from the eviction mechanism
				if pool.locals.contains(addr) {
					continue
				}
				// Any non-locals old enough should be removed: an account whose
				// last heartbeat is older than Lifetime has all queued txs dropped.
				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
					list := pool.queue[addr].Flatten()
					for _, tx := range list {
						pool.removeTx(tx.Hash(), true)
					}
					queuedEvictionMeter.Mark(int64(len(list)))
				}
			}
			pool.mu.Unlock()

		// Collect local pending transactions older than ReannounceTime and
		// rebroadcast them, capped at txReannoMaxNum per tick.
		// NOTE(review): list.Flatten is called here under only the read lock;
		// this assumes Flatten is safe for concurrent readers — confirm.
		case <-reannounce.C:
			pool.mu.RLock()
			reannoTxs := func() []*types.Transaction {
				txs := make([]*types.Transaction, 0)
				for addr, list := range pool.pending {
					// Only locally-submitted accounts are ever reannounced.
					if !pool.locals.contains(addr) {
						continue
					}

					for _, tx := range list.Flatten() {
						// Default ReannounceTime is 10 years, won't announce by default.
						// NOTE(review): the break assumes Flatten's ordering implies
						// non-increasing age, so the remaining txs are younger — confirm.
						if time.Since(tx.Time()) < pool.config.ReannounceTime {
							break
						}
						txs = append(txs, tx)
						if len(txs) >= txReannoMaxNum {
							return txs
						}
					}
				}
				return txs
			}()
			pool.mu.RUnlock()
			if len(reannoTxs) > 0 {
				pool.reannoTxFeed.Send(ReannoTxsEvent{reannoTxs})
			}

		// Handle local transaction journal rotation
		case <-journal.C:
			if pool.journal != nil {
				pool.mu.Lock()
				if err := pool.journal.rotate(pool.local()); err != nil {
					log.Warn("Failed to rotate local tx journal", "err", err)
				}
				pool.mu.Unlock()
			}
		}
	}
}
   433  
// Stop terminates the transaction pool, tearing down all subscriptions and
// waiting for the background goroutines to exit before closing the journal.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()

	// Unsubscribe subscriptions registered from blockchain. This makes
	// chainHeadSub.Err() fire in loop, which closes reorgShutdownCh and lets
	// both tracked goroutines finish before Wait returns.
	pool.chainHeadSub.Unsubscribe()
	pool.wg.Wait()

	if pool.journal != nil {
		pool.journal.close()
	}
	log.Info("Transaction pool stopped")
}
   448  
   449  // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
   450  // starts sending event to the given channel.
   451  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
   452  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   453  }
   454  
   455  // SubscribeReannoTxsEvent registers a subscription of ReannoTxsEvent and
   456  // starts sending event to the given channel.
   457  func (pool *TxPool) SubscribeReannoTxsEvent(ch chan<- ReannoTxsEvent) event.Subscription {
   458  	return pool.scope.Track(pool.reannoTxFeed.Subscribe(ch))
   459  }
   460  
   461  // GasPrice returns the current gas price enforced by the transaction pool.
   462  func (pool *TxPool) GasPrice() *big.Int {
   463  	pool.mu.RLock()
   464  	defer pool.mu.RUnlock()
   465  
   466  	return new(big.Int).Set(pool.gasPrice)
   467  }
   468  
   469  // SetGasPrice updates the minimum price required by the transaction pool for a
   470  // new transaction, and drops all transactions below this threshold.
   471  func (pool *TxPool) SetGasPrice(price *big.Int) {
   472  	pool.mu.Lock()
   473  	defer pool.mu.Unlock()
   474  
   475  	pool.gasPrice = price
   476  	for _, tx := range pool.priced.Cap(price) {
   477  		pool.removeTx(tx.Hash(), false)
   478  	}
   479  	log.Info("Transaction pool price threshold updated", "price", price)
   480  }
   481  
   482  // Nonce returns the next nonce of an account, with all transactions executable
   483  // by the pool already applied on top.
   484  func (pool *TxPool) Nonce(addr common.Address) uint64 {
   485  	pool.mu.RLock()
   486  	defer pool.mu.RUnlock()
   487  
   488  	return pool.pendingNonces.get(addr)
   489  }
   490  
   491  // Stats retrieves the current pool stats, namely the number of pending and the
   492  // number of queued (non-executable) transactions.
   493  func (pool *TxPool) Stats() (int, int) {
   494  	pool.mu.RLock()
   495  	defer pool.mu.RUnlock()
   496  
   497  	return pool.stats()
   498  }
   499  
   500  // stats retrieves the current pool stats, namely the number of pending and the
   501  // number of queued (non-executable) transactions.
   502  func (pool *TxPool) stats() (int, int) {
   503  	pending := 0
   504  	for _, list := range pool.pending {
   505  		pending += len(list.txs.items)
   506  	}
   507  	queued := 0
   508  	for _, list := range pool.queue {
   509  		queued += len(list.txs.items)
   510  	}
   511  	return pending, queued
   512  }
   513  
   514  // Content retrieves the data content of the transaction pool, returning all the
   515  // pending as well as queued transactions, grouped by account and sorted by nonce.
   516  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   517  	pool.mu.Lock()
   518  	defer pool.mu.Unlock()
   519  
   520  	pending := make(map[common.Address]types.Transactions)
   521  	for addr, list := range pool.pending {
   522  		pending[addr] = list.Flatten()
   523  	}
   524  	queued := make(map[common.Address]types.Transactions)
   525  	for addr, list := range pool.queue {
   526  		queued[addr] = list.Flatten()
   527  	}
   528  	return pending, queued
   529  }
   530  
   531  // Pending retrieves all currently processable transactions, grouped by origin
   532  // account and sorted by nonce. The returned transaction set is a copy and can be
   533  // freely modified by calling code.
   534  func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
   535  	pool.mu.Lock()
   536  	defer pool.mu.Unlock()
   537  
   538  	pending := make(map[common.Address]types.Transactions)
   539  	for addr, list := range pool.pending {
   540  		pending[addr] = list.Flatten()
   541  	}
   542  	return pending, nil
   543  }
   544  
   545  // Locals retrieves the accounts currently considered local by the pool.
   546  func (pool *TxPool) Locals() []common.Address {
   547  	pool.mu.Lock()
   548  	defer pool.mu.Unlock()
   549  
   550  	return pool.locals.flatten()
   551  }
   552  
   553  // local retrieves all currently known local transactions, grouped by origin
   554  // account and sorted by nonce. The returned transaction set is a copy and can be
   555  // freely modified by calling code.
   556  func (pool *TxPool) local() map[common.Address]types.Transactions {
   557  	txs := make(map[common.Address]types.Transactions)
   558  	for addr := range pool.locals.accounts {
   559  		if pending := pool.pending[addr]; pending != nil {
   560  			txs[addr] = append(txs[addr], pending.Flatten()...)
   561  		}
   562  		if queued := pool.queue[addr]; queued != nil {
   563  			txs[addr] = append(txs[addr], queued.Flatten()...)
   564  		}
   565  	}
   566  	return txs
   567  }
   568  
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
//
// The checks run in a fixed order, so which error is returned for a
// transaction violating several rules is determined by that order.
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
	// Accept only legacy transactions until EIP-2718/2930 activates.
	if !pool.eip2718 && tx.Type() != types.LegacyTxType {
		return ErrTxTypeNotSupported
	}
	// Reject transactions over defined size to prevent DOS attacks
	if uint64(tx.Size()) > txMaxSize {
		return ErrOversizedData
	}
	// Transactions can't be negative. This may never happen using RLP decoded
	// transactions but may occur if you create a transaction using the RPC.
	if tx.Value().Sign() < 0 {
		return ErrNegativeValue
	}
	// Ensure the transaction doesn't exceed the current block limit gas.
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Make sure the transaction is signed properly; the recovered sender is
	// needed for the nonce and balance checks below.
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price;
	// local submissions are exempt from the price floor.
	if !local && tx.GasPriceIntCmp(pool.gasPrice) < 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if pool.currentState.GetNonce(from) > tx.Nonce() {
		return ErrNonceTooLow
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}
   617  
// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// whitelisted, preventing any associated transaction from being dropped out of the pool
// due to pricing constraints.
//
// Note: this method assumes the pool lock is held.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		//log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, ErrAlreadyKnown
	}
	// Make the local flag. If it's from local source or it's from the network but
	// the sender is marked as local previously, treat it as the local transaction.
	isLocal := local || pool.locals.containsTx(tx)

	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, isLocal); err != nil {
		//log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Count()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !isLocal && pool.priced.Underpriced(tx) {
			//log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// New transaction is better than our worst ones, make room for it.
		// If it's a local transaction, forcibly discard all available transactions.
		// Otherwise if we can't make enough room for new one, abort the operation.
		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)

		// Special case, we still can't make the room for the new remote one.
		if !isLocal && !success {
			//log.Trace("Discarding overflown transaction", "hash", hash)
			overflowedTxMeter.Mark(1)
			return false, ErrTxPoolOverflow
		}
		// Kick out the underpriced remote transactions.
		for _, tx := range drop {
			//log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx, isLocal)
		pool.priced.Put(tx, isLocal)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		//log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())

		// Successful promotion, bump the heartbeat
		pool.beats[from] = time.Now()
		return old != nil, nil
	}
	// New transaction isn't replacing a pending one, push into queue
	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
	if err != nil {
		return false, err
	}
	// Mark local addresses and journal local transactions
	if local && !pool.locals.contains(from) {
		//log.Info("Setting new local account", "address", from)
		pool.locals.add(from)
		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
	}
	if isLocal {
		localGauge.Inc(1)
	}
	pool.journalTx(from, tx)

	//log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
	return replaced, nil
}
   713  
   714  // enqueueTx inserts a new transaction into the non-executable transaction queue.
   715  //
   716  // Note, this method assumes the pool lock is held!
   717  func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
   718  	// Try to insert the transaction into the future queue
   719  	from, _ := types.Sender(pool.signer, tx) // already validated
   720  	if pool.queue[from] == nil {
   721  		pool.queue[from] = newTxList(false)
   722  	}
   723  	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
   724  	if !inserted {
   725  		// An older transaction was better, discard this
   726  		queuedDiscardMeter.Mark(1)
   727  		return false, ErrReplaceUnderpriced
   728  	}
   729  	// Discard any previous transaction and mark this
   730  	if old != nil {
   731  		pool.all.Remove(old.Hash())
   732  		pool.priced.Removed(1)
   733  		queuedReplaceMeter.Mark(1)
   734  	} else {
   735  		// Nothing was replaced, bump the queued counter
   736  		queuedGauge.Inc(1)
   737  	}
   738  	// If the transaction isn't in lookup set but it's expected to be there,
   739  	// show the error log.
   740  	if pool.all.Get(hash) == nil && !addAll {
   741  		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
   742  	}
   743  	if addAll {
   744  		pool.all.Add(tx, local)
   745  		pool.priced.Put(tx, local)
   746  	}
   747  	// If we never record the heartbeat, do it right now.
   748  	if _, exist := pool.beats[from]; !exist {
   749  		pool.beats[from] = time.Now()
   750  	}
   751  	return old != nil, nil
   752  }
   753  
   754  // journalTx adds the specified transaction to the local disk journal if it is
   755  // deemed to have been sent from a local account.
   756  func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
   757  	// Only journal if it's enabled and the transaction is local
   758  	if pool.journal == nil || !pool.locals.contains(from) {
   759  		return
   760  	}
   761  	if err := pool.journal.insert(tx); err != nil {
   762  		log.Warn("Failed to journal local transaction", "err", err)
   763  	}
   764  }
   765  
   766  // promoteTx adds a transaction to the pending (processable) list of transactions
   767  // and returns whether it was inserted or an older was better.
   768  //
   769  // Note, this method assumes the pool lock is held!
   770  func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
   771  	// Try to insert the transaction into the pending queue
   772  	if pool.pending[addr] == nil {
   773  		pool.pending[addr] = newTxList(true)
   774  	}
   775  	list := pool.pending[addr]
   776  
   777  	inserted, old := list.Add(tx, pool.config.PriceBump)
   778  	if !inserted {
   779  		// An older transaction was better, discard this
   780  		pool.all.Remove(hash)
   781  		pool.priced.Removed(1)
   782  		pendingDiscardMeter.Mark(1)
   783  		return false
   784  	}
   785  	// Otherwise discard any previous transaction and mark this
   786  	if old != nil {
   787  		pool.all.Remove(old.Hash())
   788  		pool.priced.Removed(1)
   789  		pendingReplaceMeter.Mark(1)
   790  	} else {
   791  		// Nothing was replaced, bump the pending counter
   792  		pendingGauge.Inc(1)
   793  	}
   794  	// Set the potentially new pending nonce and notify any subsystems of the new tx
   795  	pool.pendingNonces.set(addr, tx.Nonce()+1)
   796  
   797  	// Successful promotion, bump the heartbeat
   798  	pool.beats[addr] = time.Now()
   799  	return true
   800  }
   801  
   802  // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
   803  // senders as a local ones, ensuring they go around the local pricing constraints.
   804  //
   805  // This method is used to add transactions from the RPC API and performs synchronous pool
   806  // reorganization and event propagation.
   807  func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
   808  	return pool.addTxs(txs, !pool.config.NoLocals, true)
   809  }
   810  
   811  // AddLocal enqueues a single local transaction into the pool if it is valid. This is
   812  // a convenience wrapper aroundd AddLocals.
   813  func (pool *TxPool) AddLocal(tx *types.Transaction) error {
   814  	errs := pool.AddLocals([]*types.Transaction{tx})
   815  	return errs[0]
   816  }
   817  
   818  // AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
   819  // senders are not among the locally tracked ones, full pricing constraints will apply.
   820  //
   821  // This method is used to add transactions from the p2p network and does not wait for pool
   822  // reorganization and internal event propagation.
   823  func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
   824  	return pool.addTxs(txs, false, false)
   825  }
   826  
   827  // This is like AddRemotes, but waits for pool reorganization. Tests use this method.
   828  func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
   829  	return pool.addTxs(txs, false, true)
   830  }
   831  
   832  // This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
   833  func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
   834  	errs := pool.AddRemotesSync([]*types.Transaction{tx})
   835  	return errs[0]
   836  }
   837  
   838  // AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
   839  // wrapper around AddRemotes.
   840  //
   841  // Deprecated: use AddRemotes
   842  func (pool *TxPool) AddRemote(tx *types.Transaction) error {
   843  	errs := pool.AddRemotes([]*types.Transaction{tx})
   844  	return errs[0]
   845  }
   846  
   847  // addTxs attempts to queue a batch of transactions if they are valid.
   848  func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
   849  	// Filter out known ones without obtaining the pool lock or recovering signatures
   850  	var (
   851  		errs = make([]error, len(txs))
   852  		news = make([]*types.Transaction, 0, len(txs))
   853  	)
   854  	for i, tx := range txs {
   855  		// If the transaction is known, pre-set the error slot
   856  		if pool.all.Get(tx.Hash()) != nil {
   857  			errs[i] = ErrAlreadyKnown
   858  			knownTxMeter.Mark(1)
   859  			continue
   860  		}
   861  		// Exclude transactions with invalid signatures as soon as
   862  		// possible and cache senders in transactions before
   863  		// obtaining lock
   864  		_, err := types.Sender(pool.signer, tx)
   865  		if err != nil {
   866  			errs[i] = ErrInvalidSender
   867  			invalidTxMeter.Mark(1)
   868  			continue
   869  		}
   870  		// Accumulate all unknown transactions for deeper processing
   871  		news = append(news, tx)
   872  	}
   873  	if len(news) == 0 {
   874  		return errs
   875  	}
   876  
   877  	// Process all the new transaction and merge any errors into the original slice
   878  	pool.mu.Lock()
   879  	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
   880  	pool.mu.Unlock()
   881  
   882  	var nilSlot = 0
   883  	for _, err := range newErrs {
   884  		for errs[nilSlot] != nil {
   885  			nilSlot++
   886  		}
   887  		errs[nilSlot] = err
   888  		nilSlot++
   889  	}
   890  	// Reorg the pool internals if needed and return
   891  	done := pool.requestPromoteExecutables(dirtyAddrs)
   892  	if sync {
   893  		<-done
   894  	}
   895  	return errs
   896  }
   897  
   898  // addTxsLocked attempts to queue a batch of transactions if they are valid.
   899  // The transaction pool lock must be held.
   900  func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
   901  	dirty := newAccountSet(pool.signer)
   902  	errs := make([]error, len(txs))
   903  	for i, tx := range txs {
   904  		replaced, err := pool.add(tx, local)
   905  		errs[i] = err
   906  		if err == nil && !replaced {
   907  			dirty.addTx(tx)
   908  		}
   909  	}
   910  	validTxMeter.Mark(int64(len(dirty.accounts)))
   911  	return errs, dirty
   912  }
   913  
   914  // Status returns the status (unknown/pending/queued) of a batch of transactions
   915  // identified by their hashes.
   916  func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
   917  	status := make([]TxStatus, len(hashes))
   918  	for i, hash := range hashes {
   919  		tx := pool.Get(hash)
   920  		if tx == nil {
   921  			continue
   922  		}
   923  		from, _ := types.Sender(pool.signer, tx) // already validated
   924  		pool.mu.RLock()
   925  		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
   926  			status[i] = TxStatusPending
   927  		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
   928  			status[i] = TxStatusQueued
   929  		}
   930  		// implicit else: the tx may have been included into a block between
   931  		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
   932  		pool.mu.RUnlock()
   933  	}
   934  	return status
   935  }
   936  
   937  // Get returns a transaction if it is contained in the pool and nil otherwise.
   938  func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
   939  	return pool.all.Get(hash)
   940  }
   941  
   942  // Has returns an indicator whether txpool has a transaction cached with the
   943  // given hash.
   944  func (pool *TxPool) Has(hash common.Hash) bool {
   945  	return pool.all.Get(hash) != nil
   946  }
   947  
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
//
// outofbound indicates whether the transaction is still tracked by the priced
// index; when true, the index's removal counter is bumped as well.
//
// Note: this method assumes the pool lock is held.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
			}
			// Postpone any invalidated transactions (higher nonces that became
			// gapped by this removal) back to the future queue.
			for _, tx := range invalids {
				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(tx.Hash(), tx, false, false)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter: the removed tx plus every demoted one
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
}
   997  
// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		// The reorg loop acknowledges the request by handing back the done
		// channel of the run that will service it.
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		// Pool is shutting down; reorgShutdownCh is closed, so returning it
		// gives callers an already-closed channel and avoids blocking them.
		return pool.reorgShutdownCh
	}
}
  1008  
// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		// The reorg loop acknowledges the request by handing back the done
		// channel of the run that will service it.
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		// Pool is shutting down; the closed shutdown channel doubles as an
		// already-closed "done" channel for the caller.
		return pool.reorgShutdownCh
	}
}
  1019  
// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
// Unlike requestReset/requestPromoteExecutables, this does not schedule a reorg
// by itself; the event is held until a reorg is requested separately.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
		// Pool is shutting down; drop the event silently.
	}
}
  1027  
// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
//
// The loop coalesces multiple requests that arrive while a reorg is already
// running into a single follow-up run, and hands each requester the done
// channel of the run that will service its request.
func (pool *TxPool) scheduleReorgLoop() {
	defer pool.wg.Done()

	var (
		curDone       chan struct{} // non-nil while runReorg is active
		nextDone      = make(chan struct{})
		launchNextRun bool
		reset         *txpoolResetRequest
		dirtyAccounts *accountSet
		queuedEvents  = make(map[common.Address]*txSortedMap)
	)
	for {
		// Launch next background reorg if needed
		if curDone == nil && launchNextRun {
			// Run the background reorg and announcements
			go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)

			// Prepare everything for the next round of reorg
			curDone, nextDone = nextDone, make(chan struct{})
			launchNextRun = false

			reset, dirtyAccounts = nil, nil
			queuedEvents = make(map[common.Address]*txSortedMap)
		}

		select {
		case req := <-pool.reqResetCh:
			// Reset request: update head if request is already pending.
			if reset == nil {
				reset = req
			} else {
				reset.newHead = req.newHead
			}
			launchNextRun = true
			// Acknowledge with the done channel of the run that will serve this.
			pool.reorgDoneCh <- nextDone

		case req := <-pool.reqPromoteCh:
			// Promote request: update address set if request is already pending.
			if dirtyAccounts == nil {
				dirtyAccounts = req
			} else {
				dirtyAccounts.merge(req)
			}
			launchNextRun = true
			pool.reorgDoneCh <- nextDone

		case tx := <-pool.queueTxEventCh:
			// Queue up the event, but don't schedule a reorg. It's up to the caller to
			// request one later if they want the events sent.
			addr, _ := types.Sender(pool.signer, tx)
			if _, ok := queuedEvents[addr]; !ok {
				queuedEvents[addr] = newTxSortedMap()
			}
			queuedEvents[addr].Put(tx)

		case <-curDone:
			// Active reorg finished; a receive on the (now nil) channel blocks
			// forever, so this case is inert while no run is active.
			curDone = nil

		case <-pool.reorgShutdownCh:
			// Wait for current run to finish.
			if curDone != nil {
				<-curDone
			}
			close(nextDone)
			return
		}
	}
}
  1099  
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
// It closes done when finished so waiters (and the scheduler) can proceed, and
// finally announces all promoted plus explicitly queued transactions in a
// single NewTxsEvent.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
	defer close(done)

	var promoteAddrs []common.Address
	if dirtyAccounts != nil && reset == nil {
		// Only dirty accounts need to be promoted, unless we're resetting.
		// For resets, all addresses in the tx queue will be promoted and
		// the flatten operation can be avoided.
		promoteAddrs = dirtyAccounts.flatten()
	}
	pool.mu.Lock()
	if reset != nil {
		// Reset from the old head to the new, rescheduling any reorged transactions
		pool.reset(reset.oldHead, reset.newHead)

		// Nonces were reset, discard any events that became stale
		for addr := range events {
			events[addr].Forward(pool.pendingNonces.get(addr))
			if len(events[addr].items) == 0 {
				delete(events, addr)
			}
		}
		// Reset needs promote for all addresses
		promoteAddrs = make([]common.Address, 0, len(pool.queue))
		for addr := range pool.queue {
			promoteAddrs = append(promoteAddrs, addr)
		}
	}
	// Check for pending transactions for every account that sent new ones
	promoted := pool.promoteExecutables(promoteAddrs)

	// If a new block appeared, validate the pool of pending transactions. This will
	// remove any transaction that has been included in the block or was invalidated
	// because of another transaction (e.g. higher gas price).
	if reset != nil {
		pool.demoteUnexecutables()
	}
	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
	pool.truncatePending()
	pool.truncateQueue()

	// Update all accounts to the latest known pending nonce
	for addr, list := range pool.pending {
		highestPending := list.LastElement()
		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
	}
	pool.mu.Unlock()

	// Notify subsystems for newly added transactions.
	// Note: the event feed send happens outside the pool lock on purpose.
	for _, tx := range promoted {
		addr, _ := types.Sender(pool.signer, tx)
		if _, ok := events[addr]; !ok {
			events[addr] = newTxSortedMap()
		}
		events[addr].Put(tx)
	}
	if len(events) > 0 {
		var txs []*types.Transaction
		for _, set := range events {
			txs = append(txs, set.Flatten()...)
		}
		pool.txFeed.Send(NewTxsEvent{txs})
	}
}
  1165  
// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
//
// On shallow reorgs (depth <= 64 blocks) it walks both the old and the new
// chain back to their common ancestor and reinjects transactions that were
// dropped by the reorg but not re-included by the new chain.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
	// If we're reorging an old state, reinject all dropped transactions
	var reinject types.Transactions

	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
		// If the reorg is too deep, avoid doing it (will happen during fast sync)
		oldNum := oldHead.Number.Uint64()
		newNum := newHead.Number.Uint64()

		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
			log.Debug("Skipping deep transaction reorg", "depth", depth)
		} else {
			// Reorg seems shallow enough to pull in all transactions into memory
			var discarded, included types.Transactions
			var (
				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
			)
			if rem == nil {
				// This can happen if a setHead is performed, where we simply discard the old
				// head from the chain.
				// If that is the case, we don't have the lost transactions any more, and
				// there's nothing to add
				if newNum >= oldNum {
					// If we reorged to a same or higher number, then it's not a case of setHead
					log.Warn("Transaction pool reset with missing oldhead",
						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
					return
				}
				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
				log.Debug("Skipping transaction reset caused by setHead",
					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
				// We still need to update the current state s.th. the lost transactions can be readded by the user
			} else {
				// Walk the longer chain down to the height of the shorter one,
				// collecting transactions along the way.
				for rem.NumberU64() > add.NumberU64() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
				}
				for add.NumberU64() > rem.NumberU64() {
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Both chains are at the same height: step them back in lockstep
				// until they meet at the common ancestor.
				for rem.Hash() != add.Hash() {
					discarded = append(discarded, rem.Transactions()...)
					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
						return
					}
					included = append(included, add.Transactions()...)
					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
						return
					}
				}
				// Reinject only those dropped transactions not re-included upstream.
				reinject = types.TxDifference(discarded, included)
			}
		}
	}
	// Initialize the internal state to the current head
	if newHead == nil {
		newHead = pool.chain.CurrentBlock().Header() // Special case during testing
	}
	statedb, err := pool.chain.StateAt(newHead.Root)
	if err != nil {
		log.Error("Failed to reset txpool state", "err", err)
		return
	}
	pool.currentState = statedb
	pool.pendingNonces = newTxNoncer(statedb)
	pool.currentMaxGas = newHead.GasLimit

	// Inject any transactions discarded due to reorgs
	log.Debug("Reinjecting stale transactions", "count", len(reinject))
	senderCacher.recover(pool.signer, reinject)
	pool.addTxsLocked(reinject, false)

	// Update all fork indicator by next pending block number.
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
	pool.eip2718 = pool.chainconfig.IsBerlin(next)
}
  1255  
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
//
// It returns the transactions actually promoted so callers can announce them.
// Note: this method assumes the pool lock is held.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed old queued transactions", "count", len(forwards))
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
		}
		log.Trace("Removed unpayable queued transactions", "count", len(drops))
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				promoted = append(promoted, tx)
			}
		}
		log.Trace("Promoted queued transactions", "count", len(promoted))
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit; locals are exempt.
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
			delete(pool.beats, addr)
		}
	}
	return promoted
}
  1321  
// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all for accounts with many pending transactions.
//
// Note: this method assumes the pool lock is held.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(len(list.txs.items))
	}
	if pending <= pool.config.GlobalSlots {
		return
	}

	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(len(list.txs.items)) > pool.config.AccountSlots {
			spammers.Push(addr, int64(len(list.txs.items)))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			// (the newest offender has the smallest count, by priority order)
			threshold := len(pool.pending[offender.(common.Address)].txs.items)

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && len(pool.pending[offenders[len(offenders)-2]].txs.items) > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					// Drop a single (highest-nonce) transaction per offender.
					caps := list.Cap(len(list.txs.items) - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}

	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(len(pool.pending[offenders[len(offenders)-1]].txs.items)) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(len(list.txs.items) - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}
  1408  
// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
// Accounts are evicted in order of least-recent activity (heartbeat); locals are never dropped.
//
// Note: this method assumes the pool lock is held.
func (pool *TxPool) truncateQueue() {
	queued := uint64(0)
	for _, list := range pool.queue {
		queued += uint64(len(list.txs.items))
	}
	if queued <= pool.config.GlobalQueue {
		return
	}

	// Sort all accounts with queued transactions by heartbeat
	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
	for addr := range pool.queue {
		if !pool.locals.contains(addr) { // don't drop locals
			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
		}
	}
	sort.Sort(addresses)

	// Drop transactions until the total is below the limit or only locals remain.
	// Addresses are consumed from the back of the sorted slice (stalest first).
	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
		addr := addresses[len(addresses)-1]
		list := pool.queue[addr.address]

		addresses = addresses[:len(addresses)-1]

		// Drop all transactions if they are less than the overflow
		if size := uint64(len(list.txs.items)); size <= drop {
			for _, tx := range list.Flatten() {
				pool.removeTx(tx.Hash(), true)
			}
			drop -= size
			queuedRateLimitMeter.Mark(int64(size))
			continue
		}
		// Otherwise drop only last few transactions (highest nonces first)
		txs := list.Flatten()
		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
			pool.removeTx(txs[i].Hash(), true)
			drop--
			queuedRateLimitMeter.Mark(1)
		}
	}
}
  1453  
// demoteUnexecutables removes invalid and processed transactions from the pools
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
//
// For every pending account it (1) discards transactions already mined (nonce
// below the current state nonce), (2) discards transactions the account can no
// longer afford or that exceed the block gas limit, (3) re-queues transactions
// invalidated by those removals, and (4) as a safety net, re-queues everything
// if a nonce gap appears at the front of the list.
func (pool *TxPool) demoteUnexecutables() {
	// Iterate over all accounts and demote any non-executable transactions
	for addr, list := range pool.pending {
		nonce := pool.currentState.GetNonce(addr)

		// Drop all transactions that are deemed too old (low nonce)
		olds := list.Forward(nonce)
		for _, tx := range olds {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old pending transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			log.Trace("Removed unpayable pending transaction", "hash", hash)
			pool.all.Remove(hash)
		}
		// Both "old" and "unpayable" transactions are gone for good; update the
		// priced heap's stale counter and the no-funds meter accordingly.
		pool.priced.Removed(len(olds) + len(drops))
		pendingNofundsMeter.Mark(int64(len(drops)))

		// "invalids" were only made unexecutable by the removals above, so they
		// go back to the future queue rather than being discarded.
		for _, tx := range invalids {
			hash := tx.Hash()
			log.Trace("Demoting pending transaction", "hash", hash)

			// Internal shuffle shouldn't touch the lookup set.
			pool.enqueueTx(hash, tx, false, false)
		}
		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
		}
		// If there's a gap in front, alert (should never happen) and postpone all transactions
		if len(list.txs.items) > 0 && list.txs.Get(nonce) == nil {
			gapped := list.Cap(0)
			for _, tx := range gapped {
				hash := tx.Hash()
				log.Error("Demoting invalidated transaction", "hash", hash)

				// Internal shuffle shouldn't touch the lookup set.
				pool.enqueueTx(hash, tx, false, false)
			}
			pendingGauge.Dec(int64(len(gapped)))
			// This might happen in a reorg, so log it to the metering
			blockReorgInvalidatedTx.Mark(int64(len(gapped)))
		}
		// Delete the entire pending entry if it became empty.
		if list.Empty() {
			delete(pool.pending, addr)
		}
	}
}
  1510  
// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
	address   common.Address // account owning queued transactions
	heartbeat time.Time      // timestamp of the account's last recorded pool activity
}
  1516  
  1517  type addressesByHeartbeat []addressByHeartbeat
  1518  
  1519  func (a addressesByHeartbeat) Len() int           { return len(a) }
  1520  func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
  1521  func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
  1522  
// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
	accounts map[common.Address]struct{} // the tracked addresses
	signer   types.Signer                // signer used to recover a transaction's sender
	cache    *[]common.Address           // lazily built flat address list; nil after any mutation
}
  1530  
  1531  // newAccountSet creates a new address set with an associated signer for sender
  1532  // derivations.
  1533  func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
  1534  	as := &accountSet{
  1535  		accounts: make(map[common.Address]struct{}),
  1536  		signer:   signer,
  1537  	}
  1538  	for _, addr := range addrs {
  1539  		as.add(addr)
  1540  	}
  1541  	return as
  1542  }
  1543  
  1544  // contains checks if a given address is contained within the set.
  1545  func (as *accountSet) contains(addr common.Address) bool {
  1546  	_, exist := as.accounts[addr]
  1547  	return exist
  1548  }
  1549  
  1550  func (as *accountSet) empty() bool {
  1551  	return len(as.accounts) == 0
  1552  }
  1553  
  1554  // containsTx checks if the sender of a given tx is within the set. If the sender
  1555  // cannot be derived, this method returns false.
  1556  func (as *accountSet) containsTx(tx *types.Transaction) bool {
  1557  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1558  		return as.contains(addr)
  1559  	}
  1560  	return false
  1561  }
  1562  
  1563  // add inserts a new address into the set to track.
  1564  func (as *accountSet) add(addr common.Address) {
  1565  	as.accounts[addr] = struct{}{}
  1566  	as.cache = nil
  1567  }
  1568  
  1569  // addTx adds the sender of tx into the set.
  1570  func (as *accountSet) addTx(tx *types.Transaction) {
  1571  	if addr, err := types.Sender(as.signer, tx); err == nil {
  1572  		as.add(addr)
  1573  	}
  1574  }
  1575  
  1576  // flatten returns the list of addresses within this set, also caching it for later
  1577  // reuse. The returned slice should not be changed!
  1578  func (as *accountSet) flatten() []common.Address {
  1579  	if as.cache == nil {
  1580  		accounts := make([]common.Address, 0, len(as.accounts))
  1581  		for account := range as.accounts {
  1582  			accounts = append(accounts, account)
  1583  		}
  1584  		as.cache = &accounts
  1585  	}
  1586  	return *as.cache
  1587  }
  1588  
  1589  // merge adds all addresses from the 'other' set into 'as'.
  1590  func (as *accountSet) merge(other *accountSet) {
  1591  	for addr := range other.accounts {
  1592  		as.accounts[addr] = struct{}{}
  1593  	}
  1594  	as.cache = nil
  1595  }
  1596  
// txLookup is used internally by TxPool to track transactions while allowing
// lookup without mutex contention.
//
// Note, although this type is properly protected against concurrent access, it
// is **not** a type that should ever be mutated or even exposed outside of the
// transaction pool, since its internal state is tightly coupled with the pools
// internal mechanisms. The sole purpose of the type is to permit out-of-bound
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
//
// This lookup set combines the notion of "local transactions", which is useful
// to build upper-level structure.
type txLookup struct {
	slots   int                                // total data slots occupied by all tracked transactions
	lock    sync.RWMutex                       // guards all fields below against concurrent access
	locals  map[common.Hash]*types.Transaction // locally submitted transactions, keyed by hash
	remotes map[common.Hash]*types.Transaction // network-received transactions, keyed by hash
}
  1615  
  1616  // newTxLookup returns a new txLookup structure.
  1617  func newTxLookup() *txLookup {
  1618  	return &txLookup{
  1619  		locals:  make(map[common.Hash]*types.Transaction),
  1620  		remotes: make(map[common.Hash]*types.Transaction),
  1621  	}
  1622  }
  1623  
  1624  // Range calls f on each key and value present in the map. The callback passed
  1625  // should return the indicator whether the iteration needs to be continued.
  1626  // Callers need to specify which set (or both) to be iterated.
  1627  func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
  1628  	t.lock.RLock()
  1629  	defer t.lock.RUnlock()
  1630  
  1631  	if local {
  1632  		for key, value := range t.locals {
  1633  			if !f(key, value, true) {
  1634  				return
  1635  			}
  1636  		}
  1637  	}
  1638  	if remote {
  1639  		for key, value := range t.remotes {
  1640  			if !f(key, value, false) {
  1641  				return
  1642  			}
  1643  		}
  1644  	}
  1645  }
  1646  
  1647  // Get returns a transaction if it exists in the lookup, or nil if not found.
  1648  func (t *txLookup) Get(hash common.Hash) *types.Transaction {
  1649  	t.lock.RLock()
  1650  	defer t.lock.RUnlock()
  1651  
  1652  	if tx := t.locals[hash]; tx != nil {
  1653  		return tx
  1654  	}
  1655  	return t.remotes[hash]
  1656  }
  1657  
  1658  // GetLocal returns a transaction if it exists in the lookup, or nil if not found.
  1659  func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction {
  1660  	t.lock.RLock()
  1661  	defer t.lock.RUnlock()
  1662  
  1663  	return t.locals[hash]
  1664  }
  1665  
  1666  // GetRemote returns a transaction if it exists in the lookup, or nil if not found.
  1667  func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction {
  1668  	t.lock.RLock()
  1669  	defer t.lock.RUnlock()
  1670  
  1671  	return t.remotes[hash]
  1672  }
  1673  
  1674  // Count returns the current number of transactions in the lookup.
  1675  func (t *txLookup) Count() int {
  1676  	t.lock.RLock()
  1677  	defer t.lock.RUnlock()
  1678  
  1679  	return len(t.locals) + len(t.remotes)
  1680  }
  1681  
  1682  // LocalCount returns the current number of local transactions in the lookup.
  1683  func (t *txLookup) LocalCount() int {
  1684  	t.lock.RLock()
  1685  	defer t.lock.RUnlock()
  1686  
  1687  	return len(t.locals)
  1688  }
  1689  
  1690  // RemoteCount returns the current number of remote transactions in the lookup.
  1691  func (t *txLookup) RemoteCount() int {
  1692  	t.lock.RLock()
  1693  	defer t.lock.RUnlock()
  1694  
  1695  	return len(t.remotes)
  1696  }
  1697  
  1698  // Slots returns the current number of slots used in the lookup.
  1699  func (t *txLookup) Slots() int {
  1700  	t.lock.RLock()
  1701  	defer t.lock.RUnlock()
  1702  
  1703  	return t.slots
  1704  }
  1705  
  1706  // Add adds a transaction to the lookup.
  1707  func (t *txLookup) Add(tx *types.Transaction, local bool) {
  1708  	t.lock.Lock()
  1709  	defer t.lock.Unlock()
  1710  
  1711  	t.slots += numSlots(tx)
  1712  	slotsGauge.Update(int64(t.slots))
  1713  
  1714  	if local {
  1715  		t.locals[tx.Hash()] = tx
  1716  	} else {
  1717  		t.remotes[tx.Hash()] = tx
  1718  	}
  1719  }
  1720  
  1721  // Remove removes a transaction from the lookup.
  1722  func (t *txLookup) Remove(hash common.Hash) {
  1723  	t.lock.Lock()
  1724  	defer t.lock.Unlock()
  1725  
  1726  	tx, ok := t.locals[hash]
  1727  	if !ok {
  1728  		tx, ok = t.remotes[hash]
  1729  	}
  1730  	if !ok {
  1731  		log.Error("No transaction found to be deleted", "hash", hash)
  1732  		return
  1733  	}
  1734  	t.slots -= numSlots(tx)
  1735  	slotsGauge.Update(int64(t.slots))
  1736  
  1737  	delete(t.locals, hash)
  1738  	delete(t.remotes, hash)
  1739  }
  1740  
  1741  // RemoteToLocals migrates the transactions belongs to the given locals to locals
  1742  // set. The assumption is held the locals set is thread-safe to be used.
  1743  func (t *txLookup) RemoteToLocals(locals *accountSet) int {
  1744  	t.lock.Lock()
  1745  	defer t.lock.Unlock()
  1746  
  1747  	var migrated int
  1748  	for hash, tx := range t.remotes {
  1749  		if locals.containsTx(tx) {
  1750  			t.locals[hash] = tx
  1751  			delete(t.remotes, hash)
  1752  			migrated += 1
  1753  		}
  1754  	}
  1755  	return migrated
  1756  }
  1757  
// numSlots calculates the number of slots needed for a single transaction.
// This is a ceiling division of the transaction's size by txSlotSize: any
// partially used slot still counts as a full slot.
func numSlots(tx *types.Transaction) int {
	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}