github.com/jimmyx0x/go-ethereum@v1.10.28/light/txpool.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package light
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/ethereum/go-ethereum/common"
    27  	"github.com/ethereum/go-ethereum/core"
    28  	"github.com/ethereum/go-ethereum/core/rawdb"
    29  	"github.com/ethereum/go-ethereum/core/state"
    30  	"github.com/ethereum/go-ethereum/core/txpool"
    31  	"github.com/ethereum/go-ethereum/core/types"
    32  	"github.com/ethereum/go-ethereum/ethdb"
    33  	"github.com/ethereum/go-ethereum/event"
    34  	"github.com/ethereum/go-ethereum/log"
    35  	"github.com/ethereum/go-ethereum/params"
    36  )
    37  
    38  const (
    39  	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    40  	chainHeadChanSize = 10
    41  )
    42  
    43  // txPermanent is the number of mined blocks after which a mined transaction
    44  // is considered permanent and no rollback is expected.
    45  var txPermanent = uint64(500)
    46  
    47  // TxPool implements the transaction pool for light clients, which keeps track
    48  // of the status of locally created transactions, detecting if they are included
    49  // in a block (mined) or rolled back. There are no queued transactions since we
    50  // always receive all locally signed transactions in the same order as they are
    51  // created.
    52  type TxPool struct {
    53  	config       *params.ChainConfig
    54  	signer       types.Signer
    55  	quit         chan bool
    56  	txFeed       event.Feed
    57  	scope        event.SubscriptionScope
    58  	chainHeadCh  chan core.ChainHeadEvent
    59  	chainHeadSub event.Subscription
    60  	mu           sync.RWMutex
    61  	chain        *LightChain
    62  	odr          OdrBackend
    63  	chainDb      ethdb.Database
    64  	relay        TxRelayBackend
    65  	head         common.Hash
    66  	nonce        map[common.Address]uint64            // "pending" nonce
    67  	pending      map[common.Hash]*types.Transaction   // pending transactions by tx hash
    68  	mined        map[common.Hash][]*types.Transaction // mined transactions by block hash
    69  	clearIdx     uint64                               // earliest block nr that can contain mined tx info
    70  
    71  	istanbul bool // Fork indicator whether we are in the istanbul stage.
    72  	eip2718  bool // Fork indicator whether we are in the eip2718 stage.
    73  }
    74  
    75  // TxRelayBackend provides an interface to the mechanism that forwards transactions to the
    76  // ETH network. The implementations of the functions should be non-blocking.
    77  //
    78  // Send instructs the backend to forward new transactions. NewHead notifies the backend
    79  // about a new head after it has been processed by the tx pool, including mined and rolled
    80  // back transactions since the last event.
    81  //
    82  // Discard notifies backend about transactions that should be discarded either because
    83  // they have been replaced by a re-send or because they have been mined long ago and no
    84  // rollback is expected.
    85  type TxRelayBackend interface {
    86  	Send(txs types.Transactions)
    87  	NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
    88  	Discard(hashes []common.Hash)
    89  }
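
// Illustrative sketch (not part of the original file): a minimal, hypothetical
// TxRelayBackend that only logs the callbacks instead of forwarding anything.
// A real implementation, such as the relay used by the les package, would
// propagate the transactions to light-server peers.
//
//	type loggingRelay struct{}
//
//	func (loggingRelay) Send(txs types.Transactions) {
//		log.Debug("relay: send", "count", len(txs))
//	}
//
//	func (loggingRelay) NewHead(head common.Hash, mined, rollback []common.Hash) {
//		log.Debug("relay: new head", "head", head, "mined", len(mined), "rollback", len(rollback))
//	}
//
//	func (loggingRelay) Discard(hashes []common.Hash) {
//		log.Debug("relay: discard", "count", len(hashes))
//	}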
    90  
    91  // NewTxPool creates a new light transaction pool
    92  func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool {
    93  	pool := &TxPool{
    94  		config:      config,
    95  		signer:      types.LatestSigner(config),
    96  		nonce:       make(map[common.Address]uint64),
    97  		pending:     make(map[common.Hash]*types.Transaction),
    98  		mined:       make(map[common.Hash][]*types.Transaction),
    99  		quit:        make(chan bool),
   100  		chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
   101  		chain:       chain,
   102  		relay:       relay,
   103  		odr:         chain.Odr(),
   104  		chainDb:     chain.Odr().Database(),
   105  		head:        chain.CurrentHeader().Hash(),
   106  		clearIdx:    chain.CurrentHeader().Number.Uint64(),
   107  	}
   108  	// Subscribe events from blockchain
   109  	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
   110  	go pool.eventLoop()
   111  
   112  	return pool
   113  }
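
// Illustrative usage sketch (hypothetical variable names; assumes a LightChain
// and a TxRelayBackend implementation are already available):
//
//	pool := NewTxPool(params.MainnetChainConfig, lightChain, relay)
//	defer pool.Stop()
//
//	if err := pool.Add(context.Background(), signedTx); err != nil {
//		log.Warn("transaction rejected", "err", err)
//	}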
   114  
   115  // currentState returns the light state of the current head header
   116  func (pool *TxPool) currentState(ctx context.Context) *state.StateDB {
   117  	return NewState(ctx, pool.chain.CurrentHeader(), pool.odr)
   118  }
   119  
   120  // GetNonce returns the "pending" nonce of a given address. It always queries
   121  // the nonce at the latest header as well, in order to detect whether another
   122  // client using the same key has sent a transaction.
   123  func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
   124  	state := pool.currentState(ctx)
   125  	nonce := state.GetNonce(addr)
   126  	if state.Error() != nil {
   127  		return 0, state.Error()
   128  	}
   129  	sn, ok := pool.nonce[addr]
   130  	if ok && sn > nonce {
   131  		nonce = sn
   132  	}
   133  	if !ok || sn < nonce {
   134  		pool.nonce[addr] = nonce
   135  	}
   136  	return nonce, nil
   137  }
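
// Worked example (sketch): if the head state reports nonce 5 for addr but the
// pool has already accepted local transactions up to nonce 7, GetNonce returns
// 7. If the on-chain nonce later overtakes the cached value (e.g. another
// client used the same key), the higher on-chain nonce wins and the cached
// entry is refreshed.
//
//	nonce, err := pool.GetNonce(ctx, addr) // max(on-chain nonce, locally tracked pending nonce)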
   138  
   139  // txStateChanges stores the recent changes between pending/mined states of
   140  // transactions. True means mined, false means rolled back, no entry means no change
   141  type txStateChanges map[common.Hash]bool
   142  
   143  // setState sets the status of a tx to either recently mined or recently rolled back; opposite updates cancel out and remove the entry.
   144  func (txc txStateChanges) setState(txHash common.Hash, mined bool) {
   145  	val, ent := txc[txHash]
   146  	if ent && (val != mined) {
   147  		delete(txc, txHash)
   148  	} else {
   149  		txc[txHash] = mined
   150  	}
   151  }
   152  
   153  // getLists creates lists of mined and rolled back tx hashes
   154  func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) {
   155  	for hash, val := range txc {
   156  		if val {
   157  			mined = append(mined, hash)
   158  		} else {
   159  			rollback = append(rollback, hash)
   160  		}
   161  	}
   162  	return
   163  }
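
// Behaviour sketch (txHash is a placeholder): opposite updates for the same
// hash within one head switch cancel out, so the transaction is reported as
// unchanged by getLists.
//
//	txc := make(txStateChanges)
//	txc.setState(txHash, true)  // recently mined
//	txc.setState(txHash, false) // rolled back again -> entry removed
//	mined, rollback := txc.getLists() // neither list contains txHash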
   164  
   165  // checkMinedTxs checks newly added blocks for the currently pending transactions
   166  // and marks them as mined if necessary. It also stores block position in the db
   167  // and adds them to the received txStateChanges map.
   168  func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uint64, txc txStateChanges) error {
   169  	// If no transactions are pending, we don't care about anything
   170  	if len(pool.pending) == 0 {
   171  		return nil
   172  	}
   173  	block, err := GetBlock(ctx, pool.odr, hash, number)
   174  	if err != nil {
   175  		return err
   176  	}
   177  	// Gather all the local transactions mined in this block
   178  	list := pool.mined[hash]
   179  	for _, tx := range block.Transactions() {
   180  		if _, ok := pool.pending[tx.Hash()]; ok {
   181  			list = append(list, tx)
   182  		}
   183  	}
   184  	// If some transactions have been mined, write the needed data to disk and update
   185  	if list != nil {
   186  		// Retrieve all the receipts belonging to this block and write the lookup table
   187  		if _, err := GetBlockReceipts(ctx, pool.odr, hash, number); err != nil { // ODR caches, ignore results
   188  			return err
   189  		}
   190  		rawdb.WriteTxLookupEntriesByBlock(pool.chainDb, block)
   191  
   192  		// Update the transaction pool's state
   193  		for _, tx := range list {
   194  			delete(pool.pending, tx.Hash())
   195  			txc.setState(tx.Hash(), true)
   196  		}
   197  		pool.mined[hash] = list
   198  	}
   199  	return nil
   200  }
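
// Sketch (hypothetical follow-up): the lookup entries written above are what
// later allow the inclusion block of a pooled transaction to be resolved via
// the rawdb accessors.
//
//	if number := rawdb.ReadTxLookupEntry(pool.chainDb, txHash); number != nil {
//		// the transaction identified by txHash was included in block *number
//	}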
   201  
   202  // rollbackTxs marks the transactions contained in recently rolled back blocks
   203  // as rolled back. It also removes any positional lookup entries.
   204  func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
   205  	batch := pool.chainDb.NewBatch()
   206  	if list, ok := pool.mined[hash]; ok {
   207  		for _, tx := range list {
   208  			txHash := tx.Hash()
   209  			rawdb.DeleteTxLookupEntry(batch, txHash)
   210  			pool.pending[txHash] = tx
   211  			txc.setState(txHash, false)
   212  		}
   213  		delete(pool.mined, hash)
   214  	}
   215  	batch.Write()
   216  }
   217  
   218  // reorgOnNewHead sets a new head header, processing (and rolling back if necessary)
   219  // the blocks since the last known head and returns a txStateChanges map containing
   220  // the recently mined and rolled back transaction hashes. If an error (context
   221  // timeout) occurs during checking new blocks, it leaves the locally known head
   222  // at the latest checked block and still returns a valid txStateChanges, making it
   223  // possible to continue checking the missing blocks at the next chain head event.
   224  func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
   225  	txc := make(txStateChanges)
   226  	oldh := pool.chain.GetHeaderByHash(pool.head)
   227  	newh := newHeader
   228  	// find common ancestor, create list of rolled back and new block hashes
   229  	var oldHashes, newHashes []common.Hash
   230  	for oldh.Hash() != newh.Hash() {
   231  		if oldh.Number.Uint64() >= newh.Number.Uint64() {
   232  			oldHashes = append(oldHashes, oldh.Hash())
   233  			oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
   234  		}
   235  		if oldh.Number.Uint64() < newh.Number.Uint64() {
   236  			newHashes = append(newHashes, newh.Hash())
   237  			newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
   238  			if newh == nil {
   239  				// happens when CHT syncing, nothing to do
   240  				newh = oldh
   241  			}
   242  		}
   243  	}
   244  	if oldh.Number.Uint64() < pool.clearIdx {
   245  		pool.clearIdx = oldh.Number.Uint64()
   246  	}
   247  	// roll back old blocks
   248  	for _, hash := range oldHashes {
   249  		pool.rollbackTxs(hash, txc)
   250  	}
   251  	pool.head = oldh.Hash()
   252  	// check mined txs of new blocks (array is in reversed order)
   253  	for i := len(newHashes) - 1; i >= 0; i-- {
   254  		hash := newHashes[i]
   255  		if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
   256  			return txc, err
   257  		}
   258  		pool.head = hash
   259  	}
   260  
   261  	// clear old mined tx entries of old blocks
   262  	if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
   263  		idx2 := idx - txPermanent
   264  		if len(pool.mined) > 0 {
   265  			for i := pool.clearIdx; i < idx2; i++ {
   266  				hash := rawdb.ReadCanonicalHash(pool.chainDb, i)
   267  				if list, ok := pool.mined[hash]; ok {
   268  					hashes := make([]common.Hash, len(list))
   269  					for i, tx := range list {
   270  						hashes[i] = tx.Hash()
   271  					}
   272  					pool.relay.Discard(hashes)
   273  					delete(pool.mined, hash)
   274  				}
   275  			}
   276  		}
   277  		pool.clearIdx = idx2
   278  	}
   279  
   280  	return txc, nil
   281  }
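
// Worked example (sketch): if the old head is block 103 on a side chain and the
// new head is block 105 on the canonical chain with the common ancestor at 101,
// the loop above collects the side-chain hashes 103 and 102 into oldHashes and
// the canonical hashes 105 down to 102 into newHashes. The side-chain blocks are
// then rolled back and the canonical blocks are scanned oldest-first for newly
// mined pool transactions.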
   282  
   283  // blockCheckTimeout is the time limit for checking new blocks for mined
   284  // transactions. Checking resumes at the next chain head event if timed out.
   285  const blockCheckTimeout = time.Second * 3
   286  
   287  // eventLoop processes chain head events and also notifies the tx relay backend
   288  // about the new head hash and tx state changes
   289  func (pool *TxPool) eventLoop() {
   290  	for {
   291  		select {
   292  		case ev := <-pool.chainHeadCh:
   293  			pool.setNewHead(ev.Block.Header())
   294  			// hack in order to avoid hogging the lock; this part will
   295  			// be replaced by a subsequent PR.
   296  			time.Sleep(time.Millisecond)
   297  
   298  		// System stopped
   299  		case <-pool.chainHeadSub.Err():
   300  			return
   301  		}
   302  	}
   303  }
   304  
   305  func (pool *TxPool) setNewHead(head *types.Header) {
   306  	pool.mu.Lock()
   307  	defer pool.mu.Unlock()
   308  
   309  	ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
   310  	defer cancel()
   311  
   312  	txc, _ := pool.reorgOnNewHead(ctx, head)
   313  	m, r := txc.getLists()
   314  	pool.relay.NewHead(pool.head, m, r)
   315  
   316  	// Update fork indicator by next pending block number
   317  	next := new(big.Int).Add(head.Number, big.NewInt(1))
   318  	pool.istanbul = pool.config.IsIstanbul(next)
   319  	pool.eip2718 = pool.config.IsBerlin(next)
   320  }
   321  
   322  // Stop stops the light transaction pool
   323  func (pool *TxPool) Stop() {
   324  	// Unsubscribe all subscriptions registered from txpool
   325  	pool.scope.Close()
   326  	// Unsubscribe subscriptions registered from blockchain
   327  	pool.chainHeadSub.Unsubscribe()
   328  	close(pool.quit)
   329  	log.Info("Transaction pool stopped")
   330  }
   331  
   332  // SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
   333  // starts sending events to the given channel.
   334  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
   335  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   336  }
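
// Illustrative consumer sketch (hypothetical): drain the channel until the
// subscription is torn down.
//
//	ch := make(chan core.NewTxsEvent, 16)
//	sub := pool.SubscribeNewTxsEvent(ch)
//	defer sub.Unsubscribe()
//
//	for {
//		select {
//		case ev := <-ch:
//			log.Info("new local transactions", "count", len(ev.Txs))
//		case err := <-sub.Err():
//			if err != nil {
//				log.Warn("tx subscription failed", "err", err)
//			}
//			return
//		}
//	}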
   337  
   338  // Stats returns the number of currently pending (locally created) transactions
   339  func (pool *TxPool) Stats() (pending int) {
   340  	pool.mu.RLock()
   341  	defer pool.mu.RUnlock()
   342  
   343  	pending = len(pool.pending)
   344  	return
   345  }
   346  
   347  // validateTx checks whether a transaction is valid according to the consensus rules.
   348  func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
   349  	// Validate sender
   350  	var (
   351  		from common.Address
   352  		err  error
   353  	)
   354  
   355  	// Validate the transaction sender and its signature. Reject the
   356  	// transaction if the from field is invalid.
   357  	if from, err = types.Sender(pool.signer, tx); err != nil {
   358  		return txpool.ErrInvalidSender
   359  	}
   360  	// Check for nonce errors: the transaction nonce must not be lower than the sender's current account nonce
   361  	currentState := pool.currentState(ctx)
   362  	if n := currentState.GetNonce(from); n > tx.Nonce() {
   363  		return core.ErrNonceTooLow
   364  	}
   365  
   366  	// Check the transaction doesn't exceed the current
   367  	// block limit gas.
   368  	header := pool.chain.GetHeaderByHash(pool.head)
   369  	if header.GasLimit < tx.Gas() {
   370  		return txpool.ErrGasLimit
   371  	}
   372  
   373  	// Transactions can't be negative. This should never happen with
   374  	// RLP-decoded transactions, but may occur if a transaction is
   375  	// created via the RPC, for example.
   376  	if tx.Value().Sign() < 0 {
   377  		return txpool.ErrNegativeValue
   378  	}
   379  
   380  	// Transactor should have enough funds to cover the costs
   381  	// cost == V + GP * GL
   382  	if b := currentState.GetBalance(from); b.Cmp(tx.Cost()) < 0 {
   383  		return core.ErrInsufficientFunds
   384  	}
   385  
   386  	// Should supply enough intrinsic gas
   387  	gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
   388  	if err != nil {
   389  		return err
   390  	}
   391  	if tx.Gas() < gas {
   392  		return core.ErrIntrinsicGas
   393  	}
   394  	return currentState.Error()
   395  }
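
// Worked example (sketch): a plain value transfer with no calldata and no
// access list has an intrinsic cost of 21000 gas, so a transaction submitted
// with tx.Gas() below that is rejected here with core.ErrIntrinsicGas. The
// exact cost grows with the calldata size and, after the Berlin fork, with the
// size of the access list.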
   396  
   397  // add validates a new transaction and sets its state to pending if processable.
   398  // It also updates the locally stored nonce if necessary.
   399  func (pool *TxPool) add(ctx context.Context, tx *types.Transaction) error {
   400  	hash := tx.Hash()
   401  
   402  	if pool.pending[hash] != nil {
   403  		return fmt.Errorf("known transaction (%x)", hash[:4])
   404  	}
   405  	err := pool.validateTx(ctx, tx)
   406  	if err != nil {
   407  		return err
   408  	}
   409  
   410  	if _, ok := pool.pending[hash]; !ok {
   411  		pool.pending[hash] = tx
   412  
   413  		nonce := tx.Nonce() + 1
   414  
   415  		addr, _ := types.Sender(pool.signer, tx)
   416  		if nonce > pool.nonce[addr] {
   417  			pool.nonce[addr] = nonce
   418  		}
   419  
   420  		// Notify the subscribers. This event is posted in a goroutine
   421  		// because it's possible that a "remove transaction" handler gets called
   422  		// during the post, which would then wait for the global tx pool lock and deadlock.
   423  		go pool.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
   424  	}
   425  
   426  	// Print a log message if a sufficiently low log level is set
   427  	log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
   428  	return nil
   429  }
   430  
   431  // Add adds a transaction to the pool if valid and passes it to the tx relay
   432  // backend
   433  func (pool *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
   434  	pool.mu.Lock()
   435  	defer pool.mu.Unlock()
   436  	data, err := tx.MarshalBinary()
   437  	if err != nil {
   438  		return err
   439  	}
   440  
   441  	if err := pool.add(ctx, tx); err != nil {
   442  		return err
   443  	}
   445  	pool.relay.Send(types.Transactions{tx})
   446  
   447  	pool.chainDb.Put(tx.Hash().Bytes(), data)
   448  	return nil
   449  }
   450  
   451  // AddBatch adds all valid transactions to the pool and passes them to
   452  // the tx relay backend
   453  func (pool *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
   454  	pool.mu.Lock()
   455  	defer pool.mu.Unlock()
   456  	var sendTx types.Transactions
   457  
   458  	for _, tx := range txs {
   459  		if err := pool.add(ctx, tx); err == nil {
   460  			sendTx = append(sendTx, tx)
   461  		}
   462  	}
   463  	if len(sendTx) > 0 {
   464  		pool.relay.Send(sendTx)
   465  	}
   466  }
   467  
   468  // GetTransaction returns a transaction if it is contained in the pool
   469  // and nil otherwise.
   470  func (pool *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
   471  	// check the txs first
   472  	if tx, ok := pool.pending[hash]; ok {
   473  		return tx
   474  	}
   475  	return nil
   476  }
   477  
   478  // GetTransactions returns all currently processable transactions.
   479  // The returned slice may be modified by the caller.
   480  func (pool *TxPool) GetTransactions() (txs types.Transactions, err error) {
   481  	pool.mu.RLock()
   482  	defer pool.mu.RUnlock()
   483  
   484  	txs = make(types.Transactions, len(pool.pending))
   485  	i := 0
   486  	for _, tx := range pool.pending {
   487  		txs[i] = tx
   488  		i++
   489  	}
   490  	return txs, nil
   491  }
   492  
   493  // Content retrieves the data content of the transaction pool, returning all the
   494  // pending as well as queued transactions, grouped by account and nonce.
   495  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   496  	pool.mu.RLock()
   497  	defer pool.mu.RUnlock()
   498  
   499  	// Retrieve all the pending transactions and group them by account
   500  	pending := make(map[common.Address]types.Transactions)
   501  	for _, tx := range pool.pending {
   502  		account, _ := types.Sender(pool.signer, tx)
   503  		pending[account] = append(pending[account], tx)
   504  	}
   505  	// There are no queued transactions in a light pool, just return an empty map
   506  	queued := make(map[common.Address]types.Transactions)
   507  	return pending, queued
   508  }
   509  
   510  // ContentFrom retrieves the data content of the transaction pool, returning the
   511  // pending as well as queued transactions of this address, grouped by nonce.
   512  func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
   513  	pool.mu.RLock()
   514  	defer pool.mu.RUnlock()
   515  
   516  	// Retrieve the pending transactions belonging to the given address
   517  	var pending types.Transactions
   518  	for _, tx := range pool.pending {
   519  		account, _ := types.Sender(pool.signer, tx)
   520  		if account != addr {
   521  			continue
   522  		}
   523  		pending = append(pending, tx)
   524  	}
   525  	// There are no queued transactions in a light pool, just return an empty list
   526  	return pending, types.Transactions{}
   527  }
   528  
   529  // RemoveTransactions removes all given transactions from the pool.
   530  func (pool *TxPool) RemoveTransactions(txs types.Transactions) {
   531  	pool.mu.Lock()
   532  	defer pool.mu.Unlock()
   533  
   534  	var hashes []common.Hash
   535  	batch := pool.chainDb.NewBatch()
   536  	for _, tx := range txs {
   537  		hash := tx.Hash()
   538  		delete(pool.pending, hash)
   539  		batch.Delete(hash.Bytes())
   540  		hashes = append(hashes, hash)
   541  	}
   542  	batch.Write()
   543  	pool.relay.Discard(hashes)
   544  }
   545  
   546  // RemoveTx removes the transaction with the given hash from the pool.
   547  func (pool *TxPool) RemoveTx(hash common.Hash) {
   548  	pool.mu.Lock()
   549  	defer pool.mu.Unlock()
   550  	// delete from pending pool
   551  	delete(pool.pending, hash)
   552  	pool.chainDb.Delete(hash[:])
   553  	pool.relay.Discard([]common.Hash{hash})
   554  }