github.com/elastos/Elastos.ELA.SideChain.ETH@v0.2.2/light/txpool.go

     1  // Copyright 2016 The Elastos.ELA.SideChain.ESC Authors
     2  // This file is part of the Elastos.ELA.SideChain.ESC library.
     3  //
     4  // The Elastos.ELA.SideChain.ESC library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The Elastos.ELA.SideChain.ESC library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the Elastos.ELA.SideChain.ESC library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package light
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/big"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/elastos/Elastos.ELA.SideChain.ESC/common"
    27  	"github.com/elastos/Elastos.ELA.SideChain.ESC/core"
    28  	"github.com/elastos/Elastos.ELA.SideChain.ESC/core/rawdb"
    29  	"github.com/elastos/Elastos.ELA.SideChain.ESC/core/state"
    30  	"github.com/elastos/Elastos.ELA.SideChain.ESC/core/types"
    31  	"github.com/elastos/Elastos.ELA.SideChain.ESC/ethdb"
    32  	"github.com/elastos/Elastos.ELA.SideChain.ESC/event"
    33  	"github.com/elastos/Elastos.ELA.SideChain.ESC/log"
    34  	"github.com/elastos/Elastos.ELA.SideChain.ESC/params"
    35  	"github.com/elastos/Elastos.ELA.SideChain.ESC/rlp"
    36  )
    37  
    38  const (
    39  	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    40  	chainHeadChanSize = 10
    41  )
    42  
    43  // txPermanent is the number of mined blocks after which a mined transaction is
    44  // considered permanent and no rollback is expected
    45  var txPermanent = uint64(500)
    46  
    47  // TxPool implements the transaction pool for light clients, which keeps track
    48  // of the status of locally created transactions, detecting if they are included
    49  // in a block (mined) or rolled back. There are no queued transactions since we
    50  // always receive all locally signed transactions in the same order as they are
    51  // created.
    52  type TxPool struct {
    53  	config       *params.ChainConfig
    54  	signer       types.Signer
    55  	quit         chan bool
    56  	txFeed       event.Feed
    57  	scope        event.SubscriptionScope
    58  	chainHeadCh  chan core.ChainHeadEvent
    59  	chainHeadSub event.Subscription
    60  	mu           sync.RWMutex
    61  	chain        *LightChain
    62  	odr          OdrBackend
    63  	chainDb      ethdb.Database
    64  	relay        TxRelayBackend
    65  	head         common.Hash
    66  	nonce        map[common.Address]uint64            // "pending" nonce
    67  	pending      map[common.Hash]*types.Transaction   // pending transactions by tx hash
    68  	mined        map[common.Hash][]*types.Transaction // mined transactions by block hash
    69  	clearIdx     uint64                               // earliest block nr that can contain mined tx info
    70  
    71  	istanbul bool // Fork indicator whether we are in the istanbul stage.
    72  }
    73  
    74  // TxRelayBackend provides an interface to the mechanism that forwards transactions
    75  // to the ETH network. The implementations of the functions should be non-blocking.
    76  //
    77  // Send instructs the backend to forward new transactions.
    78  // NewHead notifies the backend about a new head after it has been processed by the
    79  // tx pool, including mined and rolled back transactions since the last event.
    80  // Discard notifies the backend about transactions that should be discarded, either
    81  // because they have been replaced by a re-send or because they have been mined
    82  // long ago and no rollback is expected.
    83  type TxRelayBackend interface {
    84  	Send(txs types.Transactions)
    85  	NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
    86  	Discard(hashes []common.Hash)
    87  }
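
// noOpTxRelay is an illustrative, minimal TxRelayBackend sketch added for clarity;
// it is not part of the original file. It satisfies the interface while discarding
// every notification. A real backend would forward the transactions to the network
// and track their status instead.
type noOpTxRelay struct{}

func (noOpTxRelay) Send(txs types.Transactions) {}

func (noOpTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {}

func (noOpTxRelay) Discard(hashes []common.Hash) {}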
    88  
    89  // NewTxPool creates a new light transaction pool
    90  func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool {
    91  	pool := &TxPool{
    92  		config:      config,
    93  		signer:      types.NewEIP155Signer(config.GetChainIDByHeight(chain.CurrentHeader().Number)),
    94  		nonce:       make(map[common.Address]uint64),
    95  		pending:     make(map[common.Hash]*types.Transaction),
    96  		mined:       make(map[common.Hash][]*types.Transaction),
    97  		quit:        make(chan bool),
    98  		chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
    99  		chain:       chain,
   100  		relay:       relay,
   101  		odr:         chain.Odr(),
   102  		chainDb:     chain.Odr().Database(),
   103  		head:        chain.CurrentHeader().Hash(),
   104  		clearIdx:    chain.CurrentHeader().Number.Uint64(),
   105  	}
   106  	pool.signer.(types.EIP155Signer).SetForkData(config, chain.CurrentHeader().Number)
   107  	// Subscribe events from blockchain
   108  	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
   109  	go pool.eventLoop()
   110  
   111  	return pool
   112  }
   113  
   114  // currentState returns the light state of the current head header
   115  func (pool *TxPool) currentState(ctx context.Context) *state.StateDB {
   116  	return NewState(ctx, pool.chain.CurrentHeader(), pool.odr)
   117  }
   118  
   119  // GetNonce returns the "pending" nonce of a given address. It also queries the
   120  // nonce of the latest header, in order to detect whether another client using
   121  // the same key has sent a transaction.
   122  func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
   123  	state := pool.currentState(ctx)
   124  	nonce := state.GetNonce(addr)
   125  	if state.Error() != nil {
   126  		return 0, state.Error()
   127  	}
   128  	sn, ok := pool.nonce[addr]
   129  	if ok && sn > nonce {
   130  		nonce = sn
   131  	}
   132  	if !ok || sn < nonce {
   133  		pool.nonce[addr] = nonce
   134  	}
   135  	return nonce, nil
   136  }
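
// Illustrative sketch (not in the original source): the higher of the cached
// "pending" nonce and the on-chain nonce wins, so locally queued transactions are
// accounted for while transactions sent by another client with the same key are
// picked up as soon as they raise the on-chain nonce past the cached value.
//
//	// assume the head state reports nonce 5 and local txs 5, 6 and 7 are still pending
//	n, err := pool.GetNonce(ctx, addr) // n == 8, the next usable nonce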
   137  
   138  // txStateChanges stores the recent changes between pending/mined states of
   139  // transactions. True means mined, false means rolled back, no entry means no change
   140  type txStateChanges map[common.Hash]bool
   141  
   142  // setState sets the status of a tx to either recently mined or recently rolled back
   143  func (txc txStateChanges) setState(txHash common.Hash, mined bool) {
   144  	val, ent := txc[txHash]
   145  	if ent && (val != mined) {
   146  		delete(txc, txHash)
   147  	} else {
   148  		txc[txHash] = mined
   149  	}
   150  }
   151  
   152  // getLists creates lists of mined and rolled back tx hashes
   153  func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) {
   154  	for hash, val := range txc {
   155  		if val {
   156  			mined = append(mined, hash)
   157  		} else {
   158  			rollback = append(rollback, hash)
   159  		}
   160  	}
   161  	return
   162  }
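
// Illustrative sketch (not in the original source): a hash that is first marked
// as mined and then as rolled back within the same update cancels out, so it
// shows up in neither of the lists returned by getLists.
//
//	txc := make(txStateChanges)
//	txc.setState(h, true)             // mined in a newly attached block
//	txc.setState(h, false)            // that block was rolled back again
//	mined, rollback := txc.getLists() // h appears in neither slice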
   163  
   164  // checkMinedTxs checks newly added blocks for the currently pending transactions
   165  // and marks them as mined if necessary. It also stores the block position of mined
   166  // transactions in the database and adds them to the received txStateChanges map.
   167  func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uint64, txc txStateChanges) error {
   168  	// If no transactions are pending, we don't care about anything
   169  	if len(pool.pending) == 0 {
   170  		return nil
   171  	}
   172  	block, err := GetBlock(ctx, pool.odr, hash, number)
   173  	if err != nil {
   174  		return err
   175  	}
   176  	// Gather all the local transactions mined in this block
   177  	list := pool.mined[hash]
   178  	for _, tx := range block.Transactions() {
   179  		if _, ok := pool.pending[tx.Hash()]; ok {
   180  			list = append(list, tx)
   181  		}
   182  	}
   183  	// If some transactions have been mined, write the needed data to disk and update
   184  	if list != nil {
   185  		// Retrieve all the receipts belonging to this block and write the lookup table
   186  		if _, err := GetBlockReceipts(ctx, pool.odr, hash, number); err != nil { // ODR caches, ignore results
   187  			return err
   188  		}
   189  		rawdb.WriteTxLookupEntries(pool.chainDb, block)
   190  
   191  		// Update the transaction pool's state
   192  		for _, tx := range list {
   193  			delete(pool.pending, tx.Hash())
   194  			txc.setState(tx.Hash(), true)
   195  		}
   196  		pool.mined[hash] = list
   197  	}
   198  	return nil
   199  }
   200  
   201  // rollbackTxs marks the transactions contained in recently rolled back blocks
   202  // as rolled back. It also removes any positional lookup entries.
   203  func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
   204  	batch := pool.chainDb.NewBatch()
   205  	if list, ok := pool.mined[hash]; ok {
   206  		for _, tx := range list {
   207  			txHash := tx.Hash()
   208  			rawdb.DeleteTxLookupEntry(batch, txHash)
   209  			pool.pending[txHash] = tx
   210  			txc.setState(txHash, false)
   211  		}
   212  		delete(pool.mined, hash)
   213  	}
   214  	batch.Write()
   215  }
   216  
   217  // reorgOnNewHead sets a new head header, processing (and rolling back if necessary)
   218  // the blocks since the last known head, and returns a txStateChanges map containing
   219  // the recently mined and rolled back transaction hashes. If an error (e.g. a context
   220  // timeout) occurs while checking new blocks, it leaves the locally known head at the
   221  // latest checked block and still returns a valid txStateChanges, making it possible
   222  // to continue checking the missing blocks at the next chain head event.
   223  func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
   224  	txc := make(txStateChanges)
   225  	oldh := pool.chain.GetHeaderByHash(pool.head)
   226  	newh := newHeader
   227  	// find common ancestor, create list of rolled back and new block hashes
   228  	var oldHashes, newHashes []common.Hash
   229  	for oldh.Hash() != newh.Hash() {
   230  		if oldh.Number.Uint64() >= newh.Number.Uint64() {
   231  			oldHashes = append(oldHashes, oldh.Hash())
   232  			oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
   233  		}
   234  		if oldh.Number.Uint64() < newh.Number.Uint64() {
   235  			newHashes = append(newHashes, newh.Hash())
   236  			newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
   237  			if newh == nil {
   238  				// happens when CHT syncing, nothing to do
   239  				newh = oldh
   240  			}
   241  		}
   242  	}
   243  	if oldh.Number.Uint64() < pool.clearIdx {
   244  		pool.clearIdx = oldh.Number.Uint64()
   245  	}
   246  	// roll back old blocks
   247  	for _, hash := range oldHashes {
   248  		pool.rollbackTxs(hash, txc)
   249  	}
   250  	pool.head = oldh.Hash()
   251  	// check mined txs of new blocks (array is in reversed order)
   252  	for i := len(newHashes) - 1; i >= 0; i-- {
   253  		hash := newHashes[i]
   254  		if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
   255  			return txc, err
   256  		}
   257  		pool.head = hash
   258  	}
   259  
   260  	// clear old mined tx entries of old blocks
   261  	if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
   262  		idx2 := idx - txPermanent
   263  		if len(pool.mined) > 0 {
   264  			for i := pool.clearIdx; i < idx2; i++ {
   265  				hash := rawdb.ReadCanonicalHash(pool.chainDb, i)
   266  				if list, ok := pool.mined[hash]; ok {
   267  					hashes := make([]common.Hash, len(list))
   268  					for i, tx := range list {
   269  						hashes[i] = tx.Hash()
   270  					}
   271  					pool.relay.Discard(hashes)
   272  					delete(pool.mined, hash)
   273  				}
   274  			}
   275  		}
   276  		pool.clearIdx = idx2
   277  	}
   278  
   279  	return txc, nil
   280  }
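
// Illustrative walk-through (not in the original source) of a one-block reorg:
// with the old head at block B and the new head at B' (same height, common parent
// A), the ancestor loop above collects oldHashes = [B] and newHashes = [B'].
// rollbackTxs then moves B's local transactions back into the pending set, and
// checkMinedTxs marks any of them that also appear in B' as mined again, so the
// resulting txStateChanges only reports transactions whose status actually changed.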
   281  
   282  // blockCheckTimeout is the time limit for checking new blocks for mined
   283  // transactions. Checking resumes at the next chain head event if timed out.
   284  const blockCheckTimeout = time.Second * 3
   285  
   286  // eventLoop processes chain head events and also notifies the tx relay backend
   287  // about the new head hash and tx state changes
   288  func (pool *TxPool) eventLoop() {
   289  	for {
   290  		select {
   291  		case ev := <-pool.chainHeadCh:
   292  			pool.setNewHead(ev.Block.Header())
   293  			// hack in order to avoid hogging the lock; this part will
   294  			// be replaced by a subsequent PR.
   295  			time.Sleep(time.Millisecond)
   296  
   297  		// System stopped
   298  		case <-pool.chainHeadSub.Err():
   299  			return
   300  		}
   301  	}
   302  }
   303  
   304  func (pool *TxPool) setNewHead(head *types.Header) {
   305  	pool.mu.Lock()
   306  	defer pool.mu.Unlock()
   307  
   308  	ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
   309  	defer cancel()
   310  
   311  	txc, _ := pool.reorgOnNewHead(ctx, head)
   312  	m, r := txc.getLists()
   313  	pool.relay.NewHead(pool.head, m, r)
   314  
   315  	// Update fork indicator by next pending block number
   316  	next := new(big.Int).Add(head.Number, big.NewInt(1))
   317  	pool.istanbul = pool.config.IsIstanbul(next)
   318  }
   319  
   320  // Stop stops the light transaction pool
   321  func (pool *TxPool) Stop() {
   322  	// Unsubscribe all subscriptions registered from txpool
   323  	pool.scope.Close()
   324  	// Unsubscribe subscriptions registered from blockchain
   325  	pool.chainHeadSub.Unsubscribe()
   326  	close(pool.quit)
   327  	log.Info("Transaction pool stopped")
   328  }
   329  
   330  // SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
   331  // starts sending events to the given channel.
   332  func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
   333  	return pool.scope.Track(pool.txFeed.Subscribe(ch))
   334  }
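
// Illustrative usage sketch (not in the original source); ch and the surrounding
// loop are hypothetical caller-side code consuming the pool's announcements.
//
//	ch := make(chan core.NewTxsEvent, 16)
//	sub := pool.SubscribeNewTxsEvent(ch)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case ev := <-ch:
//			log.Debug("New local transactions", "count", len(ev.Txs))
//		case err := <-sub.Err():
//			return err
//		}
//	}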
   335  
   336  // Stats returns the number of currently pending (locally created) transactions
   337  func (pool *TxPool) Stats() (pending int) {
   338  	pool.mu.RLock()
   339  	defer pool.mu.RUnlock()
   340  
   341  	pending = len(pool.pending)
   342  	return
   343  }
   344  
   345  // validateTx checks whether a transaction is valid according to the consensus rules.
   346  func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
   347  	// Validate sender
   348  	var (
   349  		from common.Address
   350  		err  error
   351  	)
   352  
   353  	// Validate the transaction sender and its signature. Reject the
   354  	// transaction if the from field is invalid.
   355  	if from, err = types.Sender(pool.signer, tx); err != nil {
   356  		return core.ErrInvalidSender
   357  	}
   358  	// Last but not least check for nonce errors
   359  	currentState := pool.currentState(ctx)
   360  	if n := currentState.GetNonce(from); n > tx.Nonce() {
   361  		return core.ErrNonceTooLow
   362  	}
   363  
   364  	// Check the transaction doesn't exceed the current
   365  	// block limit gas.
   366  	header := pool.chain.GetHeaderByHash(pool.head)
   367  	if header.GasLimit < tx.Gas() {
   368  		return core.ErrGasLimit
   369  	}
   370  
   371  	// Transactions can't be negative. This may never happen
   372  	// using RLP decoded transactions but may occur if you create
   373  	// a transaction using the RPC for example.
   374  	if tx.Value().Sign() < 0 {
   375  		return core.ErrNegativeValue
   376  	}
   377  
   378  	// Transactor should have enough funds to cover the costs
   379  	// cost == V + GP * GL
   380  	if b := currentState.GetBalance(from); b.Cmp(tx.Cost()) < 0 {
   381  		return core.ErrInsufficientFunds
   382  	}
   383  
   384  	// Should supply enough intrinsic gas
   385  	gas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
   386  	if err != nil {
   387  		return err
   388  	}
   389  	if tx.Gas() < gas {
   390  		return core.ErrIntrinsicGas
   391  	}
   392  	return currentState.Error()
   393  }
   394  
   395  // add validates a new transaction and sets its state pending if processable.
   396  // It also updates the locally stored nonce if necessary.
   397  func (pool *TxPool) add(ctx context.Context, tx *types.Transaction) error {
   398  	hash := tx.Hash()
   399  
   400  	if pool.pending[hash] != nil {
   401  		return fmt.Errorf("Known transaction (%x)", hash[:4])
   402  	}
   403  	err := pool.validateTx(ctx, tx)
   404  	if err != nil {
   405  		return err
   406  	}
   407  
   408  	if _, ok := pool.pending[hash]; !ok {
   409  		pool.pending[hash] = tx
   410  
   411  		nonce := tx.Nonce() + 1
   412  
   413  		addr, _ := types.Sender(pool.signer, tx)
   414  		if nonce > pool.nonce[addr] {
   415  			pool.nonce[addr] = nonce
   416  		}
   417  
   418  		// Notify the subscribers. This event is posted in a goroutine
   419  		// because it's possible that somewhere during the post "Remove transaction"
   420  		// gets called which will then wait for the global tx pool lock and deadlock.
   421  		go pool.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
   422  	}
   423  
   424  	// Print a log message if low enough level is set
   425  	log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
   426  	return nil
   427  }
   428  
   429  // Add adds a transaction to the pool if valid and passes it to the tx relay
   430  // backend
   431  func (pool *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
   432  	pool.mu.Lock()
   433  	defer pool.mu.Unlock()
   434  
   435  	data, err := rlp.EncodeToBytes(tx)
   436  	if err != nil {
   437  		return err
   438  	}
   439  
   440  	if err := pool.add(ctx, tx); err != nil {
   441  		return err
   442  	}
   443  	//fmt.Println("Send", tx.Hash())
   444  	pool.relay.Send(types.Transactions{tx})
   445  
   446  	pool.chainDb.Put(tx.Hash().Bytes(), data)
   447  	return nil
   448  }
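
// Illustrative usage sketch (not in the original source): sign a transaction with
// an EIP-155 signer matching the pool's chain ID and hand it to the pool. key,
// chainID, nonce, to, amount, gasLimit and gasPrice are hypothetical caller-side
// values.
//
//	tx := types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, nil)
//	signed, err := types.SignTx(tx, types.NewEIP155Signer(chainID), key)
//	if err != nil {
//		return err
//	}
//	if err := pool.Add(ctx, signed); err != nil {
//		return err
//	}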
   449  
   450  // AddBatch adds all valid transactions to the pool and passes them to
   451  // the tx relay backend
   452  func (pool *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
   453  	pool.mu.Lock()
   454  	defer pool.mu.Unlock()
   455  	var sendTx types.Transactions
   456  
   457  	for _, tx := range txs {
   458  		if err := pool.add(ctx, tx); err == nil {
   459  			sendTx = append(sendTx, tx)
   460  		}
   461  	}
   462  	if len(sendTx) > 0 {
   463  		pool.relay.Send(sendTx)
   464  	}
   465  }
   466  
   467  // GetTransaction returns a transaction if it is contained in the pool
   468  // and nil otherwise.
   469  func (pool *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
   470  	// check the txs first
   471  	if tx, ok := pool.pending[hash]; ok {
   472  		return tx
   473  	}
   474  	return nil
   475  }
   476  
   477  // GetTransactions returns all currently processable transactions.
   478  // The returned slice may be modified by the caller.
   479  func (pool *TxPool) GetTransactions() (txs types.Transactions, err error) {
   480  	pool.mu.RLock()
   481  	defer pool.mu.RUnlock()
   482  
   483  	txs = make(types.Transactions, len(pool.pending))
   484  	i := 0
   485  	for _, tx := range pool.pending {
   486  		txs[i] = tx
   487  		i++
   488  	}
   489  	return txs, nil
   490  }
   491  
   492  // Content retrieves the data content of the transaction pool, returning all the
   493  // pending as well as queued transactions, grouped by account and nonce.
   494  func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
   495  	pool.mu.RLock()
   496  	defer pool.mu.RUnlock()
   497  
   498  	// Retrieve all the pending transactions and sort by account and by nonce
   499  	pending := make(map[common.Address]types.Transactions)
   500  	for _, tx := range pool.pending {
   501  		account, _ := types.Sender(pool.signer, tx)
   502  		pending[account] = append(pending[account], tx)
   503  	}
   504  	// There are no queued transactions in a light pool, just return an empty map
   505  	queued := make(map[common.Address]types.Transactions)
   506  	return pending, queued
   507  }
   508  
   509  // RemoveTransactions removes all given transactions from the pool.
   510  func (pool *TxPool) RemoveTransactions(txs types.Transactions) {
   511  	pool.mu.Lock()
   512  	defer pool.mu.Unlock()
   513  
   514  	var hashes []common.Hash
   515  	batch := pool.chainDb.NewBatch()
   516  	for _, tx := range txs {
   517  		hash := tx.Hash()
   518  		delete(pool.pending, hash)
   519  		batch.Delete(hash.Bytes())
   520  		hashes = append(hashes, hash)
   521  	}
   522  	batch.Write()
   523  	pool.relay.Discard(hashes)
   524  }
   525  
   526  // RemoveTx removes the transaction with the given hash from the pool.
   527  func (pool *TxPool) RemoveTx(hash common.Hash) {
   528  	pool.mu.Lock()
   529  	defer pool.mu.Unlock()
   530  	// delete from pending pool
   531  	delete(pool.pending, hash)
   532  	pool.chainDb.Delete(hash[:])
   533  	pool.relay.Discard([]common.Hash{hash})
   534  }