github.com/evdatsion/aphelion-dpos-bft@v0.32.1/mempool/clist_mempool.go (about)

     1  package mempool
     2  
     3  import (
     4  	"bytes"
     5  	"container/list"
     6  	"crypto/sha256"
     7  	"fmt"
     8  	"sync"
     9  	"sync/atomic"
    10  	"time"
    11  
    12  	"github.com/pkg/errors"
    13  
    14  	abci "github.com/evdatsion/aphelion-dpos-bft/abci/types"
    15  	cfg "github.com/evdatsion/aphelion-dpos-bft/config"
    16  	auto "github.com/evdatsion/aphelion-dpos-bft/libs/autofile"
    17  	"github.com/evdatsion/aphelion-dpos-bft/libs/clist"
    18  	cmn "github.com/evdatsion/aphelion-dpos-bft/libs/common"
    19  	"github.com/evdatsion/aphelion-dpos-bft/libs/log"
    20  	"github.com/evdatsion/aphelion-dpos-bft/proxy"
    21  	"github.com/evdatsion/aphelion-dpos-bft/types"
    22  )
    23  
    24  //--------------------------------------------------------------------------------
    25  
    26  // CListMempool is an ordered in-memory pool for transactions before they are
    27  // proposed in a consensus round. Transaction validity is checked using the
     28  // CheckTx ABCI message before the transaction is added to the pool. The
     29  // mempool stores transactions in a concurrent linked list so that multiple
     30  // readers can access them efficiently.
    31  type CListMempool struct {
    32  	config *cfg.MempoolConfig
    33  
    34  	proxyMtx     sync.Mutex
    35  	proxyAppConn proxy.AppConnMempool
    36  	txs          *clist.CList // concurrent linked-list of good txs
    37  	preCheck     PreCheckFunc
    38  	postCheck    PostCheckFunc
    39  
    40  	// Track whether we're rechecking txs.
    41  	// These are not protected by a mutex and are expected to be mutated
    42  	// in serial (ie. by abci responses which are called in serial).
    43  	recheckCursor *clist.CElement // next expected response
    44  	recheckEnd    *clist.CElement // re-checking stops here
    45  
    46  	// notify listeners (ie. consensus) when txs are available
    47  	notifiedTxsAvailable bool
    48  	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty
    49  
    50  	// Map for quick access to txs to record sender in CheckTx.
    51  	// txsMap: txKey -> CElement
    52  	txsMap sync.Map
    53  
    54  	// Atomic integers
    55  	height     int64 // the last block Update()'d to
    56  	rechecking int32 // for re-checking filtered txs on Update()
    57  	txsBytes   int64 // total size of mempool, in bytes
    58  
    59  	// Keep a cache of already-seen txs.
    60  	// This reduces the pressure on the proxyApp.
    61  	cache txCache
    62  
    63  	// A log of mempool txs
    64  	wal *auto.AutoFile
    65  
    66  	logger log.Logger
    67  
    68  	metrics *Metrics
    69  }
    70  
    71  var _ Mempool = &CListMempool{}
    72  
    73  // CListMempoolOption sets an optional parameter on the mempool.
    74  type CListMempoolOption func(*CListMempool)
    75  
    76  // NewCListMempool returns a new mempool with the given configuration and connection to an application.
    77  func NewCListMempool(
    78  	config *cfg.MempoolConfig,
    79  	proxyAppConn proxy.AppConnMempool,
    80  	height int64,
    81  	options ...CListMempoolOption,
    82  ) *CListMempool {
    83  	mempool := &CListMempool{
    84  		config:        config,
    85  		proxyAppConn:  proxyAppConn,
    86  		txs:           clist.New(),
    87  		height:        height,
    88  		rechecking:    0,
    89  		recheckCursor: nil,
    90  		recheckEnd:    nil,
    91  		logger:        log.NewNopLogger(),
    92  		metrics:       NopMetrics(),
    93  	}
    94  	if config.CacheSize > 0 {
    95  		mempool.cache = newMapTxCache(config.CacheSize)
    96  	} else {
    97  		mempool.cache = nopTxCache{}
    98  	}
    99  	proxyAppConn.SetResponseCallback(mempool.globalCb)
   100  	for _, option := range options {
   101  		option(mempool)
   102  	}
   103  	return mempool
   104  }
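
// Hedged usage sketch (hypothetical wiring, not part of the original file):
// construct a mempool against an existing ABCI mempool connection and the
// node's mempool config, attaching optional behaviour via the option
// functions defined below. The no-op pre-check is purely illustrative.
func exampleNewMempool(config *cfg.MempoolConfig, appConn proxy.AppConnMempool, height int64) *CListMempool {
	return NewCListMempool(
		config, appConn, height,
		WithMetrics(NopMetrics()),
		WithPreCheck(func(tx types.Tx) error { return nil }), // accept everything
	)
}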
   105  
   106  // NOTE: not thread safe - should only be called once, on startup
   107  func (mem *CListMempool) EnableTxsAvailable() {
   108  	mem.txsAvailable = make(chan struct{}, 1)
   109  }
   110  
   111  // SetLogger sets the Logger.
   112  func (mem *CListMempool) SetLogger(l log.Logger) {
   113  	mem.logger = l
   114  }
   115  
   116  // WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
    117  // false. This is run before CheckTx.
   118  func WithPreCheck(f PreCheckFunc) CListMempoolOption {
   119  	return func(mem *CListMempool) { mem.preCheck = f }
   120  }
   121  
   122  // WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
    123  // false. This is run after CheckTx.
   124  func WithPostCheck(f PostCheckFunc) CListMempoolOption {
   125  	return func(mem *CListMempool) { mem.postCheck = f }
   126  }
   127  
   128  // WithMetrics sets the metrics.
   129  func WithMetrics(metrics *Metrics) CListMempoolOption {
   130  	return func(mem *CListMempool) { mem.metrics = metrics }
   131  }
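
// Illustrative pre/post-check constructors (hypothetical helpers, assuming
// the PreCheckFunc/PostCheckFunc signatures implied by their use in
// CheckTxWithInfo and resCbFirstTime below): the pre-check bounds the raw
// tx size before CheckTx, the post-check rejects txs whose reported
// GasWanted exceeds a limit after CheckTx.
func examplePreCheck(maxBytes int64) PreCheckFunc {
	return func(tx types.Tx) error {
		if int64(len(tx)) > maxBytes {
			return fmt.Errorf("tx size %d exceeds max %d", len(tx), maxBytes)
		}
		return nil
	}
}

func examplePostCheck(maxGas int64) PostCheckFunc {
	return func(tx types.Tx, res *abci.ResponseCheckTx) error {
		if res.GasWanted > maxGas {
			return fmt.Errorf("gas wanted %d exceeds max %d", res.GasWanted, maxGas)
		}
		return nil
	}
}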
   132  
    133  // *panics* if it can't create the directory or open the file.
   134  // *not thread safe*
   135  func (mem *CListMempool) InitWAL() {
   136  	walDir := mem.config.WalDir()
   137  	err := cmn.EnsureDir(walDir, 0700)
   138  	if err != nil {
   139  		panic(errors.Wrap(err, "Error ensuring WAL dir"))
   140  	}
   141  	af, err := auto.OpenAutoFile(walDir + "/wal")
   142  	if err != nil {
   143  		panic(errors.Wrap(err, "Error opening WAL file"))
   144  	}
   145  	mem.wal = af
   146  }
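
// Hedged usage sketch (hypothetical wiring, not part of the original file):
// a caller that enables the mempool WAL on startup and closes it on
// shutdown. InitWAL panics if the WAL directory or file cannot be created.
func exampleWithWAL(mem *CListMempool) {
	mem.InitWAL()
	defer mem.CloseWAL()
	// ... run the node; txs accepted for checking are appended to the WAL ...
}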
   147  
   148  func (mem *CListMempool) CloseWAL() {
   149  	mem.proxyMtx.Lock()
   150  	defer mem.proxyMtx.Unlock()
   151  
   152  	if err := mem.wal.Close(); err != nil {
   153  		mem.logger.Error("Error closing WAL", "err", err)
   154  	}
   155  	mem.wal = nil
   156  }
   157  
   158  func (mem *CListMempool) Lock() {
   159  	mem.proxyMtx.Lock()
   160  }
   161  
   162  func (mem *CListMempool) Unlock() {
   163  	mem.proxyMtx.Unlock()
   164  }
   165  
   166  func (mem *CListMempool) Size() int {
   167  	return mem.txs.Len()
   168  }
   169  
   170  func (mem *CListMempool) TxsBytes() int64 {
   171  	return atomic.LoadInt64(&mem.txsBytes)
   172  }
   173  
   174  func (mem *CListMempool) FlushAppConn() error {
   175  	return mem.proxyAppConn.FlushSync()
   176  }
   177  
   178  func (mem *CListMempool) Flush() {
   179  	mem.proxyMtx.Lock()
   180  	defer mem.proxyMtx.Unlock()
   181  
   182  	mem.cache.Reset()
   183  
   184  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   185  		mem.txs.Remove(e)
   186  		e.DetachPrev()
   187  	}
   188  
   189  	mem.txsMap = sync.Map{}
   190  	_ = atomic.SwapInt64(&mem.txsBytes, 0)
   191  }
   192  
   193  // TxsFront returns the first transaction in the ordered list for peer
   194  // goroutines to call .NextWait() on.
   195  // FIXME: leaking implementation details!
   196  func (mem *CListMempool) TxsFront() *clist.CElement {
   197  	return mem.txs.Front()
   198  }
   199  
   200  // TxsWaitChan returns a channel to wait on transactions. It will be closed
   201  // once the mempool is not empty (ie. the internal `mem.txs` has at least one
    202  // element).
   203  func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
   204  	return mem.txs.WaitChan()
   205  }
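
// Illustrative consumer sketch (hypothetical helper, not part of the
// original file), in the spirit of the mempool reactor: block until the
// mempool is non-empty, then walk the tx list in order.
func exampleWalkTxs(mem *CListMempool) {
	<-mem.TxsWaitChan() // closed once mem.txs has at least one element
	for e := mem.TxsFront(); e != nil; e = e.Next() {
		memTx := e.Value.(*mempoolTx)
		_ = memTx.tx // e.g. gossip memTx.tx to a peer
	}
}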
   206  
   207  // It blocks if we're waiting on Update() or Reap().
   208  // cb: A callback from the CheckTx command.
   209  //     It gets called from another goroutine.
   210  // CONTRACT: Either cb will get called, or err returned.
   211  func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
   212  	return mem.CheckTxWithInfo(tx, cb, TxInfo{SenderID: UnknownPeerID})
   213  }
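
// Minimal sketch of a CheckTx caller (hypothetical helper, in the spirit of
// the RPC handler): submit a tx and inspect the ABCI result inside the
// callback, which is invoked from another goroutine once the app responds.
func exampleCheckTx(mem *CListMempool, tx types.Tx) error {
	return mem.CheckTx(tx, func(res *abci.Response) {
		r, ok := res.Value.(*abci.Response_CheckTx)
		if !ok {
			return
		}
		// Code == abci.CodeTypeOK (and a passing postCheck) means the tx
		// was added to the mempool; see resCbFirstTime below.
		_ = r.CheckTx.Code
	})
}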
   214  
   215  func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) {
   216  	mem.proxyMtx.Lock()
   217  	// use defer to unlock mutex because application (*local client*) might panic
   218  	defer mem.proxyMtx.Unlock()
   219  
   220  	var (
   221  		memSize  = mem.Size()
   222  		txsBytes = mem.TxsBytes()
   223  	)
   224  	if memSize >= mem.config.Size ||
   225  		int64(len(tx))+txsBytes > mem.config.MaxTxsBytes {
   226  		return ErrMempoolIsFull{
   227  			memSize, mem.config.Size,
   228  			txsBytes, mem.config.MaxTxsBytes}
   229  	}
   230  
   231  	// The size of the corresponding amino-encoded TxMessage
   232  	// can't be larger than the maxMsgSize, otherwise we can't
   233  	// relay it to peers.
   234  	if len(tx) > maxTxSize {
   235  		return ErrTxTooLarge
   236  	}
   237  
   238  	if mem.preCheck != nil {
   239  		if err := mem.preCheck(tx); err != nil {
   240  			return ErrPreCheck{err}
   241  		}
   242  	}
   243  
   244  	// CACHE
   245  	if !mem.cache.Push(tx) {
   246  		// Record a new sender for a tx we've already seen.
   247  		// Note it's possible a tx is still in the cache but no longer in the mempool
   248  		// (eg. after committing a block, txs are removed from mempool but not cache),
   249  		// so we only record the sender for txs still in the mempool.
   250  		if e, ok := mem.txsMap.Load(txKey(tx)); ok {
   251  			memTx := e.(*clist.CElement).Value.(*mempoolTx)
   252  			if _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true); loaded {
   253  				// TODO: consider punishing peer for dups,
    254  				// it's non-trivial since invalid txs can become valid,
   255  				// but they can spam the same tx with little cost to them atm.
   256  			}
   257  		}
   258  
   259  		return ErrTxInCache
   260  	}
   261  	// END CACHE
   262  
   263  	// WAL
   264  	if mem.wal != nil {
   265  		// TODO: Notify administrators when WAL fails
   266  		_, err := mem.wal.Write([]byte(tx))
   267  		if err != nil {
   268  			mem.logger.Error("Error writing to WAL", "err", err)
   269  		}
   270  		_, err = mem.wal.Write([]byte("\n"))
   271  		if err != nil {
   272  			mem.logger.Error("Error writing to WAL", "err", err)
   273  		}
   274  	}
   275  	// END WAL
   276  
   277  	// NOTE: proxyAppConn may error if tx buffer is full
   278  	if err = mem.proxyAppConn.Error(); err != nil {
   279  		return err
   280  	}
   281  
   282  	reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
   283  	reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, cb))
   284  
   285  	return nil
   286  }
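
// Hedged sketch (hypothetical helper): when a tx arrives from a known peer,
// pass that peer's 16-bit mempool-reactor ID via TxInfo so the sender is
// recorded and the tx is not gossiped back to it.
func exampleCheckTxFromPeer(mem *CListMempool, tx types.Tx, peerID uint16) error {
	return mem.CheckTxWithInfo(tx, nil, TxInfo{SenderID: peerID})
}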
   287  
   288  // Global callback that will be called after every ABCI response.
   289  // Having a single global callback avoids needing to set a callback for each request.
    290  // However, processing the CheckTx response requires the peerID (so we can track which peers sent us each tx),
    291  // and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
    292  // include this information. If we're not in the midst of a recheck, this function will just return,
    293  // so the request-specific callback can do the work.
   294  // When rechecking, we don't need the peerID, so the recheck callback happens here.
   295  func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
   296  	if mem.recheckCursor == nil {
   297  		return
   298  	}
   299  
   300  	mem.metrics.RecheckTimes.Add(1)
   301  	mem.resCbRecheck(req, res)
   302  
   303  	// update metrics
   304  	mem.metrics.Size.Set(float64(mem.Size()))
   305  }
   306  
   307  // Request specific callback that should be set on individual reqRes objects
   308  // to incorporate local information when processing the response.
   309  // This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
   310  // NOTE: alternatively, we could include this information in the ABCI request itself.
   311  //
   312  // External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
   313  // when all other response processing is complete.
   314  //
   315  // Used in CheckTxWithInfo to record PeerID who sent us the tx.
   316  func (mem *CListMempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) {
   317  	return func(res *abci.Response) {
   318  		if mem.recheckCursor != nil {
   319  			// this should never happen
   320  			panic("recheck cursor is not nil in reqResCb")
   321  		}
   322  
   323  		mem.resCbFirstTime(tx, peerID, res)
   324  
   325  		// update metrics
   326  		mem.metrics.Size.Set(float64(mem.Size()))
   327  
   328  		// passed in by the caller of CheckTx, eg. the RPC
   329  		if externalCb != nil {
   330  			externalCb(res)
   331  		}
   332  	}
   333  }
   334  
   335  // Called from:
   336  //  - resCbFirstTime (lock not held) if tx is valid
   337  func (mem *CListMempool) addTx(memTx *mempoolTx) {
   338  	e := mem.txs.PushBack(memTx)
   339  	mem.txsMap.Store(txKey(memTx.tx), e)
   340  	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
   341  	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
   342  }
   343  
   344  // Called from:
   345  //  - Update (lock held) if tx was committed
    346  //  - resCbRecheck (lock not held) if tx was invalidated
   347  func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
   348  	mem.txs.Remove(elem)
   349  	elem.DetachPrev()
   350  	mem.txsMap.Delete(txKey(tx))
   351  	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
   352  
   353  	if removeFromCache {
   354  		mem.cache.Remove(tx)
   355  	}
   356  }
   357  
    358  // resCbFirstTime is the callback invoked after the app checks the tx for the first time.
   359  //
   360  // The case where the app checks the tx for the second and subsequent times is
   361  // handled by the resCbRecheck callback.
   362  func (mem *CListMempool) resCbFirstTime(tx []byte, peerID uint16, res *abci.Response) {
   363  	switch r := res.Value.(type) {
   364  	case *abci.Response_CheckTx:
   365  		var postCheckErr error
   366  		if mem.postCheck != nil {
   367  			postCheckErr = mem.postCheck(tx, r.CheckTx)
   368  		}
   369  		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
   370  			memTx := &mempoolTx{
   371  				height:    mem.height,
   372  				gasWanted: r.CheckTx.GasWanted,
   373  				tx:        tx,
   374  			}
   375  			memTx.senders.Store(peerID, true)
   376  			mem.addTx(memTx)
   377  			mem.logger.Info("Added good transaction",
   378  				"tx", txID(tx),
   379  				"res", r,
   380  				"height", memTx.height,
   381  				"total", mem.Size(),
   382  			)
   383  			mem.notifyTxsAvailable()
   384  		} else {
   385  			// ignore bad transaction
   386  			mem.logger.Info("Rejected bad transaction", "tx", txID(tx), "res", r, "err", postCheckErr)
   387  			mem.metrics.FailedTxs.Add(1)
   388  			// remove from cache (it might be good later)
   389  			mem.cache.Remove(tx)
   390  		}
   391  	default:
   392  		// ignore other messages
   393  	}
   394  }
   395  
    396  // resCbRecheck is the callback invoked after the app rechecks the tx.
   397  //
   398  // The case where the app checks the tx for the first time is handled by the
   399  // resCbFirstTime callback.
   400  func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
   401  	switch r := res.Value.(type) {
   402  	case *abci.Response_CheckTx:
   403  		tx := req.GetCheckTx().Tx
   404  		memTx := mem.recheckCursor.Value.(*mempoolTx)
   405  		if !bytes.Equal(tx, memTx.tx) {
   406  			panic(fmt.Sprintf(
   407  				"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
   408  				memTx.tx,
   409  				tx))
   410  		}
   411  		var postCheckErr error
   412  		if mem.postCheck != nil {
   413  			postCheckErr = mem.postCheck(tx, r.CheckTx)
   414  		}
   415  		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
   416  			// Good, nothing to do.
   417  		} else {
   418  			// Tx became invalidated due to newly committed block.
   419  			mem.logger.Info("Tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr)
   420  			// NOTE: we remove tx from the cache because it might be good later
   421  			mem.removeTx(tx, mem.recheckCursor, true)
   422  		}
   423  		if mem.recheckCursor == mem.recheckEnd {
   424  			mem.recheckCursor = nil
   425  		} else {
   426  			mem.recheckCursor = mem.recheckCursor.Next()
   427  		}
   428  		if mem.recheckCursor == nil {
   429  			// Done!
   430  			atomic.StoreInt32(&mem.rechecking, 0)
   431  			mem.logger.Info("Done rechecking txs")
   432  
    433  			// in case the recheck removed all txs
   434  			if mem.Size() > 0 {
   435  				mem.notifyTxsAvailable()
   436  			}
   437  		}
   438  	default:
   439  		// ignore other messages
   440  	}
   441  }
   442  
   443  func (mem *CListMempool) TxsAvailable() <-chan struct{} {
   444  	return mem.txsAvailable
   445  }
   446  
   447  func (mem *CListMempool) notifyTxsAvailable() {
   448  	if mem.Size() == 0 {
   449  		panic("notified txs available but mempool is empty!")
   450  	}
   451  	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
   452  		// channel cap is 1, so this will send once
   453  		mem.notifiedTxsAvailable = true
   454  		select {
   455  		case mem.txsAvailable <- struct{}{}:
   456  		default:
   457  		}
   458  	}
   459  }
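
// Hedged sketch of the consumer side of TxsAvailable (hypothetical helper):
// enable the notification channel once on startup, wait for the first-txs
// signal, then reap. Passing -1 for maxBytes/maxGas disables the respective
// limit, per the checks in ReapMaxBytesMaxGas below.
func exampleWaitForTxs(mem *CListMempool) types.Txs {
	mem.EnableTxsAvailable() // call once, on startup
	<-mem.TxsAvailable()     // fires at most once per height, when txs exist
	return mem.ReapMaxBytesMaxGas(-1, -1)
}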
   460  
   461  func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
   462  	mem.proxyMtx.Lock()
   463  	defer mem.proxyMtx.Unlock()
   464  
   465  	for atomic.LoadInt32(&mem.rechecking) > 0 {
   466  		// TODO: Something better?
   467  		time.Sleep(time.Millisecond * 10)
   468  	}
   469  
   470  	var totalBytes int64
   471  	var totalGas int64
   472  	// TODO: we will get a performance boost if we have a good estimate of avg
   473  	// size per tx, and set the initial capacity based off of that.
   474  	// txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize))
   475  	txs := make([]types.Tx, 0, mem.txs.Len())
   476  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   477  		memTx := e.Value.(*mempoolTx)
   478  		// Check total size requirement
   479  		aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
   480  		if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
   481  			return txs
   482  		}
   483  		totalBytes += int64(len(memTx.tx)) + aminoOverhead
   484  		// Check total gas requirement.
   485  		// If maxGas is negative, skip this check.
    486  		// Since newTotalGas < maxGas, which
   487  		// must be non-negative, it follows that this won't overflow.
   488  		newTotalGas := totalGas + memTx.gasWanted
   489  		if maxGas > -1 && newTotalGas > maxGas {
   490  			return txs
   491  		}
   492  		totalGas = newTotalGas
   493  		txs = append(txs, memTx.tx)
   494  	}
   495  	return txs
   496  }
   497  
   498  func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
   499  	mem.proxyMtx.Lock()
   500  	defer mem.proxyMtx.Unlock()
   501  
   502  	if max < 0 {
   503  		max = mem.txs.Len()
   504  	}
   505  
   506  	for atomic.LoadInt32(&mem.rechecking) > 0 {
   507  		// TODO: Something better?
   508  		time.Sleep(time.Millisecond * 10)
   509  	}
   510  
   511  	txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max))
    512  	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
   513  		memTx := e.Value.(*mempoolTx)
   514  		txs = append(txs, memTx.tx)
   515  	}
   516  	return txs
   517  }
   518  
   519  func (mem *CListMempool) Update(
   520  	height int64,
   521  	txs types.Txs,
   522  	deliverTxResponses []*abci.ResponseDeliverTx,
   523  	preCheck PreCheckFunc,
   524  	postCheck PostCheckFunc,
   525  ) error {
   526  	// Set height
   527  	mem.height = height
   528  	mem.notifiedTxsAvailable = false
   529  
   530  	if preCheck != nil {
   531  		mem.preCheck = preCheck
   532  	}
   533  	if postCheck != nil {
   534  		mem.postCheck = postCheck
   535  	}
   536  
   537  	for i, tx := range txs {
   538  		if deliverTxResponses[i].Code == abci.CodeTypeOK {
   539  			// Add valid committed tx to the cache (if missing).
   540  			_ = mem.cache.Push(tx)
   541  		} else {
   542  			// Allow invalid transactions to be resubmitted.
   543  			mem.cache.Remove(tx)
   544  		}
   545  
   546  		// Remove committed tx from the mempool.
   547  		//
   548  		// Note an evil proposer can drop valid txs!
   549  		// Mempool before:
   550  		//   100 -> 101 -> 102
   551  		// Block, proposed by an evil proposer:
   552  		//   101 -> 102
   553  		// Mempool after:
   554  		//   100
   555  		// https://github.com/evdatsion/aphelion-dpos-bft/issues/3322.
   556  		if e, ok := mem.txsMap.Load(txKey(tx)); ok {
   557  			mem.removeTx(tx, e.(*clist.CElement), false)
   558  		}
   559  	}
   560  
   561  	// Either recheck non-committed txs to see if they became invalid
    562  	// or just notify that there are some txs left.
   563  	if mem.Size() > 0 {
   564  		if mem.config.Recheck {
   565  			mem.logger.Info("Recheck txs", "numtxs", mem.Size(), "height", height)
   566  			mem.recheckTxs()
   567  			// At this point, mem.txs are being rechecked.
   568  			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
   569  			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
   570  		} else {
   571  			mem.notifyTxsAvailable()
   572  		}
   573  	}
   574  
   575  	// Update metrics
   576  	mem.metrics.Size.Set(float64(mem.Size()))
   577  
   578  	return nil
   579  }
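
// Hedged sketch of the post-commit flow (hypothetical helper; in the real
// node the consensus state machine drives these calls): the caller holds
// the mempool lock across FlushAppConn and Update so no CheckTx can
// interleave with the removal of committed txs.
func exampleUpdateAfterCommit(mem *CListMempool, height int64, txs types.Txs, results []*abci.ResponseDeliverTx) error {
	mem.Lock()
	defer mem.Unlock()
	if err := mem.FlushAppConn(); err != nil {
		return err
	}
	return mem.Update(height, txs, results, nil, nil)
}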
   580  
   581  func (mem *CListMempool) recheckTxs() {
   582  	if mem.Size() == 0 {
   583  		panic("recheckTxs is called, but the mempool is empty")
   584  	}
   585  
   586  	atomic.StoreInt32(&mem.rechecking, 1)
   587  	mem.recheckCursor = mem.txs.Front()
   588  	mem.recheckEnd = mem.txs.Back()
   589  
   590  	// Push txs to proxyAppConn
   591  	// NOTE: globalCb may be called concurrently.
   592  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   593  		memTx := e.Value.(*mempoolTx)
   594  		mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{
   595  			Tx:   memTx.tx,
   596  			Type: abci.CheckTxType_Recheck,
   597  		})
   598  	}
   599  
   600  	mem.proxyAppConn.FlushAsync()
   601  }
   602  
   603  //--------------------------------------------------------------------------------
   604  
    605  // mempoolTx is a transaction that successfully ran CheckTx.
   606  type mempoolTx struct {
    607  	height    int64    // height at which this tx was validated
    608  	gasWanted int64    // amount of gas this tx states it will require
    609  	tx        types.Tx // the raw transaction bytes
   610  
   611  	// ids of peers who've sent us this tx (as a map for quick lookups).
   612  	// senders: PeerID -> bool
   613  	senders sync.Map
   614  }
   615  
   616  // Height returns the height for this transaction
   617  func (memTx *mempoolTx) Height() int64 {
   618  	return atomic.LoadInt64(&memTx.height)
   619  }
   620  
   621  //--------------------------------------------------------------------------------
   622  
   623  type txCache interface {
   624  	Reset()
   625  	Push(tx types.Tx) bool
   626  	Remove(tx types.Tx)
   627  }
   628  
    629  // mapTxCache maintains an LRU cache of transactions. This only stores the hash
   630  // of the tx, due to memory concerns.
   631  type mapTxCache struct {
   632  	mtx  sync.Mutex
   633  	size int
   634  	map_ map[[sha256.Size]byte]*list.Element
   635  	list *list.List
   636  }
   637  
   638  var _ txCache = (*mapTxCache)(nil)
   639  
   640  // newMapTxCache returns a new mapTxCache.
   641  func newMapTxCache(cacheSize int) *mapTxCache {
   642  	return &mapTxCache{
   643  		size: cacheSize,
   644  		map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
   645  		list: list.New(),
   646  	}
   647  }
   648  
   649  // Reset resets the cache to an empty state.
   650  func (cache *mapTxCache) Reset() {
   651  	cache.mtx.Lock()
   652  	cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
   653  	cache.list.Init()
   654  	cache.mtx.Unlock()
   655  }
   656  
   657  // Push adds the given tx to the cache and returns true. It returns
   658  // false if tx is already in the cache.
   659  func (cache *mapTxCache) Push(tx types.Tx) bool {
   660  	cache.mtx.Lock()
   661  	defer cache.mtx.Unlock()
   662  
   663  	// Use the tx hash in the cache
   664  	txHash := txKey(tx)
   665  	if moved, exists := cache.map_[txHash]; exists {
   666  		cache.list.MoveToBack(moved)
   667  		return false
   668  	}
   669  
    670  	if cache.list.Len() >= cache.size {
    671  		popped := cache.list.Front()
    672  		if popped != nil {
    673  			poppedTxHash := popped.Value.([sha256.Size]byte)
    674  			delete(cache.map_, poppedTxHash)
    675  			cache.list.Remove(popped)
    676  		}
    677  	}
   678  	e := cache.list.PushBack(txHash)
   679  	cache.map_[txHash] = e
   680  	return true
   681  }
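
// Minimal illustration (hypothetical helper) of the cache semantics above:
// Push reports whether the tx hash was newly added, a duplicate Push moves
// the entry to the back, and the front (oldest) entry is evicted once the
// size limit is reached.
func exampleCacheSemantics() {
	c := newMapTxCache(2)
	_ = c.Push(types.Tx("a")) // true: newly cached
	_ = c.Push(types.Tx("a")) // false: already cached, moved to back
	_ = c.Push(types.Tx("b")) // true
	_ = c.Push(types.Tx("c")) // true: evicts the oldest entry ("a")
}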
   682  
   683  // Remove removes the given tx from the cache.
   684  func (cache *mapTxCache) Remove(tx types.Tx) {
   685  	cache.mtx.Lock()
   686  	txHash := txKey(tx)
   687  	popped := cache.map_[txHash]
   688  	delete(cache.map_, txHash)
   689  	if popped != nil {
   690  		cache.list.Remove(popped)
   691  	}
   692  
   693  	cache.mtx.Unlock()
   694  }
   695  
   696  type nopTxCache struct{}
   697  
   698  var _ txCache = (*nopTxCache)(nil)
   699  
   700  func (nopTxCache) Reset()             {}
   701  func (nopTxCache) Push(types.Tx) bool { return true }
   702  func (nopTxCache) Remove(types.Tx)    {}
   703  
   704  //--------------------------------------------------------------------------------
   705  
   706  // txKey is the fixed length array sha256 hash used as the key in maps.
   707  func txKey(tx types.Tx) [sha256.Size]byte {
   708  	return sha256.Sum256(tx)
   709  }
   710  
    711  // txID is the hex-encoded hash of the bytes, interpreted as a types.Tx.
   712  func txID(tx []byte) string {
   713  	return fmt.Sprintf("%X", types.Tx(tx).Hash())
   714  }