github.com/gnolang/gno@v0.0.0-20240520182011-228e9d0192ce/tm2/pkg/bft/mempool/clist_mempool.go

     1  package mempool
     2  
     3  import (
     4  	"bytes"
     5  	"container/list"
     6  	"crypto/sha256"
     7  	"fmt"
     8  	"log/slog"
     9  	"sync"
    10  	"sync/atomic"
    11  	"time"
    12  
    13  	auto "github.com/gnolang/gno/tm2/pkg/autofile"
    14  	abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
    15  	"github.com/gnolang/gno/tm2/pkg/bft/appconn"
    16  	cfg "github.com/gnolang/gno/tm2/pkg/bft/mempool/config"
    17  	"github.com/gnolang/gno/tm2/pkg/bft/types"
    18  	"github.com/gnolang/gno/tm2/pkg/clist"
    19  	"github.com/gnolang/gno/tm2/pkg/errors"
    20  	"github.com/gnolang/gno/tm2/pkg/log"
    21  	osm "github.com/gnolang/gno/tm2/pkg/os"
    22  )
    23  
    24  // --------------------------------------------------------------------------------
    25  
    26  // CListMempool is an ordered in-memory pool for transactions before they are
    27  // proposed in a consensus round. Transaction validity is checked using the
    28  // CheckTx abci message before the transaction is added to the pool. The
    29  // mempool uses a concurrent list structure for storing transactions that can
     30  // be efficiently accessed by multiple readers.
    31  type CListMempool struct {
    32  	config *cfg.MempoolConfig
    33  
    34  	mtx          sync.Mutex
    35  	proxyAppConn appconn.Mempool
    36  	txs          *clist.CList // concurrent linked-list of good txs
    37  	preCheck     PreCheckFunc
    38  	height       int64 // the last block Update()'d to
    39  	maxTxBytes   int64
    40  
    41  	// Track whether we're rechecking txs.
    42  	// These are not protected by a mutex and are expected to be mutated
    43  	// in serial (ie. by abci responses which are called in serial).
    44  	recheckCursor *clist.CElement // next expected response
    45  	recheckEnd    *clist.CElement // re-checking stops here
    46  
    47  	// notify listeners (ie. consensus) when txs are available
    48  	notifiedTxsAvailable bool
    49  	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty
    50  
    51  	// Map for quick access to txs to record sender in CheckTx.
    52  	// txsMap: txKey -> CElement
    53  	txsMap sync.Map
    54  
    55  	// Atomic integers
    56  	txsBytes   int64 // total size of mempool, in bytes
    57  	rechecking int32 // for re-checking filtered txs on Update()
    58  
    59  	// Keep a cache of already-seen txs.
    60  	// This reduces the pressure on the proxyApp.
    61  	cache txCache
    62  
    63  	// A log of mempool txs
    64  	wal *auto.AutoFile
    65  
    66  	logger *slog.Logger
    67  }
    68  
    69  var _ Mempool = &CListMempool{}
    70  
    71  // CListMempoolOption sets an optional parameter on the mempool.
    72  type CListMempoolOption func(*CListMempool)
    73  
    74  // NewCListMempool returns a new mempool with the given configuration and connection to an application.
    75  func NewCListMempool(
    76  	config *cfg.MempoolConfig,
    77  	proxyAppConn appconn.Mempool,
    78  	height int64,
    79  	maxTxBytes int64,
    80  	options ...CListMempoolOption,
    81  ) *CListMempool {
    82  	if maxTxBytes <= 0 {
    83  		panic("maxTxBytes must be positive")
    84  	}
    85  	mempool := &CListMempool{
    86  		config:        config,
    87  		proxyAppConn:  proxyAppConn,
    88  		txs:           clist.New(),
    89  		height:        height,
    90  		maxTxBytes:    maxTxBytes,
    91  		rechecking:    0,
    92  		recheckCursor: nil,
    93  		recheckEnd:    nil,
    94  		logger:        log.NewNoopLogger(),
    95  	}
    96  	if config.CacheSize > 0 {
    97  		mempool.cache = newMapTxCache(config.CacheSize)
    98  	} else {
    99  		mempool.cache = nopTxCache{}
   100  	}
   101  	proxyAppConn.SetResponseCallback(mempool.globalCb)
   102  	for _, option := range options {
   103  		option(mempool)
   104  	}
   105  	return mempool
   106  }
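
         // A minimal wiring sketch (illustrative, not part of the original file): how a
         // node might construct and configure this mempool. memCfg, proxyApp, logger and
         // myPreCheck are placeholders assumed to be supplied by the caller.
         //
         //	mp := NewCListMempool(memCfg, proxyApp, height, maxTxBytes,
         //		WithPreCheck(myPreCheck), // optional filter run before CheckTx
         //	)
         //	mp.SetLogger(logger)
         //	mp.EnableTxsAvailable() // only if a consumer will read TxsAvailable()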
   107  
   108  // NOTE: not thread safe - should only be called once, on startup
   109  func (mem *CListMempool) EnableTxsAvailable() {
   110  	mem.txsAvailable = make(chan struct{}, 1)
   111  }
   112  
   113  // SetLogger sets the Logger.
   114  func (mem *CListMempool) SetLogger(l *slog.Logger) {
   115  	mem.logger = l
   116  }
   117  
   118  // WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
    119  // false. This is run before CheckTx.
   120  func WithPreCheck(f PreCheckFunc) CListMempoolOption {
   121  	return func(mem *CListMempool) { mem.preCheck = f }
   122  }
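
         // A hedged example of a pre-check filter (not in the original file), assuming
         // PreCheckFunc has the func(types.Tx) error shape used by mem.preCheck above.
         // It rejects oversized transactions before CheckTx is even attempted:
         //
         //	limit := int64(1024) // arbitrary example limit
         //	pre := func(tx types.Tx) error {
         //		if int64(len(tx)) > limit {
         //			return fmt.Errorf("tx too large: %d > %d bytes", len(tx), limit)
         //		}
         //		return nil
         //	}
         //	mp := NewCListMempool(memCfg, proxyApp, height, maxTxBytes, WithPreCheck(pre))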
   123  
    124  // *panics* if it can't create the directory or open the file.
   125  // *not thread safe*
   126  func (mem *CListMempool) InitWAL() {
   127  	walDir := mem.config.WalDir()
   128  	err := osm.EnsureDir(walDir, 0o700)
   129  	if err != nil {
   130  		panic(errors.Wrap(err, "Error ensuring WAL dir"))
   131  	}
   132  	af, err := auto.OpenAutoFile(walDir + "/wal")
   133  	if err != nil {
   134  		panic(errors.Wrap(err, "Error opening WAL file"))
   135  	}
   136  	mem.wal = af
   137  }
   138  
   139  func (mem *CListMempool) CloseWAL() {
   140  	mem.mtx.Lock()
   141  	defer mem.mtx.Unlock()
   142  
   143  	if err := mem.wal.Close(); err != nil {
   144  		mem.logger.Error("Error closing WAL", "err", err)
   145  	}
   146  	mem.wal = nil
   147  }
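
         // Lifecycle sketch (illustrative): the WAL is optional and is typically opened
         // once at startup and closed on shutdown. The WalEnabled check is an assumption
         // about the surrounding config, not something defined in this file:
         //
         //	if memCfg.WalEnabled() {
         //		mp.InitWAL() // panics on failure, see above
         //		defer mp.CloseWAL()
         //	}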
   148  
   149  func (mem *CListMempool) Lock() {
   150  	mem.mtx.Lock()
   151  }
   152  
   153  func (mem *CListMempool) Unlock() {
   154  	mem.mtx.Unlock()
   155  }
   156  
   157  func (mem *CListMempool) Size() int {
   158  	return mem.txs.Len()
   159  }
   160  
   161  func (mem *CListMempool) MaxTxBytes() int64 {
   162  	mem.mtx.Lock()
   163  	defer mem.mtx.Unlock()
   164  	return mem.maxTxBytes
   165  }
   166  
   167  func (mem *CListMempool) TxsBytes() int64 {
   168  	return atomic.LoadInt64(&mem.txsBytes)
   169  }
   170  
   171  func (mem *CListMempool) FlushAppConn() error {
   172  	return mem.proxyAppConn.FlushSync()
   173  }
   174  
   175  func (mem *CListMempool) Flush() {
   176  	mem.mtx.Lock()
   177  	defer mem.mtx.Unlock()
   178  
   179  	mem.cache.Reset()
   180  
   181  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   182  		mem.txs.Remove(e)
   183  		e.DetachPrev()
   184  	}
   185  
   186  	mem.txsMap = sync.Map{}
   187  	_ = atomic.SwapInt64(&mem.txsBytes, 0)
   188  }
   189  
    190  // TxsFront returns the first element of the ordered tx list, for peer
    191  // goroutines to call .NextWait() on.
   192  // FIXME: leaking implementation details!
   193  func (mem *CListMempool) TxsFront() *clist.CElement {
   194  	return mem.txs.Front()
   195  }
   196  
   197  // TxsWaitChan returns a channel to wait on transactions. It will be closed
   198  // once the mempool is not empty (ie. the internal `mem.txs` has at least one
   199  // element)
   200  func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
   201  	return mem.txs.WaitChan()
   202  }
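
         // Consumer sketch (illustrative only; the real broadcast routine adds peer
         // bookkeeping): a gossip goroutine waits for the list to become non-empty and
         // then walks it, blocking on NextWait() for new entries. gossip is a placeholder.
         //
         //	var next *clist.CElement
         //	for {
         //		if next == nil {
         //			<-mp.TxsWaitChan() // block until the pool is non-empty
         //			next = mp.TxsFront()
         //			continue
         //		}
         //		memTx := next.Value.(*mempoolTx)
         //		gossip(memTx.tx)
         //		next = next.NextWait() // nil again once this element is removed
         //	}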
   203  
    204  // CheckTx blocks if we're waiting on Update() or Reap().
    205  //
    206  // cb is a callback for the CheckTx command; it gets called from another
    207  // goroutine.
    208  //
    209  // CONTRACT: Either cb will get called, or err returned.
   210  func (mem *CListMempool) CheckTx(tx types.Tx, cb func(abci.Response)) (err error) {
   211  	return mem.CheckTxWithInfo(tx, cb, TxInfo{SenderID: UnknownPeerID})
   212  }
   213  
   214  func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(abci.Response), txInfo TxInfo) (err error) {
   215  	mem.mtx.Lock()
   216  	// use defer to unlock mutex because application (*local client*) might panic
   217  	defer mem.mtx.Unlock()
   218  
   219  	var (
   220  		memSize  = mem.Size()
   221  		txsBytes = mem.TxsBytes()
   222  		txSize   = len(tx)
   223  	)
   224  
   225  	// Check max pending txs bytes
   226  	if memSize >= mem.config.Size ||
   227  		int64(txSize)+txsBytes > mem.config.MaxPendingTxsBytes {
   228  		return MempoolIsFullError{
   229  			memSize, mem.config.Size,
   230  			txsBytes, mem.config.MaxPendingTxsBytes,
   231  		}
   232  	}
   233  
   234  	// Check max tx bytes
   235  	if int64(txSize) > mem.maxTxBytes {
   236  		return TxTooLargeError{mem.maxTxBytes, int64(txSize)}
   237  	}
   238  
   239  	// Check custom preCheck function
   240  	if mem.preCheck != nil {
   241  		if err := mem.preCheck(tx); err != nil {
   242  			return err
   243  		}
   244  	}
   245  
   246  	// CACHE
   247  	if !mem.cache.Push(tx) {
   248  		// Record a new sender for a tx we've already seen.
   249  		// Note it's possible a tx is still in the cache but no longer in the mempool
   250  		// (eg. after committing a block, txs are removed from mempool but not cache),
   251  		// so we only record the sender for txs still in the mempool.
   252  		if e, ok := mem.txsMap.Load(txKey(tx)); ok {
   253  			memTx := e.(*clist.CElement).Value.(*mempoolTx)
   254  			memTx.senders.LoadOrStore(txInfo.SenderID, true)
   255  			// TODO: consider punishing peer for dups,
    256  			// it's non-trivial since invalid txs can become valid,
   257  			// but they can spam the same tx with little cost to them atm.
   258  		}
   259  
   260  		return ErrTxInCache
   261  	}
   262  	// END CACHE
   263  
   264  	// WAL
   265  	if mem.wal != nil {
   266  		// TODO: Notify administrators when WAL fails
   267  		_, err := mem.wal.Write([]byte(tx))
   268  		if err != nil {
   269  			mem.logger.Error("Error writing to WAL", "err", err)
   270  		}
   271  		_, err = mem.wal.Write([]byte("\n"))
   272  		if err != nil {
   273  			mem.logger.Error("Error writing to WAL", "err", err)
   274  		}
   275  	}
   276  	// END WAL
   277  
   278  	// NOTE: proxyAppConn may error if tx buffer is full
   279  	if err = mem.proxyAppConn.Error(); err != nil {
   280  		return err
   281  	}
   282  
   283  	reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
   284  	reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, cb))
   285  
   286  	return nil
   287  }
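
         // Caller-side sketch (illustrative): what an RPC-style caller of CheckTx might
         // do with the callback and the possible errors documented above:
         //
         //	err := mp.CheckTx(tx, func(res abci.Response) {
         //		if r, ok := res.(abci.ResponseCheckTx); ok && r.Error != nil {
         //			// the application rejected the tx
         //		}
         //	})
         //	if err == ErrTxInCache {
         //		// duplicate: this tx has already been seen
         //	} else if err != nil {
         //		// mempool full, tx too large, precheck failure, proxy error, ...
         //	}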
   288  
   289  // Global callback that will be called after every ABCI response.
   290  // Having a single global callback avoids needing to set a callback for each request.
    291  // However, processing the CheckTx response requires the peerID (so we can track which txs we heard from whom),
   292  // and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
   293  // include this information. If we're not in the midst of a recheck, this function will just return,
    294  // so the request-specific callback can do the work.
   295  // When rechecking, we don't need the peerID, so the recheck callback happens here.
   296  func (mem *CListMempool) globalCb(req abci.Request, res abci.Response) {
    297  	if mem.recheckCursor == nil {
    298  		return
    299  	}
    300  
    301  	mem.resCbRecheck(req, res)
   302  }
   303  
   304  // Request specific callback that should be set on individual reqRes objects
   305  // to incorporate local information when processing the response.
   306  // This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
   307  // NOTE: alternatively, we could include this information in the ABCI request itself.
   308  //
   309  // External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
   310  // when all other response processing is complete.
   311  //
   312  // Used in CheckTxWithInfo to record PeerID who sent us the tx.
   313  func (mem *CListMempool) reqResCb(tx []byte, peerID uint16, externalCb func(abci.Response)) func(res abci.Response) {
   314  	return func(res abci.Response) {
   315  		if mem.recheckCursor != nil {
   316  			// this should never happen
   317  			panic("recheck cursor is not nil in reqResCb")
   318  		}
   319  
   320  		mem.resCbFirstTime(tx, peerID, res)
   321  
   322  		// Passed in by the caller of CheckTx, eg. the RPC.
   323  		// The external callback cannot modify the result.
   324  		if externalCb != nil {
   325  			externalCb(res)
   326  		}
   327  	}
   328  }
   329  
   330  // Called from:
   331  //   - resCbFirstTime (lock not held) if tx is valid
   332  func (mem *CListMempool) addTx(memTx *mempoolTx) {
   333  	e := mem.txs.PushBack(memTx)
   334  	mem.txsMap.Store(txKey(memTx.tx), e)
   335  	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
   336  }
   337  
   338  // Called from:
   339  //   - Update (lock held) if tx was committed
   340  //   - resCbRecheck (lock not held) if tx was invalidated
   341  func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
   342  	mem.txs.Remove(elem)
   343  	elem.DetachPrev()
   344  	mem.txsMap.Delete(txKey(tx))
   345  	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
   346  
   347  	if removeFromCache {
   348  		mem.cache.Remove(tx)
   349  	}
   350  }
   351  
    352  // resCbFirstTime is the callback invoked after the app has checked the tx for the first time.
   353  //
   354  // The case where the app checks the tx for the second and subsequent times is
   355  // handled by the resCbRecheck callback.
   356  func (mem *CListMempool) resCbFirstTime(tx []byte, peerID uint16, res abci.Response) {
   357  	switch res := res.(type) {
   358  	case abci.ResponseCheckTx:
   359  		if res.Error == nil {
   360  			memTx := &mempoolTx{
   361  				height:    mem.height,
   362  				gasWanted: res.GasWanted,
   363  				tx:        tx,
   364  			}
   365  			memTx.senders.Store(peerID, true)
   366  			mem.addTx(memTx)
   367  			mem.logger.Info("Added good transaction",
   368  				"tx", txID(tx),
   369  				"res", res,
   370  				"height", memTx.height,
   371  				"total", mem.Size(),
   372  			)
   373  			mem.notifyTxsAvailable()
   374  		} else {
   375  			// ignore bad transaction
   376  			mem.logger.Info("Rejected bad transaction", "tx", txID(tx), "res", res, "err", res.Error)
   377  			// remove from cache (it might be good later)
   378  			mem.cache.Remove(tx)
   379  		}
   380  	default:
   381  		// ignore other messages
   382  	}
   383  }
   384  
    385  // resCbRecheck is the callback invoked after the app has rechecked the tx.
   386  //
   387  // The case where the app checks the tx for the first time is handled by the
   388  // resCbFirstTime callback.
   389  func (mem *CListMempool) resCbRecheck(req abci.Request, res abci.Response) {
   390  	switch res := res.(type) {
   391  	case abci.ResponseCheckTx:
   392  		tx := req.(abci.RequestCheckTx).Tx
   393  		memTx := mem.recheckCursor.Value.(*mempoolTx)
   394  		if !bytes.Equal(tx, memTx.tx) {
   395  			panic(fmt.Sprintf(
   396  				"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
   397  				memTx.tx,
   398  				tx))
   399  		}
   400  		if res.Error == nil {
   401  			// Good, nothing to do.
   402  		} else {
   403  			// Tx became invalidated due to newly committed block.
   404  			mem.logger.Info("Tx is no longer valid", "tx", txID(tx), "res", res, "err", res.Error)
   405  			// NOTE: we remove tx from the cache because it might be good later
   406  			mem.removeTx(tx, mem.recheckCursor, true)
   407  		}
   408  		if mem.recheckCursor == mem.recheckEnd {
   409  			mem.recheckCursor = nil
   410  		} else {
   411  			mem.recheckCursor = mem.recheckCursor.Next()
   412  		}
   413  		if mem.recheckCursor == nil {
   414  			// Done!
   415  			atomic.StoreInt32(&mem.rechecking, 0)
   416  			mem.logger.Info("Done rechecking txs")
   417  
    418  			// in case the recheck removed all txs
   419  			if mem.Size() > 0 {
   420  				mem.notifyTxsAvailable()
   421  			}
   422  		}
   423  	default:
   424  		// ignore other messages
   425  	}
   426  }
   427  
   428  func (mem *CListMempool) TxsAvailable() <-chan struct{} {
   429  	return mem.txsAvailable
   430  }
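
         // Consumer sketch (illustrative): a component such as the consensus state can
         // block until the mempool signals that it has txs for the next height:
         //
         //	mp.EnableTxsAvailable() // must be called once, at startup
         //	go func() {
         //		for range mp.TxsAvailable() {
         //			// at most one signal per height; time to propose a block
         //		}
         //	}()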
   431  
   432  func (mem *CListMempool) notifyTxsAvailable() {
   433  	if mem.Size() == 0 {
   434  		panic("notified txs available but mempool is empty!")
   435  	}
   436  	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
   437  		// channel cap is 1, so this will send once
   438  		mem.notifiedTxsAvailable = true
   439  		select {
   440  		case mem.txsAvailable <- struct{}{}:
   441  		default:
   442  		}
   443  	}
   444  }
   445  
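         // ReapMaxBytesMaxGas returns, in insertion order, as many transactions as fit
         // within maxDataBytes total bytes and maxGas total gasWanted. A negative value
         // disables the corresponding limit; a zero maxDataBytes panics.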
   446  func (mem *CListMempool) ReapMaxBytesMaxGas(maxDataBytes, maxGas int64) types.Txs {
   447  	mem.mtx.Lock()
   448  	defer mem.mtx.Unlock()
   449  
   450  	if maxDataBytes == 0 {
   451  		panic("ReapMaxBytesMaxGas requires maxDataBytes > 0")
   452  	}
   453  
   454  	for atomic.LoadInt32(&mem.rechecking) > 0 {
   455  		// TODO: Something better?
   456  		time.Sleep(time.Millisecond * 10)
   457  	}
   458  
   459  	var totalBytes int64
   460  	var totalGas int64
   461  	// TODO: we will get a performance boost if we have a good estimate of avg
   462  	// size per tx, and set the initial capacity based off of that.
   463  	// txs := make([]types.Tx, 0, min(mem.txs.Len(), max/mem.avgTxSize))
   464  	txs := make([]types.Tx, 0, mem.txs.Len())
   465  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   466  		memTx := e.Value.(*mempoolTx)
   467  		// Check total size requirement
   468  		if maxDataBytes > -1 && totalBytes+int64(len(memTx.tx)) > maxDataBytes {
   469  			return txs
   470  		}
   471  		totalBytes += int64(len(memTx.tx))
   472  		// Check total gas requirement.
   473  		// If maxGas is negative, skip this check.
    474  		// Since newTotalGas < maxGas, which
   475  		// must be non-negative, it follows that this won't overflow.
   476  		newTotalGas := totalGas + memTx.gasWanted
   477  		if maxGas > -1 && newTotalGas > maxGas {
   478  			return txs
   479  		}
   480  		totalGas = newTotalGas
   481  		txs = append(txs, memTx.tx)
   482  	}
   483  	return txs
   484  }
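
         // Proposer-side sketch (illustrative): reap txs that fit the block limits, where
         // maxDataBytes and maxBlockGas are placeholders taken from the consensus
         // parameters by the caller (pass -1 to disable either limit):
         //
         //	txs := mp.ReapMaxBytesMaxGas(maxDataBytes, maxBlockGas)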
   485  
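         // ReapMaxTxs returns up to max transactions in insertion order; a negative max
         // returns everything currently in the mempool.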
   486  func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
   487  	mem.mtx.Lock()
   488  	defer mem.mtx.Unlock()
   489  
   490  	if max < 0 {
   491  		max = mem.txs.Len()
   492  	}
   493  
   494  	for atomic.LoadInt32(&mem.rechecking) > 0 {
   495  		// TODO: Something better?
   496  		time.Sleep(time.Millisecond * 10)
   497  	}
   498  
   499  	txs := make([]types.Tx, 0, min(mem.txs.Len(), max))
    500  	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
   501  		memTx := e.Value.(*mempoolTx)
   502  		txs = append(txs, memTx.tx)
   503  	}
   504  	return txs
   505  }
   506  
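         // Update informs the mempool that the given txs were committed at the given
         // height. Committed txs are removed from the pool (valid ones stay in the cache,
         // invalid ones are evicted from it so they can be resubmitted), and the remaining
         // txs are rechecked against the app when Recheck is enabled. The caller is
         // expected to hold the mempool lock (Lock/Unlock) around this call.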
   507  func (mem *CListMempool) Update(
   508  	height int64,
   509  	txs types.Txs,
   510  	deliverTxResponses []abci.ResponseDeliverTx,
   511  	preCheck PreCheckFunc,
   512  	maxTxBytes int64,
   513  ) error {
   514  	// Set height
   515  	mem.height = height
   516  	mem.notifiedTxsAvailable = false
   517  
   518  	if preCheck != nil {
   519  		mem.preCheck = preCheck
   520  	}
   521  	if maxTxBytes != 0 {
   522  		mem.maxTxBytes = maxTxBytes
   523  	}
   524  
   525  	for i, tx := range txs {
   526  		if deliverTxResponses[i].Error == nil {
   527  			// Add valid committed tx to the cache (if missing).
   528  			_ = mem.cache.Push(tx)
   529  		} else {
   530  			// Allow invalid transactions to be resubmitted.
   531  			mem.cache.Remove(tx)
   532  		}
   533  
   534  		// Remove committed tx from the mempool.
   535  		//
   536  		// Note an evil proposer can drop valid txs!
   537  		// Mempool before:
   538  		//   100 -> 101 -> 102
   539  		// Block, proposed by an evil proposer:
   540  		//   101 -> 102
   541  		// Mempool after:
   542  		//   100
   543  		// https://github.com/tendermint/classic/issues/3322.
   544  		if e, ok := mem.txsMap.Load(txKey(tx)); ok {
   545  			mem.removeTx(tx, e.(*clist.CElement), false)
   546  		}
   547  	}
   548  
   549  	// Either recheck non-committed txs to see if they became invalid
    550  	// or just notify that there are some txs left.
   551  	if mem.Size() > 0 {
   552  		if mem.config.Recheck {
   553  			mem.logger.Info("Recheck txs", "numtxs", mem.Size(), "height", height)
   554  			mem.recheckTxs()
   555  			// At this point, mem.txs are being rechecked.
   556  			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
   557  			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
   558  		} else {
   559  			mem.notifyTxsAvailable()
   560  		}
   561  	}
   562  
   563  	return nil
   564  }
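
         // Post-commit sketch (illustrative): how a block executor might drive Update.
         // block and deliverTxResponses are placeholders from the commit step, and error
         // handling is elided; passing nil and 0 keeps the current preCheck and maxTxBytes.
         //
         //	mp.Lock()
         //	_ = mp.FlushAppConn() // wait for any in-flight CheckTx calls to finish
         //	_ = mp.Update(block.Height, block.Txs, deliverTxResponses, nil, 0)
         //	mp.Unlock()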
   565  
   566  func (mem *CListMempool) recheckTxs() {
   567  	if mem.Size() == 0 {
   568  		panic("recheckTxs is called, but the mempool is empty")
   569  	}
   570  
   571  	atomic.StoreInt32(&mem.rechecking, 1)
   572  	mem.recheckCursor = mem.txs.Front()
   573  	mem.recheckEnd = mem.txs.Back()
   574  
   575  	// Push txs to proxyAppConn
   576  	// NOTE: globalCb may be called concurrently.
   577  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   578  		memTx := e.Value.(*mempoolTx)
   579  		// check tx size
   580  		if int64(len(memTx.tx)) > mem.maxTxBytes {
   581  			mem.removeTx(memTx.tx, e, false)
   582  			continue
   583  		}
   584  		// run precheck
   585  		if mem.preCheck != nil {
   586  			if err := mem.preCheck(memTx.tx); err != nil {
   587  				mem.removeTx(memTx.tx, e, false)
   588  				continue
   589  			}
   590  		}
   591  		// run proxy app checktx
   592  		mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{
   593  			Tx:   memTx.tx,
   594  			Type: abci.CheckTxTypeRecheck,
   595  		})
   596  	}
   597  
   598  	mem.proxyAppConn.FlushAsync()
   599  }
   600  
   601  // --------------------------------------------------------------------------------
   602  
    603  // mempoolTx is a transaction that successfully ran CheckTx.
    604  type mempoolTx struct {
    605  	height    int64    // height at which this tx was validated
   606  	gasWanted int64    // amount of gas this tx states it will require
   607  	tx        types.Tx //
   608  
   609  	// ids of peers who've sent us this tx (as a map for quick lookups).
   610  	// senders: PeerID -> bool
   611  	senders sync.Map
   612  }
   613  
   614  // Height returns the height for this transaction
   615  func (memTx *mempoolTx) Height() int64 {
   616  	return atomic.LoadInt64(&memTx.height)
   617  }
   618  
   619  // --------------------------------------------------------------------------------
   620  
   621  type txCache interface {
   622  	Reset()
   623  	Push(tx types.Tx) bool
   624  	Remove(tx types.Tx)
   625  }
   626  
    627  // mapTxCache maintains an LRU cache of transactions. This only stores the hash
   628  // of the tx, due to memory concerns.
   629  type mapTxCache struct {
   630  	mtx  sync.Mutex
   631  	size int
   632  	map_ map[[sha256.Size]byte]*list.Element
   633  	list *list.List
   634  }
   635  
   636  var _ txCache = (*mapTxCache)(nil)
   637  
   638  // newMapTxCache returns a new mapTxCache.
   639  func newMapTxCache(cacheSize int) *mapTxCache {
   640  	return &mapTxCache{
   641  		size: cacheSize,
   642  		map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
   643  		list: list.New(),
   644  	}
   645  }
   646  
   647  // Reset resets the cache to an empty state.
   648  func (cache *mapTxCache) Reset() {
   649  	cache.mtx.Lock()
   650  	cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
   651  	cache.list.Init()
   652  	cache.mtx.Unlock()
   653  }
   654  
   655  // Push adds the given tx to the cache and returns true. It returns
   656  // false if tx is already in the cache.
   657  func (cache *mapTxCache) Push(tx types.Tx) bool {
   658  	cache.mtx.Lock()
   659  	defer cache.mtx.Unlock()
   660  
   661  	// Use the tx hash in the cache
   662  	txHash := txKey(tx)
   663  	if moved, exists := cache.map_[txHash]; exists {
   664  		cache.list.MoveToBack(moved)
   665  		return false
   666  	}
   667  
   668  	if cache.list.Len() >= cache.size {
   669  		popped := cache.list.Front()
   670  		if popped != nil {
   671  			poppedTxHash := popped.Value.([sha256.Size]byte)
   672  			delete(cache.map_, poppedTxHash)
   673  			cache.list.Remove(popped)
   674  		}
   675  	}
   676  	e := cache.list.PushBack(txHash)
   677  	cache.map_[txHash] = e
   678  	return true
   679  }
   680  
   681  // Remove removes the given tx from the cache.
   682  func (cache *mapTxCache) Remove(tx types.Tx) {
   683  	cache.mtx.Lock()
   684  	txHash := txKey(tx)
   685  	popped := cache.map_[txHash]
   686  	delete(cache.map_, txHash)
   687  	if popped != nil {
   688  		cache.list.Remove(popped)
   689  	}
   690  
   691  	cache.mtx.Unlock()
   692  }
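
         // Behavioral sketch (illustrative): Push is a set-insert that refreshes the LRU
         // position of duplicates, and eviction drops the least recently pushed entry.
         //
         //	c := newMapTxCache(2)
         //	c.Push(tx1) // true: newly added
         //	c.Push(tx1) // false: duplicate, moved to the back of the list
         //	c.Push(tx2) // true
         //	c.Push(tx3) // true: evicts tx1, the front of the list
         //	c.Remove(tx2)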
   693  
   694  type nopTxCache struct{}
   695  
   696  var _ txCache = (*nopTxCache)(nil)
   697  
   698  func (nopTxCache) Reset()             {}
   699  func (nopTxCache) Push(types.Tx) bool { return true }
   700  func (nopTxCache) Remove(types.Tx)    {}
   701  
   702  // --------------------------------------------------------------------------------
   703  
    704  // txKey is the fixed-length sha256 hash array of the tx, used as the key in maps.
   705  func txKey(tx types.Tx) [sha256.Size]byte {
   706  	return sha256.Sum256(tx)
   707  }
   708  
    709  // txID is the hex-encoded hash of the bytes, interpreted as a types.Tx.
   710  func txID(tx []byte) string {
   711  	return fmt.Sprintf("%X", types.Tx(tx).Hash())
   712  }
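
         // For illustration, the two helpers serve different purposes: txKey hashes the
         // raw bytes for map lookups (txsMap, cache), while txID uses the canonical
         // types.Tx hash for log output.
         //
         //	tx := types.Tx("hello")
         //	key := txKey(tx) // [sha256.Size]byte, used for txsMap / cache lookups
         //	id := txID(tx)   // hex string of types.Tx(tx).Hash(), used for logging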