github.com/vipernet-xyz/tm@v0.34.24/mempool/v0/clist_mempool.go

     1  package v0
     2  
     3  import (
     4  	"bytes"
     5  	"errors"
     6  	"sync"
     7  	"sync/atomic"
     8  
     9  	abci "github.com/vipernet-xyz/tm/abci/types"
    10  	"github.com/vipernet-xyz/tm/config"
    11  	"github.com/vipernet-xyz/tm/libs/clist"
    12  	"github.com/vipernet-xyz/tm/libs/log"
    13  	tmmath "github.com/vipernet-xyz/tm/libs/math"
    14  	tmsync "github.com/vipernet-xyz/tm/libs/sync"
    15  	"github.com/vipernet-xyz/tm/mempool"
    16  	"github.com/vipernet-xyz/tm/p2p"
    17  	"github.com/vipernet-xyz/tm/proxy"
    18  	"github.com/vipernet-xyz/tm/types"
    19  )
    20  
    21  // CListMempool is an ordered in-memory pool for transactions before they are
    22  // proposed in a consensus round. Transaction validity is checked using the
    23  // CheckTx abci message before the transaction is added to the pool. The
    24  // mempool uses a concurrent list structure for storing transactions that can
    25  // be efficiently accessed by multiple concurrent readers.
    26  type CListMempool struct {
    27  	// Atomic integers
    28  	height   int64 // the last block Update()'d to
    29  	txsBytes int64 // total size of mempool, in bytes
    30  
    31  	// notify listeners (ie. consensus) when txs are available
    32  	notifiedTxsAvailable bool
    33  	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty
    34  
    35  	config *config.MempoolConfig
    36  
    37  	// Exclusive mutex for Update method to prevent concurrent execution of
    38  	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
    39  	updateMtx tmsync.RWMutex
    40  	preCheck  mempool.PreCheckFunc
    41  	postCheck mempool.PostCheckFunc
    42  
    43  	txs          *clist.CList // concurrent linked-list of good txs
    44  	proxyAppConn proxy.AppConnMempool
    45  
    46  	// Track whether we're rechecking txs.
    47  	// These are not protected by a mutex and are expected to be mutated in
    48  	// serial (ie. by abci responses which are called in serial).
    49  	recheckCursor *clist.CElement // next expected response
    50  	recheckEnd    *clist.CElement // re-checking stops here
    51  
    52  	// Map for quick access to txs to record sender in CheckTx.
    53  	// txsMap: txKey -> CElement
    54  	txsMap sync.Map
    55  
    56  	// Keep a cache of already-seen txs.
    57  	// This reduces the pressure on the proxyApp.
    58  	cache mempool.TxCache
    59  
    60  	logger  log.Logger
    61  	metrics *mempool.Metrics
    62  }
    63  
    64  var _ mempool.Mempool = &CListMempool{}
    65  
    66  // CListMempoolOption sets an optional parameter on the mempool.
    67  type CListMempoolOption func(*CListMempool)
    68  
    69  // NewCListMempool returns a new mempool with the given configuration and
    70  // connection to an application.
    71  func NewCListMempool(
    72  	cfg *config.MempoolConfig,
    73  	proxyAppConn proxy.AppConnMempool,
    74  	height int64,
    75  	options ...CListMempoolOption,
    76  ) *CListMempool {
    77  
    78  	mp := &CListMempool{
    79  		config:        cfg,
    80  		proxyAppConn:  proxyAppConn,
    81  		txs:           clist.New(),
    82  		height:        height,
    83  		recheckCursor: nil,
    84  		recheckEnd:    nil,
    85  		logger:        log.NewNopLogger(),
    86  		metrics:       mempool.NopMetrics(),
    87  	}
    88  
    89  	if cfg.CacheSize > 0 {
    90  		mp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
    91  	} else {
    92  		mp.cache = mempool.NopTxCache{}
    93  	}
    94  
    95  	proxyAppConn.SetResponseCallback(mp.globalCb)
    96  
    97  	for _, option := range options {
    98  		option(mp)
    99  	}
   100  
   101  	return mp
   102  }
   103  
   104  // NOTE: not thread safe - should only be called once, on startup
   105  func (mem *CListMempool) EnableTxsAvailable() {
   106  	mem.txsAvailable = make(chan struct{}, 1)
   107  }
   108  
   109  // SetLogger sets the Logger.
   110  func (mem *CListMempool) SetLogger(l log.Logger) {
   111  	mem.logger = l
   112  }
   113  
   114  // WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
   115  // false. This is run before CheckTx. Only applies to the first created block.
   116  // After that, Update overwrites the existing value.
   117  func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption {
   118  	return func(mem *CListMempool) { mem.preCheck = f }
   119  }
   120  
   121  // WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
   122  // false. This is run after CheckTx. Only applies to the first created block.
   123  // After that, Update overwrites the existing value.
   124  func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption {
   125  	return func(mem *CListMempool) { mem.postCheck = f }
   126  }
   127  
   128  // WithMetrics sets the metrics.
   129  func WithMetrics(metrics *mempool.Metrics) CListMempoolOption {
   130  	return func(mem *CListMempool) { mem.metrics = metrics }
   131  }
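
        // Example (editorial sketch, not part of the original file): constructing a
        // CListMempool with the functional options above. cfg and appConn are assumed
        // to come from the node's setup code, and mempool.PreCheckMaxBytes is assumed
        // to exist in this fork as it does in upstream Tendermint.
        func newExampleMempool(cfg *config.MempoolConfig, appConn proxy.AppConnMempool) *CListMempool {
        	mp := NewCListMempool(
        		cfg,
        		appConn,
        		0, // initial height; Update advances it after every committed block
        		WithPreCheck(mempool.PreCheckMaxBytes(int64(cfg.MaxTxBytes))),
        		WithMetrics(mempool.NopMetrics()),
        	)
        	mp.SetLogger(log.NewNopLogger())
        	return mp
        }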
   132  
   133  // Safe for concurrent use by multiple goroutines.
   134  func (mem *CListMempool) Lock() {
   135  	mem.updateMtx.Lock()
   136  }
   137  
   138  // Safe for concurrent use by multiple goroutines.
   139  func (mem *CListMempool) Unlock() {
   140  	mem.updateMtx.Unlock()
   141  }
   142  
   143  // Safe for concurrent use by multiple goroutines.
   144  func (mem *CListMempool) Size() int {
   145  	return mem.txs.Len()
   146  }
   147  
   148  // Safe for concurrent use by multiple goroutines.
   149  func (mem *CListMempool) SizeBytes() int64 {
   150  	return atomic.LoadInt64(&mem.txsBytes)
   151  }
   152  
   153  // Lock() must be held by the caller during execution.
   154  func (mem *CListMempool) FlushAppConn() error {
   155  	return mem.proxyAppConn.FlushSync()
   156  }
   157  
   158  // XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
   159  func (mem *CListMempool) Flush() {
   160  	mem.updateMtx.RLock()
   161  	defer mem.updateMtx.RUnlock()
   162  
   163  	_ = atomic.SwapInt64(&mem.txsBytes, 0)
   164  	mem.cache.Reset()
   165  
   166  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   167  		mem.txs.Remove(e)
   168  		e.DetachPrev()
   169  	}
   170  
   171  	mem.txsMap.Range(func(key, _ interface{}) bool {
   172  		mem.txsMap.Delete(key)
   173  		return true
   174  	})
   175  }
   176  
   177  // TxsFront returns the first transaction in the ordered list for peer
   178  // goroutines to call .NextWait() on.
   179  // FIXME: leaking implementation details!
   180  //
   181  // Safe for concurrent use by multiple goroutines.
   182  func (mem *CListMempool) TxsFront() *clist.CElement {
   183  	return mem.txs.Front()
   184  }
   185  
   186  // TxsWaitChan returns a channel to wait on transactions. It will be closed
   187  // once the mempool is not empty (ie. the internal `mem.txs` has at least one
   188  // element)
   189  //
   190  // Safe for concurrent use by multiple goroutines.
   191  func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
   192  	return mem.txs.WaitChan()
   193  }
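
        // Example (editorial sketch): how a reader goroutine, such as the mempool
        // reactor's gossip routine, typically consumes the list: park on TxsWaitChan()
        // until at least one tx exists, then walk the concurrent list from TxsFront().
        // The real reactor blocks on e.NextWait() between elements; this sketch simply
        // re-scans the list on each pass.
        func exampleConsumeTxs(mem *CListMempool) {
        	for {
        		<-mem.TxsWaitChan() // blocks while the mempool is empty

        		for e := mem.TxsFront(); e != nil; e = e.Next() {
        			memTx := e.Value.(*mempoolTx)
        			_ = memTx.tx // e.g. gossip memTx.tx to a peer here
        		}
        	}
        }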
   194  
   195  // CheckTx executes a new transaction against the application to determine its
   196  // validity and whether it should be added to the mempool.
   197  // It blocks if we're waiting on Update() or Reap().
   198  // cb is a callback from the CheckTx command; it gets called from another goroutine.
   199  //
   200  // CONTRACT: Either cb will get called, or err returned.
   201  //
   202  // Safe for concurrent use by multiple goroutines.
   203  func (mem *CListMempool) CheckTx(
   204  	tx types.Tx,
   205  	cb func(*abci.Response),
   206  	txInfo mempool.TxInfo,
   207  ) error {
   208  
   209  	mem.updateMtx.RLock()
   210  	// use defer to unlock mutex because application (*local client*) might panic
   211  	defer mem.updateMtx.RUnlock()
   212  
   213  	txSize := len(tx)
   214  
   215  	if err := mem.isFull(txSize); err != nil {
   216  		return err
   217  	}
   218  
   219  	if txSize > mem.config.MaxTxBytes {
   220  		return mempool.ErrTxTooLarge{
   221  			Max:    mem.config.MaxTxBytes,
   222  			Actual: txSize,
   223  		}
   224  	}
   225  
   226  	if mem.preCheck != nil {
   227  		if err := mem.preCheck(tx); err != nil {
   228  			return mempool.ErrPreCheck{
   229  				Reason: err,
   230  			}
   231  		}
   232  	}
   233  
   234  	// NOTE: proxyAppConn may error if tx buffer is full
   235  	if err := mem.proxyAppConn.Error(); err != nil {
   236  		return err
   237  	}
   238  
   239  	if !mem.cache.Push(tx) { // if the transaction already exists in the cache
   240  		// Record a new sender for a tx we've already seen.
   241  		// Note it's possible a tx is still in the cache but no longer in the mempool
   242  		// (eg. after committing a block, txs are removed from mempool but not cache),
   243  		// so we only record the sender for txs still in the mempool.
   244  		if e, ok := mem.txsMap.Load(tx.Key()); ok {
   245  			memTx := e.(*clist.CElement).Value.(*mempoolTx)
   246  			memTx.senders.LoadOrStore(txInfo.SenderID, true)
   247  			// TODO: consider punishing peer for dups,
   248  			// its non-trivial since invalid txs can become valid,
   249  			// but they can spam the same tx with little cost to them atm.
   250  		}
   251  		return mempool.ErrTxInCache
   252  	}
   253  
   254  	reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
   255  	reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb))
   256  
   257  	return nil
   258  }
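
        // Example (editorial sketch): submitting a tx, e.g. from an RPC handler.
        // Queueing errors (full mempool, tx too large, ErrTxInCache, ...) come back as
        // the returned error; the app's verdict arrives later via the callback.
        // mempool.UnknownPeerID is the sender ID used when there is no peer, assumed
        // present in this fork as in upstream Tendermint.
        func exampleCheckTx(mem *CListMempool, tx types.Tx) error {
        	return mem.CheckTx(tx, func(res *abci.Response) {
        		if r := res.GetCheckTx(); r != nil && r.Code != abci.CodeTypeOK {
        			mem.logger.Debug("tx rejected by the app", "code", r.Code, "log", r.Log)
        		}
        	}, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
        }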
   259  
   260  // Global callback that will be called after every ABCI response.
   261  // Having a single global callback avoids needing to set a callback for each request.
   262  // However, processing the CheckTx response requires the peerID (so we can track which peers sent us which txs),
   263  // and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
   264  // include this information. If we're not in the midst of a recheck, this function will just return,
   265  // so the request-specific callback can do the work.
   266  //
   267  // When rechecking, we don't need the peerID, so the recheck callback happens
   268  // here.
   269  func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
   270  	if mem.recheckCursor == nil {
   271  		return
   272  	}
   273  
   274  	mem.metrics.RecheckTimes.Add(1)
   275  	mem.resCbRecheck(req, res)
   276  
   277  	// update metrics
   278  	mem.metrics.Size.Set(float64(mem.Size()))
   279  }
   280  
   281  // Request specific callback that should be set on individual reqRes objects
   282  // to incorporate local information when processing the response.
   283  // This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
   284  // NOTE: alternatively, we could include this information in the ABCI request itself.
   285  //
   286  // External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
   287  // when all other response processing is complete.
   288  //
   289  // Used in CheckTx to record PeerID who sent us the tx.
   290  func (mem *CListMempool) reqResCb(
   291  	tx []byte,
   292  	peerID uint16,
   293  	peerP2PID p2p.ID,
   294  	externalCb func(*abci.Response),
   295  ) func(res *abci.Response) {
   296  	return func(res *abci.Response) {
   297  		if mem.recheckCursor != nil {
   298  			// this should never happen
   299  			panic("recheck cursor is not nil in reqResCb")
   300  		}
   301  
   302  		mem.resCbFirstTime(tx, peerID, peerP2PID, res)
   303  
   304  		// update metrics
   305  		mem.metrics.Size.Set(float64(mem.Size()))
   306  
   307  		// passed in by the caller of CheckTx, eg. the RPC
   308  		if externalCb != nil {
   309  			externalCb(res)
   310  		}
   311  	}
   312  }
   313  
   314  // Called from:
   315  //   - resCbFirstTime (lock not held) if tx is valid
   316  func (mem *CListMempool) addTx(memTx *mempoolTx) {
   317  	e := mem.txs.PushBack(memTx)
   318  	mem.txsMap.Store(memTx.tx.Key(), e)
   319  	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
   320  	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
   321  }
   322  
   323  // Called from:
   324  //   - Update (lock held) if tx was committed
   325  //   - resCbRecheck (lock not held) if tx was invalidated
   326  func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
   327  	mem.txs.Remove(elem)
   328  	elem.DetachPrev()
   329  	mem.txsMap.Delete(tx.Key())
   330  	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
   331  
   332  	if removeFromCache {
   333  		mem.cache.Remove(tx)
   334  	}
   335  }
   336  
   337  // RemoveTxByKey removes a transaction from the mempool by its TxKey index.
   338  func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error {
   339  	if e, ok := mem.txsMap.Load(txKey); ok {
   340  		memTx := e.(*clist.CElement).Value.(*mempoolTx)
   341  		if memTx != nil {
   342  			mem.removeTx(memTx.tx, e.(*clist.CElement), false)
   343  			return nil
   344  		}
   345  		return errors.New("found nil transaction in mempool")
   346  	}
   347  	return errors.New("transaction not found in mempool")
   348  }
   349  
   350  func (mem *CListMempool) isFull(txSize int) error {
   351  	var (
   352  		memSize  = mem.Size()
   353  		txsBytes = mem.SizeBytes()
   354  	)
   355  
   356  	if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
   357  		return mempool.ErrMempoolIsFull{
   358  			NumTxs:      memSize,
   359  			MaxTxs:      mem.config.Size,
   360  			TxsBytes:    txsBytes,
   361  			MaxTxsBytes: mem.config.MaxTxsBytes,
   362  		}
   363  	}
   364  
   365  	return nil
   366  }
   367  
   368  // resCbFirstTime is the callback invoked after the app has checked the tx for the first time.
   369  //
   370  // The case where the app checks the tx for the second and subsequent times is
   371  // handled by the resCbRecheck callback.
   372  func (mem *CListMempool) resCbFirstTime(
   373  	tx []byte,
   374  	peerID uint16,
   375  	peerP2PID p2p.ID,
   376  	res *abci.Response,
   377  ) {
   378  	switch r := res.Value.(type) {
   379  	case *abci.Response_CheckTx:
   380  		var postCheckErr error
   381  		if mem.postCheck != nil {
   382  			postCheckErr = mem.postCheck(tx, r.CheckTx)
   383  		}
   384  		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
   385  			// Check mempool isn't full again to reduce the chance of exceeding the
   386  			// limits.
   387  			if err := mem.isFull(len(tx)); err != nil {
   388  				// remove from cache (mempool might have a space later)
   389  				mem.cache.Remove(tx)
   390  				mem.logger.Error(err.Error())
   391  				return
   392  			}
   393  
   394  			memTx := &mempoolTx{
   395  				height:    mem.height,
   396  				gasWanted: r.CheckTx.GasWanted,
   397  				tx:        tx,
   398  			}
   399  			memTx.senders.Store(peerID, true)
   400  			mem.addTx(memTx)
   401  			mem.logger.Debug(
   402  				"added good transaction",
   403  				"tx", types.Tx(tx).Hash(),
   404  				"res", r,
   405  				"height", memTx.height,
   406  				"total", mem.Size(),
   407  			)
   408  			mem.notifyTxsAvailable()
   409  		} else {
   410  			// ignore bad transaction
   411  			mem.logger.Debug(
   412  				"rejected bad transaction",
   413  				"tx", types.Tx(tx).Hash(),
   414  				"peerID", peerP2PID,
   415  				"res", r,
   416  				"err", postCheckErr,
   417  			)
   418  			mem.metrics.FailedTxs.Add(1)
   419  
   420  			if !mem.config.KeepInvalidTxsInCache {
   421  				// remove from cache (it might be good later)
   422  				mem.cache.Remove(tx)
   423  			}
   424  		}
   425  
   426  	default:
   427  		// ignore other messages
   428  	}
   429  }
   430  
   431  // resCbRecheck is the callback invoked after the app has rechecked the tx.
   432  //
   433  // The case where the app checks the tx for the first time is handled by the
   434  // resCbFirstTime callback.
   435  func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
   436  	switch r := res.Value.(type) {
   437  	case *abci.Response_CheckTx:
   438  		tx := req.GetCheckTx().Tx
   439  		memTx := mem.recheckCursor.Value.(*mempoolTx)
   440  
   441  		// Search through the remaining list of tx to recheck for a transaction that matches
   442  		// the one we received from the ABCI application.
   443  		for {
   444  			if bytes.Equal(tx, memTx.tx) {
   445  				// We've found a tx in the recheck list that matches the tx that we
   446  				// received from the ABCI application.
   447  				// Break, and use this transaction for further checks.
   448  				break
   449  			}
   450  
   451  			mem.logger.Error(
   452  				"re-CheckTx transaction mismatch",
   453  				"got", types.Tx(tx),
   454  				"expected", memTx.tx,
   455  			)
   456  
   457  			if mem.recheckCursor == mem.recheckEnd {
   458  				// we reached the end of the recheckTx list without finding a tx
   459  				// matching the one we received from the ABCI application.
   460  				// Return without processing any tx.
   461  				mem.recheckCursor = nil
   462  				return
   463  			}
   464  
   465  			mem.recheckCursor = mem.recheckCursor.Next()
   466  			memTx = mem.recheckCursor.Value.(*mempoolTx)
   467  		}
   468  
   469  		var postCheckErr error
   470  		if mem.postCheck != nil {
   471  			postCheckErr = mem.postCheck(tx, r.CheckTx)
   472  		}
   473  
   474  		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
   475  			// Good, nothing to do.
   476  		} else {
   477  			// Tx became invalidated due to newly committed block.
   478  			mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr)
   479  			// NOTE: we remove tx from the cache because it might be good later
   480  			mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
   481  		}
   482  		if mem.recheckCursor == mem.recheckEnd {
   483  			mem.recheckCursor = nil
   484  		} else {
   485  			mem.recheckCursor = mem.recheckCursor.Next()
   486  		}
   487  		if mem.recheckCursor == nil {
   488  			// Done!
   489  			mem.logger.Debug("done rechecking txs")
   490  
   491  			// in case the recheck removed all txs
   492  			if mem.Size() > 0 {
   493  				mem.notifyTxsAvailable()
   494  			}
   495  		}
   496  	default:
   497  		// ignore other messages
   498  	}
   499  }
   500  
   501  // Safe for concurrent use by multiple goroutines.
   502  func (mem *CListMempool) TxsAvailable() <-chan struct{} {
   503  	return mem.txsAvailable
   504  }
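
        // Example (editorial sketch): blocking until the mempool has txs for the next
        // height. EnableTxsAvailable must have been called at startup; otherwise
        // TxsAvailable() returns a nil channel and this receive blocks forever.
        func exampleWaitForTxs(mem *CListMempool) {
        	<-mem.TxsAvailable() // fires at most once per height, when the mempool is non-empty
        }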
   505  
   506  func (mem *CListMempool) notifyTxsAvailable() {
   507  	if mem.Size() == 0 {
   508  		panic("notified txs available but mempool is empty!")
   509  	}
   510  	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
   511  		// channel cap is 1, so this will send once
   512  		mem.notifiedTxsAvailable = true
   513  		select {
   514  		case mem.txsAvailable <- struct{}{}:
   515  		default:
   516  		}
   517  	}
   518  }
   519  
   520  // Safe for concurrent use by multiple goroutines.
   521  func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
   522  	mem.updateMtx.RLock()
   523  	defer mem.updateMtx.RUnlock()
   524  
   525  	var (
   526  		totalGas    int64
   527  		runningSize int64
   528  	)
   529  
   530  	// TODO: we will get a performance boost if we have a good estimate of avg
   531  	// size per tx, and set the initial capacity based off of that.
   532  	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
   533  	txs := make([]types.Tx, 0, mem.txs.Len())
   534  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   535  		memTx := e.Value.(*mempoolTx)
   536  
   537  		txs = append(txs, memTx.tx)
   538  
   539  		dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})
   540  
   541  		// Check total size requirement
   542  		if maxBytes > -1 && runningSize+dataSize > maxBytes {
   543  			return txs[:len(txs)-1]
   544  		}
   545  
   546  		runningSize += dataSize
   547  
   548  		// Check total gas requirement.
   549  		// If maxGas is negative, skip this check.
   550  		// Since newTotalGas < maxGas, which
   551  		// must be non-negative, it follows that this won't overflow.
   552  		newTotalGas := totalGas + memTx.gasWanted
   553  		if maxGas > -1 && newTotalGas > maxGas {
   554  			return txs[:len(txs)-1]
   555  		}
   556  		totalGas = newTotalGas
   557  	}
   558  	return txs
   559  }
   560  
   561  // Safe for concurrent use by multiple goroutines.
   562  func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
   563  	mem.updateMtx.RLock()
   564  	defer mem.updateMtx.RUnlock()
   565  
   566  	if max < 0 {
   567  		max = mem.txs.Len()
   568  	}
   569  
   570  	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
   571  	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
   572  		memTx := e.Value.(*mempoolTx)
   573  		txs = append(txs, memTx.tx)
   574  	}
   575  	return txs
   576  }
   577  
   578  // Lock() must be held by the caller during execution.
   579  func (mem *CListMempool) Update(
   580  	height int64,
   581  	txs types.Txs,
   582  	deliverTxResponses []*abci.ResponseDeliverTx,
   583  	preCheck mempool.PreCheckFunc,
   584  	postCheck mempool.PostCheckFunc,
   585  ) error {
   586  	// Set height
   587  	mem.height = height
   588  	mem.notifiedTxsAvailable = false
   589  
   590  	if preCheck != nil {
   591  		mem.preCheck = preCheck
   592  	}
   593  	if postCheck != nil {
   594  		mem.postCheck = postCheck
   595  	}
   596  
   597  	for i, tx := range txs {
   598  		if deliverTxResponses[i].Code == abci.CodeTypeOK {
   599  			// Add valid committed tx to the cache (if missing).
   600  			_ = mem.cache.Push(tx)
   601  		} else if !mem.config.KeepInvalidTxsInCache {
   602  			// Allow invalid transactions to be resubmitted.
   603  			mem.cache.Remove(tx)
   604  		}
   605  
   606  		// Remove committed tx from the mempool.
   607  		//
   608  		// Note an evil proposer can drop valid txs!
   609  		// Mempool before:
   610  		//   100 -> 101 -> 102
   611  		// Block, proposed by an evil proposer:
   612  		//   101 -> 102
   613  		// Mempool after:
   614  		//   100
   615  		// https://github.com/vipernet-xyz/tm/issues/3322.
   616  		if e, ok := mem.txsMap.Load(tx.Key()); ok {
   617  			mem.removeTx(tx, e.(*clist.CElement), false)
   618  		}
   619  	}
   620  
   621  	// Either recheck non-committed txs to see if they became invalid
   622  	// or just notify that there are some txs left.
   623  	if mem.Size() > 0 {
   624  		if mem.config.Recheck {
   625  			mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height)
   626  			mem.recheckTxs()
   627  			// At this point, mem.txs are being rechecked.
   628  			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
   629  			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
   630  		} else {
   631  			mem.notifyTxsAvailable()
   632  		}
   633  	}
   634  
   635  	// Update metrics
   636  	mem.metrics.Size.Set(float64(mem.Size()))
   637  
   638  	return nil
   639  }
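
        // Example (editorial sketch): the commit-time sequence a caller (normally the
        // consensus/state machinery) is expected to follow. Lock() must be held across
        // FlushAppConn() and Update(), and deliverTxResponses must line up one-to-one
        // with the committed txs.
        func exampleUpdateAfterCommit(
        	mem *CListMempool,
        	height int64,
        	committedTxs types.Txs,
        	deliverTxResponses []*abci.ResponseDeliverTx,
        ) error {
        	mem.Lock()
        	defer mem.Unlock()

        	// Process any outstanding CheckTx responses before mutating mempool state
        	// for the new height.
        	if err := mem.FlushAppConn(); err != nil {
        		return err
        	}
        	return mem.Update(height, committedTxs, deliverTxResponses, nil, nil)
        }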
   640  
   641  func (mem *CListMempool) recheckTxs() {
   642  	if mem.Size() == 0 {
   643  		panic("recheckTxs is called, but the mempool is empty")
   644  	}
   645  
   646  	mem.recheckCursor = mem.txs.Front()
   647  	mem.recheckEnd = mem.txs.Back()
   648  
   649  	// Push txs to proxyAppConn
   650  	// NOTE: globalCb may be called concurrently.
   651  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   652  		memTx := e.Value.(*mempoolTx)
   653  		mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{
   654  			Tx:   memTx.tx,
   655  			Type: abci.CheckTxType_Recheck,
   656  		})
   657  	}
   658  
   659  	mem.proxyAppConn.FlushAsync()
   660  }
   661  
   662  //--------------------------------------------------------------------------------
   663  
   664  // mempoolTx is a transaction that successfully ran CheckTx.
   665  type mempoolTx struct {
   666  	height    int64    // height that this tx had been validated in
   667  	gasWanted int64    // amount of gas this tx states it will require
   668  	tx        types.Tx //
   669  
   670  	// ids of peers who've sent us this tx (as a map for quick lookups).
   671  	// senders: PeerID -> bool
   672  	senders sync.Map
   673  }
   674  
   675  // Height returns the height for this transaction
   676  func (memTx *mempoolTx) Height() int64 {
   677  	return atomic.LoadInt64(&memTx.height)
   678  }