github.com/Finschia/ostracon@v1.1.5/mempool/v0/clist_mempool.go

     1  package v0
     2  
     3  import (
     4  	"errors"
     5  	"sync"
     6  	"sync/atomic"
     7  	"time"
     8  
     9  	"github.com/Finschia/ostracon/config"
    10  	"github.com/Finschia/ostracon/mempool"
    11  
    12  	abci "github.com/tendermint/tendermint/abci/types"
    13  	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    14  
    15  	ocabci "github.com/Finschia/ostracon/abci/types"
    16  	"github.com/Finschia/ostracon/libs/clist"
    17  	"github.com/Finschia/ostracon/libs/log"
    18  	tmmath "github.com/Finschia/ostracon/libs/math"
    19  	tmsync "github.com/Finschia/ostracon/libs/sync"
    20  	"github.com/Finschia/ostracon/p2p"
    21  	"github.com/Finschia/ostracon/proxy"
    22  	"github.com/Finschia/ostracon/types"
    23  )
    24  
    25  // CListMempool is an ordered in-memory pool for transactions before they are
    26  // proposed in a consensus round. Transaction validity is checked using the
    27  // CheckTx abci message before the transaction is added to the pool. The
    28  // mempool uses a concurrent list structure for storing transactions that can
    29  // be efficiently accessed by multiple concurrent readers.
    30  type CListMempool struct {
    31  	// Atomic integers
    32  	height   int64 // the last block Update()'d to
    33  	txsBytes int64 // total size of mempool, in bytes
    34  
    35  	reserved      int   // the number of txs currently being checked; counted when checking whether the mempool is full
    36  	reservedBytes int64 // the total size of txs currently being checked; counted when checking whether the mempool is full
    37  	reservedMtx   sync.Mutex
    38  
    39  	// notify listeners (ie. consensus) when txs are available
    40  	notifiedTxsAvailable bool
    41  	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty
    42  
    43  	config *config.MempoolConfig
    44  
    45  	// Exclusive mutex for Update method to prevent concurrent execution of
    46  	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
    47  	updateMtx tmsync.RWMutex
    48  	preCheck  mempool.PreCheckFunc
    49  	postCheck mempool.PostCheckFunc
    50  
    51  	chReqCheckTx chan *requestCheckTxAsync
    52  
    53  	txs          *clist.CList // concurrent linked-list of good txs
    54  	proxyAppConn proxy.AppConnMempool
    55  
    56  	// Map for quick access to txs to record sender in CheckTx.
    57  	// txsMap: txKey -> CElement
    58  	txsMap sync.Map
    59  
    60  	// Keep a cache of already-seen txs.
    61  	// This reduces the pressure on the proxyApp.
    62  	cache mempool.TxCache
    63  
    64  	logger  log.Logger
    65  	metrics *mempool.Metrics
    66  }
    67  
    68  type requestCheckTxAsync struct {
    69  	tx        types.Tx
    70  	txInfo    mempool.TxInfo
    71  	prepareCb func(error)
    72  	checkTxCb func(*ocabci.Response)
    73  }
    74  
    75  var _ mempool.Mempool = &CListMempool{}
    76  
    77  // CListMempoolOption sets an optional parameter on the mempool.
    78  type CListMempoolOption func(*CListMempool)
    79  
    80  // NewCListMempool returns a new mempool with the given configuration and
    81  // connection to an application.
    82  func NewCListMempool(
    83  	cfg *config.MempoolConfig,
    84  	proxyAppConn proxy.AppConnMempool,
    85  	height int64,
    86  	options ...CListMempoolOption,
    87  ) *CListMempool {
    88  	mp := &CListMempool{
    89  		config:       cfg,
    90  		proxyAppConn: proxyAppConn,
    91  		txs:          clist.New(),
    92  		height:       height,
    93  		chReqCheckTx: make(chan *requestCheckTxAsync, cfg.Size),
    94  		logger:       log.NewNopLogger(),
    95  		metrics:      mempool.NopMetrics(),
    96  	}
    97  
    98  	if cfg.CacheSize > 0 {
    99  		mp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
   100  	} else {
   101  		mp.cache = mempool.NopTxCache{}
   102  	}
   103  	proxyAppConn.SetGlobalCallback(mp.globalCb)
   104  
   105  	for _, option := range options {
   106  		option(mp)
   107  	}
   108  	go mp.checkTxAsyncReactor()
   109  	return mp
   110  }
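
// A minimal construction sketch (hypothetical caller, not part of this file):
// assuming cfg is a *config.MempoolConfig and appConn is the application's
// proxy.AppConnMempool obtained elsewhere, a mempool can be wired up roughly like:
//
//	mp := NewCListMempool(cfg, appConn, 0, WithMetrics(mempool.NopMetrics()))
//	mp.SetLogger(log.NewNopLogger())
//	mp.EnableTxsAvailable() // only if a consumer will wait on TxsAvailable()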
   111  
   112  // NOTE: not thread safe - should only be called once, on startup
   113  func (mem *CListMempool) EnableTxsAvailable() {
   114  	mem.txsAvailable = make(chan struct{}, 1)
   115  }
   116  
   117  // SetLogger sets the Logger.
   118  func (mem *CListMempool) SetLogger(l log.Logger) {
   119  	mem.logger = l
   120  }
   121  
   122  // WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
   123  // false. This is run before CheckTx. Only applies to the first created block.
   124  // After that, Update overwrites the existing value.
   125  func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption {
   126  	return func(mem *CListMempool) { mem.preCheck = f }
   127  }
   128  
   129  // WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
   130  // false. This is run after CheckTx. Only applies to the first created block.
   131  // After that, Update overwrites the existing value.
   132  func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption {
   133  	return func(mem *CListMempool) { mem.postCheck = f }
   134  }
   135  
   136  // WithMetrics sets the metrics.
   137  func WithMetrics(metrics *mempool.Metrics) CListMempoolOption {
   138  	return func(mem *CListMempool) { mem.metrics = metrics }
   139  }
   140  
   141  // Safe for concurrent use by multiple goroutines.
   142  func (mem *CListMempool) Lock() {
   143  	mem.updateMtx.Lock()
   144  }
   145  
   146  // Safe for concurrent use by multiple goroutines.
   147  func (mem *CListMempool) Unlock() {
   148  	mem.updateMtx.Unlock()
   149  }
   150  
   151  // Safe for concurrent use by multiple goroutines.
   152  func (mem *CListMempool) Size() int {
   153  	return mem.txs.Len()
   154  }
   155  
   156  // Safe for concurrent use by multiple goroutines.
   157  func (mem *CListMempool) SizeBytes() int64 {
   158  	return atomic.LoadInt64(&mem.txsBytes)
   159  }
   160  
   161  // Lock() must be held by the caller during execution.
   162  func (mem *CListMempool) FlushAppConn() error {
   163  	_, err := mem.proxyAppConn.FlushSync()
   164  	return err
   165  }
   166  
   167  // XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
   168  func (mem *CListMempool) Flush() {
   169  	mem.updateMtx.Lock()
   170  	defer mem.updateMtx.Unlock()
   171  
   172  	_ = atomic.SwapInt64(&mem.txsBytes, 0)
   173  	mem.cache.Reset()
   174  
   175  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   176  		mem.txs.Remove(e)
   177  		e.DetachPrev()
   178  	}
   179  
   180  	mem.txsMap.Range(func(key, _ interface{}) bool {
   181  		mem.txsMap.Delete(key)
   182  		return true
   183  	})
   184  }
   185  
   186  // TxsFront returns the first transaction in the ordered list for peer
   187  // goroutines to call .NextWait() on.
   188  // FIXME: leaking implementation details!
   189  //
   190  // Safe for concurrent use by multiple goroutines.
   191  func (mem *CListMempool) TxsFront() *clist.CElement {
   192  	return mem.txs.Front()
   193  }
   194  
   195  // TxsWaitChan returns a channel to wait on transactions. It will be closed
   196  // once the mempool is not empty (ie. the internal `mem.txs` has at least one
   197  // element)
   198  //
   199  // Safe for concurrent use by multiple goroutines.
   200  func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
   201  	return mem.txs.WaitChan()
   202  }
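
// A sketch of how a reader in this package (e.g. the mempool reactor) might
// combine TxsWaitChan and TxsFront to consume the list (hypothetical loop body;
// mem is a *CListMempool):
//
//	if mem.TxsFront() == nil {
//		<-mem.TxsWaitChan() // closed once the pool becomes non-empty
//	}
//	for e := mem.TxsFront(); e != nil; e = e.Next() {
//		memTx := e.Value.(*mempoolTx)
//		_ = memTx.tx // e.g. gossip the tx, then block on e.NextWait() for the next element
//	}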
   203  
   204  // CheckTxSync blocks if we're waiting on Update() or Reap().
   205  // cb: A callback from the CheckTxSync command.
   206  //
   207  //	It gets called from another goroutine.
   208  //
   209  // CONTRACT: Either cb will get called, or err returned.
   210  //
   211  // Safe for concurrent use by multiple goroutines.
   212  func (mem *CListMempool) CheckTxSync(
   213  	tx types.Tx,
   214  	cb func(*ocabci.Response),
   215  	txInfo mempool.TxInfo,
   216  ) error {
   217  
   218  	mem.updateMtx.RLock()
   219  	// use defer to unlock mutex because application (*local client*) might panic
   220  	defer mem.updateMtx.RUnlock()
   221  
   222  	if err := mem.prepareCheckTx(tx, txInfo); err != nil {
   223  		return err
   224  	}
   225  
   226  	// CONTRACT: `app.CheckTxSync()` should check whether `GasWanted` is valid (0 <= GasWanted <= block.maxGas)
   227  	var r *ocabci.ResponseCheckTx
   228  	r, err := mem.proxyAppConn.CheckTxSync(abci.RequestCheckTx{Tx: tx})
   229  	if err != nil {
   230  		return err
   231  	}
   232  
   233  	res := ocabci.ToResponseCheckTx(*r)
   234  	mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, res, cb)
   235  	return err
   236  }
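
// A sketch of a caller (e.g. an RPC handler) submitting a tx synchronously
// (hypothetical snippet; mp is a *CListMempool and tx a types.Tx):
//
//	err := mp.CheckTxSync(tx, func(res *ocabci.Response) {
//		if r, ok := res.Value.(*ocabci.Response_CheckTx); ok && r.CheckTx.Code != ocabci.CodeTypeOK {
//			// the application rejected the tx
//		}
//	}, mempool.TxInfo{})
//	if err != nil {
//		// rejected before reaching the app: full mempool, duplicate, too large, failed precheck, ...
//	}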
   237  
   238  // cb: A callback from the CheckTx command.
   239  //
   240  //	It gets called from another goroutine.
   241  //
   242  // Safe for concurrent use by multiple goroutines.
   243  func (mem *CListMempool) CheckTxAsync(
   244  	tx types.Tx,
   245  	txInfo mempool.TxInfo,
   246  	prepareCb func(error),
   247  	checkTxCb func(*ocabci.Response),
   248  ) {
   249  	mem.chReqCheckTx <- &requestCheckTxAsync{tx: tx, txInfo: txInfo, prepareCb: prepareCb, checkTxCb: checkTxCb}
   250  }
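
// The asynchronous variant splits the result into two callbacks: prepareCb
// reports admission errors found before the tx reaches the app, and checkTxCb
// later delivers the application's response from another goroutine. A usage
// sketch (hypothetical snippet; mp is a *CListMempool):
//
//	mp.CheckTxAsync(tx, mempool.TxInfo{},
//		func(err error) {
//			// err != nil means the tx was rejected before reaching the app
//		},
//		func(res *ocabci.Response) {
//			// handle the application's CheckTx response
//		},
//	)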
   251  
   252  func (mem *CListMempool) checkTxAsyncReactor() {
   253  	for req := range mem.chReqCheckTx {
   254  		mem.checkTxAsync(req.tx, req.txInfo, req.prepareCb, req.checkTxCb)
   255  	}
   256  }
   257  
   258  // It blocks if we're waiting on Update() or Reap().
   259  func (mem *CListMempool) checkTxAsync(
   260  	tx types.Tx,
   261  	txInfo mempool.TxInfo,
   262  	prepareCb func(error),
   263  	checkTxCb func(*ocabci.Response),
   264  ) {
   265  	mem.updateMtx.RLock()
   266  	defer func() {
   267  		if r := recover(); r != nil {
   268  			mem.updateMtx.RUnlock()
   269  			panic(r)
   270  		}
   271  	}()
   272  
   273  	err := mem.prepareCheckTx(tx, txInfo)
   274  	if prepareCb != nil {
   275  		prepareCb(err)
   276  	}
   277  	if err != nil {
   278  		mem.updateMtx.RUnlock()
   279  		return
   280  	}
   281  
   282  	// CONTRACT: `app.CheckTxAsync()` should check whether `GasWanted` is valid (0 <= GasWanted <= block.maxGas)
   283  	mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}, func(res *ocabci.Response) {
   284  		mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, res, func(response *ocabci.Response) {
   285  			if checkTxCb != nil {
   286  				checkTxCb(response)
   287  			}
   288  			mem.updateMtx.RUnlock()
   289  		})
   290  	})
   291  }
   292  
   293  // CONTRACT: the caller should hold `mem.updateMtx.RLock()`
   294  func (mem *CListMempool) prepareCheckTx(tx types.Tx, txInfo mempool.TxInfo) error {
   295  	// To keep `mem.txs` and `mem.txsMap` consistent
   296  	if _, ok := mem.txsMap.Load(tx.Key()); ok {
   297  		return mempool.ErrTxInMap
   298  	}
   299  
   300  	txSize := len(tx)
   301  
   302  	if err := mem.isFull(txSize); err != nil {
   303  		return err
   304  	}
   305  
   306  	if txSize > mem.config.MaxTxBytes {
   307  		return mempool.ErrTxTooLarge{
   308  			Max:    mem.config.MaxTxBytes,
   309  			Actual: txSize,
   310  		}
   311  	}
   312  
   313  	if mem.preCheck != nil {
   314  		if err := mem.preCheck(tx); err != nil {
   315  			return mempool.ErrPreCheck{
   316  				Reason: err,
   317  			}
   318  		}
   319  	}
   320  
   321  	// NOTE: proxyAppConn may error if tx buffer is full
   322  	if err := mem.proxyAppConn.Error(); err != nil {
   323  		return err
   324  	}
   325  
   326  	if !mem.cache.Push(tx) { // if the transaction already exists in the cache
   327  		// Record a new sender for a tx we've already seen.
   328  		// Note it's possible a tx is still in the cache but no longer in the mempool
   329  		// (eg. after committing a block, txs are removed from mempool but not cache),
   330  		// so we only record the sender for txs still in the mempool.
   331  		if e, ok := mem.txsMap.Load(tx.Key()); ok {
   332  			memTx := e.(*clist.CElement).Value.(*mempoolTx)
   333  			memTx.senders.LoadOrStore(txInfo.SenderID, true)
   334  			// TODO: consider punishing peer for dups,
   335  			// its non-trivial since invalid txs can become valid,
   336  			// but they can spam the same tx with little cost to them atm.
   337  		}
   338  		return mempool.ErrTxInCache
   339  	}
   340  
   341  	// reserve space in the mempool; this should be done just before calling `mem.proxyAppConn.CheckTxAsync()`
   342  	if err := mem.reserve(int64(txSize)); err != nil {
   343  		// remove from cache
   344  		mem.cache.Remove(tx)
   345  		return err
   346  	}
   347  
   348  	return nil
   349  }
   350  
   351  // Global callback that will be called after every ABCI response.
   352  // Having a single global callback avoids needing to set a callback for each request.
   353  // However, processing the checkTx response requires the peerID (so we can track which peers we heard each tx from),
   354  // and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
   355  // include this information. If we're not in the midst of a recheck, this function will just return,
   356  // so the request specific callback can do the work.
   357  //
   358  // When rechecking, we don't need the peerID, so the recheck callback happens
   359  // here.
   360  func (mem *CListMempool) globalCb(req *ocabci.Request, res *ocabci.Response) {
   361  	checkTxReq := req.GetCheckTx()
   362  	if checkTxReq == nil {
   363  		return
   364  	}
   365  
   366  	if checkTxReq.Type == abci.CheckTxType_Recheck {
   367  		mem.metrics.RecheckTimes.Add(1)
   368  		mem.resCbRecheck(req, res)
   369  
   370  		// update metrics
   371  		mem.metrics.Size.Set(float64(mem.Size()))
   372  	}
   373  }
   374  
   375  // Request specific callback that should be set on individual reqRes objects
   376  // to incorporate local information when processing the response.
   377  // This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
   378  // NOTE: alternatively, we could include this information in the ABCI request itself.
   379  //
   380  // External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
   381  // when all other response processing is complete.
   382  //
   383  // Used in CheckTx to record PeerID who sent us the tx.
   384  func (mem *CListMempool) reqResCb(
   385  	tx []byte,
   386  	peerID uint16,
   387  	peerP2PID p2p.ID,
   388  	res *ocabci.Response,
   389  	externalCb func(*ocabci.Response),
   390  ) {
   391  	mem.resCbFirstTime(tx, peerID, peerP2PID, res)
   392  
   393  	// update metrics
   394  	mem.metrics.Size.Set(float64(mem.Size()))
   395  
   396  	// passed in by the caller of CheckTx, eg. the RPC
   397  	if externalCb != nil {
   398  		externalCb(res)
   399  	}
   400  }
   401  
   402  // Called from:
   403  //   - resCbFirstTime (lock not held) if tx is valid
   404  func (mem *CListMempool) addTx(memTx *mempoolTx) {
   405  	e := mem.txs.PushBack(memTx)
   406  	mem.txsMap.Store(memTx.tx.Key(), e)
   407  	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
   408  	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
   409  }
   410  
   411  // Called from:
   412  //   - Update (lock held) if tx was committed
   413  //   - resCbRecheck (lock not held) if tx was invalidated
   414  func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
   415  	mem.txs.Remove(elem)
   416  	elem.DetachPrev()
   417  	mem.txsMap.Delete(tx.Key())
   418  	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
   419  
   420  	if removeFromCache {
   421  		mem.cache.Remove(tx)
   422  	}
   423  }
   424  
   425  // RemoveTxByKey removes a transaction from the mempool by its TxKey index.
   426  func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error {
   427  	if e, ok := mem.txsMap.Load(txKey); ok {
   428  		memTx := e.(*clist.CElement).Value.(*mempoolTx)
   429  		if memTx != nil {
   430  			mem.removeTx(memTx.tx, e.(*clist.CElement), false)
   431  			return nil
   432  		}
   433  		return errors.New("invalid transaction found")
   434  	}
   435  	return errors.New("transaction not found")
   436  }
   437  
   438  func (mem *CListMempool) isFull(txSize int) error {
   439  	var (
   440  		memSize  = mem.Size()
   441  		txsBytes = mem.SizeBytes()
   442  	)
   443  
   444  	if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
   445  		return mempool.ErrMempoolIsFull{
   446  			NumTxs:      memSize,
   447  			MaxTxs:      mem.config.Size,
   448  			TxsBytes:    txsBytes,
   449  			MaxTxsBytes: mem.config.MaxTxsBytes,
   450  		}
   451  	}
   452  
   453  	return nil
   454  }
   455  
   456  func (mem *CListMempool) reserve(txSize int64) error {
   457  	mem.reservedMtx.Lock()
   458  	defer mem.reservedMtx.Unlock()
   459  
   460  	var (
   461  		memSize  = mem.Size()
   462  		txsBytes = mem.SizeBytes()
   463  	)
   464  
   465  	if memSize+mem.reserved >= mem.config.Size || txSize+mem.reservedBytes+txsBytes > mem.config.MaxTxsBytes {
   466  		return mempool.ErrMempoolIsFull{
   467  			NumTxs:      memSize + mem.reserved,
   468  			MaxTxs:      mem.config.Size,
   469  			TxsBytes:    txsBytes + mem.reservedBytes,
   470  			MaxTxsBytes: mem.config.MaxTxsBytes,
   471  		}
   472  	}
   473  
   474  	mem.reserved++
   475  	mem.reservedBytes += txSize
   476  	return nil
   477  }
   478  
   479  func (mem *CListMempool) releaseReserve(txSize int64) {
   480  	mem.reservedMtx.Lock()
   481  	defer mem.reservedMtx.Unlock()
   482  
   483  	mem.reserved--
   484  	mem.reservedBytes -= txSize
   485  }
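
// Worked example of the reservation accounting (hypothetical numbers): with
// config.Size = 5000 and 4999 txs already in the pool, two concurrent CheckTx
// calls can both pass isFull, but only the first reserve() succeeds; the second
// sees memSize+reserved == 5000 and returns ErrMempoolIsFull. resCbFirstTime
// calls releaseReserve once the in-flight check completes, so the configured
// limits are never overshot while checks are pending.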
   486  
   487  // callback, which is called after the app checked the tx for the first time.
   488  //
   489  // The case where the app checks the tx for the second and subsequent times is
   490  // handled by the resCbRecheck callback.
   491  func (mem *CListMempool) resCbFirstTime(
   492  	tx []byte,
   493  	peerID uint16,
   494  	peerP2PID p2p.ID,
   495  	res *ocabci.Response,
   496  ) {
   497  	switch r := res.Value.(type) {
   498  	case *ocabci.Response_CheckTx:
   499  		if r.CheckTx.Code == ocabci.CodeTypeOK {
   500  			memTx := &mempoolTx{
   501  				height:    mem.height,
   502  				gasWanted: r.CheckTx.GasWanted,
   503  				tx:        tx,
   504  			}
   505  			memTx.senders.Store(peerID, true)
   506  			mem.addTx(memTx)
   507  			mem.logger.Debug(
   508  				"added good transaction",
   509  				"tx", types.Tx(tx).Hash(),
   510  				"res", r,
   511  				"height", memTx.height,
   512  				"total", mem.Size(),
   513  			)
   514  			mem.notifyTxsAvailable()
   515  		} else {
   516  			// ignore bad transaction
   517  			mem.logger.Debug(
   518  				"rejected bad transaction",
   519  				"tx", types.Tx(tx).Hash(),
   520  				"peerID", peerP2PID,
   521  				"res", r,
   522  			)
   523  			mem.metrics.FailedTxs.Add(1)
   524  
   525  			if !mem.config.KeepInvalidTxsInCache {
   526  				// remove from cache (it might be good later)
   527  				mem.cache.Remove(tx)
   528  			}
   529  		}
   530  
   531  		// release the reservation regardless of whether the tx was accepted or rejected
   532  		mem.releaseReserve(int64(len(tx)))
   533  	default:
   534  		// ignore other messages
   535  	}
   536  }
   537  
   538  // callback, which is called after the app rechecked the tx.
   539  //
   540  // The case where the app checks the tx for the first time is handled by the
   541  // resCbFirstTime callback.
   542  func (mem *CListMempool) resCbRecheck(req *ocabci.Request, res *ocabci.Response) {
   543  	switch r := res.Value.(type) {
   544  	case *ocabci.Response_CheckTx:
   545  		tx := req.GetCheckTx().Tx
   546  		e, ok := mem.txsMap.Load(types.Tx(tx).Key())
   547  		if !ok {
   548  			mem.logger.Debug("re-CheckTx transaction does not exist", "expected", types.Tx(tx).Hash())
   549  			return
   550  		}
   551  
   552  		var postCheckErr error
   553  		if r.CheckTx.Code == ocabci.CodeTypeOK {
   554  			if mem.postCheck == nil {
   555  				return
   556  			}
   557  			postCheckErr = mem.postCheck(tx, r.CheckTx)
   558  			if postCheckErr == nil {
   559  				return
   560  			}
   561  			r.CheckTx.MempoolError = postCheckErr.Error()
   562  		}
   563  		celem := e.(*clist.CElement)
   564  		// Tx became invalidated due to newly committed block.
   565  		mem.logger.Debug("tx is no longer valid",
   566  			"tx", types.Tx(tx).Hash(),
   567  			"res", r,
   568  			"err", postCheckErr,
   569  		)
   570  		// NOTE: we remove the tx from the cache (unless KeepInvalidTxsInCache is set) because it might become valid again later
   571  		mem.removeTx(tx, celem, !mem.config.KeepInvalidTxsInCache)
   572  	default:
   573  		// ignore other messages
   574  	}
   575  }
   576  
   577  // Safe for concurrent use by multiple goroutines.
   578  func (mem *CListMempool) TxsAvailable() <-chan struct{} {
   579  	return mem.txsAvailable
   580  }
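
// A sketch of a consumer (e.g. consensus) waiting for transactions, assuming
// EnableTxsAvailable was called at startup (hypothetical snippet; maxBytes,
// maxGas and timeout are placeholder values):
//
//	select {
//	case <-mem.TxsAvailable():
//		txs := mem.ReapMaxBytesMaxGas(maxBytes, maxGas)
//		_ = txs // build a block proposal from txs
//	case <-time.After(timeout):
//		// propose an empty block instead
//	}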
   581  
   582  func (mem *CListMempool) notifyTxsAvailable() {
   583  	if mem.Size() == 0 {
   584  		mem.logger.Info("notified txs available but mempool is empty!")
   585  	}
   586  	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
   587  		// channel cap is 1, so this will send once
   588  		mem.notifiedTxsAvailable = true
   589  		select {
   590  		case mem.txsAvailable <- struct{}{}:
   591  		default:
   592  		}
   593  	}
   594  }
   595  
   596  // Safe for concurrent use by multiple goroutines.
   597  func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
   598  	mem.updateMtx.RLock()
   599  	defer mem.updateMtx.RUnlock()
   600  
   601  	var (
   602  		totalGas    int64
   603  		runningSize int64
   604  	)
   605  
   606  	// TODO: we will get a performance boost if we have a good estimate of avg
   607  	// size per tx, and set the initial capacity based off of that.
   608  	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
   609  	txs := make([]types.Tx, 0, mem.txs.Len())
   610  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   611  		memTx := e.Value.(*mempoolTx)
   612  
   613  		txs = append(txs, memTx.tx)
   614  
   615  		protoTxs := tmproto.Data{}
   616  		protoTxs.Txs = append(protoTxs.Txs, memTx.tx)
   617  		dataSize := int64(protoTxs.Size())
   618  
   619  		// Check total size requirement
   620  		if maxBytes > -1 && runningSize+dataSize > maxBytes {
   621  			return txs[:len(txs)-1]
   622  		}
   623  
   624  		runningSize += dataSize
   625  
   626  		// Check total gas requirement.
   627  		// If maxGas is negative, skip this check.
   628  		// Since newTotalGas < maxGas, which
   629  		// must be non-negative, it follows that this won't overflow.
   630  		newTotalGas := totalGas + memTx.gasWanted
   631  		if maxGas > -1 && newTotalGas > maxGas {
   632  			return txs[:len(txs)-1]
   633  		}
   634  		totalGas = newTotalGas
   635  	}
   636  	return txs
   637  }
   638  
   639  // Safe for concurrent use by multiple goroutines.
   640  func (mem *CListMempool) ReapMaxBytesMaxGasMaxTxs(maxBytes, maxGas, maxTxs int64) types.Txs {
   641  	mem.updateMtx.RLock()
   642  	defer mem.updateMtx.RUnlock()
   643  
   644  	if maxTxs <= 0 {
   645  		maxTxs = int64(mem.txs.Len())
   646  	}
   647  
   648  	var (
   649  		totalGas    int64
   650  		runningSize int64
   651  	)
   652  
   653  	// TODO: we will get a performance boost if we have a good estimate of avg
   654  	// size per tx, and set the initial capacity based off of that.
   655  	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
   656  	txs := make([]types.Tx, 0, mem.txs.Len())
   657  	for e := mem.txs.Front(); e != nil && len(txs) < int(maxTxs); e = e.Next() {
   658  		memTx := e.Value.(*mempoolTx)
   659  
   660  		txs = append(txs, memTx.tx)
   661  
   662  		protoTxs := tmproto.Data{}
   663  		protoTxs.Txs = append(protoTxs.Txs, memTx.tx)
   664  		dataSize := int64(protoTxs.Size())
   665  
   666  		// Check total size requirement
   667  		if maxBytes > -1 && runningSize+dataSize > maxBytes {
   668  			return txs[:len(txs)-1]
   669  		}
   670  
   671  		runningSize += dataSize
   672  
   673  		// Check total gas requirement.
   674  		// If maxGas is negative, skip this check.
   675  		// Since newTotalGas < maxGas, which
   676  		// must be non-negative, it follows that this won't overflow.
   677  		newTotalGas := totalGas + memTx.gasWanted
   678  		if maxGas > -1 && newTotalGas > maxGas {
   679  			return txs[:len(txs)-1]
   680  		}
   681  		totalGas = newTotalGas
   682  	}
   683  	return txs
   684  }
   685  
   686  // Safe for concurrent use by multiple goroutines.
   687  func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
   688  	mem.updateMtx.RLock()
   689  	defer mem.updateMtx.RUnlock()
   690  
   691  	if max < 0 {
   692  		max = mem.txs.Len()
   693  	}
   694  
   695  	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
   696  	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
   697  		memTx := e.Value.(*mempoolTx)
   698  		txs = append(txs, memTx.tx)
   699  	}
   700  	return txs
   701  }
   702  
   703  // Lock() must be held by the caller during execution.
   704  func (mem *CListMempool) Update(
   705  	block *types.Block,
   706  	deliverTxResponses []*abci.ResponseDeliverTx,
   707  	preCheck mempool.PreCheckFunc,
   708  	postCheck mempool.PostCheckFunc,
   709  ) (err error) {
   710  	// Set height
   711  	mem.height = block.Height
   712  	mem.notifiedTxsAvailable = false
   713  
   714  	if preCheck != nil {
   715  		mem.preCheck = preCheck
   716  	}
   717  	if postCheck != nil {
   718  		mem.postCheck = postCheck
   719  	}
   720  
   721  	for i, tx := range block.Txs {
   722  		if deliverTxResponses[i].Code == ocabci.CodeTypeOK {
   723  			// Add valid committed tx to the cache (if missing).
   724  			_ = mem.cache.Push(tx)
   725  		} else if !mem.config.KeepInvalidTxsInCache {
   726  			// Allow invalid transactions to be resubmitted.
   727  			mem.cache.Remove(tx)
   728  		}
   729  
   730  		// Remove committed tx from the mempool.
   731  		//
   732  		// Note an evil proposer can drop valid txs!
   733  		// Mempool before:
   734  		//   100 -> 101 -> 102
   735  		// Block, proposed by an evil proposer:
   736  		//   101 -> 102
   737  		// Mempool after:
   738  		//   100
   739  		// https://github.com/tendermint/tendermint/issues/3322.
   740  		if e, ok := mem.txsMap.Load(tx.Key()); ok {
   741  			mem.removeTx(tx, e.(*clist.CElement), false)
   742  		}
   743  	}
   744  
   745  	if mem.config.Recheck {
   746  		// recheck non-committed txs to see if they became invalid
   747  		recheckStartTime := time.Now().UnixNano()
   748  
   749  		_, err = mem.proxyAppConn.BeginRecheckTxSync(ocabci.RequestBeginRecheckTx{
   750  			Header: types.OC2PB.Header(&block.Header),
   751  		})
   752  		if err != nil {
   753  			mem.logger.Error("error in proxyAppConn.BeginRecheckTxSync", "err", err)
   754  		}
   755  		mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", block.Height)
   756  		mem.recheckTxs()
   757  		_, err = mem.proxyAppConn.EndRecheckTxSync(ocabci.RequestEndRecheckTx{Height: block.Height})
   758  		if err != nil {
   759  			mem.logger.Error("error in proxyAppConn.EndRecheckTxSync", "err", err)
   760  		}
   761  
   762  		recheckEndTime := time.Now().UnixNano()
   763  
   764  		recheckTimeMs := float64(recheckEndTime-recheckStartTime) / 1000000
   765  		mem.metrics.RecheckTime.Set(recheckTimeMs)
   766  	}
   767  
   768  	// notify that there are some txs left.
   769  	if mem.Size() > 0 {
   770  		mem.notifyTxsAvailable()
   771  	}
   772  
   773  	// Update metrics
   774  	mem.metrics.Size.Set(float64(mem.Size()))
   775  
   776  	return err
   777  }
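
// A sketch of the commit-time sequence a caller is expected to follow, given the
// Lock()/FlushAppConn()/Update() contracts above (hypothetical snippet; block and
// deliverTxResponses come from the block that was just committed):
//
//	mem.Lock()
//	defer mem.Unlock()
//	// flush any in-flight CheckTx requests before mutating the pool
//	if err := mem.FlushAppConn(); err != nil {
//		return err
//	}
//	return mem.Update(block, deliverTxResponses, nil, nil)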
   778  
   779  func (mem *CListMempool) recheckTxs() {
   780  	if mem.Size() == 0 {
   781  		return
   782  	}
   783  
   784  	wg := sync.WaitGroup{}
   785  
   786  	// Push txs to proxyAppConn
   787  	// NOTE: globalCb may be called concurrently.
   788  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   789  		wg.Add(1)
   790  
   791  		memTx := e.Value.(*mempoolTx)
   792  		req := abci.RequestCheckTx{
   793  			Tx:   memTx.tx,
   794  			Type: abci.CheckTxType_Recheck,
   795  		}
   796  
   797  		mem.proxyAppConn.CheckTxAsync(req, func(res *ocabci.Response) {
   798  			wg.Done()
   799  		})
   800  	}
   801  
   802  	mem.proxyAppConn.FlushAsync(func(res *ocabci.Response) {})
   803  	wg.Wait()
   804  }
   805  
   806  //--------------------------------------------------------------------------------
   807  
   808  // mempoolTx is a transaction that successfully ran CheckTx
   809  type mempoolTx struct {
   810  	height    int64    // height at which this tx was validated
   811  	gasWanted int64    // amount of gas this tx states it will require
   812  	tx        types.Tx //
   813  
   814  	// ids of peers who've sent us this tx (as a map for quick lookups).
   815  	// senders: PeerID -> bool
   816  	senders sync.Map
   817  }
   818  
   819  // Height returns the height for this transaction
   820  func (memTx *mempoolTx) Height() int64 {
   821  	return atomic.LoadInt64(&memTx.height)
   822  }