github.com/okex/exchain@v1.8.0/libs/tendermint/mempool/clist_mempool.go

     1  package mempool
     2  
     3  import (
     4  	"bytes"
     5  	"crypto/sha256"
     6  	"encoding/hex"
     7  	"fmt"
     8  	"math/big"
     9  	"strconv"
    10  	"sync"
    11  	"sync/atomic"
    12  	"time"
    13  
    14  	"github.com/VictoriaMetrics/fastcache"
    15  	"github.com/okex/exchain/libs/system/trace"
    16  	abci "github.com/okex/exchain/libs/tendermint/abci/types"
    17  	cfg "github.com/okex/exchain/libs/tendermint/config"
    18  	"github.com/okex/exchain/libs/tendermint/global"
    19  	"github.com/okex/exchain/libs/tendermint/libs/clist"
    20  	"github.com/okex/exchain/libs/tendermint/libs/log"
    21  	tmmath "github.com/okex/exchain/libs/tendermint/libs/math"
    22  	"github.com/okex/exchain/libs/tendermint/proxy"
    23  	"github.com/okex/exchain/libs/tendermint/types"
    24  	"github.com/tendermint/go-amino"
    25  )
    26  
    27  type TxInfoParser interface {
    28  	GetRawTxInfo(tx types.Tx) ExTxInfo
    29  	GetTxHistoryGasUsed(tx types.Tx, gasLimit int64) (int64, bool)
    30  	GetRealTxFromRawTx(rawTx types.Tx) abci.TxEssentials
    31  }
    32  
    33  var (
    34  	// GlobalRecommendedGP is initialized to 0.1GWei
    35  	GlobalRecommendedGP = big.NewInt(100000000)
    36  	IsCongested         = false
    37  )
    38  
    39  //--------------------------------------------------------------------------------
    40  
    41  // CListMempool is an ordered in-memory pool for transactions before they are
    42  // proposed in a consensus round. Transaction validity is checked using the
    43  // CheckTx abci message before the transaction is added to the pool. The
    44  // mempool uses a concurrent list structure for storing transactions that can
    45  // be efficiently accessed by multiple concurrent readers.
    46  type CListMempool struct {
    47  	// Atomic integers
    48  	height   int64 // the last block Update()'d to
    49  	txsBytes int64 // total size of mempool, in bytes
    50  
    51  	// notify listeners (ie. consensus) when txs are available
    52  	notifiedTxsAvailable bool
    53  	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty
    54  
    55  	config *cfg.MempoolConfig
    56  
    57  	// Exclusive mutex for Update method to prevent concurrent execution of
    58  	// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
    59  	updateMtx sync.RWMutex
    60  	preCheck  PreCheckFunc
    61  	postCheck PostCheckFunc
    62  
    63  	//bcTxsList    *clist.CList   // only for tx sort model
    64  	proxyAppConn proxy.AppConnMempool
    65  
    66  	// Track whether we're rechecking txs.
    67  	// These are not protected by a mutex and are expected to be mutated in
    68  	// serial (ie. by abci responses which are called in serial).
    69  	recheckCursor *clist.CElement // next expected response
    70  	recheckEnd    *clist.CElement // re-checking stops here
    71  
    72  	// Keep a cache of already-seen txs.
    73  	// This reduces the pressure on the proxyApp.
    74  	// Save the wtx as the value if present, otherwise save nil as the value
    75  	cache txCache
    76  
    77  	eventBus types.TxEventPublisher
    78  
    79  	logger    log.Logger
    80  	pguLogger log.Logger
    81  
    82  	metrics *Metrics
    83  
    84  	pendingPool                *PendingPool
    85  	accountRetriever           AccountRetriever
    86  	pendingPoolNotify          chan map[string]uint64
    87  	consumePendingTxQueue      chan *AddressNonce
    88  	consumePendingTxQueueLimit int
    89  
    90  	txInfoparser TxInfoParser
    91  
    92  	checkCnt    int64
    93  	checkRPCCnt int64
    94  	checkP2PCnt int64
    95  
    96  	checkTotalTime    int64
    97  	checkRpcTotalTime int64
    98  	checkP2PTotalTime int64
    99  
   100  	txs ITransactionQueue
   101  
   102  	simQueue        chan *mempoolTx
   103  	rmPendingTxChan chan types.EventDataRmPendingTx
   104  
   105  	gpo *Oracle
   106  
   107  	info pguInfo
   108  }
   109  
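// pguInfo records stats about the txs reaped for the next proposed block: txCount is the
// number of reaped txs that had been simulated, and gasUsed is the total estimated gas of
// all reaped txs. Both are reported via trace in Update().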
   110  type pguInfo struct {
   111  	txCount int64
   112  	gasUsed int64
   113  }
   114  
   115  func (p *pguInfo) reset() {
   116  	p.txCount = 0
   117  	p.gasUsed = 0
   118  }
   119  
   120  var _ Mempool = &CListMempool{}
   121  
   122  // CListMempoolOption sets an optional parameter on the mempool.
   123  type CListMempoolOption func(*CListMempool)
   124  
   125  // NewCListMempool returns a new mempool with the given configuration and connection to an application.
   126  func NewCListMempool(
   127  	config *cfg.MempoolConfig,
   128  	proxyAppConn proxy.AppConnMempool,
   129  	height int64,
   130  	options ...CListMempoolOption,
   131  ) *CListMempool {
   132  	var txQueue ITransactionQueue
   133  	if config.SortTxByGp {
   134  		txQueue = NewOptimizedTxQueue(int64(config.TxPriceBump))
   135  	} else {
   136  		txQueue = NewBaseTxQueue()
   137  	}
   138  
   139  	gpoConfig := NewGPOConfig(cfg.DynamicConfig.GetDynamicGpWeight(), cfg.DynamicConfig.GetDynamicGpCheckBlocks())
   140  	gpo := NewOracle(gpoConfig)
   141  
   142  	mempool := &CListMempool{
   143  		config:        config,
   144  		proxyAppConn:  proxyAppConn,
   145  		height:        height,
   146  		recheckCursor: nil,
   147  		recheckEnd:    nil,
   148  		eventBus:      types.NopEventBus{},
   149  		logger:        log.NewNopLogger(),
   150  		pguLogger:     log.NewNopLogger(),
   151  		metrics:       NopMetrics(),
   152  		txs:           txQueue,
   153  		simQueue:      make(chan *mempoolTx, 200000),
   154  		gpo:           gpo,
   155  	}
   156  
   157  	if config.PendingRemoveEvent {
   158  		mempool.rmPendingTxChan = make(chan types.EventDataRmPendingTx, 1000)
   159  		go mempool.fireRmPendingTxEvents()
   160  	}
   161  
   162  	for i := 0; i < cfg.DynamicConfig.GetPGUConcurrency(); i++ {
   163  		go mempool.simulationRoutine()
   164  	}
   165  
   166  	if cfg.DynamicConfig.GetMempoolCacheSize() > 0 {
   167  		mempool.cache = newMapTxCache(cfg.DynamicConfig.GetMempoolCacheSize())
   168  	} else {
   169  		mempool.cache = nopTxCache{}
   170  	}
   171  	proxyAppConn.SetResponseCallback(mempool.globalCb)
   172  	for _, option := range options {
   173  		option(mempool)
   174  	}
   175  
   176  	if config.EnablePendingPool {
   177  		mempool.pendingPool = newPendingPool(config.PendingPoolSize, config.PendingPoolPeriod,
   178  			config.PendingPoolReserveBlocks, config.PendingPoolMaxTxPerAddress)
   179  		mempool.pendingPoolNotify = make(chan map[string]uint64, 1)
   180  		go mempool.pendingPoolJob()
   181  
   182  		// consumePendingTxQueueLimit reuses PendingPoolSize because consumePendingTx consumes txs from the pending pool.
   183  		mempool.consumePendingTxQueueLimit = mempool.config.PendingPoolSize
   184  		mempool.consumePendingTxQueue = make(chan *AddressNonce, mempool.consumePendingTxQueueLimit)
   185  		go mempool.consumePendingTxQueueJob()
   186  	}
   187  
   188  	return mempool
   189  }
   190  
   191  // NOTE: not thread safe - should only be called once, on startup
   192  func (mem *CListMempool) EnableTxsAvailable() {
   193  	mem.txsAvailable = make(chan struct{}, 1)
   194  }
   195  
   196  // SetLogger sets the Logger.
   197  // SetEventBus sets the TxEventPublisher.
   198  	mem.eventBus = eventBus
   199  }
   200  
   201  // SetLogger sets the Logger.
   202  func (mem *CListMempool) SetLogger(l log.Logger) {
   203  	mem.logger = l
   204  	mem.pguLogger = l.With("module", "pgu")
   205  }
   206  
   207  // WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
   208  // false. This is run before CheckTx.
   209  func WithPreCheck(f PreCheckFunc) CListMempoolOption {
   210  	return func(mem *CListMempool) { mem.preCheck = f }
   211  }
   212  
   213  // WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
   214  // false. This is run after CheckTx.
   215  func WithPostCheck(f PostCheckFunc) CListMempoolOption {
   216  	return func(mem *CListMempool) { mem.postCheck = f }
   217  }
   218  
   219  // WithMetrics sets the metrics.
   220  func WithMetrics(metrics *Metrics) CListMempoolOption {
   221  	return func(mem *CListMempool) { mem.metrics = metrics }
   222  }
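// A minimal usage sketch of the option pattern above; memCfg, appConn, startHeight,
// myPreCheck and myMetrics are placeholder names, not identifiers defined in this package:
//
//	mem := NewCListMempool(memCfg, appConn, startHeight,
//		WithPreCheck(myPreCheck),
//		WithMetrics(myMetrics),
//	)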
   223  
   224  // Safe for concurrent use by multiple goroutines.
   225  func (mem *CListMempool) Lock() {
   226  	mem.updateMtx.Lock()
   227  }
   228  
   229  // Safe for concurrent use by multiple goroutines.
   230  func (mem *CListMempool) Unlock() {
   231  	mem.updateMtx.Unlock()
   232  }
   233  
   234  // Safe for concurrent use by multiple goroutines.
   235  func (mem *CListMempool) Size() int {
   236  	return mem.txs.Len()
   237  }
   238  
   239  // Safe for concurrent use by multiple goroutines.
   240  func (mem *CListMempool) TxsBytes() int64 {
   241  	return atomic.LoadInt64(&mem.txsBytes)
   242  }
   243  
   244  // Safe for concurrent use by multiple goroutines.
   245  func (mem *CListMempool) Height() int64 {
   246  	return atomic.LoadInt64(&mem.height)
   247  }
   248  
   249  // Lock() must be held by the caller during execution.
   250  func (mem *CListMempool) FlushAppConn() error {
   251  	return mem.proxyAppConn.FlushSync()
   252  }
   253  
   254  // XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
   255  func (mem *CListMempool) Flush() {
   256  	mem.updateMtx.Lock()
   257  	defer mem.updateMtx.Unlock()
   258  
   259  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   260  		mem.removeTx(e)
   261  	}
   262  
   263  	_ = atomic.SwapInt64(&mem.txsBytes, 0)
   264  	mem.cache.Reset()
   265  }
   266  
   267  // TxsFront returns the first transaction in the ordered list for peer
   268  // goroutines to call .NextWait() on.
   269  // FIXME: leaking implementation details!
   270  //
   271  // Safe for concurrent use by multiple goroutines.
   272  func (mem *CListMempool) TxsFront() *clist.CElement {
   273  	return mem.txs.Front()
   274  }
   275  
   276  func (mem *CListMempool) BroadcastTxsFront() *clist.CElement {
   277  	return mem.txs.BroadcastFront()
   278  }
   279  
   280  // TxsWaitChan returns a channel to wait on transactions. It will be closed
   281  // once the mempool is not empty (ie. the internal `mem.txs` has at least one
   282  // element)
   283  //
   284  // Safe for concurrent use by multiple goroutines.
   285  func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
   286  	return mem.txs.TxsWaitChan()
   287  }
   288  
   289  // It blocks if we're waiting on Update() or Reap().
   290  // cb: A callback from the CheckTx command.
   291  //
   292  //	It gets called from another goroutine.
   293  //
   294  // CONTRACT: Either cb will get called, or err returned.
   295  //
   296  // Safe for concurrent use by multiple goroutines.
   297  func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) error {
   298  	timeStart := int64(0)
   299  	if cfg.DynamicConfig.GetMempoolCheckTxCost() {
   300  		timeStart = time.Now().UnixMicro()
   301  	}
   302  
   303  	txSize := len(tx)
   304  	// The old logic does not allow deleting the lowest-gas-price tx, so when it is in effect we must first check whether the mempool is full.
   305  	if !mem.GetEnableDeleteMinGPTx() {
   306  		if err := mem.isFull(txSize); err != nil {
   307  			return err
   308  		}
   309  	}
   310  	// TODO
   311  	// New logic: even if the mempool is full, check whether the tx's gas price is higher than that of the minimum-gas-price tx in the mempool, and if so evict the latter.
   312  	// But since the mempool sits below the ABCI layer it cannot read the tx's gas price here, so the gas price cannot be pre-checked at this point. Maybe we could break the ABCI abstraction for this.
   313  
   314  	// The size of the corresponding amino-encoded TxMessage
   315  	// can't be larger than the maxMsgSize, otherwise we can't
   316  	// relay it to peers.
   317  	if txSize > mem.config.MaxTxBytes {
   318  		return ErrTxTooLarge{mem.config.MaxTxBytes, txSize}
   319  	}
   320  
   321  	var nonce uint64
   322  	wCMTx := mem.CheckAndGetWrapCMTx(tx, txInfo)
   323  	if wCMTx != nil {
   324  		txInfo.wrapCMTx = wCMTx
   325  		tx = wCMTx.GetTx()
   326  		nonce = wCMTx.GetNonce()
   327  		mem.logger.Debug("checkTx is wrapCMTx", "nonce", nonce)
   328  	}
   329  
   330  	txkey := txKey(tx)
   331  
   332  	// CACHE
   333  	if !mem.cache.PushKey(txkey) {
   334  		// Record a new sender for a tx we've already seen.
   335  		// Note it's possible a tx is still in the cache but no longer in the mempool
   336  		// (eg. after committing a block, txs are removed from mempool but not cache),
   337  		// so we only record the sender for txs still in the mempool.
   338  		if ele, ok := mem.txs.Load(txkey); ok {
   339  			memTx := ele.Value.(*mempoolTx)
   340  			memTx.senderMtx.Lock()
   341  			memTx.senders[txInfo.SenderID] = struct{}{}
   342  			memTx.senderMtx.Unlock()
   343  			// TODO: consider punishing peer for dups,
   344  			// its non-trivial since invalid txs can become valid,
   345  			// but they can spam the same tx with little cost to them atm.
   346  		}
   347  		return ErrTxInCache
   348  	}
   349  	// END CACHE
   350  
   351  	mem.updateMtx.RLock()
   352  	// use defer to unlock mutex because application (*local client*) might panic
   353  	defer mem.updateMtx.RUnlock()
   354  
   355  	var err error
   356  
   357  	if mem.preCheck != nil {
   358  		if err = mem.preCheck(tx); err != nil {
   359  			return ErrPreCheck{err}
   360  		}
   361  	}
   362  
   363  	// NOTE: proxyAppConn may error if tx buffer is full
   364  	if err = mem.proxyAppConn.Error(); err != nil {
   365  		return err
   366  	}
   367  
   368  	if txInfo.from != "" {
   369  		types.SignatureCache().Add(txkey[:], txInfo.from)
   370  	}
   371  
   372  	reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx, Type: txInfo.checkType, From: txInfo.wtx.GetFrom(), Nonce: nonce})
   373  	if r, ok := reqRes.Response.Value.(*abci.Response_CheckTx); ok {
   374  		gasLimit := r.CheckTx.GasWanted
   375  		if cfg.DynamicConfig.GetMaxGasUsedPerBlock() > -1 {
   376  			txHash := tx.Hash(mem.Height())
   377  			txInfo.gasUsed, txInfo.isGasPrecise = mem.txInfoparser.GetTxHistoryGasUsed(tx, gasLimit) // r.CheckTx.GasWanted is gasLimit
   378  			mem.logger.Info(fmt.Sprintf("mempool.SimulateTx: txhash<%s>, gasLimit<%d>, gasUsed<%d>",
   379  				hex.EncodeToString(txHash), r.CheckTx.GasWanted, txInfo.gasUsed))
   380  		}
   381  		if txInfo.gasUsed <= 0 || txInfo.gasUsed > gasLimit {
   382  			txInfo.gasUsed = gasLimit
   383  		}
   384  	}
   385  
   386  	reqRes.SetCallback(mem.reqResCb(tx, txInfo, cb))
   387  	atomic.AddInt64(&mem.checkCnt, 1)
   388  
   389  	if cfg.DynamicConfig.GetMempoolCheckTxCost() {
   390  		pastTime := time.Now().UnixMicro() - timeStart
   391  		if txInfo.SenderID != 0 {
   392  			atomic.AddInt64(&mem.checkP2PCnt, 1)
   393  			atomic.AddInt64(&mem.checkP2PTotalTime, pastTime)
   394  		} else {
   395  			atomic.AddInt64(&mem.checkRPCCnt, 1)
   396  			atomic.AddInt64(&mem.checkRpcTotalTime, pastTime)
   397  		}
   398  		atomic.AddInt64(&mem.checkTotalTime, pastTime)
   399  	}
   400  
   401  	return nil
   402  }
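// Rough sketch of how an RPC-style caller might use CheckTx and receive the result via the
// callback; rawTx and the error handling are illustrative only:
//
//	resCh := make(chan *abci.Response, 1)
//	if err := mem.CheckTx(rawTx, func(res *abci.Response) { resCh <- res }, TxInfo{SenderID: 0}); err != nil {
//		return err // rejected before reaching the app (already cached, mempool full, ...)
//	}
//	res := <-resCh // fires once resCbFirstTime has processed the app's response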
   403  
   404  func (mem *CListMempool) CheckAndGetWrapCMTx(tx types.Tx, txInfo TxInfo) *types.WrapCMTx {
   405  	if txInfo.wrapCMTx != nil { // from p2p
   406  		return txInfo.wrapCMTx
   407  	}
   408  	// from rpc should check if the tx is WrapCMTx
   409  	wtx := &types.WrapCMTx{}
   410  	err := cdc.UnmarshalJSON(tx, &wtx)
   411  	if err != nil {
   412  		return nil
   413  	}
   414  	return wtx
   415  }
   416  
   417  // Global callback that will be called after every ABCI response.
   418  // Having a single global callback avoids needing to set a callback for each request.
   419  // However, processing the checkTx response requires the peerID (so we can track which txs we heard from who),
   420  // and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
   421  // include this information. If we're not in the midst of a recheck, this function will just return,
   422  // so the request specific callback can do the work.
   423  //
   424  // When rechecking, we don't need the peerID, so the recheck callback happens
   425  // here.
   426  func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
   427  	if mem.recheckCursor == nil {
   428  		return
   429  	}
   430  
   431  	mem.metrics.RecheckTimes.Add(1)
   432  	mem.resCbRecheck(req, res)
   433  
   434  	// update metrics
   435  	mem.metrics.Size.Set(float64(mem.Size()))
   436  }
   437  
   438  // Request specific callback that should be set on individual reqRes objects
   439  // to incorporate local information when processing the response.
   440  // This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
   441  // NOTE: alternatively, we could include this information in the ABCI request itself.
   442  //
   443  // External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
   444  // when all other response processing is complete.
   445  //
   446  // Used in CheckTx to record PeerID who sent us the tx.
   447  func (mem *CListMempool) reqResCb(
   448  	tx []byte,
   449  	txInfo TxInfo,
   450  	externalCb func(*abci.Response),
   451  ) func(res *abci.Response) {
   452  	return func(res *abci.Response) {
   453  		if mem.recheckCursor != nil {
   454  			// this should never happen
   455  			panic("recheck cursor is not nil in reqResCb")
   456  		}
   457  
   458  		mem.resCbFirstTime(tx, txInfo, res)
   459  
   460  		// update metrics
   461  		mem.metrics.Size.Set(float64(mem.Size()))
   462  		if mem.pendingPool != nil {
   463  			mem.metrics.PendingPoolSize.Set(float64(mem.pendingPool.Size()))
   464  		}
   465  
   466  		// passed in by the caller of CheckTx, eg. the RPC
   467  		if externalCb != nil {
   468  			externalCb(res)
   469  		}
   470  	}
   471  }
   472  
   473  // Called from:
   474  //   - resCbFirstTime (lock not held) if tx is valid
   475  func (mem *CListMempool) addTx(memTx *mempoolTx) error {
   476  	if err := mem.txs.Insert(memTx); err != nil {
   477  		return err
   478  	}
   479  	if cfg.DynamicConfig.GetMaxGasUsedPerBlock() > -1 && cfg.DynamicConfig.GetEnablePGU() && atomic.LoadUint32(&memTx.isSim) == 0 {
   480  		select {
   481  		case mem.simQueue <- memTx:
   482  		default:
   483  			mem.logger.Error("tx simulation queue is full")
   484  		}
   485  	}
   486  
   487  	atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
   488  	mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
   489  	mem.eventBus.PublishEventPendingTx(types.EventDataTx{
   490  		TxResult: types.TxResult{
   491  			Height: memTx.height,
   492  			Tx:     memTx.tx,
   493  		},
   494  		Nonce: memTx.senderNonce,
   495  	})
   496  
   497  	return nil
   498  }
   499  
   500  // Called from:
   501  //   - Update (lock held) if tx was committed
   502  //   - resCbRecheck (lock not held) if tx was invalidated
   503  func (mem *CListMempool) removeTx(elem *clist.CElement) {
   504  	mem.txs.Remove(elem)
   505  	tx := elem.Value.(*mempoolTx).tx
   506  	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
   507  }
   508  
   509  func (mem *CListMempool) removeTxByKey(key [32]byte) (elem *clist.CElement) {
   510  	elem = mem.txs.RemoveByKey(key)
   511  	if elem != nil {
   512  		tx := elem.Value.(*mempoolTx).tx
   513  		atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
   514  	}
   515  	return
   516  }
   517  
   518  func (mem *CListMempool) isFull(txSize int) error {
   519  	var (
   520  		memSize  = mem.Size()
   521  		txsBytes = mem.TxsBytes()
   522  	)
   523  	if memSize >= cfg.DynamicConfig.GetMempoolSize() || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
   524  		return ErrMempoolIsFull{
   525  			memSize, cfg.DynamicConfig.GetMempoolSize(),
   526  			txsBytes, mem.config.MaxTxsBytes,
   527  		}
   528  	}
   529  
   530  	return nil
   531  }
   532  
   533  func (mem *CListMempool) addPendingTx(memTx *mempoolTx) error {
   534  	// nonce is continuous
   535  	expectedNonce := memTx.senderNonce
   536  	pendingNonce, ok := mem.GetPendingNonce(memTx.from)
   537  	if ok {
   538  		expectedNonce = pendingNonce + 1
   539  	}
   540  	txNonce := memTx.realTx.GetNonce()
   541  	mem.logger.Debug("mempool", "addPendingTx", hex.EncodeToString(memTx.realTx.TxHash()), "nonce", memTx.realTx.GetNonce(), "gp", memTx.realTx.GetGasPrice(), "pendingNonce", pendingNonce, "expectedNonce", expectedNonce)
   542  	if txNonce == 0 || txNonce < expectedNonce {
   543  		return mem.addTx(memTx)
   544  	}
   545  	// add pending tx
   546  	if txNonce == expectedNonce {
   547  		err := mem.addTx(memTx)
   548  		if err == nil {
   549  			addrNonce := addressNoncePool.Get().(*AddressNonce)
   550  			addrNonce.addr = memTx.from
   551  			addrNonce.nonce = txNonce + 1
   552  			select {
   553  			case mem.consumePendingTxQueue <- addrNonce:
   554  			default:
   555  				// In the extreme case where mem.consumePendingTxQueue is blocked (because the mempool is full),
   556  				// dropping this notification may mean the user's pending tx never gets packed into a block.
   557  				// But we must do so to keep block production going.
   558  				addressNoncePool.Put(addrNonce)
   559  				mem.logger.Error("mempool", "addPendingTx", "when consumePendingTxQueue and mempool is full, disable consume pending tx")
   560  			}
   561  			//go mem.consumePendingTx(memTx.from, txNonce+1)
   562  		}
   563  		return err
   564  	}
   565  
   566  	// add tx to PendingPool
   567  	if err := mem.pendingPool.validate(memTx.from, memTx.tx, memTx.height); err != nil {
   568  		return err
   569  	}
   570  	pendingTx := memTx
   571  	mem.pendingPool.addTx(pendingTx)
   572  	mem.logger.Debug("mempool", "add-pending-Tx", hex.EncodeToString(memTx.realTx.TxHash()), "nonce", memTx.realTx.GetNonce(), "gp", memTx.realTx.GetGasPrice())
   573  
   574  	mem.logger.Debug("pending pool addTx", "tx", pendingTx)
   575  
   576  	return nil
   577  }
   578  
   579  func (mem *CListMempool) consumePendingTx(address string, nonce uint64) {
   580  	for {
   581  		pendingTx := mem.pendingPool.getTx(address, nonce)
   582  		if pendingTx == nil {
   583  			return
   584  		}
   585  
   586  		if err := mem.isFull(len(pendingTx.tx)); err != nil {
   587  			minGPTx := mem.txs.Back().Value.(*mempoolTx)
   588  			// If deleteMinGPTx is disabled (old logic), the cache key must always be removed.
   589  			// If deleteMinGPTx is enabled (new logic), remove the cache key only if tx.gasPrice is below the minimum gas price in the mempool.
   590  
   591  			thresholdGasPrice := MultiPriceBump(minGPTx.realTx.GetGasPrice(), int64(mem.config.TxPriceBump))
   592  			if !mem.GetEnableDeleteMinGPTx() || (mem.GetEnableDeleteMinGPTx() && thresholdGasPrice.Cmp(pendingTx.realTx.GetGasPrice()) >= 0) {
   593  				time.Sleep(time.Duration(mem.pendingPool.period) * time.Second)
   594  				continue
   595  			}
   596  		}
   597  		mem.logger.Debug("mempool", "consumePendingTx", hex.EncodeToString(pendingTx.realTx.TxHash()), "nonce", pendingTx.realTx.GetNonce(), "gp", pendingTx.realTx.GetGasPrice())
   598  
   599  		mempoolTx := pendingTx
   600  		mempoolTx.height = mem.Height()
   601  		if err := mem.addTx(mempoolTx); err != nil {
   602  			mem.logger.Error(fmt.Sprintf("Pending Pool add tx failed:%s", err.Error()))
   603  			mem.pendingPool.removeTx(address, nonce)
   604  			return
   605  		}
   606  
   607  		mem.logger.Info("Added good transaction",
   608  			"tx", txIDStringer{mempoolTx.tx, mempoolTx.height},
   609  			"height", mempoolTx.height,
   610  			"total", mem.Size(),
   611  		)
   612  		mem.notifyTxsAvailable()
   613  		mem.pendingPool.removeTx(address, nonce)
   614  		nonce++
   615  	}
   616  }
   617  
   618  type logAddTxData struct {
   619  	Params [8]interface{}
   620  	TxID   txIDStringer
   621  	Height int64
   622  	Total  int
   623  }
   624  
   625  var logAddTxDataPool = sync.Pool{
   626  	New: func() interface{} {
   627  		return &logAddTxData{}
   628  	},
   629  }
   630  
   631  func (mem *CListMempool) logAddTx(memTx *mempoolTx, r *abci.Response_CheckTx) {
   632  	logAddTxData := logAddTxDataPool.Get().(*logAddTxData)
   633  	logAddTxData.TxID = txIDStringer{memTx.tx, memTx.height}
   634  	logAddTxData.Height = memTx.height
   635  	logAddTxData.Total = mem.Size()
   636  
   637  	params := &logAddTxData.Params
   638  	params[0] = "tx"
   639  	params[1] = &logAddTxData.TxID
   640  	params[2] = "res"
   641  	params[3] = r
   642  	params[4] = "height"
   643  	params[5] = &logAddTxData.Height
   644  	params[6] = "total"
   645  	params[7] = &logAddTxData.Total
   646  	mem.logger.Info("Added good transaction", params[:8]...)
   647  	logAddTxDataPool.Put(logAddTxData)
   648  }
   649  
   650  // callback, which is called after the app checked the tx for the first time.
   651  //
   652  // The case where the app checks the tx for the second and subsequent times is
   653  // handled by the resCbRecheck callback.
   654  func (mem *CListMempool) resCbFirstTime(
   655  	tx []byte,
   656  	txInfo TxInfo,
   657  	res *abci.Response,
   658  ) {
   659  	switch r := res.Value.(type) {
   660  	case *abci.Response_CheckTx:
   661  		var postCheckErr error
   662  		if mem.postCheck != nil {
   663  			postCheckErr = mem.postCheck(tx, r.CheckTx)
   664  		}
   665  		var txHash []byte
   666  		if r.CheckTx != nil && r.CheckTx.Tx != nil {
   667  			txHash = r.CheckTx.Tx.TxHash()
   668  		}
   669  		txkey := txOrTxHashToKey(tx, txHash, mem.height)
   670  
   671  		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
   672  			// Check mempool isn't full again to reduce the chance of exceeding the
   673  			// limits.
   674  			if err := mem.isFull(len(tx)); err != nil {
   675  				minGPTx := mem.txs.Back().Value.(*mempoolTx)
   676  				// If deleteMinGPTx is disabled (old logic), the cache key must always be removed.
   677  				// If deleteMinGPTx is enabled (new logic), remove the cache key only if tx.gasPrice is below the minimum gas price in the mempool.
   678  				thresholdGasPrice := MultiPriceBump(minGPTx.realTx.GetGasPrice(), int64(mem.config.TxPriceBump))
   679  				if !mem.GetEnableDeleteMinGPTx() || (mem.GetEnableDeleteMinGPTx() && thresholdGasPrice.Cmp(r.CheckTx.Tx.GetGasPrice()) >= 0) {
   680  					// remove from cache (mempool might have a space later)
   681  					mem.cache.RemoveKey(txkey)
   682  					errStr := err.Error()
   683  					mem.logger.Info(errStr)
   684  					r.CheckTx.Code = 1
   685  					r.CheckTx.Log = errStr
   686  					return
   687  				}
   688  			}
   689  
   690  			//var exTxInfo ExTxInfo
   691  			//if err := json.Unmarshal(r.CheckTx.Data, &exTxInfo); err != nil {
   692  			//	mem.cache.Remove(tx)
   693  			//	mem.logger.Error(fmt.Sprintf("Unmarshal ExTxInfo error:%s", err.Error()))
   694  			//	return
   695  			//}
   696  			if r.CheckTx.Tx.GetGasPrice().Sign() <= 0 {
   697  				mem.cache.RemoveKey(txkey)
   698  				errMsg := "Failed to get extra info for this tx!"
   699  				mem.logger.Error(errMsg)
   700  				r.CheckTx.Code = 1
   701  				r.CheckTx.Log = errMsg
   702  				return
   703  			}
   704  
   705  			memTx := &mempoolTx{
   706  				height:      mem.Height(),
   707  				gasLimit:    r.CheckTx.GasWanted,
   708  				gasWanted:   txInfo.gasUsed,
   709  				tx:          tx,
   710  				realTx:      r.CheckTx.Tx,
   711  				nodeKey:     txInfo.wtx.GetNodeKey(),
   712  				signature:   txInfo.wtx.GetSignature(),
   713  				from:        r.CheckTx.Tx.GetEthAddr(),
   714  				senderNonce: r.CheckTx.SenderNonce,
   715  			}
   716  			if txInfo.isGasPrecise {
   717  				// gas for hgu is precise, just mark it simulated, so it will not be simulated again
   718  				memTx.isSim = 1
   719  				memTx.hguPrecise = true
   720  			}
   721  
   722  			if txInfo.wrapCMTx != nil {
   723  				memTx.isWrapCMTx = true
   724  				memTx.wrapCMNonce = txInfo.wrapCMTx.GetNonce()
   725  			}
   726  
   727  			memTx.senders = make(map[uint16]struct{})
   728  			memTx.senders[txInfo.SenderID] = struct{}{}
   729  
   730  			var err error
   731  			if mem.pendingPool != nil {
   732  				err = mem.addPendingTx(memTx)
   733  			} else {
   734  				err = mem.addTx(memTx)
   735  			}
   736  
   737  			if err == nil {
   738  				mem.logAddTx(memTx, r)
   739  				mem.notifyTxsAvailable()
   740  			} else {
   741  				// ignore bad transaction
   742  				mem.logger.Info("Fail to add transaction into mempool, rejected it",
   743  					"tx", txIDStringer{tx, mem.height}, "peerID", txInfo.SenderP2PID, "res", r, "err", err)
   744  				mem.metrics.FailedTxs.Add(1)
   745  				// remove from cache (it might be good later)
   746  				mem.cache.RemoveKey(txkey)
   747  
   748  				r.CheckTx.Code = 1
   749  				r.CheckTx.Log = err.Error()
   750  			}
   751  		} else {
   752  			// ignore bad transaction
   753  			mem.logger.Info("Rejected bad transaction",
   754  				"tx", txIDStringer{tx, mem.height}, "peerID", txInfo.SenderP2PID, "res", r, "err", postCheckErr)
   755  			mem.metrics.FailedTxs.Add(1)
   756  			// remove from cache (it might be good later)
   757  			mem.cache.RemoveKey(txkey)
   758  		}
   759  	default:
   760  		// ignore other messages
   761  	}
   762  }
   763  
   764  // callback, which is called after the app rechecked the tx.
   765  //
   766  // The case where the app checks the tx for the first time is handled by the
   767  // resCbFirstTime callback.
   768  func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
   769  	switch r := res.Value.(type) {
   770  	case *abci.Response_CheckTx:
   771  		tx := req.GetCheckTx().Tx
   772  		memTx := mem.recheckCursor.Value.(*mempoolTx)
   773  		if !bytes.Equal(tx, memTx.tx) {
   774  			panic(fmt.Sprintf(
   775  				"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
   776  				memTx.tx,
   777  				tx))
   778  		}
   779  		var postCheckErr error
   780  		if mem.postCheck != nil {
   781  			postCheckErr = mem.postCheck(tx, r.CheckTx)
   782  		}
   783  		if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
   784  			// Good, nothing to do.
   785  		} else {
   786  			// Tx became invalidated due to newly committed block.
   787  			mem.logger.Info("Tx is no longer valid", "tx", txIDStringer{tx, memTx.height}, "res", r, "err", postCheckErr)
   788  			// NOTE: we remove tx from the cache because it might be good later
   789  			mem.cache.Remove(tx)
   790  			mem.removeTx(mem.recheckCursor)
   791  
   792  			if mem.config.PendingRemoveEvent {
   793  				mem.rmPendingTxChan <- types.EventDataRmPendingTx{
   794  					memTx.realTx.TxHash(),
   795  					memTx.realTx.GetEthAddr(),
   796  					memTx.realTx.GetNonce(),
   797  					types.Recheck,
   798  				}
   799  			}
   800  		}
   801  		if mem.recheckCursor == mem.recheckEnd {
   802  			mem.recheckCursor = nil
   803  			mem.recheckEnd = nil
   804  		} else {
   805  			mem.recheckCursor = mem.recheckCursor.Next()
   806  		}
   807  		if mem.recheckCursor == nil {
   808  			// Done!
   809  			mem.logger.Info("Done rechecking txs")
   810  
   811  			// in case the recheck removed all txs
   812  			mem.notifyTxsAvailable()
   813  		}
   814  	default:
   815  		// ignore other messages
   816  	}
   817  }
   818  
   819  // Safe for concurrent use by multiple goroutines.
   820  func (mem *CListMempool) TxsAvailable() <-chan struct{} {
   821  	return mem.txsAvailable
   822  }
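// Sketch of a consumer (e.g. consensus) waiting on the channel; EnableTxsAvailable must
// have been called at startup, and timeout is a placeholder:
//
//	select {
//	case <-mem.TxsAvailable():
//		// the mempool has txs for this height; try to propose
//	case <-time.After(timeout):
//		// decide whether to propose an empty block
//	}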
   823  
   824  func (mem *CListMempool) notifyTxsAvailable() {
   825  	if mem.Size() == 0 {
   826  		return
   827  	}
   828  	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
   829  		// channel cap is 1, so this will send once
   830  		mem.notifiedTxsAvailable = true
   831  		select {
   832  		case mem.txsAvailable <- struct{}{}:
   833  		default:
   834  		}
   835  	}
   836  }
   837  
   838  func (mem *CListMempool) GetTxSimulateGas(txHash string) int64 {
   839  	return getPGUGas([]byte(txHash))
   840  }
   841  
   842  func (mem *CListMempool) ReapEssentialTx(tx types.Tx) abci.TxEssentials {
   843  	if ele, ok := mem.txs.Load(txKey(tx)); ok {
   844  		return ele.Value.(*mempoolTx).realTx
   845  	}
   846  	return nil
   847  }
   848  
   849  // Safe for concurrent use by multiple goroutines.
   850  func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) []types.Tx {
   851  	mem.updateMtx.RLock()
   852  	defer mem.updateMtx.RUnlock()
   853  
   854  	var (
   855  		totalBytes int64
   856  		totalGas   int64
   857  		totalTxNum int64
   858  	)
   859  	// TODO: we will get a performance boost if we have a good estimate of avg
   860  	// size per tx, and set the initial capacity based off of that.
   861  	// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize))
   862  	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), int(cfg.DynamicConfig.GetMaxTxNumPerBlock())))
   863  	txFilter := make(map[[32]byte]struct{})
   864  	var simCount, simGas int64
   865  	defer func() {
   866  		mem.logger.Info("ReapMaxBytesMaxGas", "ProposingHeight", mem.Height()+1,
   867  			"MempoolTxs", mem.txs.Len(), "ReapTxs", len(txs))
   868  		mem.info.txCount = simCount
   869  		mem.info.gasUsed = simGas
   870  	}()
   871  	for e := mem.txs.Front(); e != nil; e = e.Next() {
   872  		memTx := e.Value.(*mempoolTx)
   873  		key := txOrTxHashToKey(memTx.tx, memTx.realTx.TxHash(), mem.Height())
   874  		if _, ok := txFilter[key]; ok {
   875  			// Just log the error and skip the duplicate tx; it will be packed into the next block and then removed from the mempool.
   876  			mem.logger.Error("found duptx in same block", "tx hash", hex.EncodeToString(key[:]))
   877  			continue
   878  		}
   879  		txFilter[key] = struct{}{}
   880  		// Check total size requirement
   881  		aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
   882  		if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
   883  			return txs
   884  		}
   885  		totalBytes += int64(len(memTx.tx)) + aminoOverhead
   886  		// Check total gas requirement.
   887  		// If maxGas is negative, skip this check.
   888  		// Since newTotalGas < maxGas, which
   889  		// must be non-negative, it follows that this won't overflow.
   890  		atomic.AddUint32(&memTx.outdated, 1)
   891  		gasWanted := atomic.LoadInt64(&memTx.gasWanted)
   892  		newTotalGas := totalGas + gasWanted
   893  		if maxGas > -1 && gasWanted >= maxGas {
   894  			mem.logger.Error("tx gas overflow", "txHash", hex.EncodeToString(key[:]), "gasWanted", gasWanted, "isSim", memTx.isSim)
   895  		}
   896  		if maxGas > -1 && newTotalGas > maxGas && len(txs) > 0 {
   897  			return txs
   898  		}
   899  		if totalTxNum >= cfg.DynamicConfig.GetMaxTxNumPerBlock() {
   900  			return txs
   901  		}
   902  
   903  		totalTxNum++
   904  		totalGas = newTotalGas
   905  		txs = append(txs, memTx.tx)
   906  		simGas += gasWanted
   907  		if atomic.LoadUint32(&memTx.isSim) > 0 {
   908  			simCount++
   909  		}
   910  	}
   911  
   912  	return txs
   913  }
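// Sketch of how a proposer might reap txs under block limits; maxBlockBytes and
// maxBlockGas are placeholders, and passing -1 disables the corresponding limit:
//
//	txs := mem.ReapMaxBytesMaxGas(maxBlockBytes, maxBlockGas)
//	// txs come back in mempool order, capped by bytes, gas and GetMaxTxNumPerBlock()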
   914  
   915  // Safe for concurrent use by multiple goroutines.
   916  func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
   917  	mem.updateMtx.RLock()
   918  	defer mem.updateMtx.RUnlock()
   919  
   920  	if max < 0 {
   921  		max = mem.txs.Len()
   922  	}
   923  
   924  	txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max))
   925  	for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() {
   926  		memTx := e.Value.(*mempoolTx)
   927  		txs = append(txs, memTx.tx)
   928  	}
   929  	return txs
   930  }
   931  
   932  func (mem *CListMempool) GetTxByHash(hash [sha256.Size]byte) (types.Tx, error) {
   933  	if ele, ok := mem.txs.Load(hash); ok {
   934  		return ele.Value.(*mempoolTx).tx, nil
   935  	}
   936  	return nil, ErrNoSuchTx
   937  }
   938  
   939  func (mem *CListMempool) ReapUserTxsCnt(address string) int {
   940  	mem.updateMtx.RLock()
   941  	defer mem.updateMtx.RUnlock()
   942  
   943  	return mem.GetUserPendingTxsCnt(address)
   944  }
   945  
   946  func (mem *CListMempool) ReapUserTxs(address string, max int) types.Txs {
   947  	max = tmmath.MinInt(mem.txs.Len(), max)
   948  	return mem.txs.GetAddressTxs(address, max)
   949  }
   950  
   951  func (mem *CListMempool) GetUserPendingTxsCnt(address string) int {
   952  	return mem.txs.GetAddressTxsCnt(address)
   953  }
   954  
   955  func (mem *CListMempool) GetAddressList() []string {
   956  	return mem.txs.GetAddressList()
   957  }
   958  
   959  func (mem *CListMempool) GetPendingNonce(address string) (uint64, bool) {
   960  	return mem.txs.GetAddressNonce(address)
   961  }
   962  
   963  type logData struct {
   964  	Params  [4]interface{}
   965  	Address string
   966  	Nonce   uint64
   967  }
   968  
   969  var logDataPool = sync.Pool{
   970  	New: func() interface{} {
   971  		return &logData{}
   972  	},
   973  }
   974  
   975  func (mem *CListMempool) logUpdate(address string, nonce uint64) {
   976  	logData := logDataPool.Get().(*logData)
   977  	logData.Address = address
   978  	logData.Nonce = nonce
   979  	params := &logData.Params
   980  	params[0] = "address"
   981  	params[1] = &logData.Address
   982  	params[2] = "nonce"
   983  	params[3] = &logData.Nonce
   984  	mem.logger.Debug("mempool update", params[:4]...)
   985  	logDataPool.Put(logData)
   986  }
   987  
   988  // Lock() must be held by the caller during execution.
   989  func (mem *CListMempool) Update(
   990  	height int64,
   991  	txs types.Txs,
   992  	deliverTxResponses []*abci.ResponseDeliverTx,
   993  	preCheck PreCheckFunc,
   994  	postCheck PostCheckFunc,
   995  ) error {
   996  	// no need to update when mempool is unavailable
   997  	if mem.config.Sealed {
   998  		return mem.updateSealed(height, txs, deliverTxResponses)
   999  	}
  1000  	trace.GetElapsedInfo().AddInfo(trace.SimTx, fmt.Sprintf("%d", mem.info.txCount))
  1001  	trace.GetElapsedInfo().AddInfo(trace.SimGasUsed, fmt.Sprintf("%d", mem.info.gasUsed))
  1002  	mem.info.reset()
  1003  
  1004  	// Set height
  1005  	atomic.StoreInt64(&mem.height, height)
  1006  	mem.notifiedTxsAvailable = false
  1007  
  1008  	if preCheck != nil {
  1009  		mem.preCheck = preCheck
  1010  	}
  1011  	if postCheck != nil {
  1012  		mem.postCheck = postCheck
  1013  	}
  1014  
  1015  	var gasUsed uint64
  1016  	var toCleanAccMap, addressNonce map[string]uint64
  1017  	toCleanAccMap = make(map[string]uint64)
  1018  	if mem.pendingPool != nil {
  1019  		addressNonce = make(map[string]uint64)
  1020  	}
  1021  
  1022  	for i, tx := range txs {
  1023  		txCode := deliverTxResponses[i].Code
  1024  		addr := ""
  1025  		nonce := uint64(0)
  1026  		txhash := tx.Hash(height)
  1027  		gasUsedPerTx := deliverTxResponses[i].GasUsed
  1028  		gasPricePerTx := big.NewInt(0)
  1029  		if ele := mem.cleanTx(height, tx, txCode); ele != nil {
  1030  			atomic.AddUint32(&(ele.Value.(*mempoolTx).outdated), 1)
  1031  			addr = ele.Address
  1032  			nonce = ele.Nonce
  1033  			gasPricePerTx = ele.GasPrice
  1034  			mem.logUpdate(ele.Address, ele.Nonce)
  1035  		} else {
  1036  			if mem.txInfoparser != nil {
  1037  				txInfo := mem.txInfoparser.GetRawTxInfo(tx)
  1038  				gasPricePerTx = txInfo.GasPrice
  1039  				addr = txInfo.Sender
  1040  				nonce = txInfo.Nonce
  1041  			}
  1042  
  1043  			// remove tx signature cache
  1044  			types.SignatureCache().Remove(txhash)
  1045  		}
  1046  
  1047  		if txCode == abci.CodeTypeOK || txCode > abci.CodeTypeNonceInc {
  1048  			toCleanAccMap[addr] = nonce
  1049  			gasUsed += uint64(gasUsedPerTx)
  1050  		}
  1051  
  1052  		if cfg.DynamicConfig.GetDynamicGpMode() != types.MinimalGpMode {
  1053  			// Collect gas price and gas used of txs in current block for gas price recommendation
  1054  			mem.gpo.CurrentBlockGPs.Update(gasPricePerTx, uint64(gasUsedPerTx))
  1055  		}
  1056  
  1057  		if mem.pendingPool != nil {
  1058  			addressNonce[addr] = nonce
  1059  		}
  1060  
  1061  		if mem.pendingPool != nil {
  1062  			mem.pendingPool.removeTxByHash(amino.HexEncodeToStringUpper(txhash))
  1063  		}
  1064  		if mem.config.PendingRemoveEvent {
  1065  			mem.rmPendingTxChan <- types.EventDataRmPendingTx{txhash, addr, nonce, types.Confirmed}
  1066  		}
  1067  	}
  1068  
  1069  	if cfg.DynamicConfig.GetDynamicGpMode() != types.MinimalGpMode {
  1070  		currentBlockGPsCopy := mem.gpo.CurrentBlockGPs.Copy()
  1071  		_ = mem.gpo.BlockGPQueue.Push(currentBlockGPsCopy)
  1072  		GlobalRecommendedGP, IsCongested = mem.gpo.RecommendGP()
  1073  		mem.gpo.CurrentBlockGPs.Clear()
  1074  	}
  1075  
  1076  	mem.metrics.GasUsed.Set(float64(gasUsed))
  1077  	trace.GetElapsedInfo().AddInfo(trace.GasUsed, strconv.FormatUint(gasUsed, 10))
  1078  
  1079  	for accAddr, accMaxNonce := range toCleanAccMap {
  1080  		mem.txs.CleanItems(accAddr, accMaxNonce)
  1081  	}
  1082  
  1083  	// Either recheck non-committed txs to see if they became invalid
  1084  	// or just notify there're some txs left.
  1085  	if mem.Size() > 0 {
  1086  		if cfg.DynamicConfig.GetMempoolRecheck() || height%cfg.DynamicConfig.GetMempoolForceRecheckGap() == 0 {
  1087  			mem.logger.Info("Recheck txs", "numtxs", mem.Size(), "height", height)
  1088  			mem.recheckTxs()
  1089  			mem.logger.Info("After Recheck txs", "numtxs", mem.Size(), "height", height)
  1090  			// At this point, mem.txs are being rechecked.
  1091  			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
  1092  			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
  1093  		} else {
  1094  			mem.notifyTxsAvailable()
  1095  		}
  1096  	} else if height%cfg.DynamicConfig.GetMempoolForceRecheckGap() == 0 {
  1097  		// safely clean dirty data stuck in the cache
  1098  		mem.cache.Reset()
  1099  	}
  1100  
  1101  	// Update metrics
  1102  	mem.metrics.Size.Set(float64(mem.Size()))
  1103  	if mem.pendingPool != nil {
  1104  		select {
  1105  		case mem.pendingPoolNotify <- addressNonce:
  1106  			mem.metrics.PendingPoolSize.Set(float64(mem.pendingPool.Size()))
  1107  		default:
  1108  			// In the extreme case where mem.pendingPoolNotify is blocked (because the mempool is full),
  1109  			// dropping this notification may mean the user's pending tx never gets packed into a block.
  1110  			// But we must do so to keep block production going.
  1111  			mem.logger.Error("mempool", "Update", "when mempool  is  full and consume pendingPool, disable consume pending tx")
  1112  		}
  1113  	}
  1114  
  1115  	if cfg.DynamicConfig.GetMempoolCheckTxCost() {
  1116  		mem.checkTxCost()
  1117  	} else {
  1118  		trace.GetElapsedInfo().AddInfo(trace.MempoolCheckTxCnt, strconv.FormatInt(atomic.LoadInt64(&mem.checkCnt), 10))
  1119  		trace.GetElapsedInfo().AddInfo(trace.MempoolTxsCnt, strconv.Itoa(mem.txs.Len()))
  1120  		atomic.StoreInt64(&mem.checkCnt, 0)
  1121  	}
  1122  
  1123  	if cfg.DynamicConfig.GetEnableDeleteMinGPTx() {
  1124  		mem.deleteMinGPTxOnlyFull()
  1125  	}
  1126  	// WARNING: txs inserted between [ReapMaxBytesMaxGas, Update) are insert-sorted into mempool.txs
  1127  	// but are not included in the latest block; after the latest block's txs are removed, these txs may
  1128  	// be left in an unsorted state. We could re-sort them to restore absolute order, or just let it go since they
  1129  	// were already sorted in the last round (this only affects the accounts that sent these txs).
  1130  
  1131  	return nil
  1132  }
  1133  
  1134  func (mem *CListMempool) fireRmPendingTxEvents() {
  1135  	for rmTx := range mem.rmPendingTxChan {
  1136  		mem.eventBus.PublishEventRmPendingTx(rmTx)
  1137  	}
  1138  }
  1139  
  1140  func (mem *CListMempool) checkTxCost() {
  1141  	trace.GetElapsedInfo().AddInfo(trace.MempoolCheckTxCnt,
  1142  		strconv.FormatInt(atomic.LoadInt64(&mem.checkCnt), 10)+","+
  1143  			strconv.FormatInt(atomic.LoadInt64(&mem.checkRPCCnt), 10)+","+
  1144  			strconv.FormatInt(atomic.LoadInt64(&mem.checkP2PCnt), 10))
  1145  	atomic.StoreInt64(&mem.checkCnt, 0)
  1146  	atomic.StoreInt64(&mem.checkRPCCnt, 0)
  1147  	atomic.StoreInt64(&mem.checkP2PCnt, 0)
  1148  
  1149  	trace.GetElapsedInfo().AddInfo(trace.MempoolCheckTxTime,
  1150  		strconv.FormatInt(atomic.LoadInt64(&mem.checkTotalTime)/1000, 10)+"ms,"+
  1151  			strconv.FormatInt(atomic.LoadInt64(&mem.checkRpcTotalTime)/1000, 10)+"ms,"+
  1152  			strconv.FormatInt(atomic.LoadInt64(&mem.checkP2PTotalTime)/1000, 10)+"ms")
  1153  	atomic.StoreInt64(&mem.checkTotalTime, 0)
  1154  	atomic.StoreInt64(&mem.checkRpcTotalTime, 0)
  1155  	atomic.StoreInt64(&mem.checkP2PTotalTime, 0)
  1156  }
  1157  
  1158  func (mem *CListMempool) cleanTx(height int64, tx types.Tx, txCode uint32) *clist.CElement {
  1159  	var txHash []byte
  1160  	if mem.txInfoparser != nil {
  1161  		if realTx := mem.txInfoparser.GetRealTxFromRawTx(tx); realTx != nil {
  1162  			txHash = realTx.TxHash()
  1163  		}
  1164  	}
  1165  	txKey := txOrTxHashToKey(tx, txHash, height)
  1166  	// CodeTypeOK means tx was successfully executed.
  1167  	// CodeTypeNonceInc means tx fails but the nonce of the account increases,
  1168  	// e.g., the transaction gas has been consumed.
  1169  	if txCode == abci.CodeTypeOK || txCode > abci.CodeTypeNonceInc {
  1170  		// Add valid committed tx to the cache (if missing).
  1171  		_ = mem.cache.PushKey(txKey)
  1172  	} else {
  1173  		// Allow invalid transactions to be resubmitted.
  1174  		mem.cache.RemoveKey(txKey)
  1175  	}
  1176  	// Remove committed tx from the mempool.
  1177  	//
  1178  	// Note an evil proposer can drop valid txs!
  1179  	// Mempool before:
  1180  	//   100 -> 101 -> 102
  1181  	// Block, proposed by an evil proposer:
  1182  	//   101 -> 102
  1183  	// Mempool after:
  1184  	//   100
  1185  	// https://github.com/tendermint/tendermint/issues/3322.
  1186  	return mem.removeTxByKey(txKey)
  1187  }
  1188  
  1189  func (mem *CListMempool) updateSealed(height int64, txs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx) error {
  1190  	// Set height
  1191  	atomic.StoreInt64(&mem.height, height)
  1192  	mem.notifiedTxsAvailable = false
  1193  	// no need to update mempool
  1194  	if mem.Size() <= 0 {
  1195  		return nil
  1196  	}
  1197  	toCleanAccMap := make(map[string]uint64)
  1198  	// update mempool
  1199  	for i, tx := range txs {
  1200  		txCode := deliverTxResponses[i].Code
  1201  		// remove tx from mempool
  1202  		if ele := mem.cleanTx(height, tx, txCode); ele != nil {
  1203  			if txCode == abci.CodeTypeOK || txCode > abci.CodeTypeNonceInc {
  1204  				toCleanAccMap[ele.Address] = ele.Nonce
  1205  			}
  1206  			mem.logUpdate(ele.Address, ele.Nonce)
  1207  		}
  1208  	}
  1209  	for accAddr, accMaxNonce := range toCleanAccMap {
  1210  		mem.txs.CleanItems(accAddr, accMaxNonce)
  1211  	}
  1212  	// mempool logs
  1213  	trace.GetElapsedInfo().AddInfo(trace.MempoolCheckTxCnt, strconv.FormatInt(atomic.LoadInt64(&mem.checkCnt), 10))
  1214  	trace.GetElapsedInfo().AddInfo(trace.MempoolTxsCnt, strconv.Itoa(mem.txs.Len()))
  1215  	atomic.StoreInt64(&mem.checkCnt, 0)
  1216  	return nil
  1217  }
  1218  
  1219  func (mem *CListMempool) recheckTxs() {
  1220  	if mem.Size() == 0 {
  1221  		panic("recheckTxs is called, but the mempool is empty")
  1222  	}
  1223  
  1224  	mem.recheckCursor = mem.txs.Front()
  1225  	mem.recheckEnd = mem.txs.Back()
  1226  
  1227  	// Push txs to proxyAppConn
  1228  	// NOTE: globalCb may be called concurrently.
  1229  	for e := mem.txs.Front(); e != nil; e = e.Next() {
  1230  		memTx := e.Value.(*mempoolTx)
  1231  		mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{
  1232  			Tx:   memTx.tx,
  1233  			Type: abci.CheckTxType_Recheck,
  1234  		})
  1235  	}
  1236  
  1237  	mem.proxyAppConn.FlushAsync()
  1238  }
  1239  
  1240  func (mem *CListMempool) GetConfig() *cfg.MempoolConfig {
  1241  	return mem.config
  1242  }
  1243  
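// MultiPriceBump scales rawPrice up by priceBump percent using integer math.
// Worked example (illustrative values): rawPrice = 1_000_000_000 (1 gwei) and priceBump = 10
// give tmpPrice = 10_000_000, inc = 100_000_000, and a result of 1_100_000_000 (1.1 gwei).
// Because the division truncates, a rawPrice below 100 is returned unchanged.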
  1244  func MultiPriceBump(rawPrice *big.Int, priceBump int64) *big.Int {
  1245  	tmpPrice := new(big.Int).Div(rawPrice, big.NewInt(100))
  1246  	inc := new(big.Int).Mul(tmpPrice, big.NewInt(priceBump))
  1247  
  1248  	return new(big.Int).Add(inc, rawPrice)
  1249  }
  1250  
  1251  //--------------------------------------------------------------------------------
  1252  
  1253  // mempoolTx is a transaction that successfully ran
  1254  type mempoolTx struct {
  1255  	height      int64 // height that this tx had been validated in
  1256  	gasWanted   int64 // amount of gas this tx states it will require
  1257  	gasLimit    int64
  1258  	tx          types.Tx //
  1259  	realTx      abci.TxEssentials
  1260  	nodeKey     []byte
  1261  	signature   []byte
  1262  	from        string
  1263  	senderNonce uint64
  1264  
  1265  	outdated uint32
  1266  	isSim    uint32
  1267  
  1268  	// hguPrecise being true means the historical gas used (hgu) for this tx is precise, so simulation is not necessary
  1269  	hguPrecise bool
  1270  
  1271  	isWrapCMTx  bool
  1272  	wrapCMNonce uint64
  1273  
  1274  	// ids of peers who've sent us this tx (as a map for quick lookups).
  1275  	// senders: PeerID -> struct{}
  1276  	senders   map[uint16]struct{}
  1277  	senderMtx sync.RWMutex
  1278  }
  1279  
  1280  // Height returns the height for this transaction
  1281  func (memTx *mempoolTx) Height() int64 {
  1282  	return atomic.LoadInt64(&memTx.height)
  1283  }
  1284  
  1285  func (memTx *mempoolTx) ToWrappedMempoolTx() types.WrappedMempoolTx {
  1286  	return types.WrappedMempoolTx{
  1287  		Height:      memTx.height,
  1288  		GasWanted:   memTx.gasWanted,
  1289  		GasLimit:    memTx.gasLimit,
  1290  		Tx:          memTx.tx,
  1291  		NodeKey:     memTx.nodeKey,
  1292  		Signature:   memTx.signature,
  1293  		From:        memTx.from,
  1294  		SenderNonce: memTx.senderNonce,
  1295  		Outdated:    memTx.outdated,
  1296  		IsSim:       memTx.isSim,
  1297  		IsWrapCMTx:  memTx.isWrapCMTx,
  1298  		WrapCMNonce: memTx.wrapCMNonce,
  1299  	}
  1300  }
  1301  
  1302  //--------------------------------------------------------------------------------
  1303  
  1304  type txCache interface {
  1305  	Reset()
  1306  	Push(tx types.Tx) bool
  1307  	PushKey(key [sha256.Size]byte) bool
  1308  	Remove(tx types.Tx)
  1309  	RemoveKey(key [sha256.Size]byte)
  1310  }
  1311  
  1312  // mapTxCache maintains a cache of transactions backed by fastcache. It only stores the hash
  1313  // of the tx, due to memory concerns.
  1314  type mapTxCache struct {
  1315  	mtx      sync.Mutex
  1316  	size     int
  1317  	cacheMap *fastcache.Cache
  1318  }
  1319  
  1320  var _ txCache = (*mapTxCache)(nil)
  1321  
  1322  // newMapTxCache returns a new mapTxCache.
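// The fastcache capacity is sized as cacheSize*32 bytes on the assumption that each entry
// is a 32-byte sha256 tx hash with an empty value.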
  1323  func newMapTxCache(cacheSize int) *mapTxCache {
  1324  	return &mapTxCache{
  1325  		size:     cacheSize,
  1326  		cacheMap: fastcache.New(cacheSize * 32),
  1327  	}
  1328  }
  1329  
  1330  // Reset resets the cache to an empty state.
  1331  func (cache *mapTxCache) Reset() {
  1332  	cache.mtx.Lock()
  1333  	cache.cacheMap = fastcache.New(cache.size * 32)
  1334  	cache.mtx.Unlock()
  1335  }
  1336  
  1337  // Push adds the given tx to the cache and returns true. It returns
  1338  // false if tx is already in the cache.
  1339  func (cache *mapTxCache) Push(tx types.Tx) bool {
  1340  	// Use the tx hash in the cache
  1341  	txHash := txKey(tx)
  1342  
  1343  	return cache.PushKey(txHash)
  1344  }
  1345  
  1346  func (cache *mapTxCache) PushKey(txHash [32]byte) bool {
  1347  	cache.mtx.Lock()
  1348  	defer cache.mtx.Unlock()
  1349  
  1350  	if exists := cache.cacheMap.Has(txHash[:]); exists {
  1351  		return false
  1352  	}
  1353  
  1354  	cache.cacheMap.Set(txHash[:], nil)
  1355  	return true
  1356  }
  1357  
  1358  // Remove removes the given tx from the cache.
  1359  func (cache *mapTxCache) Remove(tx types.Tx) {
  1360  	txHash := txKey(tx)
  1361  	cache.cacheMap.Del(txHash[:])
  1362  }
  1363  
  1364  func (cache *mapTxCache) RemoveKey(key [32]byte) {
  1365  	cache.cacheMap.Del(key[:])
  1366  }
  1367  
  1368  type nopTxCache struct{}
  1369  
  1370  var _ txCache = (*nopTxCache)(nil)
  1371  
  1372  func (nopTxCache) Reset()                    {}
  1373  func (nopTxCache) Push(types.Tx) bool        { return true }
  1374  func (nopTxCache) PushKey(key [32]byte) bool { return true }
  1375  func (nopTxCache) Remove(types.Tx)           {}
  1376  func (nopTxCache) RemoveKey(key [32]byte)    {}
  1377  
  1378  // --------------------------------------------------------------------------------
  1379  // txKey is the fixed length array sha256 hash used as the key in maps.
  1380  func txKey(tx types.Tx) (retHash [sha256.Size]byte) {
  1381  	copy(retHash[:], tx.Hash(types.GetVenusHeight())[:sha256.Size])
  1382  	return
  1383  }
  1384  
  1385  func txOrTxHashToKey(tx types.Tx, txHash []byte, height int64) (retHash [sha256.Size]byte) {
  1386  	if len(txHash) == sha256.Size && types.HigherThanVenus(height) {
  1387  		copy(retHash[:], txHash)
  1388  		return
  1389  	} else {
  1390  		return txKey(tx)
  1391  	}
  1392  }
  1393  
  1394  type txIDStringer struct {
  1395  	tx     []byte
  1396  	height int64
  1397  }
  1398  
  1399  func (txs txIDStringer) String() string {
  1400  	return amino.HexEncodeToStringUpper(types.Tx(txs.tx).Hash(txs.height))
  1401  }
  1402  
  1403  // txID is the hex encoded hash of the bytes as a types.Tx.
  1404  func txID(tx []byte, height int64) string {
  1405  	return amino.HexEncodeToStringUpper(types.Tx(tx).Hash(height))
  1406  }
  1407  
  1408  // --------------------------------------------------------------------------------
  1409  type ExTxInfo struct {
  1410  	Sender      string   `json:"sender"`
  1411  	SenderNonce uint64   `json:"sender_nonce"`
  1412  	GasPrice    *big.Int `json:"gas_price"`
  1413  	Nonce       uint64   `json:"nonce"`
  1414  }
  1415  
  1416  func (mem *CListMempool) SetAccountRetriever(retriever AccountRetriever) {
  1417  	mem.accountRetriever = retriever
  1418  }
  1419  
  1420  func (mem *CListMempool) SetTxInfoParser(parser TxInfoParser) {
  1421  	mem.txInfoparser = parser
  1422  }
  1423  
  1424  func (mem *CListMempool) pendingPoolJob() {
  1425  	for addressNonce := range mem.pendingPoolNotify {
  1426  		timeStart := time.Now()
  1427  		mem.logger.Debug("pending pool job begin", "poolSize", mem.pendingPool.Size())
  1428  		addrNonceMap := mem.pendingPool.handlePendingTx(addressNonce)
  1429  		for addr, nonce := range addrNonceMap {
  1430  			mem.consumePendingTx(addr, nonce)
  1431  		}
  1432  		mem.pendingPool.handlePeriodCounter()
  1433  		timeElapse := time.Since(timeStart).Microseconds()
  1434  		mem.logger.Debug("pending pool job end", "interval(ms)", timeElapse,
  1435  			"poolSize", mem.pendingPool.Size(),
  1436  			"addressNonceMap", addrNonceMap)
  1437  	}
  1438  }
  1439  
  1440  func (mem *CListMempool) consumePendingTxQueueJob() {
  1441  	for addrNonce := range mem.consumePendingTxQueue {
  1442  		mem.consumePendingTx(addrNonce.addr, addrNonce.nonce)
  1443  		addressNoncePool.Put(addrNonce)
  1444  	}
  1445  }
  1446  
  1447  func (mem *CListMempool) simulateTx(tx types.Tx, gasLimit int64) (int64, error) {
  1448  	var simuRes SimulationResponse
  1449  	res, err := mem.proxyAppConn.QuerySync(abci.RequestQuery{
  1450  		Path: "app/simulate/mempool",
  1451  		Data: tx,
  1452  	})
  1453  	if err != nil {
  1454  		return 0, err
  1455  	}
  1456  	err = cdc.UnmarshalBinaryBare(res.Value, &simuRes)
  1457  	if err != nil {
  1458  		return 0, err
  1459  	}
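	// Scale the simulated gas by the PGU adjustment factor using integer math; with
	// illustrative numbers, GasUsed = 50_000 and an adjustment of 1.1 give roughly
	// 50_000 * 110 / 100 = 55_000, capped at gasLimit below.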
  1460  	gas := int64(simuRes.GasUsed) * int64(cfg.DynamicConfig.GetPGUAdjustment()*100) / 100
  1461  	mem.pguLogger.Info("simulateTx", "txHash", hex.EncodeToString(tx.Hash(mem.Height())), "gas", gas, "gasLimit", gasLimit)
  1462  	if gas > gasLimit {
  1463  		gas = gasLimit
  1464  	}
  1465  	txHash := tx.Hash(mem.Height())
  1466  	if err = updatePGU(txHash, gas); err != nil {
  1467  		mem.logger.Error("updatePGU", "txHash", hex.EncodeToString(tx.Hash(mem.Height())), "simGas", gas, "error", err)
  1468  	}
  1469  	return gas, err
  1470  }
  1471  
  1472  func (mem *CListMempool) simulationRoutine() {
  1473  	for memTx := range mem.simQueue {
  1474  		mem.simulationJob(memTx)
  1475  	}
  1476  }
  1477  
  1478  func (mem *CListMempool) simulationJob(memTx *mempoolTx) {
  1479  	defer types.SignatureCache().Remove(memTx.realTx.TxHash())
  1480  	if atomic.LoadUint32(&memTx.outdated) != 0 {
  1481  		// memTx is outdated
  1482  		return
  1483  	}
  1484  	global.WaitCommit()
  1485  	gas, err := mem.simulateTx(memTx.tx, memTx.gasLimit)
  1486  	if err != nil {
  1487  		mem.logger.Error("simulateTx", "error", err, "txHash", memTx.tx.Hash(mem.Height()))
  1488  		return
  1489  	}
  1490  	atomic.StoreInt64(&memTx.gasWanted, gas)
  1491  	atomic.AddUint32(&memTx.isSim, 1)
  1492  }
  1493  
  1494  // trySimulate4NextBlock is called during Update().
  1495  // Assuming the next step is to propose a block of height `n` through ReapMaxBytesMaxGas,
  1496  // trySimulate4NextBlock skips the txs that would be packed into that block
  1497  // and simulates the txs to be packed into the block of height `n+1`.
  1498  func (mem *CListMempool) trySimulate4NextBlock() {
  1499  	maxGu := cfg.DynamicConfig.GetMaxGasUsedPerBlock()
  1500  	if maxGu < 0 || !cfg.DynamicConfig.GetEnablePGU() {
  1501  		return
  1502  	}
  1503  
  1504  	var gu int64
  1505  	var ele *clist.CElement
  1506  	// skip the txs that will be packed into next block
  1507  	for ele = mem.txs.Front(); ele != nil; ele = ele.Next() {
  1508  		gu += ele.Value.(*mempoolTx).gasWanted
  1509  		if gu > maxGu {
  1510  			break
  1511  		}
  1512  	}
  1513  
  1514  	// reset gu for next cycle
  1515  	gu = 0
  1516  
  1517  	for ; ele != nil && gu < maxGu; ele = ele.Next() {
  1518  		memTx := ele.Value.(*mempoolTx)
  1519  		var gas int64
  1520  		var err error
  1521  		if !memTx.hguPrecise {
  1522  			gas, err = mem.simulateTx(memTx.tx, memTx.gasLimit)
  1523  			if err != nil {
  1524  				mem.logger.Error("trySimulate4NextBlock", "error", err, "txHash", memTx.tx.Hash(mem.Height()))
  1525  				return
  1526  			}
  1527  			atomic.StoreInt64(&memTx.gasWanted, gas)
  1528  			atomic.AddUint32(&memTx.isSim, 1)
  1529  		} else {
  1530  			gas = memTx.gasWanted
  1531  		}
  1532  
  1533  		gu += gas
  1534  	}
  1535  
  1536  }
  1537  
  1538  func (mem *CListMempool) deleteMinGPTxOnlyFull() {
  1539  	// Check whether the mempool size is exceeded; if so, delete the minimum-gas-price txs.
  1540  	for mem.Size() > cfg.DynamicConfig.GetMempoolSize() || mem.TxsBytes() > mem.config.MaxTxsBytes {
  1541  		removeTx := mem.txs.Back()
  1542  		mem.removeTx(removeTx)
  1543  
  1544  		removeMemTx := removeTx.Value.(*mempoolTx)
  1545  		var removeMemTxHash []byte
  1546  		if removeMemTx.realTx != nil {
  1547  			removeMemTxHash = removeMemTx.realTx.TxHash()
  1548  		}
  1549  		mem.logger.Debug("mempool", "delete Tx", hex.EncodeToString(removeMemTxHash), "nonce", removeMemTx.realTx.GetNonce(), "gp", removeMemTx.realTx.GetGasPrice())
  1550  		mem.cache.RemoveKey(txOrTxHashToKey(removeMemTx.tx, removeMemTxHash, removeMemTx.Height()))
  1551  
  1552  		if mem.config.PendingRemoveEvent {
  1553  			mem.rmPendingTxChan <- types.EventDataRmPendingTx{removeMemTxHash, removeMemTx.realTx.GetEthAddr(), removeMemTx.realTx.GetNonce(), types.MinGasPrice}
  1554  		}
  1555  	}
  1556  }
  1557  
  1558  func (mem *CListMempool) GetEnableDeleteMinGPTx() bool {
  1559  	return cfg.DynamicConfig.GetEnableDeleteMinGPTx()
  1560  }
  1561  
  1562  func (mem *CListMempool) GetPendingPoolTxsBytes() map[string]map[string]types.WrappedMempoolTx {
  1563  	return mem.pendingPool.GetWrappedAddressTxsMap()
  1564  }