github.com/Finschia/ostracon@v1.1.5/mempool/v1/mempool_test.go

//go:build deprecated

package v1

import (
	"bytes"
	"errors"
	"fmt"
	"math/rand"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

// application extends the KV store application by overriding CheckTx to provide
// transaction priority based on the value in the key/value pair.
type application struct {
	*kvstore.Application
}

type testTx struct {
	tx       types.Tx
	priority int64
}

func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	var (
		priority int64
		sender   string
	)

	// infer the priority from the raw transaction value (sender=key=value)
	parts := bytes.Split(req.Tx, []byte("="))
	if len(parts) == 3 {
		v, err := strconv.ParseInt(string(parts[2]), 10, 64)
		if err != nil {
			return abci.ResponseCheckTx{
				Priority:  priority,
				Code:      100,
				GasWanted: 1,
			}
		}

		priority = v
		sender = string(parts[0])
	} else {
		return abci.ResponseCheckTx{
			Priority:  priority,
			Code:      101,
			GasWanted: 1,
		}
	}

	return abci.ResponseCheckTx{
		Priority:  priority,
		Sender:    sender,
		Code:      code.CodeTypeOK,
		GasWanted: 1,
	}
}
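
// The test below is an illustrative sketch, not part of the original suite:
// it exercises the overridden CheckTx directly to show how sender and
// priority are derived from the sender=key=value encoding, and which
// rejection codes are used (100 for a non-numeric value, 101 for a
// malformed transaction).
func TestApplicationCheckTxParsing(t *testing.T) {
	app := &application{kvstore.NewApplication()}

	// Well-formed transaction: priority comes from the value, sender from
	// the first field.
	res := app.CheckTx(abci.RequestCheckTx{Tx: []byte("alice=k1=42")})
	require.Equal(t, code.CodeTypeOK, res.Code)
	require.Equal(t, int64(42), res.Priority)
	require.Equal(t, "alice", res.Sender)

	// Non-numeric value: rejected with code 100.
	res = app.CheckTx(abci.RequestCheckTx{Tx: []byte("alice=k1=oops")})
	require.EqualValues(t, 100, res.Code)

	// Missing delimiters: rejected with code 101.
	res = app.CheckTx(abci.RequestCheckTx{Tx: []byte("malformed")})
	require.EqualValues(t, 101, res.Code)
}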

func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
	t.Helper()

	app := &application{kvstore.NewApplication()}
	cc := proxy.NewLocalClientCreator(app)

	cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
	cfg.Mempool.CacheSize = cacheSize

	appConnMem, err := cc.NewABCIClient()
	require.NoError(t, err)
	require.NoError(t, appConnMem.Start())

	t.Cleanup(func() {
		os.RemoveAll(cfg.RootDir)
		require.NoError(t, appConnMem.Stop())
	})

	return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}

// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until
// its callback has finished executing. It fails t if CheckTx fails.
func mustCheckTx(t *testing.T, txmp *TxMempool, spec string) {
	done := make(chan struct{})
	if err := txmp.CheckTx([]byte(spec), func(*abci.Response) {
		close(done)
	}, mempool.TxInfo{}); err != nil {
		t.Fatalf("CheckTx for %q failed: %v", spec, err)
	}
	<-done
}

// checkTxs generates numTxs transactions of the form
// sender-{i}-{peerID}={random 20-byte hex prefix}={priority}, with priorities
// drawn uniformly from [1000, 9999), and submits each to the mempool via
// CheckTx on behalf of peerID. It returns the generated transactions.
func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
	txs := make([]testTx, numTxs)
	txInfo := mempool.TxInfo{SenderID: peerID}

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	for i := 0; i < numTxs; i++ {
		prefix := make([]byte, 20)
		_, err := rng.Read(prefix)
		require.NoError(t, err)

		priority := int64(rng.Intn(9999-1000) + 1000)

		txs[i] = testTx{
			tx:       []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
			priority: priority,
		}
		require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo))
	}

	return txs
}

func TestTxMempool_TxsAvailable(t *testing.T) {
	txmp := setup(t, 0)
	txmp.EnableTxsAvailable()

	ensureNoTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
			require.Fail(t, "unexpected transactions event")
		case <-timer.C:
		}
	}

	ensureTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
		case <-timer.C:
			require.Fail(t, "expected transactions event")
		}
	}

	// ensure no event fires, as we have not checked any transactions yet
	ensureNoTxFire()

	// Execute CheckTx for some transactions and ensure TxsAvailable only fires
	// once.
	txs := checkTxs(t, txmp, 100, 0)
	ensureTxFire()
	ensureNoTxFire()

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	// commit half the transactions and ensure we fire an event
	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()
	ensureTxFire()
	ensureNoTxFire()

	// Execute CheckTx for more transactions and ensure we do not fire another
	// event as we're still on the same height (1).
	_ = checkTxs(t, txmp, 100, 0)
	ensureNoTxFire()
}
func TestTxMempool_Size(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	// Each generated tx is 56 bytes for the 10 single-digit indices and 57
	// bytes for the 90 double-digit indices: 10*56 + 90*57 = 5690.
	require.Equal(t, int64(5690), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, len(rawTxs)/2, txmp.Size())
	require.Equal(t, int64(2850), txmp.SizeBytes())
}
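
// Illustrative sketch, not part of the original suite: SizeBytes accounts for
// the raw byte length of each admitted transaction, so a single known
// transaction grows it by exactly len(tx) -- the same accounting the eviction
// test below relies on.
func TestTxMempool_SizeBytesSingleTx(t *testing.T) {
	txmp := setup(t, 0)

	const spec = "sender-a=0000=1"
	mustCheckTx(t, txmp, spec)

	require.Equal(t, 1, txmp.Size())
	require.Equal(t, int64(len(spec)), txmp.SizeBytes())
}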

func TestTxMempool_Eviction(t *testing.T) {
	txmp := setup(t, 1000)
	txmp.config.Size = 5
	txmp.config.MaxTxsBytes = 60
	txExists := func(spec string) bool {
		txmp.Lock()
		defer txmp.Unlock()
		key := types.Tx(spec).Key()
		_, ok := txmp.txByKey[key]
		return ok
	}

	// A transaction bigger than the mempool should be rejected even when there
	// are slots available.
	mustCheckTx(t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1")
	require.Equal(t, 0, txmp.Size())

	// Nearly fill the mempool with a low-priority transaction, to show that it
	// is evicted to make room for a higher-priority tx even when slots are
	// still available.
	const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"
	mustCheckTx(t, txmp, bigTx)
	require.Equal(t, 1, txmp.Size()) // bigTx is the only element
	require.True(t, txExists(bigTx))
	require.Equal(t, int64(len(bigTx)), txmp.SizeBytes())

	// The next transaction should evict bigTx: it has higher priority, and
	// both together cannot fit within MaxTxsBytes.
	mustCheckTx(t, txmp, "key1=0000=25")
	require.True(t, txExists("key1=0000=25"))
	require.False(t, txExists(bigTx))
	require.False(t, txmp.cache.Has([]byte(bigTx)))
	require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes())

	// Now fill up the rest of the slots with other transactions.
	mustCheckTx(t, txmp, "key2=0001=5")
	mustCheckTx(t, txmp, "key3=0002=10")
	mustCheckTx(t, txmp, "key4=0003=3")
	mustCheckTx(t, txmp, "key5=0004=3")

	// A new transaction with low priority should be discarded.
	mustCheckTx(t, txmp, "key6=0005=1")
	require.False(t, txExists("key6=0005=1"))

	// A new transaction with higher priority should evict key5, which is the
	// newer of the two lowest-priority transactions.
	mustCheckTx(t, txmp, "key7=0006=7")
	require.True(t, txExists("key7=0006=7"))  // new transaction added
	require.False(t, txExists("key5=0004=3")) // newer low-priority tx evicted
	require.True(t, txExists("key4=0003=3"))  // older low-priority tx retained

	// Another new transaction evicts the other low-priority element.
	mustCheckTx(t, txmp, "key8=0007=20")
	require.True(t, txExists("key8=0007=20"))
	require.False(t, txExists("key4=0003=3"))

	// Now the lowest priority in the pool is 5, so key2 should be the next to go.
	mustCheckTx(t, txmp, "key9=0008=9")
	require.True(t, txExists("key9=0008=9"))
	require.False(t, txExists("key2=0001=5"))

	// Add a transaction that requires eviction of multiple lower-priority
	// entries in order to make room for the new element.
	mustCheckTx(t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11
	require.True(t, txExists("key1=0000=25"))
	require.True(t, txExists("key8=0007=20"))
	require.True(t, txExists("key10=0123456789abcdef=11"))
	require.False(t, txExists("key3=0002=10"))
	require.False(t, txExists("key9=0008=9"))
	require.False(t, txExists("key7=0006=7"))
}

func TestTxMempool_Flush(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	txmp.Flush()
	require.Zero(t, txmp.Size())
	require.Equal(t, int64(0), txmp.SizeBytes())
}

func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
	txmp := setup(t, 0)
	tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	txMap := make(map[types.TxKey]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[tTx.tx.Key()] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[rTx.Key()].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap by gas capacity only
	reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 50)

	// reap by transaction bytes only
	reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.GreaterOrEqual(t, len(reapedTxs), 16)

	// Reap by both transaction bytes and gas. The byte limit binds first
	// here, reaping 25 transactions; the gas limit alone (each tx wants 1
	// gas) would admit 30.
	reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 25)
}

func TestTxMempool_ReapMaxTxs(t *testing.T) {
	txmp := setup(t, 0)
	tTxs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	txMap := make(map[types.TxKey]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[tTx.tx.Key()] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[rTx.Key()].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap all transactions
	reapedTxs := txmp.ReapMaxTxs(-1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs))

	// reap a single transaction
	reapedTxs = txmp.ReapMaxTxs(1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 1)

	// reap half of the transactions
	reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs)/2)
}
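
// Illustrative sketch, not part of the original suite: the prioritized
// ordering that ensurePrioritized verifies above with random priorities,
// shown with three deterministic transactions from distinct senders.
// Reaping returns them in strictly decreasing priority order.
func TestTxMempool_ReapOrderDeterministic(t *testing.T) {
	txmp := setup(t, 0)

	mustCheckTx(t, txmp, "s1=0000=5")
	mustCheckTx(t, txmp, "s2=0001=9")
	mustCheckTx(t, txmp, "s3=0002=1")

	reaped := txmp.ReapMaxTxs(-1)
	require.Len(t, reaped, 3)
	require.Equal(t, types.Tx("s2=0001=9"), reaped[0]) // priority 9
	require.Equal(t, types.Tx("s1=0000=5"), reaped[1]) // priority 5
	require.Equal(t, types.Tx("s3=0002=1"), reaped[2]) // priority 1
}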

func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
	txmp := setup(t, 0)

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	tx := make([]byte, txmp.config.MaxTxBytes+1)
	_, err := rng.Read(tx)
	require.NoError(t, err)

	require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))

	tx = make([]byte, txmp.config.MaxTxBytes-1)
	_, err = rng.Read(tx)
	require.NoError(t, err)

	require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
}

func TestTxMempool_CheckTxSamePeer(t *testing.T) {
	txmp := setup(t, 100)
	peerID := uint16(1)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	prefix := make([]byte, 20)
	_, err := rng.Read(prefix)
	require.NoError(t, err)

	tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50))

	require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
	require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
}

func TestTxMempool_CheckTxSameSender(t *testing.T) {
	txmp := setup(t, 100)
	peerID := uint16(1)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	prefix1 := make([]byte, 20)
	_, err := rng.Read(prefix1)
	require.NoError(t, err)

	prefix2 := make([]byte, 20)
	_, err = rng.Read(prefix2)
	require.NoError(t, err)

	tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50))
	tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50))

	require.NoError(t, txmp.CheckTx(tx1, nil, mempool.TxInfo{SenderID: peerID}))
	require.Equal(t, 1, txmp.Size())
	require.NoError(t, txmp.CheckTx(tx2, nil, mempool.TxInfo{SenderID: peerID}))
	require.Equal(t, 1, txmp.Size())
}

func TestTxMempool_ConcurrentTxs(t *testing.T) {
	txmp := setup(t, 100)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	checkTxDone := make(chan struct{})

	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		for i := 0; i < 20; i++ {
			_ = checkTxs(t, txmp, 100, 0)
			dur := rng.Intn(1000-500) + 500
			time.Sleep(time.Duration(dur) * time.Millisecond)
		}

		wg.Done()
		close(checkTxDone)
	}()

	wg.Add(1)
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		defer wg.Done()

		var height int64 = 1

		for range ticker.C {
			reapedTxs := txmp.ReapMaxTxs(200)
			if len(reapedTxs) > 0 {
				responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
				for i := 0; i < len(responses); i++ {
					var code uint32

					if i%10 == 0 {
						code = 100
					} else {
						code = abci.CodeTypeOK
					}

					responses[i] = &abci.ResponseDeliverTx{Code: code}
				}

				txmp.Lock()
				require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil))
				txmp.Unlock()

				height++
			} else {
				// only return once we know we finished the CheckTx loop
				select {
				case <-checkTxDone:
					return
				default:
				}
			}
		}
	}()

	wg.Wait()
	require.Zero(t, txmp.Size())
	require.Zero(t, txmp.SizeBytes())
}

func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) {
	txmp := setup(t, 5000)
	txmp.config.TTLDuration = 5 * time.Millisecond

	added1 := checkTxs(t, txmp, 10, 0)
	require.Equal(t, len(added1), txmp.Size())

	// Wait a while, then add some more transactions that should not be expired
	// when the first batch TTLs out.
	//
	// ms: 0   1   2   3   4   5   6
	//     ^           ^       ^   ^
	//     |           |       |   +-- Update (triggers pruning)
	//     |           |       +------ first batch expires
	//     |           +-------------- second batch added
	//     +-------------------------- first batch added
	//
	// The exact intervals are not important except that the delta should be
	// large relative to the cost of CheckTx (ms vs. ns is fine here).
	time.Sleep(3 * time.Millisecond)
	added2 := checkTxs(t, txmp, 10, 1)

	// Wait a while longer, so that the first batch will expire.
	time.Sleep(3 * time.Millisecond)

	// Trigger an update so that pruning will occur.
	txmp.Lock()
	defer txmp.Unlock()
	require.NoError(t, txmp.Update(txmp.height+1, nil, nil, nil, nil))

	// All the transactions in the original set should have been purged.
	for _, tx := range added1 {
		if _, ok := txmp.txByKey[tx.tx.Key()]; ok {
			t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key())
		}
		if txmp.cache.Has(tx.tx) {
			t.Errorf("Transaction %X should have been removed from the cache", tx.tx.Key())
		}
	}

	// All the transactions added later should still be around.
	for _, tx := range added2 {
		if _, ok := txmp.txByKey[tx.tx.Key()]; !ok {
			t.Errorf("Transaction %X should still be in the mempool, but is not", tx.tx.Key())
		}
	}
}

func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
	txmp := setup(t, 500)
	txmp.height = 100
	txmp.config.TTLNumBlocks = 10

	tTxs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())

	// reap 5 txs at the next height -- no txs should expire
	reapedTxs := txmp.ReapMaxTxs(5)
	responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, 95, txmp.Size())

	// check more txs at height 101
	_ = checkTxs(t, txmp, 50, 1)
	require.Equal(t, 145, txmp.Size())

	// Reap 5 txs at a height that would expire all the transactions from before
	// the previous Update (height 100).
	//
	// NOTE: When we reap txs below, we do not know whether we are picking txs
	// from the initial CheckTx calls or from the second round. Thus, we cannot
	// guarantee that exactly 95 of the remaining txs will be expired and
	// removed; we only know that at most 95 can be.
	reapedTxs = txmp.ReapMaxTxs(5)
	responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	require.GreaterOrEqual(t, txmp.Size(), 45)
}

func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
	cases := []struct {
		name string
		err  error
	}{
		{
			name: "error",
			err:  errors.New("test error"),
		},
		{
			name: "no error",
			err:  nil,
		},
	}
	for _, tc := range cases {
		testCase := tc
		t.Run(testCase.name, func(t *testing.T) {
			postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
				return testCase.err
			}
			txmp := setup(t, 0, WithPostCheck(postCheckFn))
			rng := rand.New(rand.NewSource(time.Now().UnixNano()))
			tx := make([]byte, txmp.config.MaxTxBytes-1)
			_, err := rng.Read(tx)
			require.NoError(t, err)

			callback := func(res *abci.Response) {
				checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
				require.True(t, ok)
				expectedErrString := ""
				if testCase.err != nil {
					expectedErrString = testCase.err.Error()
				}
				require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
			}
			require.NoError(t, txmp.CheckTx(tx, callback, mempool.TxInfo{SenderID: 0}))
		})
	}
}