github.com/vipernet-xyz/tm@v0.34.24/mempool/v1/mempool_test.go (about)

     1  package v1
     2  
     3  import (
     4  	"bytes"
     5  	"errors"
     6  	"fmt"
     7  	"math/rand"
     8  	"os"
     9  	"sort"
    10  	"strconv"
    11  	"strings"
    12  	"sync"
    13  	"testing"
    14  	"time"
    15  
    16  	"github.com/stretchr/testify/require"
    17  
    18  	"github.com/vipernet-xyz/tm/abci/example/code"
    19  	"github.com/vipernet-xyz/tm/abci/example/kvstore"
    20  	abci "github.com/vipernet-xyz/tm/abci/types"
    21  	"github.com/vipernet-xyz/tm/config"
    22  	"github.com/vipernet-xyz/tm/libs/log"
    23  	"github.com/vipernet-xyz/tm/mempool"
    24  	"github.com/vipernet-xyz/tm/proxy"
    25  	"github.com/vipernet-xyz/tm/types"
    26  )
    27  
// application extends the KV store application by overriding CheckTx to provide
// transaction priority based on the value in the key/value pair.
// It embeds *kvstore.Application so all other ABCI methods are inherited.
type application struct {
	*kvstore.Application
}
    33  
// testTx pairs a raw transaction with the priority the test application's
// CheckTx is expected to derive from it (see checkTxs, which encodes the
// priority into the value field of "sender=key=value").
type testTx struct {
	tx       types.Tx // raw transaction bytes in "sender=key=value" form
	priority int64    // priority encoded in the value field
}
    38  
    39  func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
    40  	var (
    41  		priority int64
    42  		sender   string
    43  	)
    44  
    45  	// infer the priority from the raw transaction value (sender=key=value)
    46  	parts := bytes.Split(req.Tx, []byte("="))
    47  	if len(parts) == 3 {
    48  		v, err := strconv.ParseInt(string(parts[2]), 10, 64)
    49  		if err != nil {
    50  			return abci.ResponseCheckTx{
    51  				Priority:  priority,
    52  				Code:      100,
    53  				GasWanted: 1,
    54  			}
    55  		}
    56  
    57  		priority = v
    58  		sender = string(parts[0])
    59  	} else {
    60  		return abci.ResponseCheckTx{
    61  			Priority:  priority,
    62  			Code:      101,
    63  			GasWanted: 1,
    64  		}
    65  	}
    66  
    67  	return abci.ResponseCheckTx{
    68  		Priority:  priority,
    69  		Sender:    sender,
    70  		Code:      code.CodeTypeOK,
    71  		GasWanted: 1,
    72  	}
    73  }
    74  
    75  func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
    76  	t.Helper()
    77  
    78  	app := &application{kvstore.NewApplication()}
    79  	cc := proxy.NewLocalClientCreator(app)
    80  
    81  	cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
    82  	cfg.Mempool.CacheSize = cacheSize
    83  
    84  	appConnMem, err := cc.NewABCIClient()
    85  	require.NoError(t, err)
    86  	require.NoError(t, appConnMem.Start())
    87  
    88  	t.Cleanup(func() {
    89  		os.RemoveAll(cfg.RootDir)
    90  		require.NoError(t, appConnMem.Stop())
    91  	})
    92  
    93  	return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
    94  }
    95  
    96  // mustCheckTx invokes txmp.CheckTx for the given transaction and waits until
    97  // its callback has finished executing. It fails t if CheckTx fails.
    98  func mustCheckTx(t *testing.T, txmp *TxMempool, spec string) {
    99  	done := make(chan struct{})
   100  	if err := txmp.CheckTx([]byte(spec), func(*abci.Response) {
   101  		close(done)
   102  	}, mempool.TxInfo{}); err != nil {
   103  		t.Fatalf("CheckTx for %q failed: %v", spec, err)
   104  	}
   105  	<-done
   106  }
   107  
   108  func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
   109  	txs := make([]testTx, numTxs)
   110  	txInfo := mempool.TxInfo{SenderID: peerID}
   111  
   112  	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
   113  
   114  	for i := 0; i < numTxs; i++ {
   115  		prefix := make([]byte, 20)
   116  		_, err := rng.Read(prefix)
   117  		require.NoError(t, err)
   118  
   119  		priority := int64(rng.Intn(9999-1000) + 1000)
   120  
   121  		txs[i] = testTx{
   122  			tx:       []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
   123  			priority: priority,
   124  		}
   125  		require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo))
   126  	}
   127  
   128  	return txs
   129  }
   130  
   131  func TestTxMempool_TxsAvailable(t *testing.T) {
   132  	txmp := setup(t, 0)
   133  	txmp.EnableTxsAvailable()
   134  
   135  	ensureNoTxFire := func() {
   136  		timer := time.NewTimer(500 * time.Millisecond)
   137  		select {
   138  		case <-txmp.TxsAvailable():
   139  			require.Fail(t, "unexpected transactions event")
   140  		case <-timer.C:
   141  		}
   142  	}
   143  
   144  	ensureTxFire := func() {
   145  		timer := time.NewTimer(500 * time.Millisecond)
   146  		select {
   147  		case <-txmp.TxsAvailable():
   148  		case <-timer.C:
   149  			require.Fail(t, "expected transactions event")
   150  		}
   151  	}
   152  
   153  	// ensure no event as we have not executed any transactions yet
   154  	ensureNoTxFire()
   155  
   156  	// Execute CheckTx for some transactions and ensure TxsAvailable only fires
   157  	// once.
   158  	txs := checkTxs(t, txmp, 100, 0)
   159  	ensureTxFire()
   160  	ensureNoTxFire()
   161  
   162  	rawTxs := make([]types.Tx, len(txs))
   163  	for i, tx := range txs {
   164  		rawTxs[i] = tx.tx
   165  	}
   166  
   167  	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
   168  	for i := 0; i < len(responses); i++ {
   169  		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
   170  	}
   171  
   172  	// commit half the transactions and ensure we fire an event
   173  	txmp.Lock()
   174  	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
   175  	txmp.Unlock()
   176  	ensureTxFire()
   177  	ensureNoTxFire()
   178  
   179  	// Execute CheckTx for more transactions and ensure we do not fire another
   180  	// event as we're still on the same height (1).
   181  	_ = checkTxs(t, txmp, 100, 0)
   182  	ensureNoTxFire()
   183  }
   184  
   185  func TestTxMempool_Size(t *testing.T) {
   186  	txmp := setup(t, 0)
   187  	txs := checkTxs(t, txmp, 100, 0)
   188  	require.Equal(t, len(txs), txmp.Size())
   189  	require.Equal(t, int64(5690), txmp.SizeBytes())
   190  
   191  	rawTxs := make([]types.Tx, len(txs))
   192  	for i, tx := range txs {
   193  		rawTxs[i] = tx.tx
   194  	}
   195  
   196  	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
   197  	for i := 0; i < len(responses); i++ {
   198  		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
   199  	}
   200  
   201  	txmp.Lock()
   202  	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
   203  	txmp.Unlock()
   204  
   205  	require.Equal(t, len(rawTxs)/2, txmp.Size())
   206  	require.Equal(t, int64(2850), txmp.SizeBytes())
   207  }
   208  
   209  func TestTxMempool_Eviction(t *testing.T) {
   210  	txmp := setup(t, 1000)
   211  	txmp.config.Size = 5
   212  	txmp.config.MaxTxsBytes = 60
   213  	txExists := func(spec string) bool {
   214  		txmp.Lock()
   215  		defer txmp.Unlock()
   216  		key := types.Tx(spec).Key()
   217  		_, ok := txmp.txByKey[key]
   218  		return ok
   219  	}
   220  
   221  	// A transaction bigger than the mempool should be rejected even when there
   222  	// are slots available.
   223  	mustCheckTx(t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1")
   224  	require.Equal(t, 0, txmp.Size())
   225  
   226  	// Nearly-fill the mempool with a low-priority transaction, to show that it
   227  	// is evicted even when slots are available for a higher-priority tx.
   228  	const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"
   229  	mustCheckTx(t, txmp, bigTx)
   230  	require.Equal(t, 1, txmp.Size()) // bigTx is the only element
   231  	require.True(t, txExists(bigTx))
   232  	require.Equal(t, int64(len(bigTx)), txmp.SizeBytes())
   233  
   234  	// The next transaction should evict bigTx, because it is higher priority
   235  	// but does not fit on size.
   236  	mustCheckTx(t, txmp, "key1=0000=25")
   237  	require.True(t, txExists("key1=0000=25"))
   238  	require.False(t, txExists(bigTx))
   239  	require.False(t, txmp.cache.Has([]byte(bigTx)))
   240  	require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes())
   241  
   242  	// Now fill up the rest of the slots with other transactions.
   243  	mustCheckTx(t, txmp, "key2=0001=5")
   244  	mustCheckTx(t, txmp, "key3=0002=10")
   245  	mustCheckTx(t, txmp, "key4=0003=3")
   246  	mustCheckTx(t, txmp, "key5=0004=3")
   247  
   248  	// A new transaction with low priority should be discarded.
   249  	mustCheckTx(t, txmp, "key6=0005=1")
   250  	require.False(t, txExists("key6=0005=1"))
   251  
   252  	// A new transaction with higher priority should evict key5, which is the
   253  	// newest of the two transactions with lowest priority.
   254  	mustCheckTx(t, txmp, "key7=0006=7")
   255  	require.True(t, txExists("key7=0006=7"))  // new transaction added
   256  	require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted
   257  	require.True(t, txExists("key4=0003=3"))  // older low-priority tx retained
   258  
   259  	// Another new transaction evicts the other low-priority element.
   260  	mustCheckTx(t, txmp, "key8=0007=20")
   261  	require.True(t, txExists("key8=0007=20"))
   262  	require.False(t, txExists("key4=0003=3"))
   263  
   264  	// Now the lowest-priority tx is 5, so that should be the next to go.
   265  	mustCheckTx(t, txmp, "key9=0008=9")
   266  	require.True(t, txExists("key9=0008=9"))
   267  	require.False(t, txExists("k3y2=0001=5"))
   268  
   269  	// Add a transaction that requires eviction of multiple lower-priority
   270  	// entries, in order to fit the size of the element.
   271  	mustCheckTx(t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11
   272  	require.True(t, txExists("key1=0000=25"))
   273  	require.True(t, txExists("key8=0007=20"))
   274  	require.True(t, txExists("key10=0123456789abcdef=11"))
   275  	require.False(t, txExists("key3=0002=10"))
   276  	require.False(t, txExists("key9=0008=9"))
   277  	require.False(t, txExists("key7=0006=7"))
   278  }
   279  
   280  func TestTxMempool_Flush(t *testing.T) {
   281  	txmp := setup(t, 0)
   282  	txs := checkTxs(t, txmp, 100, 0)
   283  	require.Equal(t, len(txs), txmp.Size())
   284  	require.Equal(t, int64(5690), txmp.SizeBytes())
   285  
   286  	rawTxs := make([]types.Tx, len(txs))
   287  	for i, tx := range txs {
   288  		rawTxs[i] = tx.tx
   289  	}
   290  
   291  	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
   292  	for i := 0; i < len(responses); i++ {
   293  		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
   294  	}
   295  
   296  	txmp.Lock()
   297  	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
   298  	txmp.Unlock()
   299  
   300  	txmp.Flush()
   301  	require.Zero(t, txmp.Size())
   302  	require.Equal(t, int64(0), txmp.SizeBytes())
   303  }
   304  
   305  func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
   306  	txmp := setup(t, 0)
   307  	tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
   308  	require.Equal(t, len(tTxs), txmp.Size())
   309  	require.Equal(t, int64(5690), txmp.SizeBytes())
   310  
   311  	txMap := make(map[types.TxKey]testTx)
   312  	priorities := make([]int64, len(tTxs))
   313  	for i, tTx := range tTxs {
   314  		txMap[tTx.tx.Key()] = tTx
   315  		priorities[i] = tTx.priority
   316  	}
   317  
   318  	sort.Slice(priorities, func(i, j int) bool {
   319  		// sort by priority, i.e. decreasing order
   320  		return priorities[i] > priorities[j]
   321  	})
   322  
   323  	ensurePrioritized := func(reapedTxs types.Txs) {
   324  		reapedPriorities := make([]int64, len(reapedTxs))
   325  		for i, rTx := range reapedTxs {
   326  			reapedPriorities[i] = txMap[rTx.Key()].priority
   327  		}
   328  
   329  		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
   330  	}
   331  
   332  	// reap by gas capacity only
   333  	reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
   334  	ensurePrioritized(reapedTxs)
   335  	require.Equal(t, len(tTxs), txmp.Size())
   336  	require.Equal(t, int64(5690), txmp.SizeBytes())
   337  	require.Len(t, reapedTxs, 50)
   338  
   339  	// reap by transaction bytes only
   340  	reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
   341  	ensurePrioritized(reapedTxs)
   342  	require.Equal(t, len(tTxs), txmp.Size())
   343  	require.Equal(t, int64(5690), txmp.SizeBytes())
   344  	require.GreaterOrEqual(t, len(reapedTxs), 16)
   345  
   346  	// Reap by both transaction bytes and gas, where the size yields 31 reaped
   347  	// transactions and the gas limit reaps 25 transactions.
   348  	reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
   349  	ensurePrioritized(reapedTxs)
   350  	require.Equal(t, len(tTxs), txmp.Size())
   351  	require.Equal(t, int64(5690), txmp.SizeBytes())
   352  	require.Len(t, reapedTxs, 25)
   353  }
   354  
   355  func TestTxMempool_ReapMaxTxs(t *testing.T) {
   356  	txmp := setup(t, 0)
   357  	tTxs := checkTxs(t, txmp, 100, 0)
   358  	require.Equal(t, len(tTxs), txmp.Size())
   359  	require.Equal(t, int64(5690), txmp.SizeBytes())
   360  
   361  	txMap := make(map[types.TxKey]testTx)
   362  	priorities := make([]int64, len(tTxs))
   363  	for i, tTx := range tTxs {
   364  		txMap[tTx.tx.Key()] = tTx
   365  		priorities[i] = tTx.priority
   366  	}
   367  
   368  	sort.Slice(priorities, func(i, j int) bool {
   369  		// sort by priority, i.e. decreasing order
   370  		return priorities[i] > priorities[j]
   371  	})
   372  
   373  	ensurePrioritized := func(reapedTxs types.Txs) {
   374  		reapedPriorities := make([]int64, len(reapedTxs))
   375  		for i, rTx := range reapedTxs {
   376  			reapedPriorities[i] = txMap[rTx.Key()].priority
   377  		}
   378  
   379  		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
   380  	}
   381  
   382  	// reap all transactions
   383  	reapedTxs := txmp.ReapMaxTxs(-1)
   384  	ensurePrioritized(reapedTxs)
   385  	require.Equal(t, len(tTxs), txmp.Size())
   386  	require.Equal(t, int64(5690), txmp.SizeBytes())
   387  	require.Len(t, reapedTxs, len(tTxs))
   388  
   389  	// reap a single transaction
   390  	reapedTxs = txmp.ReapMaxTxs(1)
   391  	ensurePrioritized(reapedTxs)
   392  	require.Equal(t, len(tTxs), txmp.Size())
   393  	require.Equal(t, int64(5690), txmp.SizeBytes())
   394  	require.Len(t, reapedTxs, 1)
   395  
   396  	// reap half of the transactions
   397  	reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
   398  	ensurePrioritized(reapedTxs)
   399  	require.Equal(t, len(tTxs), txmp.Size())
   400  	require.Equal(t, int64(5690), txmp.SizeBytes())
   401  	require.Len(t, reapedTxs, len(tTxs)/2)
   402  }
   403  
   404  func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
   405  	txmp := setup(t, 0)
   406  
   407  	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
   408  	tx := make([]byte, txmp.config.MaxTxBytes+1)
   409  	_, err := rng.Read(tx)
   410  	require.NoError(t, err)
   411  
   412  	require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
   413  
   414  	tx = make([]byte, txmp.config.MaxTxBytes-1)
   415  	_, err = rng.Read(tx)
   416  	require.NoError(t, err)
   417  
   418  	require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
   419  }
   420  
   421  func TestTxMempool_CheckTxSamePeer(t *testing.T) {
   422  	txmp := setup(t, 100)
   423  	peerID := uint16(1)
   424  	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
   425  
   426  	prefix := make([]byte, 20)
   427  	_, err := rng.Read(prefix)
   428  	require.NoError(t, err)
   429  
   430  	tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50))
   431  
   432  	require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
   433  	require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
   434  }
   435  
   436  func TestTxMempool_CheckTxSameSender(t *testing.T) {
   437  	txmp := setup(t, 100)
   438  	peerID := uint16(1)
   439  	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
   440  
   441  	prefix1 := make([]byte, 20)
   442  	_, err := rng.Read(prefix1)
   443  	require.NoError(t, err)
   444  
   445  	prefix2 := make([]byte, 20)
   446  	_, err = rng.Read(prefix2)
   447  	require.NoError(t, err)
   448  
   449  	tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50))
   450  	tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50))
   451  
   452  	require.NoError(t, txmp.CheckTx(tx1, nil, mempool.TxInfo{SenderID: peerID}))
   453  	require.Equal(t, 1, txmp.Size())
   454  	require.NoError(t, txmp.CheckTx(tx2, nil, mempool.TxInfo{SenderID: peerID}))
   455  	require.Equal(t, 1, txmp.Size())
   456  }
   457  
// TestTxMempool_ConcurrentTxs runs one goroutine that continuously submits
// transactions via CheckTx alongside another that periodically reaps and
// commits them via Update. When both finish, the mempool must be empty,
// showing that concurrent CheckTx/Reap/Update do not lose or strand
// transactions.
func TestTxMempool_ConcurrentTxs(t *testing.T) {
	txmp := setup(t, 100)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Closed by the producer goroutine so the consumer knows when it may stop.
	checkTxDone := make(chan struct{})

	var wg sync.WaitGroup

	// Producer: 20 batches of 100 transactions with random 500-1000ms pauses.
	wg.Add(1)
	go func() {
		for i := 0; i < 20; i++ {
			_ = checkTxs(t, txmp, 100, 0)
			dur := rng.Intn(1000-500) + 500
			time.Sleep(time.Duration(dur) * time.Millisecond)
		}

		wg.Done()
		close(checkTxDone)
	}()

	// Consumer: once per second, reap up to 200 transactions and commit them
	// at increasing heights. Every 10th response uses a non-OK code (100) to
	// exercise Update's handling of failed transactions as well.
	wg.Add(1)
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		defer wg.Done()

		var height int64 = 1

		for range ticker.C {
			reapedTxs := txmp.ReapMaxTxs(200)
			if len(reapedTxs) > 0 {
				responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
				for i := 0; i < len(responses); i++ {
					var code uint32

					if i%10 == 0 {
						code = 100
					} else {
						code = abci.CodeTypeOK
					}

					responses[i] = &abci.ResponseDeliverTx{Code: code}
				}

				txmp.Lock()
				require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil))
				txmp.Unlock()

				height++
			} else {
				// only return once we know we finished the CheckTx loop
				select {
				case <-checkTxDone:
					return
				default:
				}
			}
		}
	}()

	wg.Wait()
	// With the producer done and nothing left to reap, the pool must be empty.
	require.Zero(t, txmp.Size())
	require.Zero(t, txmp.SizeBytes())
}
   521  
// TestTxMempool_ExpiredTxs_Timestamp verifies time-based TTL expiry: after an
// Update triggers pruning, transactions older than config.TTLDuration are
// removed from both the mempool and the cache, while younger ones survive.
func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) {
	txmp := setup(t, 5000)
	txmp.config.TTLDuration = 5 * time.Millisecond

	added1 := checkTxs(t, txmp, 10, 0)
	require.Equal(t, len(added1), txmp.Size())

	// Wait a while, then add some more transactions that should not be expired
	// when the first batch TTLs out.
	//
	// ms: 0   1   2   3   4   5   6
	//     ^           ^       ^   ^
	//     |           |       |   +-- Update (triggers pruning)
	//     |           |       +------ first batch expires
	//     |           +-------------- second batch added
	//     +-------------------------- first batch added
	//
	// The exact intervals are not important except that the delta should be
	// large relative to the cost of CheckTx (ms vs. ns is fine here).
	time.Sleep(3 * time.Millisecond)
	added2 := checkTxs(t, txmp, 10, 1)

	// Wait a while longer, so that the first batch will expire.
	time.Sleep(3 * time.Millisecond)

	// Trigger an update so that pruning will occur.
	// NOTE: the lock is held (deferred Unlock) for the rest of the test, so
	// txByKey can be inspected directly below.
	txmp.Lock()
	defer txmp.Unlock()
	require.NoError(t, txmp.Update(txmp.height+1, nil, nil, nil, nil))

	// All the transactions in the original set should have been purged.
	for _, tx := range added1 {
		if _, ok := txmp.txByKey[tx.tx.Key()]; ok {
			t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key())
		}
		if txmp.cache.Has(tx.tx) {
			t.Errorf("Transaction %X should have been removed from the cache", tx.tx.Key())
		}
	}

	// All the transactions added later should still be around.
	for _, tx := range added2 {
		if _, ok := txmp.txByKey[tx.tx.Key()]; !ok {
			t.Errorf("Transaction %X should still be in the mempool, but is not", tx.tx.Key())
		}
	}
}
   569  
   570  func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
   571  	txmp := setup(t, 500)
   572  	txmp.height = 100
   573  	txmp.config.TTLNumBlocks = 10
   574  
   575  	tTxs := checkTxs(t, txmp, 100, 0)
   576  	require.Equal(t, len(tTxs), txmp.Size())
   577  
   578  	// reap 5 txs at the next height -- no txs should expire
   579  	reapedTxs := txmp.ReapMaxTxs(5)
   580  	responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
   581  	for i := 0; i < len(responses); i++ {
   582  		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
   583  	}
   584  
   585  	txmp.Lock()
   586  	require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
   587  	txmp.Unlock()
   588  
   589  	require.Equal(t, 95, txmp.Size())
   590  
   591  	// check more txs at height 101
   592  	_ = checkTxs(t, txmp, 50, 1)
   593  	require.Equal(t, 145, txmp.Size())
   594  
   595  	// Reap 5 txs at a height that would expire all the transactions from before
   596  	// the previous Update (height 100).
   597  	//
   598  	// NOTE: When we reap txs below, we do not know if we're picking txs from the
   599  	// initial CheckTx calls or from the second round of CheckTx calls. Thus, we
   600  	// cannot guarantee that all 95 txs are remaining that should be expired and
   601  	// removed. However, we do know that that at most 95 txs can be expired and
   602  	// removed.
   603  	reapedTxs = txmp.ReapMaxTxs(5)
   604  	responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
   605  	for i := 0; i < len(responses); i++ {
   606  		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
   607  	}
   608  
   609  	txmp.Lock()
   610  	require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
   611  	txmp.Unlock()
   612  
   613  	require.GreaterOrEqual(t, txmp.Size(), 45)
   614  }
   615  
   616  func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
   617  	cases := []struct {
   618  		name string
   619  		err  error
   620  	}{
   621  		{
   622  			name: "error",
   623  			err:  errors.New("test error"),
   624  		},
   625  		{
   626  			name: "no error",
   627  			err:  nil,
   628  		},
   629  	}
   630  	for _, tc := range cases {
   631  		testCase := tc
   632  		t.Run(testCase.name, func(t *testing.T) {
   633  			postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
   634  				return testCase.err
   635  			}
   636  			txmp := setup(t, 0, WithPostCheck(postCheckFn))
   637  			rng := rand.New(rand.NewSource(time.Now().UnixNano()))
   638  			tx := make([]byte, txmp.config.MaxTxBytes-1)
   639  			_, err := rng.Read(tx)
   640  			require.NoError(t, err)
   641  
   642  			callback := func(res *abci.Response) {
   643  				checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
   644  				require.True(t, ok)
   645  				expectedErrString := ""
   646  				if testCase.err != nil {
   647  					expectedErrString = testCase.err.Error()
   648  				}
   649  				require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
   650  			}
   651  			require.NoError(t, txmp.CheckTx(tx, callback, mempool.TxInfo{SenderID: 0}))
   652  		})
   653  	}
   654  }