github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/mempool/cat/pool_test.go

package cat

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"math/rand"
	"os"
	"sort"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/badrootd/celestia-core/abci/example/code"
	"github.com/badrootd/celestia-core/abci/example/kvstore"
	abci "github.com/badrootd/celestia-core/abci/types"
	"github.com/badrootd/celestia-core/config"
	"github.com/badrootd/celestia-core/libs/log"
	"github.com/badrootd/celestia-core/mempool"
	"github.com/badrootd/celestia-core/pkg/consts"
	tmproto "github.com/badrootd/celestia-core/proto/tendermint/types"
	"github.com/badrootd/celestia-core/proxy"
	"github.com/badrootd/celestia-core/types"
)

// application extends the KV store application by overriding CheckTx to provide
// transaction priority based on the value in the key/value pair.
type application struct {
	*kvstore.Application
}

type testTx struct {
	tx       types.Tx
	priority int64
}

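// newTx encodes a transaction in the sender=key=priority wire format parsed
// by application.CheckTx below; newDefaultTx is a convenience wrapper for a
// single default sender with priority 1.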
func newTx(i int, peerID uint16, msg []byte, priority int64) []byte {
	return []byte(fmt.Sprintf("sender-%03d-%d=%X=%d", i, peerID, msg, priority))
}

func newDefaultTx(msg string) types.Tx {
	return types.Tx(newTx(0, 0, []byte(msg), 1))
}

func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	var (
		priority int64
		sender   string
	)

	// infer the priority from the raw transaction value (sender=key=value)
	parts := bytes.Split(req.Tx, []byte("="))
	if len(parts) == 3 {
		v, err := strconv.ParseInt(string(parts[2]), 10, 64)
		if err != nil {
			return abci.ResponseCheckTx{
				Priority:  priority,
				Code:      100,
				GasWanted: 1,
			}
		}

		priority = v
		sender = string(parts[0])
	} else {
		return abci.ResponseCheckTx{
			Priority:  priority,
			Code:      101,
			GasWanted: 1,
		}
	}

	return abci.ResponseCheckTx{
		Priority:  priority,
		Sender:    sender,
		Code:      code.CodeTypeOK,
		GasWanted: 1,
	}
}

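// The test below is an illustrative addition (not part of the upstream suite):
// it exercises the parsing rules implemented by application.CheckTx above,
// covering the OK path, a non-numeric priority (code 100), and a malformed
// transaction with the wrong number of parts (code 101).
func TestApplicationCheckTxPriorityParsing(t *testing.T) {
	app := &application{kvstore.NewApplication()}

	// well-formed sender=key=priority transaction
	res := app.CheckTx(abci.RequestCheckTx{Tx: newTx(1, 1, []byte("msg"), 42)})
	require.Equal(t, code.CodeTypeOK, res.Code)
	require.EqualValues(t, 42, res.Priority)
	require.Equal(t, "sender-001-1", res.Sender)

	// a non-numeric priority is rejected with code 100
	res = app.CheckTx(abci.RequestCheckTx{Tx: []byte("sender=key=notanumber")})
	require.EqualValues(t, 100, res.Code)

	// a tx without two '=' separators is rejected with code 101
	res = app.CheckTx(abci.RequestCheckTx{Tx: []byte("no-separators")})
	require.EqualValues(t, 101, res.Code)
}

// setup constructs a TxPool backed by an in-process kvstore application with
// the given cache size and options, registering cleanup on t.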
func setup(t testing.TB, cacheSize int, options ...TxPoolOption) *TxPool {
	t.Helper()

	app := &application{kvstore.NewApplication()}
	cc := proxy.NewLocalClientCreator(app)

	cfg := config.TestMempoolConfig()
	cfg.CacheSize = cacheSize

	appConnMem, err := cc.NewABCIClient()
	require.NoError(t, err)
	require.NoError(t, appConnMem.Start())

	t.Cleanup(func() {
		os.RemoveAll(cfg.RootDir)
		require.NoError(t, appConnMem.Stop())
	})

	return NewTxPool(log.TestingLogger().With("test", t.Name()), cfg, appConnMem, 1, options...)
}

// mustCheckTx invokes txmp.CheckTx for the given transaction spec and fails t
// if CheckTx returns an error.
func mustCheckTx(t *testing.T, txmp *TxPool, spec string) {
	require.NoError(t, txmp.CheckTx([]byte(spec), nil, mempool.TxInfo{}))
}

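// checkTxs submits numTxs transactions with random 20-byte payloads and
// random priorities in [1000, 9999) to txmp on behalf of peerID, asserting
// that each one is admitted without anything being evicted, and returns the
// generated transactions.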
func checkTxs(t *testing.T, txmp *TxPool, numTxs int, peerID uint16) []testTx {
	txs := make([]testTx, numTxs)
	txInfo := mempool.TxInfo{SenderID: peerID}

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	current := txmp.Size()
	for i := 0; i < numTxs; i++ {
		prefix := make([]byte, 20)
		_, err := rng.Read(prefix)
		require.NoError(t, err)

		priority := int64(rng.Intn(9999-1000) + 1000)

		txs[i] = testTx{
			tx:       newTx(i, peerID, prefix, priority),
			priority: priority,
		}
		require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo))
		// assert that none of them get silently evicted
		require.Equal(t, current+i+1, txmp.Size())
	}

	return txs
}

func TestTxPool_TxsAvailable(t *testing.T) {
	txmp := setup(t, 0)
	txmp.EnableTxsAvailable()

	ensureNoTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
			require.Fail(t, "unexpected transactions event")
		case <-timer.C:
		}
	}

	ensureTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
		case <-timer.C:
			require.Fail(t, "expected transactions event")
		}
	}

	// ensure no event as we have not executed any transactions yet
	ensureNoTxFire()

	// Execute CheckTx for some transactions and ensure TxsAvailable only fires
	// once.
	txs := checkTxs(t, txmp, 100, 0)
	ensureTxFire()
	ensureNoTxFire()

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	require.Equal(t, 100, txmp.Size())

	// commit half the transactions and ensure we fire an event
	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()
	ensureTxFire()
	ensureNoTxFire()

	// Execute CheckTx for more transactions and ensure we do not fire another
	// event as we're still on the same height (1).
	_ = checkTxs(t, txmp, 100, 0)
	ensureNoTxFire()
}

func TestTxPool_Size(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
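	// each tx generated by checkTxs is 58 bytes, so 100 txs occupy 5800 bytes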
	require.Equal(t, int64(5800), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, len(rawTxs)/2, txmp.Size())
	require.Equal(t, int64(2900), txmp.SizeBytes())
}

func TestTxPool_Eviction(t *testing.T) {
	txmp := setup(t, 1000)
	txmp.config.Size = 5
	txmp.config.MaxTxsBytes = 60
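	// The pool now holds at most 5 transactions totalling at most 60 bytes;
	// the tx specs below are sized against these two limits.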
	txExists := func(spec string) bool {
		return txmp.Has(types.Tx(spec).Key())
	}

	// A transaction bigger than the mempool should be rejected even when there
	// are slots available.
	err := txmp.CheckTx(types.Tx("big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1"), nil, mempool.TxInfo{})
	require.Error(t, err)
	require.Contains(t, err.Error(), "mempool is full")
	require.Equal(t, 0, txmp.Size())

	// Nearly fill the mempool's byte budget with a single low-priority
	// transaction, to show that it is evicted for a higher-priority tx even
	// when free slots remain.
	const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"
	mustCheckTx(t, txmp, bigTx)
	require.Equal(t, 1, txmp.Size()) // bigTx is the only element
	require.True(t, txExists(bigTx))
	require.Equal(t, int64(len(bigTx)), txmp.SizeBytes())

	// The next transaction has higher priority, but the byte budget cannot
	// hold both, so bigTx should be evicted to make room for it.
	mustCheckTx(t, txmp, "key1=0000=25")
	require.True(t, txExists("key1=0000=25"))
	require.False(t, txExists(bigTx))
	require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes())

	// Now fill up the rest of the slots with other transactions.
	mustCheckTx(t, txmp, "key2=0001=5")
	mustCheckTx(t, txmp, "key3=0002=10")
	mustCheckTx(t, txmp, "key4=0003=3")
	mustCheckTx(t, txmp, "key5=0004=3")

	// A new transaction with low priority should be discarded.
	err = txmp.CheckTx(types.Tx("key6=0005=1"), nil, mempool.TxInfo{})
	require.Error(t, err)
	require.Contains(t, err.Error(), "mempool is full")
	require.False(t, txExists("key6=0005=1"))

	// A new transaction with higher priority should evict key5, which is the
	// newest of the two transactions with lowest priority.
	mustCheckTx(t, txmp, "key7=0006=7")
	require.True(t, txExists("key7=0006=7"))  // new transaction added
	require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted
	require.True(t, txExists("key4=0003=3"))  // older low-priority tx retained

	// Another new transaction evicts the other low-priority element.
	mustCheckTx(t, txmp, "key8=0007=20")
	require.True(t, txExists("key8=0007=20"))
	require.False(t, txExists("key4=0003=3"))

	// Now the lowest-priority tx is 5, so that should be the next to go.
	mustCheckTx(t, txmp, "key9=0008=9")
	require.True(t, txExists("key9=0008=9"))
	require.False(t, txExists("key2=0001=5"))

	// Add a transaction that requires eviction of multiple lower-priority
	// entries, in order to fit the size of the element.
	mustCheckTx(t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11
	require.True(t, txExists("key1=0000=25"))
	require.True(t, txExists("key8=0007=20"))
	require.True(t, txExists("key10=0123456789abcdef=11"))
	require.False(t, txExists("key3=0002=10"))
	require.False(t, txExists("key9=0008=9"))
	require.False(t, txExists("key7=0006=7"))

	// Free up some space so we can add back previously evicted txs
	err = txmp.Update(1, types.Txs{types.Tx("key10=0123456789abcdef=11")}, []*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}}, nil, nil)
	require.NoError(t, err)
	require.False(t, txExists("key10=0123456789abcdef=11"))
	mustCheckTx(t, txmp, "key3=0002=10")
	require.True(t, txExists("key3=0002=10"))

	// remove a high-priority tx directly by key and verify it leaves the pool
	require.NoError(t, txmp.RemoveTxByKey(types.Tx("key8=0007=20").Key()))
	require.False(t, txExists("key8=0007=20"))
}
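
// An illustrative check (added here, not upstream) of the byte arithmetic the
// eviction test relies on: a tx counts len(tx) bytes against MaxTxsBytes, so
// the 70-byte tx above can never fit in the 60-byte pool, while the 59-byte
// bigTx nearly fills it.
func TestEvictionFixtureSizes(t *testing.T) {
	require.Equal(t, 70, len("big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1"))
	require.Equal(t, 59, len("big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"))
}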

func TestTxPool_Flush(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	txmp.Flush()
	require.Zero(t, txmp.Size())
	require.Equal(t, int64(0), txmp.SizeBytes())
}

func TestTxPool_ReapMaxBytesMaxGas(t *testing.T) {
	txmp := setup(t, 0)
	tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())

	txMap := make(map[types.TxKey]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[tTx.tx.Key()] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort priorities in decreasing order
		return priorities[i] > priorities[j]
	})

	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[rTx.Key()].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap by gas capacity only
	reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())
	require.Len(t, reapedTxs, 50)

	// reap by transaction bytes only
	reapedTxs = txmp.ReapMaxBytesMaxGas(1200, -1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())
	// each tx is 58 bytes; with ~2 bytes of proto framing apiece, 20 txs
	// consume the full 1200-byte limit
	require.Equal(t, 20, len(reapedTxs))

	// Reap by both transaction bytes and gas: the 2000-byte limit would admit
	// more than 25 transactions, so the 25-transaction gas limit binds.
	reapedTxs = txmp.ReapMaxBytesMaxGas(2000, 25)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())
	require.Len(t, reapedTxs, 25)
}

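// An illustrative sketch (added, not upstream) of the sizes behind the reap
// test above: every tx from checkTxs is 58 bytes ("sender-XXX-P=" plus 40 hex
// characters, "=", and a 4-digit priority), which is why 100 txs account for
// 5800 mempool bytes throughout this file and, with ~2 bytes of proto framing
// per tx, 20 of them fill a 1200-byte reap.
func TestCheckTxsFixtureSize(t *testing.T) {
	tx := newTx(0, 0, make([]byte, 20), 1000)
	require.Equal(t, 58, len(tx))
}

// TestTxMempoolTxLargerThanMaxBytes verifies that reaping skips a
// high-priority tx that exceeds the byte limit and still returns a smaller,
// lower-priority tx that fits.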
func TestTxMempoolTxLargerThanMaxBytes(t *testing.T) {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	txmp := setup(t, 0)
	bigPrefix := make([]byte, 100)
	_, err := rng.Read(bigPrefix)
	require.NoError(t, err)
	// large high priority tx
	bigTx := []byte(fmt.Sprintf("sender-1-1=%X=2", bigPrefix))
	smallPrefix := make([]byte, 20)
	_, err = rng.Read(smallPrefix)
	require.NoError(t, err)
	// smaller low priority tx with different sender
	smallTx := []byte(fmt.Sprintf("sender-2-1=%X=1", smallPrefix))
	require.NoError(t, txmp.CheckTx(bigTx, nil, mempool.TxInfo{SenderID: 1}))
	require.NoError(t, txmp.CheckTx(smallTx, nil, mempool.TxInfo{SenderID: 1}))

	// reap by max bytes less than the large tx
	reapedTxs := txmp.ReapMaxBytesMaxGas(100, -1)
	require.Len(t, reapedTxs, 1)
	require.Equal(t, types.Tx(smallTx), reapedTxs[0])
}

func TestTxPool_ReapMaxTxs(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())

	txMap := make(map[types.TxKey]int64)
	for _, tx := range txs {
		txMap[tx.tx.Key()] = tx.priority
	}

	ensurePrioritized := func(reapedTxs types.Txs) {
		for i := 0; i < len(reapedTxs)-1; i++ {
			currPriority := txMap[reapedTxs[i].Key()]
			nextPriority := txMap[reapedTxs[i+1].Key()]
			require.GreaterOrEqual(t, currPriority, nextPriority)
		}
	}

	// reap all transactions
	reapedTxs := txmp.ReapMaxTxs(-1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(txs))

	// reap a single transaction
	reapedTxs = txmp.ReapMaxTxs(1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())
	require.Len(t, reapedTxs, 1)

	// reap half of the transactions
	reapedTxs = txmp.ReapMaxTxs(len(txs) / 2)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(txs), txmp.Size())
	require.Equal(t, int64(5800), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(txs)/2)
}

func TestTxPool_CheckTxExceedsMaxSize(t *testing.T) {
	txmp := setup(t, 0)

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	tx := make([]byte, txmp.config.MaxTxBytes+1)
	_, err := rng.Read(tx)
	require.NoError(t, err)

	err = txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})
	require.Equal(t, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)}, err)

	tx = make([]byte, txmp.config.MaxTxBytes-1)
	_, err = rng.Read(tx)
	require.NoError(t, err)

	err = txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})
	require.NotEqual(t, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)}, err)
}

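// TestTxPool_CheckTxSamePeer verifies that the pool rejects a transaction it
// has already seen from the same sender.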
func TestTxPool_CheckTxSamePeer(t *testing.T) {
	txmp := setup(t, 100)
	peerID := uint16(1)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	prefix := make([]byte, 20)
	_, err := rng.Read(prefix)
	require.NoError(t, err)

	tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50))

	require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
	require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
}

func TestTxPool_ConcurrentTxs(t *testing.T) {
	txmp := setup(t, 100)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	checkTxDone := make(chan struct{})

	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		for i := 0; i < 20; i++ {
			_ = checkTxs(t, txmp, 100, 0)
			dur := rng.Intn(1000-500) + 500
			time.Sleep(time.Duration(dur) * time.Millisecond)
		}

		wg.Done()
		close(checkTxDone)
	}()

	wg.Add(1)
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		defer wg.Done()

		var height int64 = 1

		for range ticker.C {
			reapedTxs := txmp.ReapMaxTxs(200)
			if len(reapedTxs) > 0 {
				responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
				for i := 0; i < len(responses); i++ {
					var code uint32

					if i%10 == 0 {
						code = 100
					} else {
						code = abci.CodeTypeOK
					}

					responses[i] = &abci.ResponseDeliverTx{Code: code}
				}

				txmp.Lock()
				require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil))
				txmp.Unlock()

				height++
			} else {
				// only return once we know we finished the CheckTx loop
				select {
				case <-checkTxDone:
					return
				default:
				}
			}
		}
	}()

	wg.Wait()
	require.Zero(t, txmp.Size())
	require.Zero(t, txmp.SizeBytes())
}

func TestTxPool_ExpiredTxs_Timestamp(t *testing.T) {
	txmp := setup(t, 5000)
	txmp.config.TTLDuration = 5 * time.Millisecond

	added1 := checkTxs(t, txmp, 10, 0)
	require.Equal(t, len(added1), txmp.Size())

	// Wait a while, then add some more transactions that should not be expired
	// when the first batch TTLs out.
	//
	// ms: 0   1   2   3   4   5   6
	//     ^           ^       ^   ^
	//     |           |       |   +-- Update (triggers pruning)
	//     |           |       +------ first batch expires
	//     |           +-------------- second batch added
	//     +-------------------------- first batch added
	//
	// The exact intervals are not important except that the delta should be
	// large relative to the cost of CheckTx (ms vs. ns is fine here).
	time.Sleep(3 * time.Millisecond)
	added2 := checkTxs(t, txmp, 10, 1)

	// Wait a while longer, so that the first batch will expire.
	time.Sleep(3 * time.Millisecond)

	// Trigger an update so that pruning will occur.
	txmp.Lock()
	defer txmp.Unlock()
	require.NoError(t, txmp.Update(txmp.height+1, nil, nil, nil, nil))

	// All the transactions in the original set should have been purged.
	for _, tx := range added1 {
		if txmp.store.has(tx.tx.Key()) {
			t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key())
		}
		if txmp.rejectedTxCache.Has(tx.tx.Key()) {
			t.Errorf("Transaction %X should have been removed from the cache", tx.tx.Key())
		}
	}

	// All the transactions added later should still be around.
	for _, tx := range added2 {
		if !txmp.store.has(tx.tx.Key()) {
			t.Errorf("Transaction %X should still be in the mempool, but is not", tx.tx.Key())
		}
	}
}

func TestTxPool_ExpiredTxs_NumBlocks(t *testing.T) {
	txmp := setup(t, 500)
	txmp.height = 100
	txmp.config.TTLNumBlocks = 10

	tTxs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())

	// reap 5 txs at the next height -- no txs should expire
	reapedTxs := txmp.ReapMaxTxs(5)
	responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, 95, txmp.Size())

	// check more txs at height 101
	_ = checkTxs(t, txmp, 50, 1)
	require.Equal(t, 145, txmp.Size())

	// Reap 5 txs at a height that expires all transactions submitted before
	// the previous Update (height 100).
	//
	// NOTE: When we reap txs below, we do not know whether we are picking txs
	// from the initial CheckTx calls or from the second round, so we cannot
	// guarantee how many of the remaining 95 first-round txs will be expired
	// and removed. We do know, however, that at most 95 txs can be expired
	// and removed.
	reapedTxs = txmp.ReapMaxTxs(5)
	responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	require.GreaterOrEqual(t, txmp.Size(), 45)
}

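// TestTxPool_CheckTxPostCheckError ensures that an error returned by the
// configured postCheck function is propagated to the caller of CheckTx.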
func TestTxPool_CheckTxPostCheckError(t *testing.T) {
	cases := []struct {
		name string
		err  error
	}{
		{
			name: "error",
			err:  errors.New("test error"),
		},
		{
			name: "no error",
			err:  nil,
		},
	}
	for _, tc := range cases {
		testCase := tc
		t.Run(testCase.name, func(t *testing.T) {
			postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
				return testCase.err
			}
			txmp := setup(t, 0, WithPostCheck(postCheckFn))
			tx := []byte("sender=0000=1")
			err := txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})
			require.True(t, errors.Is(err, testCase.err))
		})
	}
}

func TestTxPool_RemoveBlobTx(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	cfg := config.TestMempoolConfig()
	cfg.CacheSize = 100

	appConnMem, err := cc.NewABCIClient()
	require.NoError(t, err)
	require.NoError(t, appConnMem.Start())

	t.Cleanup(func() {
		os.RemoveAll(cfg.RootDir)
		require.NoError(t, appConnMem.Stop())
	})

	txmp := NewTxPool(log.TestingLogger(), cfg, appConnMem, 1)

	originalTx := []byte{1, 2, 3, 4}
	indexWrapper, err := types.MarshalIndexWrapper(originalTx, 100)
	require.NoError(t, err)
	namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize)

	// create the blobTx
	b := tmproto.Blob{
		NamespaceId:      namespaceOne,
		Data:             []byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 9},
		ShareVersion:     0,
		NamespaceVersion: 0,
	}
	bTx, err := types.MarshalBlobTx(originalTx, &b)
	require.NoError(t, err)

	err = txmp.CheckTx(bTx, nil, mempool.TxInfo{})
	require.NoError(t, err)

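	// Both the blob tx checked in above and the index wrapper committed below
	// resolve to the key of originalTx, so this Update should evict the blob
	// tx and leave the pool empty.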
	err = txmp.Update(1, []types.Tx{indexWrapper}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	require.EqualValues(t, 0, txmp.Size())
	require.EqualValues(t, 0, txmp.SizeBytes())
}

func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
	responses := make([]*abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
	}
	return responses
}

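// TestTxPool_ConcurrentlyAddingTx races ten TryAddNewTx calls for the same
// transaction from different senders: exactly one call should win, and the
// other nine should fail with ErrTxInMempool.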
func TestTxPool_ConcurrentlyAddingTx(t *testing.T) {
	txmp := setup(t, 500)
	tx := types.Tx("sender=0000=1")

	numTxs := 10
	errCh := make(chan error, numTxs)
	wg := &sync.WaitGroup{}
	for i := 0; i < numTxs; i++ {
		wg.Add(1)
		go func(sender uint16) {
			defer wg.Done()
			_, err := txmp.TryAddNewTx(tx, tx.Key(), mempool.TxInfo{SenderID: sender})
			errCh <- err
		}(uint16(i + 1))
	}
	go func() {
		wg.Wait()
		close(errCh)
	}()

	errCount := 0
	for err := range errCh {
		if err != nil {
			require.Equal(t, ErrTxInMempool, err)
			errCount++
		}
	}
	require.Equal(t, numTxs-1, errCount)
}

func TestTxPool_BroadcastQueue(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	txmp := setup(t, 1)
	txs := 10

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < txs; i++ {
			select {
			case <-ctx.Done():
				assert.FailNowf(t, "failed to receive all txs", "got %d/%d", i+1, txs)
			case wtx := <-txmp.next():
				require.Equal(t, wtx.tx, newDefaultTx(fmt.Sprintf("%d", i)))
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	for i := 0; i < txs; i++ {
		tx := newDefaultTx(fmt.Sprintf("%d", i))
		require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
	}

	wg.Wait()
}