github.com/vipernet-xyz/tm@v0.34.24/mempool/v0/clist_mempool_test.go

package v0

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	mrand "math/rand"
	"os"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abciclient "github.com/vipernet-xyz/tm/abci/client"
	abciclimocks "github.com/vipernet-xyz/tm/abci/client/mocks"
	"github.com/vipernet-xyz/tm/abci/example/kvstore"
	abciserver "github.com/vipernet-xyz/tm/abci/server"
	abci "github.com/vipernet-xyz/tm/abci/types"
	"github.com/vipernet-xyz/tm/config"
	"github.com/vipernet-xyz/tm/libs/log"
	tmrand "github.com/vipernet-xyz/tm/libs/rand"
	"github.com/vipernet-xyz/tm/libs/service"
	"github.com/vipernet-xyz/tm/mempool"
	"github.com/vipernet-xyz/tm/proxy"
	"github.com/vipernet-xyz/tm/types"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

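// newMempoolWithAppMock builds a CListMempool backed by the given mocked ABCI
// client instead of a client created from cc, so tests can script CheckTx
// responses directly.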
func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) {
	conf := config.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client)
	return mp, cu, nil
}

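// newMempoolWithAppAndConfigMock wires the given (mock) ABCI client into a new
// CListMempool using cfg, and returns the mempool together with a cleanup
// function that removes the test root directory.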
func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator,
	cfg *config.Config,
	client abciclient.Client) (*CListMempool, cleanupFunc) {
	appConnMem := client
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
	mp.SetLogger(log.TestingLogger())

	return mp, func() { os.RemoveAll(cfg.RootDir) }
}

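// newMempoolWithApp creates a CListMempool backed by a fresh ABCI client from
// cc, using a throwaway test configuration.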
func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
	conf := config.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfig(cc, conf)
	return mp, cu
}

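// newMempoolWithAppAndConfig starts a mempool ABCI connection from cc and
// returns a CListMempool built from cfg plus a cleanup function that removes
// the test root directory.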
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, cfg *config.Config) (*CListMempool, cleanupFunc) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
	mp.SetLogger(log.TestingLogger())

	return mp, func() { os.RemoveAll(cfg.RootDir) }
}

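// ensureNoFire fails the test if ch fires within timeoutMS milliseconds.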
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

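// ensureFire fails the test if ch does not fire within timeoutMS milliseconds.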
func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}

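// checkTxs submits count random 20-byte transactions to the mempool via
// CheckTx, attributed to peerID, and returns them. Transactions rejected by
// the pre-check filter are skipped rather than treated as fatal.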
func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs {
	txs := make(types.Txs, count)
	txInfo := mempool.TxInfo{SenderID: peerID}
	for i := 0; i < count; i++ {
		txBytes := make([]byte, 20)
		txs[i] = txBytes
		_, err := rand.Read(txBytes)
		if err != nil {
			t.Error(err)
		}
		if err := mp.CheckTx(txBytes, nil, txInfo); err != nil {
			// Skip invalid txs.
			// TestMempoolFilters would fail otherwise, since it asserts the
			// number of txs returned.
			if mempool.IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
		}
	}
	return txs
}

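// TestReapMaxBytesMaxGas checks that ReapMaxBytesMaxGas respects both the
// byte and gas limits when selecting transactions from the mempool.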
func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// Ensure gas calculation behaves as expected
	checkTxs(t, mp, 1, mempool.UnknownPeerID)
	tx0 := mp.TxsFront().Value.(*mempoolTx)
	// assert that kv store has gas wanted = 1.
	require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// ensure each tx is 20 bytes long
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
	mp.Flush()

	// Each table-driven test creates numTxsToCreate txs with CheckTx and, at
	// the end, clears all remaining txs. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		maxBytes       int64
		maxGas         int64
		expectedNumTxs int
	}{
		{20, -1, -1, 20},
		{20, -1, 0, 0},
		{20, -1, 10, 10},
		{20, -1, 30, 20},
		{20, 0, -1, 0},
		{20, 0, 10, 0},
		{20, 10, 10, 0},
		{20, 24, 10, 1},
		{20, 240, 5, 5},
		{20, 240, -1, 10},
		{20, 240, 10, 10},
		{20, 240, 15, 10},
		{20, 20000, -1, 20},
		{20, 20000, 5, 5},
		{20, 20000, 30, 20},
	}
	for tcIndex, tt := range tests {
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mp.Flush()
	}
}

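// TestMempoolFilters checks that the pre-check and post-check filters passed
// to Update control which transactions are admitted to the mempool.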
func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx) error { return nil }
	nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

	// Each table-driven test creates numTxsToCreate txs with CheckTx and, at
	// the end, clears all remaining txs. Each tx is 20 bytes.
	tests := []struct {
		numTxsToCreate int
		preFilter      mempool.PreCheckFunc
		postFilter     mempool.PostCheckFunc
		expectedNumTxs int
	}{
		{10, nopPreFilter, nopPostFilter, 10},
		{10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0},
		{10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(0), 0},
		{10, nopPreFilter, mempool.PostCheckMaxGas(1), 10},
		{10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10},
		{10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0},
		{10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10},
		{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10},
		{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0},
	}
	for tcIndex, tt := range tests {
		err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
		require.NoError(t, err)
		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
		require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mp.Flush()
	}
}

func TestMempoolUpdate(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// 1. Adds valid txs to the cache
	{
		err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		err = mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}
	}

	// 2. Removes valid txs from the mempool
	{
		err := mp.CheckTx([]byte{0x02}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())
	}

	// 3. Removes invalid transactions from the cache and the mempool (if present)
	{
		err := mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
		err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
		require.NoError(t, err)
		assert.Zero(t, mp.Size())

		err = mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}

func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
	var callback abciclient.Callback
	mockClient := new(abciclimocks.Client)
	mockClient.On("Start").Return(nil)
	mockClient.On("SetLogger", mock.Anything)

	mockClient.On("Error").Return(nil).Times(4)
	mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil)
	mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))

	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup, err := newMempoolWithAppMock(cc, mockClient)
	require.NoError(t, err)
	defer cleanup()

	// Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them.
	txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}}
	for _, tx := range txs {
		reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
		reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})

		mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
		err := mp.CheckTx(tx, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// ensure that the callback that the mempool sets on the ReqRes is run.
		reqRes.InvokeCallback()
	}

	// Call Update to remove the first transaction from the mempool.
	// This call also triggers the mempool to recheck its remaining transactions.
	err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.Nil(t, err)

	// The mempool has now sent its requests off to the client to be rechecked
	// and is waiting for the corresponding callbacks to be called.
	// We now call the mempool-supplied callback on the first and third of the
	// remaining transactions (txs[1] and txs[3]).
	// This simulates the client dropping the second request (txs[2]).
	// Previous versions of this code panicked when the ABCI application missed
	// a recheck-tx request.
	resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK}
	req := abci.RequestCheckTx{Tx: txs[1]}
	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))

	req = abci.RequestCheckTx{Tx: txs[3]}
	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
	mockClient.AssertExpectations(t)
}

func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	wcfg := config.DefaultConfig()
	wcfg.Mempool.KeepInvalidTxsInCache = true
	mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
	defer cleanup()

	// 1. An invalid transaction must remain in the cache after Update
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		b := make([]byte, 8)
		binary.BigEndian.PutUint64(b, 1)

		err := mp.CheckTx(b, nil, mempool.TxInfo{})
		require.NoError(t, err)

		// simulate new block
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
		err = mp.Update(1, []types.Tx{a, b},
			[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
		require.NoError(t, err)

		// a must be added to the cache
		err = mp.CheckTx(a, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}

		// b must remain in the cache
		err = mp.CheckTx(b, nil, mempool.TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, mempool.ErrTxInCache, err)
		}
	}

	// 2. An invalid transaction must remain in the cache
	{
		a := make([]byte, 8)
		binary.BigEndian.PutUint64(a, 0)

		// remove a from the cache to test (2)
		mp.cache.Remove(a)

		err := mp.CheckTx(a, nil, mempool.TxInfo{})
		require.NoError(t, err)
	}
}

func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	mp.EnableTxsAvailable()

	timeoutMS := 500

	// with no txs, it shouldn't fire
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch of txs, it should only fire once
	txs := checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// call update with half the txs.
	// it should fire once now for the new height
	// since there are still txs left
	committedTxs, txs := txs[:50], txs[50:]
	if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch more txs. we already fired for this height so it shouldn't fire again
	moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// now call update with all the txs. it should not fire as there are no txs left
	committedTxs = append(txs, moreTxs...)
	if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)

	// send a bunch more txs, it should only fire once
	checkTxs(t, mp, 100, mempool.UnknownPeerID)
	ensureFire(t, mp.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
}

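// TestSerialReap exercises CheckTx, ReapMaxBytesMaxGas, Update and commit in
// sequence, checking the number of reapable transactions after each step.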
func TestSerialReap(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mp, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {

			// CheckTx should succeed unless the tx is already in the cache.
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mp.CheckTx(txBytes, nil, mempool.TxInfo{})
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no err for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return an error
			err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}

	reapCheck := func(exp int) {
		txs := mp.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
			t.Error(err)
		}
	}

	commitRange := func(start, end int) {
		// Deliver the txs, then commit them.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
			}
		}
		res, err := appConnCon.CommitSync()
		if err != nil {
			t.Errorf("client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("error committing. Hash:%X", res.Data)
		}
	}

	//----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount.
	reapCheck(100)

	// Deliver 0 to 999, we should reap 900 new txs
	// because 100 were already counted.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount.
	reapCheck(1000)

	// Commit from the consensus AppConn
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
	reapCheck(500)

	// Deliver 100 invalid txs and 100 valid txs
	deliverTxsRange(900, 1100)

	// We should have 600 now.
	reapCheck(600)
}

func TestMempool_CheckTxChecksTxSize(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mempl, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	maxTxSize := mempl.config.MaxTxBytes

	testCases := []struct {
		len int
		err bool
	}{
		// check small txs. no error
		0: {10, false},
		1: {1000, false},
		2: {1000000, false},

		// check around maxTxSize
		3: {maxTxSize - 1, false},
		4: {maxTxSize, false},
		5: {maxTxSize + 1, true},
	}

	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := tmrand.Bytes(testCase.len)

		err := mempl.CheckTx(tx, nil, mempool.TxInfo{})
		bv := gogotypes.BytesValue{Value: tx}
		bz, err2 := bv.Marshal()
		require.NoError(t, err2)
		require.Equal(t, len(bz), proto.Size(&bv), caseString)

		if !testCase.err {
			require.NoError(t, err, caseString)
		} else {
			require.Equal(t, err, mempool.ErrTxTooLarge{
				Max:    maxTxSize,
				Actual: testCase.len,
			}, caseString)
		}
	}
}

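// TestMempoolTxsBytes checks that SizeBytes tracks the total byte size of
// transactions in the mempool across CheckTx, Update, Flush, recheck and
// RemoveTxByKey, and that the MaxTxsBytes limit is enforced.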
func TestMempoolTxsBytes(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	cfg := config.ResetTestRoot("mempool_test")

	cfg.Mempool.MaxTxsBytes = 10
	mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
	defer cleanup()

	// 1. zero by default
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 2. len(tx) after CheckTx
	err := mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 1, mp.SizeBytes())

	// 3. zero again after tx is removed by Update
	err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 4. zero after Flush
	err = mp.CheckTx([]byte{0x02, 0x03}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 2, mp.SizeBytes())

	mp.Flush()
	assert.EqualValues(t, 0, mp.SizeBytes())

	// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
	err = mp.CheckTx(
		[]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04},
		nil,
		mempool.TxInfo{},
	)
	require.NoError(t, err)

	err = mp.CheckTx([]byte{0x05}, nil, mempool.TxInfo{})
	if assert.Error(t, err) {
		assert.IsType(t, mempool.ErrMempoolIsFull{}, err)
	}

	// 6. the size stays the same after the tx is rechecked, since the kvstore
	// app still considers it valid
	app2 := kvstore.NewApplication()
	cc = proxy.NewLocalClientCreator(app2)

	mp, cleanup = newMempoolWithApp(cc)
	defer cleanup()

	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err = appConnCon.Start()
	require.Nil(t, err)
	t.Cleanup(func() {
		if err := appConnCon.Stop(); err != nil {
			t.Error(err)
		}
	})

	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)

	res2, err := appConnCon.CommitSync()
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)

	// Pretend like we committed nothing so txBytes gets rechecked; it remains
	// in the mempool because the kvstore app accepts it again.
	err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 8, mp.SizeBytes())

	// 7. Test RemoveTxByKey function
	err = mp.CheckTx([]byte{0x06}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 9, mp.SizeBytes())
	assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key()))
	assert.EqualValues(t, 9, mp.SizeBytes())
	assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key()))
	assert.EqualValues(t, 8, mp.SizeBytes())
}

// This will non-deterministically catch some concurrency failures like
// https://github.com/vipernet-xyz/tm/issues/3509
// TODO: all of the tests should probably also run using the remote proxy app
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
	app := kvstore.NewApplication()
	_, server := newRemoteApp(t, sockPath, app)
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	cfg := config.ResetTestRoot("mempool_test")

	mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg)
	defer cleanup()

	// generate a small number of txs
	nTxs := 10
	txLen := 200
	txs := make([]types.Tx, nTxs)
	for i := 0; i < nTxs; i++ {
		txs[i] = tmrand.Bytes(txLen)
	}

	// simulate a group of peers sending them over and over
	N := cfg.Mempool.Size
	maxPeers := 5
	for i := 0; i < N; i++ {
		peerID := mrand.Intn(maxPeers)
		txNum := mrand.Intn(nTxs)
		tx := txs[txNum]

		// this will err with ErrTxInCache many times ...
		mp.CheckTx(tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
	}

	require.NoError(t, mp.FlushAppConn())
}

// newRemoteApp starts a socket ABCI server for app at addr and returns a
// client for it. The caller must stop the server.
func newRemoteApp(t *testing.T, addr string, app abci.Application) (abciclient.Client, service.Service) {
	clientCreator, err := abciclient.NewClient(addr, "socket", true)
	require.NoError(t, err)

	// Start server
	server := abciserver.NewSocketServer(addr, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		t.Fatalf("Error starting socket server: %v", err.Error())
	}

	return clientCreator, server
}

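// abciResponses returns n DeliverTx responses, all with the given code.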
func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
	responses := make([]*abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
	}
	return responses
}