github.com/Finschia/ostracon@v1.1.5/mempool/v0/clist_mempool_test.go

     1  package v0
     2  
     3  import (
     4  	"crypto/rand"
     5  	"encoding/binary"
     6  	"errors"
     7  	"fmt"
     8  	mrand "math/rand"
     9  	"os"
    10  	"sync"
    11  	"testing"
    12  	"time"
    13  
    14  	abci "github.com/tendermint/tendermint/abci/types"
    15  
    16  	"github.com/gogo/protobuf/proto"
    17  	gogotypes "github.com/gogo/protobuf/types"
    18  	"github.com/stretchr/testify/assert"
    19  	"github.com/stretchr/testify/require"
    20  
    21  	abciclient "github.com/Finschia/ostracon/abci/client"
    22  	"github.com/Finschia/ostracon/abci/example/kvstore"
    23  	abciserver "github.com/Finschia/ostracon/abci/server"
    24  	ocabci "github.com/Finschia/ostracon/abci/types"
    25  	"github.com/Finschia/ostracon/config"
    26  	"github.com/Finschia/ostracon/libs/log"
    27  	tmrand "github.com/Finschia/ostracon/libs/rand"
    28  	"github.com/Finschia/ostracon/libs/service"
    29  	"github.com/Finschia/ostracon/mempool"
    30  	"github.com/Finschia/ostracon/proxy"
    31  	"github.com/Finschia/ostracon/types"
    32  )
    33  
    34  // A cleanupFunc cleans up any config / test files created for a particular
    35  // test.
    36  type cleanupFunc func()
    37  
    38  //func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) {
    39  //	conf := config.ResetTestRoot("mempool_test")
    40  //
    41  //	mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client)
    42  //	return mp, cu, nil
    43  //}
    44  //
    45  //func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator,
    46  //	cfg *config.Config,
    47  //	client abciclient.Client) (*CListMempool, cleanupFunc) {
    48  //	appConnMem := client
    49  //	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
    50  //	err := appConnMem.Start()
    51  //	if err != nil {
    52  //		panic(err)
    53  //	}
    54  //
    55  //	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
    56  //	mp.SetLogger(log.TestingLogger())
    57  //
    58  //	return mp, func() { os.RemoveAll(cfg.RootDir) }
    59  //}
    60  
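        // newMempoolWithApp creates a CListMempool backed by the given ABCI client creator,
        // using a fresh test config root. The returned cleanupFunc removes that root dir.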
    61  func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
    62  	conf := config.ResetTestRoot("mempool_test")
    63  
    64  	mp, cu := newMempoolWithAppAndConfig(cc, conf)
    65  	return mp, cu
    66  }
    67  
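        // newMempoolWithAppAndConfig starts a mempool ABCI connection from cc, wraps it in a
        // CListMempool using cfg, and returns the mempool plus a cleanupFunc that removes cfg.RootDir.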
    68  func newMempoolWithAppAndConfig(cc proxy.ClientCreator, cfg *config.Config) (*CListMempool, cleanupFunc) {
    69  	appConnMem, _ := cc.NewABCIClient()
    70  	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
    71  	err := appConnMem.Start()
    72  	if err != nil {
    73  		panic(err)
    74  	}
    75  
    76  	mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
    77  	mp.SetLogger(log.TestingLogger())
    78  
    79  	return mp, func() { os.RemoveAll(cfg.RootDir) }
    80  }
    81  
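        // ensureNoFire fails the test if ch fires within timeoutMS milliseconds.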
    82  func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
    83  	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
    84  	select {
    85  	case <-ch:
    86  		t.Fatal("Expected not to fire")
    87  	case <-timer.C:
    88  	}
    89  }
    90  
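        // ensureFire fails the test if ch does not fire within timeoutMS milliseconds.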
    91  func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
    92  	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
    93  	select {
    94  	case <-ch:
    95  	case <-timer.C:
    96  		t.Fatal("Expected to fire")
    97  	}
    98  }
    99  
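        // checkTxs generates count random 20-byte txs and submits each via CheckTxSync under the
        // given peer ID. Txs rejected by a precheck filter are skipped; all generated txs are returned.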
   100  func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs {
   101  	txs := make(types.Txs, count)
   102  	txInfo := mempool.TxInfo{SenderID: peerID}
   103  	for i := 0; i < count; i++ {
   104  		txBytes := make([]byte, 20)
   105  		txs[i] = txBytes
   106  		_, err := rand.Read(txBytes)
   107  		if err != nil {
   108  			t.Error(err)
   109  		}
   110  		if err := mp.CheckTxSync(txBytes, nil, txInfo); err != nil {
   111  			// Skip invalid txs.
   112  			// TestMempoolFilters will fail otherwise. It asserts a number of txs
   113  			// returned.
   114  			if mempool.IsPreCheckError(err) {
   115  				continue
   116  			}
   117  			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
   118  		}
   119  	}
   120  	return txs
   121  }
   122  
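        // TestReapMaxBytesMaxGas exercises ReapMaxBytesMaxGas and ReapMaxBytesMaxGasMaxTxs against
        // combinations of byte, gas, and tx-count limits, using 20-byte txs that each want 1 gas.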
   123  func TestReapMaxBytesMaxGas(t *testing.T) {
   124  	app := kvstore.NewApplication()
   125  	cc := proxy.NewLocalClientCreator(app)
   126  	mp, cleanup := newMempoolWithApp(cc)
   127  	defer cleanup()
   128  
   129  	// Ensure gas calculation behaves as expected
   130  	checkTxs(t, mp, 1, mempool.UnknownPeerID)
   131  	tx0 := mp.TxsFront().Value.(*mempoolTx)
   132  	// assert that kv store has gas wanted = 1.
   133  	require.Equal(t,
   134  		app.CheckTxSync(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
   135  	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
   136  	// ensure each tx is 20 bytes long
   137  	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
   138  	mp.Flush()
   139  
   140  	// Each table-driven test case creates numTxsToCreate txs via CheckTx and flushes all remaining txs at the end.
   141  	// Each tx is 20 bytes.
   142  	tests := []struct {
   143  		numTxsToCreate int
   144  		maxBytes       int64
   145  		maxGas         int64
   146  		expectedNumTxs int
   147  		maxTxs         int64
   148  	}{
   149  		{20, -1, -1, 20, 0},
   150  		{20, -1, 0, 0, 0},
   151  		{20, -1, 10, 10, 0},
   152  		{20, -1, 30, 20, 0},
   153  		{20, 0, -1, 0, 0},
   154  		{20, 0, 10, 0, 0},
   155  		{20, 10, 10, 0, 0},
   156  		{20, 24, 10, 1, 0},
   157  		{20, 240, 5, 5, 0},
   158  		{20, 240, -1, 10, 0},
   159  		{20, 240, 10, 10, 0},
   160  		{20, 240, 15, 10, 0},
   161  		{20, 20000, -1, 20, 0},
   162  		{20, 20000, 5, 5, 0},
   163  		{20, 20000, 30, 20, 0},
   164  		{20, 20000, 30, 20, 0},
   165  		{20, 20000, 30, 10, 10},
   166  		{20, 20000, 30, 20, 100},
   167  	}
   168  	for tcIndex, tt := range tests {
   169  		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
   170  		var got types.Txs
   171  		if tt.maxTxs <= 0 {
   172  			got = mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
   173  		} else {
   174  			got = mp.ReapMaxBytesMaxGasMaxTxs(tt.maxBytes, tt.maxGas, tt.maxTxs)
   175  		}
   176  		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
   177  			len(got), tt.expectedNumTxs, tcIndex)
   178  		mp.Flush()
   179  	}
   180  }
   181  
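        // TestMempoolFilters verifies that the PreCheck filter passed to Update controls which txs
        // are subsequently admitted by CheckTx.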
   182  func TestMempoolFilters(t *testing.T) {
   183  	app := kvstore.NewApplication()
   184  	cc := proxy.NewLocalClientCreator(app)
   185  	mp, cleanup := newMempoolWithApp(cc)
   186  	defer cleanup()
   187  	emptyTxArr := []types.Tx{[]byte{}}
   188  
   189  	nopPreFilter := func(tx types.Tx) error { return nil }
   190  
   191  	// Each table-driven test case creates numTxsToCreate txs via CheckTx and flushes all remaining txs at the end.
   192  	// Each tx is 20 bytes.
   193  	tests := []struct {
   194  		numTxsToCreate int
   195  		preFilter      mempool.PreCheckFunc
   196  		expectedNumTxs int
   197  	}{
   198  		{10, nopPreFilter, 10},
   199  		{10, mempool.PreCheckMaxBytes(10), 0},
   200  		{10, mempool.PreCheckMaxBytes(20), 0},
   201  		{10, mempool.PreCheckMaxBytes(22), 10},
   202  		{10, mempool.PreCheckMaxBytes(30), 10},
   203  	}
   204  	for tcIndex, tt := range tests {
   205  		err := mp.Update(newTestBlock(1, emptyTxArr), abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, nil)
   206  		require.NoError(t, err)
   207  		checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
   208  		require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
   209  		mp.Flush()
   210  	}
   211  }
   212  
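        // TestMempoolUpdate verifies how Update interacts with the cache and the mempool:
        // committed txs are cached and removed from the mempool, invalid txs are evicted from the cache.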
   213  func TestMempoolUpdate(t *testing.T) {
   214  	app := kvstore.NewApplication()
   215  	cc := proxy.NewLocalClientCreator(app)
   216  	mp, cleanup := newMempoolWithApp(cc)
   217  	defer cleanup()
   218  
   219  	// 1. Adds valid txs to the cache
   220  	{
   221  		err := mp.Update(newTestBlock(1, []types.Tx{[]byte{0x01}}),
   222  			abciResponses(1, ocabci.CodeTypeOK), nil, nil)
   223  		require.NoError(t, err)
   224  		err = mp.CheckTxSync([]byte{0x01}, nil, mempool.TxInfo{})
   225  		if assert.Error(t, err) {
   226  			assert.Equal(t, mempool.ErrTxInCache, err)
   227  		}
   228  	}
   229  
   230  	// 2. Removes valid txs from the mempool
   231  	{
   232  		err := mp.CheckTxSync([]byte{0x02}, nil, mempool.TxInfo{})
   233  		require.NoError(t, err)
   234  		err = mp.Update(newTestBlock(1, []types.Tx{[]byte{0x02}}), abciResponses(1, ocabci.CodeTypeOK), nil, nil)
   235  		require.NoError(t, err)
   236  		assert.Zero(t, mp.Size())
   237  	}
   238  
   239  	// 3. Removes invalid transactions from the cache and the mempool (if present)
   240  	{
   241  		err := mp.CheckTxSync([]byte{0x03}, nil, mempool.TxInfo{})
   242  		require.NoError(t, err)
   243  		err = mp.Update(newTestBlock(1, []types.Tx{[]byte{0x03}}), abciResponses(1, 1), nil, nil)
   244  		require.NoError(t, err)
   245  		assert.Zero(t, mp.Size())
   246  
   247  		err = mp.CheckTxSync([]byte{0x03}, nil, mempool.TxInfo{})
   248  		require.NoError(t, err)
   249  	}
   250  }
   251  
   252  // FIXME: need to adjust Ostracon's abci
   253  //func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
   254  //	var callback abciclient.Callback
   255  //	mockClient := new(abciclimocks.Client)
   256  //	mockClient.On("Start").Return(nil)
   257  //	mockClient.On("SetLogger", mock.Anything)
   258  //
   259  //	mockClient.On("Error").Return(nil).Times(4)
   260  //	mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil)
   261  //	mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))
   262  //
   263  //	app := kvstore.NewApplication()
   264  //	cc := proxy.NewLocalClientCreator(app)
   265  //	mp, cleanup, err := newMempoolWithAppMock(cc, mockClient)
   266  //	require.NoError(t, err)
   267  //	defer cleanup()
   268  //
   269  //	// Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them.
   270  //	txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}}
   271  //	for _, tx := range txs {
   272  //		reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
   273  //		reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})
   274  //
   275  //		mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
   276  //		err := mp.CheckTxSync(tx, nil, mempool.TxInfo{})
   277  //		require.NoError(t, err)
   278  //
   279  //		// ensure that the callback that the mempool sets on the ReqRes is run.
   280  //		reqRes.InvokeCallback()
   281  //	}
   282  //
   283  //	// Calling update to remove the first transaction from the mempool.
   284  //	// This call also triggers the mempool to recheck its remaining transactions.
   285  //	err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
   286  //	require.Nil(t, err)
   287  //
   288  //	// The mempool has now sent its requests off to the client to be rechecked
   289  //	// and is waiting for the corresponding callbacks to be called.
   290  //	// We now call the mempool-supplied callback on the first and third transaction.
   291  //	// This simulates the client dropping the second request.
   292  //	// Previous versions of this code panicked when the ABCI application missed
   293  //	// a recheck-tx request.
   294  //	resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK}
   295  //	req := abci.RequestCheckTx{Tx: txs[1]}
   296  //	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
   297  //
   298  //	req = abci.RequestCheckTx{Tx: txs[3]}
   299  //	callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
   300  //	mockClient.AssertExpectations(t)
   301  //}
   302  
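        // TestMempool_KeepInvalidTxsInCache verifies that, with KeepInvalidTxsInCache enabled, an
        // invalid tx stays cached after Update, and that a tx already in the mempool is rejected
        // with ErrTxInMap.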
   303  func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
   304  	app := kvstore.NewApplication()
   305  	cc := proxy.NewLocalClientCreator(app)
   306  	wcfg := config.DefaultConfig()
   307  	wcfg.Mempool.KeepInvalidTxsInCache = true
   308  	mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
   309  	defer cleanup()
   310  
   311  	// 1. An invalid transaction must remain in the cache after Update
   312  	{
   313  		a := make([]byte, 8)
   314  		binary.BigEndian.PutUint64(a, 0)
   315  
   316  		b := make([]byte, 8)
   317  		binary.BigEndian.PutUint64(b, 1)
   318  
   319  		err := mp.CheckTxSync(b, nil, mempool.TxInfo{})
   320  		require.NoError(t, err)
   321  
   322  		// simulate new block
   323  		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
   324  		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
   325  		err = mp.Update(newTestBlock(1, []types.Tx{a, b}),
   326  			[]*abci.ResponseDeliverTx{{Code: ocabci.CodeTypeOK}, {Code: 2}}, nil, nil)
   327  		require.NoError(t, err)
   328  
   329  		// a must be added to the cache
   330  		err = mp.CheckTxSync(a, nil, mempool.TxInfo{})
   331  		if assert.Error(t, err) {
   332  			assert.Equal(t, mempool.ErrTxInCache, err)
   333  		}
   334  
   335  		// b must remain in the cache
   336  		err = mp.CheckTxSync(b, nil, mempool.TxInfo{})
   337  		if assert.Error(t, err) {
   338  			assert.Equal(t, mempool.ErrTxInCache, err)
   339  		}
   340  	}
   341  
   342  	// 2. A tx that is already in the mempool must be rejected with ErrTxInMap, since prepareCheckTx checks the tx map before the cache
   343  	{
   344  		a := make([]byte, 8)
   345  		binary.BigEndian.PutUint64(a, 0)
   346  
   347  		// remove a from the cache to test (2)
   348  		mp.cache.Remove(a)
   349  
   350  		err := mp.CheckTxSync(a, nil, mempool.TxInfo{})
   351  		require.NoError(t, err)
   352  
   353  		err = mp.CheckTxSync(a, nil, mempool.TxInfo{})
   354  		if assert.Error(t, err) {
   355  			// NOTE: After switching the test application from Counter to KVStore,
   356  			// CheckTx no longer returns CodeTypeBadNonce the way Counter did
   357  			assert.Equal(t, mempool.ErrTxInMap, err)
   358  		}
   359  	}
   360  }
   361  
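        // TestTxsAvailable checks that the TxsAvailable channel fires at most once per height
        // whenever the mempool holds txs.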
   362  func TestTxsAvailable(t *testing.T) {
   363  	app := kvstore.NewApplication()
   364  	cc := proxy.NewLocalClientCreator(app)
   365  	mp, cleanup := newMempoolWithApp(cc)
   366  	defer cleanup()
   367  	mp.EnableTxsAvailable()
   368  
   369  	timeoutMS := 500
   370  
   371  	// with no txs, it shouldn't fire
   372  	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
   373  
   374  	// send a bunch of txs, it should only fire once
   375  	txs := checkTxs(t, mp, 100, mempool.UnknownPeerID)
   376  	ensureFire(t, mp.TxsAvailable(), timeoutMS)
   377  	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
   378  
   379  	// call update with half the txs.
   380  	// it should fire once now for the new height
   381  	// since there are still txs left
   382  	committedTxs, txs := txs[:50], txs[50:]
   383  	if err := mp.Update(newTestBlock(1, committedTxs),
   384  		abciResponses(len(committedTxs), ocabci.CodeTypeOK), nil, nil); err != nil {
   385  		t.Error(err)
   386  	}
   387  	ensureFire(t, mp.TxsAvailable(), timeoutMS)
   388  	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
   389  
   390  	// send a bunch more txs. we already fired for this height so it shouldn't fire again
   391  	moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID)
   392  	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
   393  
   394  	// now call update with all the txs. it should not fire as there are no txs left
   395  	committedTxs = append(txs, moreTxs...) // nolint: gocritic
   396  	if err := mp.Update(newTestBlock(2, committedTxs),
   397  		abciResponses(len(committedTxs), ocabci.CodeTypeOK), nil, nil); err != nil {
   398  		t.Error(err)
   399  	}
   400  	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
   401  
   402  	// send a bunch more txs, it should only fire once
   403  	checkTxs(t, mp, 100, mempool.UnknownPeerID)
   404  	ensureFire(t, mp.TxsAvailable(), timeoutMS)
   405  	ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
   406  }
   407  
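        // TestSerialReap interleaves CheckTx, reaping, Update, and commits over a separate consensus
        // connection, checking the reap counts and duplicate (cached) tx handling at each step.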
   408  func TestSerialReap(t *testing.T) {
   409  	app := kvstore.NewApplication()
   410  	cc := proxy.NewLocalClientCreator(app)
   411  
   412  	mp, cleanup := newMempoolWithApp(cc)
   413  	defer cleanup()
   414  
   415  	appConnCon, _ := cc.NewABCIClient()
   416  	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
   417  	err := appConnCon.Start()
   418  	require.Nil(t, err)
   419  
   420  	cacheMap := make(map[string]struct{})
   421  	deliverTxsRange := func(start, end int) {
   422  		// Deliver some txs.
   423  		for i := start; i < end; i++ {
   424  
   425  			// This will succeed
   426  			txBytes := make([]byte, 8)
   427  			binary.BigEndian.PutUint64(txBytes, uint64(i))
   428  			err := mp.CheckTxSync(txBytes, nil, mempool.TxInfo{})
   429  			_, cached := cacheMap[string(txBytes)]
   430  			if cached {
   431  				require.NotNil(t, err, "expected error for cached tx")
   432  			} else {
   433  				require.Nil(t, err, "expected no err for uncached tx")
   434  			}
   435  			cacheMap[string(txBytes)] = struct{}{}
   436  
   437  			// Duplicates are cached and should return error
   438  			err = mp.CheckTxSync(txBytes, nil, mempool.TxInfo{})
   439  			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
   440  		}
   441  	}
   442  
   443  	reapCheck := func(exp int) {
   444  		txs := mp.ReapMaxBytesMaxGas(-1, -1)
   445  		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
   446  	}
   447  
   448  	updateRange := func(start, end int) {
   449  		txs := make([]types.Tx, 0)
   450  		for i := start; i < end; i++ {
   451  			txBytes := make([]byte, 8)
   452  			binary.BigEndian.PutUint64(txBytes, uint64(i))
   453  			txs = append(txs, txBytes)
   454  		}
   455  		if err := mp.Update(newTestBlock(0, txs),
   456  			abciResponses(len(txs), ocabci.CodeTypeOK), nil, nil); err != nil {
   457  			t.Error(err)
   458  		}
   459  	}
   460  
   461  	commitRange := func(start, end int) {
   462  		// Deliver some txs.
   463  		for i := start; i < end; i++ {
   464  			txBytes := make([]byte, 8)
   465  			binary.BigEndian.PutUint64(txBytes, uint64(i))
   466  			res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
   467  			if err != nil {
   468  				t.Errorf("client error committing tx: %v", err)
   469  			}
   470  			if res.IsErr() {
   471  				t.Errorf("error committing tx. Code:%v result:%X log:%v",
   472  					res.Code, res.Data, res.Log)
   473  			}
   474  		}
   475  		res, err := appConnCon.CommitSync()
   476  		if err != nil {
   477  			t.Errorf("client error committing: %v", err)
   478  		}
   479  		if len(res.Data) != 8 {
   480  			t.Errorf("error committing. Hash:%X", res.Data)
   481  		}
   482  	}
   483  
   484  	// ----------------------------------------
   485  
   486  	// Deliver some txs.
   487  	deliverTxsRange(0, 100)
   488  
   489  	// Reap the txs.
   490  	reapCheck(100)
   491  
   492  	// Reap again.  We should get the same amount
   493  	reapCheck(100)
   494  
   495  	// Deliver txs 0 through 999; 900 of them are new
   496  	// since txs 0 through 99 were already delivered above.
   497  	deliverTxsRange(0, 1000)
   498  
   499  	// Reap the txs.
   500  	reapCheck(1000)
   501  
   502  	// Reap again.  We should get the same amount
   503  	reapCheck(1000)
   504  
   505  	// Commit from the consensus AppConn
   506  	commitRange(0, 500)
   507  	updateRange(0, 500)
   508  
   509  	// We should have 500 left.
   510  	reapCheck(500)
   511  
   512  	// Deliver 100 duplicate (cached) txs and 100 new txs
   513  	deliverTxsRange(900, 1100)
   514  
   515  	// We should have 600 now.
   516  	reapCheck(600)
   517  }
   518  
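        // TestMempool_CheckTxChecksTxSize verifies that CheckTxSync rejects txs larger than the
        // configured MaxTxBytes with ErrTxTooLarge.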
   519  func TestMempool_CheckTxChecksTxSize(t *testing.T) {
   520  	app := kvstore.NewApplication()
   521  	cc := proxy.NewLocalClientCreator(app)
   522  
   523  	mempl, cleanup := newMempoolWithApp(cc)
   524  	defer cleanup()
   525  
   526  	maxTxSize := mempl.config.MaxTxBytes
   527  
   528  	testCases := []struct {
   529  		len int
   530  		err bool
   531  	}{
   532  		// check small txs. no error
   533  		0: {10, false},
   534  		1: {1000, false},
   535  		2: {1000000, false},
   536  
   537  		// check around maxTxSize
   538  		3: {maxTxSize - 1, false},
   539  		4: {maxTxSize, false},
   540  		5: {maxTxSize + 1, true},
   541  	}
   542  
   543  	for i, testCase := range testCases {
   544  		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)
   545  
   546  		tx := tmrand.Bytes(testCase.len)
   547  
   548  		err := mempl.CheckTxSync(tx, nil, mempool.TxInfo{})
   549  		bv := gogotypes.BytesValue{Value: tx}
   550  		bz, err2 := bv.Marshal()
   551  		require.NoError(t, err2)
   552  		require.Equal(t, len(bz), proto.Size(&bv), caseString)
   553  
   554  		if !testCase.err {
   555  			require.NoError(t, err, caseString)
   556  		} else {
   557  			require.Equal(t, err, mempool.ErrTxTooLarge{
   558  				Max:    maxTxSize,
   559  				Actual: testCase.len,
   560  			}, caseString)
   561  		}
   562  	}
   563  }
   564  
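        // TestMempoolTxsBytes tracks the mempool's SizeBytes accounting across CheckTx, Update,
        // Flush, the MaxTxsBytes limit, recheck after an empty block, and RemoveTxByKey.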
   565  func TestMempoolTxsBytes(t *testing.T) {
   566  	app := kvstore.NewApplication()
   567  	cc := proxy.NewLocalClientCreator(app)
   568  
   569  	cfg := config.ResetTestRoot("mempool_test")
   570  
   571  	cfg.Mempool.MaxTxsBytes = 10
   572  	mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
   573  	defer cleanup()
   574  
   575  	// 1. zero by default
   576  	assert.EqualValues(t, 0, mp.SizeBytes())
   577  
   578  	// 2. len(tx) after CheckTx
   579  	err := mp.CheckTxSync([]byte{0x01}, nil, mempool.TxInfo{})
   580  	require.NoError(t, err)
   581  	assert.EqualValues(t, 1, mp.SizeBytes())
   582  
   583  	// 3. zero again after tx is removed by Update
   584  	err = mp.Update(newTestBlock(1, []types.Tx{[]byte{0x01}}),
   585  		abciResponses(1, ocabci.CodeTypeOK), nil, nil)
   586  	require.NoError(t, err)
   587  	assert.EqualValues(t, 0, mp.SizeBytes())
   588  
   589  	// 4. zero after Flush
   590  	err = mp.CheckTxSync([]byte{0x02, 0x03}, nil, mempool.TxInfo{})
   591  	require.NoError(t, err)
   592  	assert.EqualValues(t, 2, mp.SizeBytes())
   593  
   594  	mp.Flush()
   595  	assert.EqualValues(t, 0, mp.SizeBytes())
   596  
   597  	// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
   598  	err = mp.CheckTxSync(
   599  		[]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04},
   600  		nil,
   601  		mempool.TxInfo{},
   602  	)
   603  	require.NoError(t, err)
   604  	err = mp.CheckTxSync([]byte{0x05}, nil, mempool.TxInfo{})
   605  	if assert.Error(t, err) {
   606  		assert.IsType(t, mempool.ErrMempoolIsFull{}, err)
   607  	}
   608  
   609  	// 6. zero after tx is rechecked and removed due to not being valid anymore
   610  	app2 := kvstore.NewApplication()
   611  	cc = proxy.NewLocalClientCreator(app2)
   612  
   613  	mp, cleanup = newMempoolWithApp(cc)
   614  	defer cleanup()
   615  
   616  	txBytes := make([]byte, 8)
   617  	binary.BigEndian.PutUint64(txBytes, uint64(0))
   618  
   619  	err = mp.CheckTxSync(txBytes, nil, mempool.TxInfo{})
   620  	require.NoError(t, err)
   621  	assert.EqualValues(t, 8, mp.SizeBytes())
   622  
   623  	appConnCon, _ := cc.NewABCIClient()
   624  	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
   625  	err = appConnCon.Start()
   626  	require.Nil(t, err)
   627  	t.Cleanup(func() {
   628  		if err := appConnCon.Stop(); err != nil {
   629  			t.Error(err)
   630  		}
   631  	})
   632  
   633  	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
   634  	require.NoError(t, err)
   635  	require.EqualValues(t, 0, res.Code)
   636  
   637  	res2, err := appConnCon.CommitSync()
   638  	require.NoError(t, err)
   639  	require.NotEmpty(t, res2.Data)
   640  
   641  	// Pretend like we committed nothing so txBytes gets rechecked and removed.
   642  	err = mp.Update(newTestBlock(1, []types.Tx{}), abciResponses(0, ocabci.CodeTypeOK), nil, nil)
   643  	require.NoError(t, err)
   644  	assert.EqualValues(t, 8, mp.SizeBytes())
   645  
   646  	// 7. Test RemoveTxByKey function
   647  	err = mp.CheckTxSync([]byte{0x06}, nil, mempool.TxInfo{})
   648  	require.NoError(t, err)
   649  	assert.EqualValues(t, 9, mp.SizeBytes())
   650  	assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key()))
   651  	assert.EqualValues(t, 9, mp.SizeBytes())
   652  	assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key()))
   653  	assert.EqualValues(t, 8, mp.SizeBytes())
   654  
   655  }
   656  
   657  // This will non-deterministically catch some concurrency failures like
   658  // https://github.com/tendermint/tendermint/issues/3509
   659  // TODO: all of the tests should probably also run using the remote proxy app
   660  // since otherwise we're not actually testing the concurrency of the mempool here!
   661  func TestMempoolRemoteAppConcurrency(t *testing.T) {
   662  	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
   663  	app := kvstore.NewApplication()
   664  	_, server := newRemoteApp(t, sockPath, app)
   665  	t.Cleanup(func() {
   666  		if err := server.Stop(); err != nil {
   667  			t.Error(err)
   668  		}
   669  	})
   670  
   671  	cfg := config.ResetTestRoot("mempool_test")
   672  
   673  	mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg)
   674  	defer cleanup()
   675  
   676  	// generate small number of txs
   677  	nTxs := 10
   678  	txLen := 200
   679  	txs := make([]types.Tx, nTxs)
   680  	for i := 0; i < nTxs; i++ {
   681  		txs[i] = tmrand.Bytes(txLen)
   682  	}
   683  
   684  	// simulate a group of peers sending them over and over
   685  	N := cfg.Mempool.Size
   686  	maxPeers := 5
   687  	for i := 0; i < N; i++ {
   688  		peerID := mrand.Intn(maxPeers)
   689  		txNum := mrand.Intn(nTxs)
   690  		tx := txs[txNum]
   691  
   692  		// this will err with ErrTxInCache many times ...
   693  		mp.CheckTxSync(tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) // nolint: errcheck
   694  	}
   695  
   696  	require.NoError(t, mp.FlushAppConn())
   697  }
   698  
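        // newTestBlock builds a minimal block carrying only a height and the given txs, which is
        // all that Mempool.Update needs in these tests.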
   699  func newTestBlock(height int64, txs types.Txs) *types.Block {
   700  	return &types.Block{
   701  		Header: types.Header{
   702  			Height: height,
   703  		},
   704  		Data: types.Data{
   705  			Txs: txs,
   706  		},
   707  	}
   708  }
   709  
   710  // newRemoteApp starts a socket ABCI server for app at addr and returns a client for it; the caller must stop the server
   711  func newRemoteApp(t *testing.T, addr string, app ocabci.Application) (abciclient.Client, service.Service) {
   712  	clientCreator, err := abciclient.NewClient(addr, "socket", true)
   713  	require.NoError(t, err)
   714  
   715  	// Start server
   716  	server := abciserver.NewSocketServer(addr, app)
   717  	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
   718  	if err := server.Start(); err != nil {
   719  		t.Fatalf("Error starting socket server: %v", err.Error())
   720  	}
   721  
   722  	return clientCreator, server
   723  }
   724  
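        // abciResponses returns n ResponseDeliverTx values that all carry the given result code.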
   725  func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
   726  	responses := make([]*abci.ResponseDeliverTx, 0, n)
   727  	for i := 0; i < n; i++ {
   728  		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
   729  	}
   730  	return responses
   731  }
   732  
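        // TestTxMempoolPostCheckError verifies that an error returned by the mempool's postCheck
        // function is reported in the MempoolError field of the recheck CheckTx response.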
   733  func TestTxMempoolPostCheckError(t *testing.T) {
   734  	cases := []struct {
   735  		name string
   736  		err  error
   737  	}{
   738  		{
   739  			name: "error",
   740  			err:  errors.New("test error"),
   741  		},
   742  		{
   743  			name: "no error",
   744  			err:  nil,
   745  		},
   746  	}
   747  	for _, tc := range cases {
   748  		testCase := tc
   749  		t.Run(testCase.name, func(t *testing.T) {
   750  			app := kvstore.NewApplication()
   751  			cc := proxy.NewLocalClientCreator(app)
   752  			mp, cleanup := newMempoolWithApp(cc)
   753  			defer cleanup()
   754  
   755  			mp.postCheck = func(_ types.Tx, _ *ocabci.ResponseCheckTx) error {
   756  				return testCase.err
   757  			}
   758  
   759  			tx := types.Tx{1}
   760  			err := mp.CheckTxSync(tx, nil, mempool.TxInfo{})
   761  			require.NoError(t, err)
   762  
   763  			req := abci.RequestCheckTx{
   764  				Tx:   tx,
   765  				Type: abci.CheckTxType_Recheck,
   766  			}
   767  			res := &ocabci.Response{}
   768  
   769  			m := sync.Mutex{}
   770  			m.Lock()
   771  			mp.proxyAppConn.CheckTxAsync(req, func(r *ocabci.Response) {
   772  				res = r
   773  				m.Unlock()
   774  			})
   775  
   776  			checkTxRes, ok := res.Value.(*ocabci.Response_CheckTx)
   777  			require.True(t, ok)
   778  			expectedErrString := ""
   779  			if testCase.err != nil {
   780  				expectedErrString = testCase.err.Error()
   781  			}
   782  			require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
   783  		})
   784  	}
   785  }