github.com/pokt-network/tendermint@v0.32.11-0.20230426215212-59310158d3e9/mempool/clist_mempool_test.go

package mempool

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	mrand "math/rand"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	amino "github.com/tendermint/go-amino"

	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/libs/service"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
	return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
}

func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}
	mempool := NewCListMempool(config.Mempool, appConnMem, 0)
	mempool.SetLogger(log.TestingLogger())
	return mempool, func() { os.RemoveAll(config.RootDir) }
}

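// ensureNoFire fails the test if ch fires within timeoutMS milliseconds.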
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

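// ensureFire fails the test unless ch fires within timeoutMS milliseconds.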
func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}

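// checkTxs submits count random 20-byte txs to the mempool via CheckTx on
// behalf of peerID and returns them; txs rejected by a precheck filter are
// skipped but still included in the returned slice.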
func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs {
	txs := make(types.Txs, count)
	txInfo := TxInfo{SenderID: peerID}
	for i := 0; i < count; i++ {
		txBytes := make([]byte, 20)
		txs[i] = txBytes
		_, err := rand.Read(txBytes)
		if err != nil {
			t.Error(err)
		}
		if err := mempool.CheckTx(txBytes, nil, txInfo); err != nil {
			// Skip invalid txs.
			// TestMempoolFilters will fail otherwise. It asserts a number of txs
			// returned.
			if IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
		}
	}
	return txs
}

func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// Ensure gas calculation behaves as expected
	checkTxs(t, mempool, 1, UnknownPeerID)
	tx0 := mempool.TxsFront().Value.(*mempoolTx)
	// assert that kv store has gas wanted = 1.
	require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// ensure each tx is 20 bytes long
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
	mempool.Flush()

	// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
	// each tx has 20 bytes + amino overhead = 21 bytes, 1 gas
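	// e.g. with maxBytes=220 the reaper fits at most 10 txs (10*21 = 210 <= 220 < 231),
	// and with maxGas=10 at 1 gas per tx it likewise returns at most 10 txs.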
	tests := []struct {
		numTxsToCreate int
		maxBytes       int64
		maxGas         int64
		expectedNumTxs int
	}{
		{20, -1, -1, 20},
		{20, -1, 0, 0},
		{20, -1, 10, 10},
		{20, -1, 30, 20},
		{20, 0, -1, 0},
		{20, 0, 10, 0},
		{20, 10, 10, 0},
		{20, 22, 10, 1},
		{20, 220, -1, 10},
		{20, 220, 5, 5},
		{20, 220, 10, 10},
		{20, 220, 15, 10},
		{20, 20000, -1, 20},
		{20, 20000, 5, 5},
		{20, 20000, 30, 20},
	}
	for tcIndex, tt := range tests {
		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
		got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mempool.Flush()
	}
}

func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx) error { return nil }
	nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

	// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
	// each tx has 20 bytes + amino overhead = 21 bytes, 1 gas
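	// so PreCheckAminoMaxBytes(20) rejects every 21-byte tx while PreCheckAminoMaxBytes(22)
	// admits them, and PostCheckMaxGas(0) rejects the 1-gas txs.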
	tests := []struct {
		numTxsToCreate int
		preFilter      PreCheckFunc
		postFilter     PostCheckFunc
		expectedNumTxs int
	}{
		{10, nopPreFilter, nopPostFilter, 10},
		{10, PreCheckAminoMaxBytes(10), nopPostFilter, 0},
		{10, PreCheckAminoMaxBytes(20), nopPostFilter, 0},
		{10, PreCheckAminoMaxBytes(22), nopPostFilter, 10},
		{10, nopPreFilter, PostCheckMaxGas(-1), 10},
		{10, nopPreFilter, PostCheckMaxGas(0), 0},
		{10, nopPreFilter, PostCheckMaxGas(1), 10},
		{10, nopPreFilter, PostCheckMaxGas(3000), 10},
		{10, PreCheckAminoMaxBytes(10), PostCheckMaxGas(20), 0},
		{10, PreCheckAminoMaxBytes(30), PostCheckMaxGas(20), 10},
		{10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(1), 10},
		{10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0},
	}
	for tcIndex, tt := range tests {
		mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
		require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mempool.Flush()
	}
}

func TestMempoolUpdate(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// 1. Adds valid txs to the cache
	{
		mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
		if assert.Error(t, err) {
			assert.Equal(t, ErrTxInCache, err)
		}
	}

	// 2. Removes valid txs from the mempool
	{
		err := mempool.CheckTx([]byte{0x02}, nil, TxInfo{})
		require.NoError(t, err)
		mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		assert.Zero(t, mempool.Size())
	}

	// 3. Removes invalid transactions from the cache and the mempool (if present)
	{
		err := mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
		require.NoError(t, err)
		mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
		assert.Zero(t, mempool.Size())

		err = mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
		assert.NoError(t, err)
	}
}

func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	mempool.EnableTxsAvailable()

	timeoutMS := 500

	// with no txs, it shouldn't fire
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch of txs, it should only fire once
	txs := checkTxs(t, mempool, 100, UnknownPeerID)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// call update with half the txs.
	// it should fire once now for the new height
	// since there are still txs left
	committedTxs, txs := txs[:50], txs[50:]
	if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch more txs. we already fired for this height so it shouldn't fire again
	moreTxs := checkTxs(t, mempool, 50, UnknownPeerID)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// now call update with all the txs. it should not fire as there are no txs left
	committedTxs = append(txs, moreTxs...) //nolint: gocritic
	if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch more txs, it should only fire once
	checkTxs(t, mempool, 100, UnknownPeerID)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
}

func TestTxEviction(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	config := cfg.ResetTestRoot("mempool_test")

	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}
	mempool := NewCListMempool(config.Mempool, appConnMem, 1)
	mempool.SetLogger(log.TestingLogger())

	txBytes := make([]byte, 20)
	_, err = rand.Read(txBytes)
	if err != nil {
		t.Error(err)
	}

	// add tx to mempool at height 1
	if err := mempool.CheckTx(txBytes, nil, TxInfo{}); err != nil {
		t.Fatalf("CheckTx failed: %v while checking tx", err)
	}

	// commit 2 blocks
	for h := 2; h < 4; h++ {
		require.NoError(t, mempool.Update(int64(h), []types.Tx{}, nil, nil, nil))
		require.Equal(t, 1, mempool.Size())
	}

	// tx should be evicted from the mempool on this block commit
	require.NoError(t, mempool.Update(4, nil, nil, nil, nil))
	require.Equal(t, 0, mempool.Size())

	// tx should remain in the mempool cache, so re-checking it returns ErrTxInCache
	require.Equal(t, ErrTxInCache, mempool.CheckTx(txBytes, nil, TxInfo{}))
}

func TestSerialReap(t *testing.T) {
	app := counter.NewApplication(true)
	app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
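	// With the serial option on, the counter app treats each tx as an 8-byte
	// big-endian counter value and expects values to be committed in sequence (0, 1, 2, ...).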
	cc := proxy.NewLocalClientCreator(app)

	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {

			// This will succeed
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mempool.CheckTx(txBytes, nil, TxInfo{})
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no err for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return error
			err = mempool.CheckTx(txBytes, nil, TxInfo{})
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}

	reapCheck := func(exp int) {
		txs := mempool.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
			t.Error(err)
		}
	}

	commitRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
			}
		}
		res, err := appConnCon.CommitSync()
		if err != nil {
			t.Errorf("client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("error committing. Hash:%X", res.Data)
		}
	}

	//----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount.
	reapCheck(100)

	// Deliver txs 0 to 999; only the 900 new ones are added
	// because the first 100 were already delivered.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount.
	reapCheck(1000)

	// Commit from the consensus AppConn
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
	reapCheck(500)

	// Deliver 100 invalid txs and 100 valid txs
	deliverTxsRange(900, 1100)

	// We should have 600 now.
	reapCheck(600)
}

func TestMempoolCloseWAL(t *testing.T) {
	// 1. Create the temporary directory for mempool and WAL testing.
	rootDir, err := ioutil.TempDir("", "mempool-test")
	require.Nil(t, err, "expecting successful tmpdir creation")

	// 2. Ensure that it doesn't contain any elements -- Sanity check
	m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 0, len(m1), "no matches yet")

	// 3. Create the mempool
	wcfg := cfg.DefaultConfig()
	wcfg.Mempool.RootDir = rootDir
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
	defer cleanup()
	mempool.height = 10
	mempool.InitWAL()

	// 4. Ensure that the directory contains the WAL file
	m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m2), "expecting exactly one match: the WAL file")

	// 5. Write some contents to the WAL
	mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{})
	walFilepath := mempool.wal.Path
	sum1 := checksumFile(walFilepath, t)

	// 6. Sanity check to ensure that the written TX matches the expectation.
	require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")

	// 7. Invoke CloseWAL() and ensure it discards the
	// WAL thus any other write won't go through.
	mempool.CloseWAL()
	mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{})
	sum2 := checksumFile(walFilepath, t)
	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")

	// 8. Sanity check to ensure that the WAL file still exists
	m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m3), "expecting exactly one match: the WAL file")
}

// Size of the amino encoded TxMessage is the length of the
// encoded byte array, plus 1 for the struct field, plus 4
// for the amino prefix.
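// For example, a 20-byte tx encodes to 21 bytes (varint length prefix + data),
// so its TxMessage is 21 + 1 + 4 = 26 bytes.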
func txMessageSize(tx types.Tx) int {
	return amino.ByteSliceSize(tx) + 1 + 4
}

func TestMempoolMaxMsgSize(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempl, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	maxTxSize := mempl.config.MaxTxBytes
	maxMsgSize := calcMaxMsgSize(maxTxSize)

	testCases := []struct {
		len int
		err bool
	}{
		// check small txs. no error
		{10, false},
		{1000, false},
		{1000000, false},

		// check around maxTxSize
		// changes from no error to error
		{maxTxSize - 2, false},
		{maxTxSize - 1, false},
		{maxTxSize, false},
		{maxTxSize + 1, true},
		{maxTxSize + 2, true},

		// check around maxMsgSize. all error
		{maxMsgSize - 1, true},
		{maxMsgSize, true},
		{maxMsgSize + 1, true},
	}

	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := tmrand.Bytes(testCase.len)
		err := mempl.CheckTx(tx, nil, TxInfo{})
		msg := &TxMessage{tx}
		encoded := cdc.MustMarshalBinaryBare(msg)
		require.Equal(t, len(encoded), txMessageSize(tx), caseString)
		if !testCase.err {
			require.True(t, len(encoded) <= maxMsgSize, caseString)
			require.NoError(t, err, caseString)
		} else {
			require.True(t, len(encoded) > maxMsgSize, caseString)
			require.Equal(t, err, ErrTxTooLarge{maxTxSize, testCase.len}, caseString)
		}
	}
}

func TestMempoolTxsBytes(t *testing.T) {
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	config := cfg.ResetTestRoot("mempool_test")
	config.Mempool.MaxTxsBytes = 10
	mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
	defer cleanup()

	// 1. zero by default
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 2. len(tx) after CheckTx
	err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 1, mempool.TxsBytes())

	// 3. zero again after tx is removed by Update
	mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 4. zero after Flush
	err = mempool.CheckTx([]byte{0x02, 0x03}, nil, TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 2, mempool.TxsBytes())

	mempool.Flush()
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
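	// The 10-byte tx below fills MaxTxsBytes (10), so the subsequent 1-byte tx is rejected.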
	err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil, TxInfo{})
	require.NoError(t, err)
	err = mempool.CheckTx([]byte{0x05}, nil, TxInfo{})
	if assert.Error(t, err) {
		assert.IsType(t, ErrMempoolIsFull{}, err)
	}

	// 6. zero after tx is rechecked and removed due to not being valid anymore
	app2 := counter.NewApplication(true)
	cc = proxy.NewLocalClientCreator(app2)
	mempool, cleanup = newMempoolWithApp(cc)
	defer cleanup()

	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	err = mempool.CheckTx(txBytes, nil, TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 8, mempool.TxsBytes())

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err = appConnCon.Start()
	require.Nil(t, err)
	defer appConnCon.Stop()
	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	res2, err := appConnCon.CommitSync()
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)

	// Pretend like we committed nothing so txBytes gets rechecked and removed.
	mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
	assert.EqualValues(t, 0, mempool.TxsBytes())
}

// This will non-deterministically catch some concurrency failures like
// https://github.com/tendermint/tendermint/issues/3509
// TODO: all of the tests should probably also run using the remote proxy app
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
	app := kvstore.NewApplication()
	cc, server := newRemoteApp(t, sockPath, app)
	defer server.Stop()
	config := cfg.ResetTestRoot("mempool_test")
	mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
	defer cleanup()

	// generate small number of txs
	nTxs := 10
	txLen := 200
	txs := make([]types.Tx, nTxs)
	for i := 0; i < nTxs; i++ {
		txs[i] = tmrand.Bytes(txLen)
	}

	// simulate a group of peers sending them over and over
	N := config.Mempool.Size
	maxPeers := 5
	for i := 0; i < N; i++ {
		peerID := mrand.Intn(maxPeers)
		txNum := mrand.Intn(nTxs)
		tx := txs[txNum]

		// this will err with ErrTxInCache many times ...
		mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)})
	}
	err := mempool.FlushAppConn()
	require.NoError(t, err)
}

// caller must close server
func newRemoteApp(
	t *testing.T,
	addr string,
	app abci.Application,
) (
	clientCreator proxy.ClientCreator,
	server service.Service,
) {
	clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)

	// Start server
	server = abciserver.NewSocketServer(addr, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		t.Fatalf("Error starting socket server: %v", err.Error())
	}
	return clientCreator, server
}

func checksumIt(data []byte) string {
	h := sha256.New()
	h.Write(data)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func checksumFile(p string, t *testing.T) string {
	data, err := ioutil.ReadFile(p)
	require.Nil(t, err, "expecting successful read of %q", p)
	return checksumIt(data)
}

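// abciResponses returns n DeliverTx responses, all carrying the given code.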
func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
	responses := make([]*abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
	}
	return responses
}