github.com/aakash4dev/cometbft@v0.38.2/mempool/reactor_test.go

package mempool

import (
	"encoding/hex"
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/aakash4dev/cometbft/abci/example/kvstore"
	abci "github.com/aakash4dev/cometbft/abci/types"
	cfg "github.com/aakash4dev/cometbft/config"
	"github.com/aakash4dev/cometbft/libs/log"
	"github.com/aakash4dev/cometbft/p2p"
	"github.com/aakash4dev/cometbft/p2p/mock"
	memproto "github.com/aakash4dev/cometbft/proto/tendermint/mempool"
	"github.com/aakash4dev/cometbft/proxy"
	"github.com/aakash4dev/cometbft/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

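// peerState is a minimal stand-in for the peer state that the reactor reads
// from types.PeerStateKey; only the height is needed in these tests.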
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// If there were more than two reactors, the order of transactions could not
	// be asserted in waitForReactors (due to transaction gossiping). If we
	// replaced Connect2Switches (full mesh) with a function that connects the
	// first reactor to the others and nothing else, this test should also pass
	// with more than two reactors.
	const N = 2
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs)
	waitForReactors(t, txs, reactors, checkTxsInOrder)
}

// regression test for https://github.com/tendermint/tendermint/issues/5408
func TestReactorConcurrency(t *testing.T) {
	config := cfg.TestConfig()
	config.Mempool.Size = 5000
	config.Mempool.CacheSize = 5000
	const N = 2
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	var wg sync.WaitGroup

	const numTxs = 5

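	// Interleave Update and Flush with the CheckTx calls below so that the
	// mempools are exercised concurrently from multiple goroutines.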
	for i := 0; i < 1000; i++ {
		wg.Add(2)

		// 1. submit a bunch of txs
		// 2. update the whole mempool
		txs := checkTxs(t, reactors[0].mempool, numTxs)
		go func() {
			defer wg.Done()

			reactors[0].mempool.Lock()
			defer reactors[0].mempool.Unlock()

			err := reactors[0].mempool.Update(1, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil)
			assert.NoError(t, err)
		}()

		// 1. submit a bunch of txs
		// 2. update none
		_ = checkTxs(t, reactors[1].mempool, numTxs)
		go func() {
			defer wg.Done()

			reactors[1].mempool.Lock()
			defer reactors[1].mempool.Unlock()
			err := reactors[1].mempool.Update(1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil)
			assert.NoError(t, err)
		}()

		// 1. flush the mempool
		reactors[1].mempool.Flush()
	}

	wg.Wait()
}

// Send a bunch of txs to the first reactor's mempool, claiming they came from
// the second peer, and ensure the second peer receives no txs back.
func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// create random transactions
	txs := NewRandomTxs(numTxs, 20)

	const peerID0 = 0
	const peerID1 = 1
	// the second peer sends all the transactions to the first peer
	for _, tx := range txs {
		reactors[0].addSender(tx.Key(), peerID1)
		_, err := reactors[peerID0].mempool.CheckTx(tx)
		require.NoError(t, err)
	}

	// the second peer should not receive any transaction
	ensureNoTxs(t, reactors[peerID1], 100*time.Millisecond)
}

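// TestReactor_MaxTxBytes checks that a tx of exactly MaxTxBytes is accepted and
// gossiped to the other reactor, while a tx one byte larger is rejected by CheckTx.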
func TestReactor_MaxTxBytes(t *testing.T) {
	config := cfg.TestConfig()

	const N = 2
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// Broadcast a tx, which has the max size
	// => ensure it's received by the second reactor.
	tx1 := kvstore.NewRandomTx(config.Mempool.MaxTxBytes)
	reqRes, err := reactors[0].mempool.CheckTx(tx1)
	require.NoError(t, err)
	require.False(t, reqRes.Response.GetCheckTx().IsErr())
	waitForReactors(t, []types.Tx{tx1}, reactors, checkTxsInOrder)

	reactors[0].mempool.Flush()
	reactors[1].mempool.Flush()

	// Broadcast a tx, which is beyond the max size
	// => ensure it's not sent
	tx2 := kvstore.NewRandomTx(config.Mempool.MaxTxBytes + 1)
	reqRes, err = reactors[0].mempool.CheckTx(tx2)
	require.Error(t, err)
	require.Nil(t, reqRes)
}

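// TestBroadcastTxForPeerStopsWhenPeerStops checks that broadcastTxRoutine does
// not leak a goroutine after its peer is stopped.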
func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

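// TestBroadcastTxForPeerStopsWhenReactorStops checks that broadcastTxRoutine
// does not leak a goroutine after the switches (and thus the reactors) are stopped.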
func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	_, switches := makeAndConnectReactors(config, N)

	// stop reactors
	for _, s := range switches {
		assert.NoError(t, s.Stop())
	}

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

// TODO: This test checks that we don't panic and are able to generate new
// PeerIDs for each peer we add. It seems as though we should be able to test
// this in a much more direct way.
// https://github.com/aakash4dev/cometbft/issues/9639
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	reactor := reactors[0]

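	// Add more peers than MaxActiveIDs and make sure the reactor neither panics
	// nor runs out of peer IDs (see the TODO above).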
	for i := 0; i < MaxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.Receive(p2p.Envelope{
			ChannelID: MempoolChannel,
			Src:       peer,
			Message:   &memproto.Message{}, // Deliberately an invalid (empty) message, so the reactor puts the peer in an error state and stops it.
		},
		)
		reactor.AddPeer(peer)
	}
}

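// TestReactorTxSendersLocal checks the reactor's sender bookkeeping
// (addSender, isSender, removeSenders) on a single reactor, without networking.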
func TestReactorTxSendersLocal(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	reactor := reactors[0]

	tx1 := kvstore.NewTxFromID(1)
	tx2 := kvstore.NewTxFromID(2)
	require.False(t, reactor.isSender(types.Tx(tx1).Key(), 1))

	reactor.addSender(types.Tx(tx1).Key(), 1)
	reactor.addSender(types.Tx(tx1).Key(), 2)
	reactor.addSender(types.Tx(tx2).Key(), 1)
	require.True(t, reactor.isSender(types.Tx(tx1).Key(), 1))
	require.True(t, reactor.isSender(types.Tx(tx1).Key(), 2))
	require.True(t, reactor.isSender(types.Tx(tx2).Key(), 1))

	reactor.removeSenders(types.Tx(tx1).Key())
	require.False(t, reactor.isSender(types.Tx(tx1).Key(), 1))
	require.False(t, reactor.isSender(types.Tx(tx1).Key(), 2))
	require.True(t, reactor.isSender(types.Tx(tx2).Key(), 1))
}

// Test that:
// - If a transaction came from a peer AND if the transaction is added to the
// mempool, it must have a non-empty list of senders in the reactor.
// - If a transaction is removed from the mempool, it must also be removed from
// the list of senders in the reactor.
func TestReactorTxSendersMultiNode(t *testing.T) {
	config := cfg.TestConfig()
	config.Mempool.Size = 1000
	config.Mempool.CacheSize = 1000
	const N = 3
	reactors, _ := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	firstReactor := reactors[0]

	numTxs := config.Mempool.Size
	txs := newUniqueTxs(numTxs)

	// Initially, there are no transactions (and no senders).
	for _, r := range reactors {
		require.Zero(t, len(r.txSenders))
	}

	// Add transactions to the first reactor.
	callCheckTx(t, firstReactor.mempool, txs)

	// Wait for all txs to be in the mempool of each reactor.
	waitForReactors(t, txs, reactors, checkTxsInMempool)
	for i, r := range reactors {
		checkTxsInMempoolAndSenders(t, r, txs, i)
	}

	// Split the transactions into three groups of different sizes.
	splitIndex := numTxs / 6
	validTxs := txs[:splitIndex]                 // will be used to update the mempool, as valid txs
	invalidTxs := txs[splitIndex : 3*splitIndex] // will be used to update the mempool, as invalid txs
	ignoredTxs := txs[3*splitIndex:]             // will remain in the mempool

	// Update the mempools with a list of valid and invalid transactions.
	for i, r := range reactors {
		updateMempool(t, r.mempool, validTxs, invalidTxs)

		// Txs included in a block should have been removed from the mempool and
		// have no senders.
		for _, tx := range append(validTxs, invalidTxs...) {
			require.False(t, r.mempool.InMempool(tx.Key()))
			_, hasSenders := r.txSenders[tx.Key()]
			require.False(t, hasSenders)
		}

		// Ignored txs should still be in the mempool.
		checkTxsInMempoolAndSenders(t, r, ignoredTxs, i)
	}

	// The first reactor should not receive transactions from other peers.
	require.Zero(t, len(firstReactor.txSenders))
}

// Check that the mempool has exactly the given list of txs and, if it's not the
// first reactor (reactorIndex == 0), then each tx has a non-empty list of senders.
func checkTxsInMempoolAndSenders(t *testing.T, r *Reactor, txs types.Txs, reactorIndex int) {
	r.txSendersMtx.Lock()
	defer r.txSendersMtx.Unlock()

	require.Equal(t, len(txs), r.mempool.Size())
	if reactorIndex == 0 {
		require.Zero(t, len(r.txSenders))
	} else {
		require.Equal(t, len(txs), len(r.txSenders))
	}

	// Each transaction is in the mempool and, if it's not the first reactor, it
	// has a non-empty list of senders.
	for _, tx := range txs {
		assert.True(t, r.mempool.InMempool(tx.Key()))
		senders, hasSenders := r.txSenders[tx.Key()]
		if reactorIndex == 0 {
			require.False(t, hasSenders)
		} else {
			require.True(t, hasSenders && len(senders) > 0)
		}
	}
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) ([]*Reactor, []*p2p.Switch) {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewInMemoryApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	switches := p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors, switches
}

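// newUniqueTxs generates n distinct transactions from sequential IDs.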
func newUniqueTxs(n int) types.Txs {
	txs := make(types.Txs, n)
	for i := 0; i < n; i++ {
		txs[i] = kvstore.NewTxFromID(i)
	}
	return txs
}

// Wait for all reactors to finish applying a testing function to a list of
// transactions.
func waitForReactors(t *testing.T, txs types.Txs, reactors []*Reactor, testFunc func(*testing.T, types.Txs, *Reactor, int)) {
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			testFunc(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

// Wait until the mempool has a certain number of transactions.
func waitForNumTxsInMempool(numTxs int, mempool Mempool) {
	for mempool.Size() < numTxs {
		time.Sleep(time.Millisecond * 100)
	}
}

// Wait until all txs are in the mempool and check that the number of txs in the
// mempool is as expected.
func checkTxsInMempool(t *testing.T, txs types.Txs, reactor *Reactor, _ int) {
	waitForNumTxsInMempool(len(txs), reactor.mempool)

	reapedTxs := reactor.mempool.ReapMaxTxs(len(txs))
	require.Equal(t, len(txs), len(reapedTxs))
	require.Equal(t, len(txs), reactor.mempool.Size())
}

// Wait until all txs are in the mempool and check that they are in the same
// order as given.
func checkTxsInOrder(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	waitForNumTxsInMempool(len(txs), reactor.mempool)

	// Check that all transactions in the mempool are in the same order as txs.
	reapedTxs := reactor.mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

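// updateMempool calls Update on the mempool (under its lock) as if validTxs had
// been committed successfully (CodeTypeOK) and invalidTxs had failed, at height 1.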
func updateMempool(t *testing.T, mp Mempool, validTxs types.Txs, invalidTxs types.Txs) {
	allTxs := append(validTxs, invalidTxs...)

	validTxResponses := abciResponses(len(validTxs), abci.CodeTypeOK)
	invalidTxResponses := abciResponses(len(invalidTxs), 1)
	allResponses := append(validTxResponses, invalidTxResponses...)

	mp.Lock()
	err := mp.Update(1, allTxs, allResponses, nil, nil)
	mp.Unlock()

	require.NoError(t, err)
}

// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // wait long enough for any txs to have arrived
	assert.Zero(t, reactor.mempool.Size())
}

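// TestMempoolVectors checks that the protobuf encoding of a Txs message matches
// fixed hex test vectors.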
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}