github.com/badrootd/nibiru-cometbft@v0.37.5-0.20240307173500-2a75559eee9b/mempool/v0/reactor_test.go

package v0

import (
	"encoding/hex"
	"errors"
	"net"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/badrootd/nibiru-cometbft/abci/example/kvstore"
	abci "github.com/badrootd/nibiru-cometbft/abci/types"
	cfg "github.com/badrootd/nibiru-cometbft/config"
	"github.com/badrootd/nibiru-cometbft/libs/log"
	cmtrand "github.com/badrootd/nibiru-cometbft/libs/rand"
	"github.com/badrootd/nibiru-cometbft/mempool"
	"github.com/badrootd/nibiru-cometbft/p2p"
	"github.com/badrootd/nibiru-cometbft/p2p/mock"
	memproto "github.com/badrootd/nibiru-cometbft/proto/tendermint/mempool"
	"github.com/badrootd/nibiru-cometbft/proxy"
	"github.com/badrootd/nibiru-cometbft/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)
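
// peerState is a stub of the PeerState interface that the reactor reads via
// peer.Get(types.PeerStateKey); only the reported height is used, to decide
// whether a peer is caught up enough to receive txs.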
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// If there were more than two reactors, the order of transactions could not
	// be asserted in waitForTxsOnReactors (due to transaction gossiping). If we
	// replaced Connect2Switches (full mesh) with a func that connects the first
	// reactor to the others and nothing else (see the connectFirstToOthers
	// sketch below), this test should also pass with more than two reactors.
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}
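
// connectFirstToOthers is a hypothetical sketch (not used by these tests) of
// the star-topology connect func mentioned above. It has the signature that
// p2p.MakeConnectedSwitches expects for its connect callback, the same as
// p2p.Connect2Switches, but only wires pairs that include the first switch.
func connectFirstToOthers(switches []*p2p.Switch, i, j int) {
	// MakeConnectedSwitches invokes the callback once per pair (i, j);
	// skip every pair that does not involve switch 0.
	if i != 0 && j != 0 {
		return
	}
	p2p.Connect2Switches(switches, i, j)
}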

// regression test for https://github.com/tendermint/tendermint/issues/5408
func TestReactorConcurrency(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	var wg sync.WaitGroup

	const numTxs = 5

	for i := 0; i < 1000; i++ {
		wg.Add(2)

		// 1. submit a bunch of txs
		// 2. update the whole mempool
		txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[0].mempool.Lock()
			defer reactors[0].mempool.Unlock()

			deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
			for i := range txs {
				deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
			}
			err := reactors[0].mempool.Update(1, txs, deliverTxResponses, nil, nil)
			assert.NoError(t, err)
		}()

		// 1. submit a bunch of txs
		// 2. update none
		_ = checkTxs(t, reactors[1].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[1].mempool.Lock()
			defer reactors[1].mempool.Unlock()
			err := reactors[1].mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil)
			assert.NoError(t, err)
		}()

		// 1. flush the mempool
		reactors[1].mempool.Flush()
	}

	wg.Wait()
}

// Send a bunch of txs to the first reactor's mempool, claiming they came from
// the peer, and ensure that peer gets no txs.
func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	const peerID = 1
	checkTxs(t, reactors[0].mempool, numTxs, peerID)
	ensureNoTxs(t, reactors[peerID], 100*time.Millisecond)
}

func TestReactor_MaxTxBytes(t *testing.T) {
	config := cfg.TestConfig()

	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// Broadcast a tx of exactly the max size
	// => ensure it's received by the second reactor.
	tx1 := cmtrand.Bytes(config.Mempool.MaxTxBytes)
	err := reactors[0].mempool.CheckTx(tx1, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	require.NoError(t, err)
	waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)

	reactors[0].mempool.Flush()
	reactors[1].mempool.Flush()

	// Submit a tx that exceeds the max size
	// => ensure CheckTx rejects it, so it is never broadcast.
	tx2 := cmtrand.Bytes(config.Mempool.MaxTxBytes + 1)
	err = reactors[0].mempool.CheckTx(tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	require.Error(t, err)
}

func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		if err := r.Stop(); err != nil {
			assert.NoError(t, err)
		}
	}

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	// Reclaimed IDs are not handed out again right away: the internal counter
	// keeps advancing, so the next reservation gets 2.
	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	if testing.Short() {
		return
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < mempool.MaxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

// TODO: This test checks that we don't panic and are able to generate new
// PeerIDs for each peer we add. It seems as though we should be able to test
// this in a much more direct way.
// https://github.com/tendermint/tendermint/issues/9639
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	reactor := reactors[0]

	for i := 0; i < mempool.MaxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.ReceiveEnvelope(p2p.Envelope{
			ChannelID: mempool.MempoolChannel,
			Src:       peer,
			Message:   &memproto.Message{}, // An empty Message is the wrong message type on purpose, so the reactor stops the peer as being in an error state.
		},
		)
		reactor.AddPeer(peer)
	}
}

// Test the experimental feature that limits the number of outgoing connections for gossiping
// transactions (to non-persistent peers only).
// Note: in this test we know which gossip connections are active or not because of how the p2p
// functions are currently implemented, which affects the order in which peers are added to the
// mempool reactor.
func TestMempoolReactorMaxActiveOutboundConnections(t *testing.T) {
	config := cfg.TestConfig()
	config.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = 1
	reactors := makeAndConnectReactors(config, 4)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// Add a bunch of transactions to the first reactor.
	txs := newUniqueTxs(100)
	callCheckTx(t, reactors[0].mempool, txs)

	// Wait for all txs to be in the mempool of the second reactor; the other reactors should not
	// receive any tx. (The second reactor only sends transactions back to the first reactor.)
	checkTxsInMempool(t, txs, reactors[1], 0)
	for _, r := range reactors[2:] {
		require.Zero(t, r.mempool.Size())
	}

	// Disconnect the second reactor from the first reactor.
	firstPeer := reactors[0].Switch.Peers().List()[0]
	reactors[0].Switch.StopPeerGracefully(firstPeer)

	// Now the third reactor should start receiving transactions from the first reactor; the fourth
	// reactor's mempool should still be empty.
	checkTxsInMempool(t, txs, reactors[2], 0)
	for _, r := range reactors[3:] {
		require.Zero(t, r.mempool.Size())
	}
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

func newUniqueTxs(n int) types.Txs {
	txs := make(types.Txs, n)
	for i := 0; i < n; i++ {
		txs[i] = kvstore.NewTxFromID(i)
	}
	return txs
}

func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			checkTxsInOrder(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

// Wait until the mempool has at least the given number of transactions.
func waitForNumTxsInMempool(numTxs int, reactor *Reactor) {
	for reactor.mempool.Size() < numTxs {
		time.Sleep(time.Millisecond * 100)
	}
}
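
// waitForNumTxsInMempoolWithTimeout is a hypothetical, deadline-bounded variant
// of the helper above (a sketch only, not used by these tests): instead of
// spinning until the go test binary itself times out, it fails the test once
// maxWait has elapsed.
func waitForNumTxsInMempoolWithTimeout(t *testing.T, numTxs int, reactor *Reactor, maxWait time.Duration) {
	t.Helper()
	deadline := time.Now().Add(maxWait)
	for reactor.mempool.Size() < numTxs {
		if time.Now().After(deadline) {
			t.Fatalf("timed out waiting for %d txs; mempool has %d", numTxs, reactor.mempool.Size())
		}
		time.Sleep(100 * time.Millisecond)
	}
}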

// Wait until all txs are in the mempool and check that the number of txs in the
// mempool is as expected.
func checkTxsInMempool(t *testing.T, txs types.Txs, reactor *Reactor, _ int) {
	waitForNumTxsInMempool(len(txs), reactor)

	reapedTxs := reactor.mempool.ReapMaxTxs(len(txs))
	require.Equal(t, len(txs), len(reapedTxs))
	require.Equal(t, len(txs), reactor.mempool.Size())
}

// Wait until all txs are in the mempool and check that they are in the same
// order as given.
func checkTxsInOrder(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	waitForNumTxsInMempool(len(txs), reactor)

	// Check that all transactions in the mempool are in the same order as txs.
	reapedTxs := reactor.mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // wait long enough for any gossiped txs to have arrived
	assert.Zero(t, reactor.mempool.Size())
}

func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

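	// expBytes is the protobuf wire encoding of the Message_Txs wrapper: e.g.
	// "0a030a017b" is outer field 1 (Txs, length 3) wrapping inner field 1
	// (one tx, length 1) whose value is 0x7b (decimal 123).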
	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}