github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/mempool/cat/reactor_test.go

package cat

import (
	"encoding/hex"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log/term"
	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/badrootd/celestia-core/abci/example/kvstore"
	"github.com/badrootd/celestia-core/crypto/ed25519"
	p2pmock "github.com/badrootd/celestia-core/p2p/mock"

	cfg "github.com/badrootd/celestia-core/config"

	"github.com/badrootd/celestia-core/libs/log"
	"github.com/badrootd/celestia-core/mempool"
	"github.com/badrootd/celestia-core/p2p"
	"github.com/badrootd/celestia-core/p2p/mocks"
	protomem "github.com/badrootd/celestia-core/proto/tendermint/mempool"
	"github.com/badrootd/celestia-core/proxy"
	"github.com/badrootd/celestia-core/types"
)

const (
	numTxs  = 10
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

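// peerState is a minimal peer-state stub exposing only a height; it is used to
// populate types.PeerStateKey on the test peers.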
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	const N = 5
	reactors := makeAndConnectReactors(t, config, N)

	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	sort.Slice(txs, func(i, j int) bool {
		return txs[i].priority > txs[j].priority // N.B. higher priorities first
	})
	transactions := make(types.Txs, len(txs))
	for idx, tx := range txs {
		transactions[idx] = tx.tx
	}

	waitForTxsOnReactors(t, transactions, reactors)
}

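// TestReactorSendWantTxAfterReceivingSeenTx checks that receiving a SeenTx for
// an unknown transaction makes the reactor request it with a WantTx from the
// announcing peer.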
func TestReactorSendWantTxAfterReceivingSeenTx(t *testing.T) {
	reactor, _ := setupReactor(t)

	tx := newDefaultTx("hello")
	key := tx.Key()
	msgSeen := &protomem.Message{
		Sum: &protomem.Message_SeenTx{SeenTx: &protomem.SeenTx{TxKey: key[:]}},
	}
	msgSeenB, err := msgSeen.Marshal()
	require.NoError(t, err)

	msgWant := &protomem.Message{
		Sum: &protomem.Message_WantTx{WantTx: &protomem.WantTx{TxKey: key[:]}},
	}
	envWant := p2p.Envelope{
		Message:   msgWant,
		ChannelID: MempoolStateChannel,
	}

	peer := genPeer()
	peer.On("SendEnvelope", envWant).Return(true)

	reactor.InitPeer(peer)
	reactor.Receive(MempoolStateChannel, peer, msgSeenB)

	peer.AssertExpectations(t)
}

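// TestReactorSendsTxAfterReceivingWantTx checks that a WantTx for a transaction
// already in the pool is answered by sending the full transaction back to the
// requesting peer.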
func TestReactorSendsTxAfterReceivingWantTx(t *testing.T) {
	reactor, pool := setupReactor(t)

	tx := newDefaultTx("hello")
	key := tx.Key()
	txEnvelope := p2p.Envelope{
		Message:   &protomem.Txs{Txs: [][]byte{tx}},
		ChannelID: mempool.MempoolChannel,
	}

	msgWant := &protomem.Message{
		Sum: &protomem.Message_WantTx{WantTx: &protomem.WantTx{TxKey: key[:]}},
	}
	msgWantB, err := msgWant.Marshal()
	require.NoError(t, err)

	peer := genPeer()
	peer.On("SendEnvelope", txEnvelope).Return(true)

	// Add the transaction to the node's pool. The node is not connected to
	// any peers, so nothing should be broadcast yet.
	require.NoError(t, pool.CheckTx(tx, nil, mempool.TxInfo{}))

	// Add the peer
	reactor.InitPeer(peer)
	// The peer sends a want msg for this tx
	reactor.Receive(MempoolStateChannel, peer, msgWantB)

	// Should send the tx to the peer in response
	peer.AssertExpectations(t)

	// The pool should have marked the peer as having seen the tx.
	peerID := reactor.ids.GetIDForPeer(peer.ID())
	require.True(t, pool.seenByPeersSet.Has(key, peerID))
}

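// TestReactorBroadcastsSeenTxAfterReceivingTx checks that, after receiving a
// transaction from one peer, the reactor announces it with a SeenTx to the
// other peers but not back to the sender.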
func TestReactorBroadcastsSeenTxAfterReceivingTx(t *testing.T) {
	reactor, _ := setupReactor(t)

	tx := newDefaultTx("hello")
	key := tx.Key()
	txMsg := &protomem.Message{
		Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}},
	}
	txMsgBytes, err := txMsg.Marshal()
	require.NoError(t, err)

	seenMsg := &protomem.Message{
		Sum: &protomem.Message_SeenTx{SeenTx: &protomem.SeenTx{TxKey: key[:]}},
	}

	peers := genPeers(2)
	// Only peer 1 should receive the SeenTx message, since peer 0 sent
	// the transaction in the first place.
	seenEnv := p2p.Envelope{
		Message:   seenMsg,
		ChannelID: MempoolStateChannel,
	}

	peers[1].On("SendEnvelope", seenEnv).Return(true)

	reactor.InitPeer(peers[0])
	reactor.InitPeer(peers[1])
	reactor.Receive(mempool.MempoolChannel, peers[0], txMsgBytes)

	peers[0].AssertExpectations(t)
	peers[1].AssertExpectations(t)
}

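// TestRemovePeerRequestFromOtherPeer checks that when the peer an outstanding
// WantTx was sent to is removed, the reactor re-requests the transaction from
// another peer that has announced it.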
func TestRemovePeerRequestFromOtherPeer(t *testing.T) {
	reactor, _ := setupReactor(t)

	tx := newDefaultTx("hello")
	key := tx.Key()
	peers := genPeers(2)
	reactor.InitPeer(peers[0])
	reactor.InitPeer(peers[1])

	seenMsg := &protomem.SeenTx{TxKey: key[:]}

	wantEnv := p2p.Envelope{
		Message: &protomem.Message{
			Sum: &protomem.Message_WantTx{WantTx: &protomem.WantTx{TxKey: key[:]}},
		},
		ChannelID: MempoolStateChannel,
	}
	peers[0].On("SendEnvelope", wantEnv).Return(true)
	peers[1].On("SendEnvelope", wantEnv).Return(true)

	reactor.ReceiveEnvelope(p2p.Envelope{
		Src:       peers[0],
		Message:   seenMsg,
		ChannelID: MempoolStateChannel,
	})
	time.Sleep(100 * time.Millisecond)
	reactor.ReceiveEnvelope(p2p.Envelope{
		Src:       peers[1],
		Message:   seenMsg,
		ChannelID: MempoolStateChannel,
	})

	reactor.RemovePeer(peers[0], "test")

	peers[0].AssertExpectations(t)
	peers[1].AssertExpectations(t)

	require.True(t, reactor.mempool.seenByPeersSet.Has(key, 2))
	// We should have automatically sent another request out, this time to peer 2.
	require.EqualValues(t, 2, reactor.requests.ForTx(key))
	require.True(t, reactor.requests.Has(2, key))
	require.False(t, reactor.mempool.seenByPeersSet.Has(key, 1))
}

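// TestMempoolVectors pins the protobuf wire encoding of Txs messages against
// known hex vectors.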
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := protomem.Message{
			Sum: &protomem.Message_Txs{
				Txs: &protomem.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}

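// TestReactorEventuallyRemovesExpiredTransaction checks that a transaction
// received from a peer is evicted from the pool once its TTL has elapsed.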
func TestReactorEventuallyRemovesExpiredTransaction(t *testing.T) {
	reactor, _ := setupReactor(t)
	reactor.mempool.config.TTLDuration = 100 * time.Millisecond

	tx := newDefaultTx("hello")
	key := tx.Key()
	txMsg := &protomem.Message{
		Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}},
	}
	txMsgBytes, err := txMsg.Marshal()
	require.NoError(t, err)

	peer := genPeer()
	require.NoError(t, reactor.Start())
	reactor.InitPeer(peer)
	reactor.Receive(mempool.MempoolChannel, peer, txMsgBytes)
	require.True(t, reactor.mempool.Has(key))

	// wait for the transaction to expire
	time.Sleep(reactor.mempool.config.TTLDuration * 2)
	require.False(t, reactor.mempool.Has(key))
}

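// TestLegacyReactorReceiveBasic checks that the legacy byte-slice Receive path
// does not panic on an empty Txs message.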
func TestLegacyReactorReceiveBasic(t *testing.T) {
	config := cfg.TestConfig()
	// If there were more than two reactors, the order of transactions could not be
	// asserted in waitForTxsOnReactors (due to transaction gossiping). If
	// Connect2Switches (full mesh) were replaced with a function that connects only
	// the first reactor to the others, this test should also pass with >2 reactors.
	const N = 1
	reactors := makeAndConnectReactors(t, config, N)
	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
	)
	defer func() {
		err := reactor.Stop()
		assert.NoError(t, err)
	}()

	reactor.InitPeer(peer)
	reactor.AddPeer(peer)

	msg := &protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{}},
		},
	}
	m, err := proto.Marshal(msg)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(mempool.MempoolChannel, peer, m)
	})
}

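// setupReactor builds a reactor backed by a fresh kvstore-based mempool and
// registers its cleanup with the test.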
func setupReactor(t *testing.T) (*Reactor, *TxPool) {
	app := &application{kvstore.NewApplication()}
	cc := proxy.NewLocalClientCreator(app)
	pool, cleanup := newMempoolWithApp(cc)
	t.Cleanup(cleanup)
	reactor, err := NewReactor(pool, &ReactorOptions{})
	require.NoError(t, err)
	return reactor, pool
}

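// makeAndConnectReactors creates n reactors, wires them into a full mesh of
// switches, and sets a dummy peer state on every connected peer.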
func makeAndConnectReactors(t *testing.T, config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		var pool *TxPool
		reactors[i], pool = setupReactor(t)
		pool.logger = logger.With("validator", i)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	switches := p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)

	t.Cleanup(func() {
		for _, s := range switches {
			assert.NoError(t, s.Stop())
		}
	})

	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	return reactors
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

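// newMempoolWithApp creates a TxPool against a fresh test config root; the
// returned cleanup removes that root directory.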
func newMempoolWithApp(cc proxy.ClientCreator) (*TxPool, func()) {
	conf := cfg.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfig(cc, conf)
	return mp, cu
}

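// newMempoolWithAppAndConfig starts an ABCI mempool connection and returns a
// TxPool using it, together with a cleanup function.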
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxPool, func()) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewTxPool(log.TestingLogger(), conf.Mempool, appConnMem, 1)

	return mp, func() { os.RemoveAll(conf.RootDir) }
}

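// waitForTxsOnReactors waits, up to the package timeout, for every reactor's
// mempool to contain all of the given transactions.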
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

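// waitForTxsOnReactor polls a single reactor's mempool until it holds len(txs)
// transactions, then checks that the reaped transactions match the expected order.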
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		require.Contains(t, reapedTxs, tx)
		require.Equal(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %x vs %x", i, reactorIndex, tx, reapedTxs[i])
	}
}

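// genPeers returns n mocked peers created via genPeer.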
func genPeers(n int) []*mocks.Peer {
	peers := make([]*mocks.Peer, n)
	for i := 0; i < n; i++ {
		peers[i] = genPeer()
	}
	return peers
}

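// genPeer returns a mocked peer with a random node ID and a nil peer state.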
func genPeer() *mocks.Peer {
	peer := &mocks.Peer{}
	nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
	peer.On("ID").Return(nodeKey.ID())
	peer.On("Get", types.PeerStateKey).Return(nil).Maybe()
	return peer
}