github.com/vipernet-xyz/tm@v0.34.24/mempool/v0/reactor_test.go

package v0

import (
	"encoding/hex"
	"errors"
	"net"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vipernet-xyz/tm/abci/example/kvstore"
	abci "github.com/vipernet-xyz/tm/abci/types"
	cfg "github.com/vipernet-xyz/tm/config"
	"github.com/vipernet-xyz/tm/libs/log"
	tmrand "github.com/vipernet-xyz/tm/libs/rand"
	"github.com/vipernet-xyz/tm/mempool"
	"github.com/vipernet-xyz/tm/p2p"
	"github.com/vipernet-xyz/tm/p2p/mock"
	memproto "github.com/vipernet-xyz/tm/proto/tendermint/mempool"
	"github.com/vipernet-xyz/tm/proxy"
	"github.com/vipernet-xyz/tm/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

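// peerState is a minimal stub stored under types.PeerStateKey in these tests
// so the reactor can look up a peer's height.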
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// If there were more than two reactors, the order of transactions could not be
	// asserted in waitForTxsOnReactors (due to transaction gossiping). If we
	// replaced Connect2Switches (full mesh) with a func that connects the first
	// reactor to the others and nothing else, this test should also pass with >2 reactors.
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}

// regression test for https://github.com/vipernet-xyz/tm/issues/5408
func TestReactorConcurrency(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	var wg sync.WaitGroup

	const numTxs = 5

	for i := 0; i < 1000; i++ {
		wg.Add(2)

		// 1. submit a bunch of txs
		// 2. update the whole mempool
		txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[0].mempool.Lock()
			defer reactors[0].mempool.Unlock()

			deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
			for i := range txs {
				deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
			}
			err := reactors[0].mempool.Update(1, txs, deliverTxResponses, nil, nil)
			assert.NoError(t, err)
		}()

		// 1. submit a bunch of txs
		// 2. update none
		_ = checkTxs(t, reactors[1].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[1].mempool.Lock()
			defer reactors[1].mempool.Unlock()
			err := reactors[1].mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil)
			assert.NoError(t, err)
		}()

		// 1. flush the mempool
		reactors[1].mempool.Flush()
	}

	wg.Wait()
}

// Send a bunch of txs to the first reactor's mempool, claiming they came from a
// peer, and ensure that peer gets no txs back.
func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	const peerID = 1
	checkTxs(t, reactors[0].mempool, numTxs, peerID)
	ensureNoTxs(t, reactors[peerID], 100*time.Millisecond)
}

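// TestReactor_MaxTxBytes checks that a tx of exactly MaxTxBytes is accepted
// and relayed, while a tx one byte larger is rejected by CheckTx.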
func TestReactor_MaxTxBytes(t *testing.T) {
	config := cfg.TestConfig()

	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// Broadcast a tx at the max size
	// => ensure it's received by the second reactor.
	tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
	err := reactors[0].mempool.CheckTx(tx1, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	require.NoError(t, err)
	waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)

	reactors[0].mempool.Flush()
	reactors[1].mempool.Flush()

	// Broadcast a tx that exceeds the max size
	// => ensure it's not sent.
	tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
	err = reactors[0].mempool.CheckTx(tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	require.Error(t, err)
}

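// TestBroadcastTxForPeerStopsWhenPeerStops checks that broadcastTxRoutine
// exits, rather than leaking, once its peer is stopped.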
func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

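// TestBroadcastTxForPeerStopsWhenReactorStops checks that broadcastTxRoutine
// exits, rather than leaking, once the reactor itself is stopped.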
func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		if err := r.Stop(); err != nil {
			assert.NoError(t, err)
		}
	}

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

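// TestMempoolIDsBasic checks that reserving an ID for a peer, reclaiming it,
// and reserving again hands out a fresh ID instead of reusing the old one.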
func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

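// TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs checks that reserving
// more peer IDs than mempool.MaxActiveIDs allows causes a panic.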
func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	if testing.Short() {
		return
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < mempool.MaxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

// TODO: This test checks that we don't panic and are able to generate new
// PeerIDs for each peer we add. It seems as though we should be able to test
// this in a much more direct way.
// https://github.com/vipernet-xyz/tm/issues/9639
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	reactor := reactors[0]

	for i := 0; i < mempool.MaxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.ReceiveEnvelope(p2p.Envelope{
			ChannelID: mempool.MempoolChannel,
			Src:       peer,
			Message:   &memproto.Message{}, // This uses the wrong message type on purpose, so the reactor treats the peer as being in an error state and stops it.
		})
		reactor.AddPeer(peer)
	}
}

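// TestLegacyReactorReceiveBasic checks that the byte-slice Receive entry point
// handles a marshalled Txs message without panicking.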
func TestLegacyReactorReceiveBasic(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
	)
	defer func() {
		err := reactor.Stop()
		assert.NoError(t, err)
	}()

	reactor.InitPeer(peer)
	reactor.AddPeer(peer)
	m := &memproto.Txs{}
	wm := m.Wrap()
	msg, err := proto.Marshal(wm)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(mempool.MempoolChannel, peer, msg)
	})
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

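// waitForTxsOnReactors waits, up to the package-level timeout, for all the
// given txs to show up in every reactor's mempool.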
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

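// waitForTxsOnReactor polls a single reactor's mempool until it holds len(txs)
// transactions, then checks they match the expected txs in order.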
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // give any broadcast txs time to arrive
	assert.Zero(t, reactor.mempool.Size())
}

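// TestMempoolVectors pins the protobuf wire encoding of mempool Txs messages
// against known hex vectors.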
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}