github.com/line/ostracon@v1.0.10-0.20230328032236-7f20145f065d/mempool/reactor_test.go

package mempool

import (
	"encoding/hex"
	"errors"
	"net"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"

	"github.com/line/ostracon/abci/example/kvstore"
	cfg "github.com/line/ostracon/config"
	"github.com/line/ostracon/libs/log"
	tmrand "github.com/line/ostracon/libs/rand"
	"github.com/line/ostracon/p2p"
	"github.com/line/ostracon/p2p/mock"
	"github.com/line/ostracon/proxy"
	"github.com/line/ostracon/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

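// peerState is a minimal stand-in for the value stored under
// types.PeerStateKey; the mempool reactor only reads the height from it.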
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received by the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// With more than two reactors, the order of transactions could not be
	// asserted in waitForTxsOnReactors (because txs are gossiped). If
	// Connect2Switches (full mesh) were replaced with a func that connects the
	// first reactor to the others and nothing else, this test should also pass
	// with >2 reactors (see the connectFirstToOthers sketch after this test).
	const N = 2

	// In this test, a reactor receives 1000 tx messages from a peer.
	// Each reactor has N-1 peers, so up to (N-1)×1000 txs can be queued.
	config.P2P.MempoolRecvBufSize = (N - 1) * 1000

	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}
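
// connectFirstToOthers is a sketch, not used by these tests, of the alternative
// wiring mentioned in TestReactorBroadcastTxsMessage: connect the first switch
// to every other switch and nothing else, so tx ordering could still be
// asserted with more than two reactors. It assumes the
// (switches []*p2p.Switch, i, j int) signature used by p2p.Connect2Switches.
func connectFirstToOthers(switches []*p2p.Switch, i, j int) {
	if i != 0 && j != 0 {
		return // skip links that do not involve the first switch
	}
	p2p.Connect2Switches(switches, i, j)
}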

// regression test for https://github.com/tendermint/tendermint/issues/5408
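// It repeatedly submits txs to both reactors while concurrently updating one
// mempool with those txs, updating the other with an empty block, and flushing
// it, to exercise the mempool locks.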
func TestReactorConcurrency(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	var wg sync.WaitGroup

	const numTxs = 5

	for i := 0; i < 1000; i++ {
		wg.Add(2)

		// 1. submit a bunch of txs
		// 2. update the whole mempool
		txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[0].mempool.Lock()
			defer reactors[0].mempool.Unlock()

			deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
			for i := range txs {
				deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
			}
			err := reactors[0].mempool.Update(newTestBlock(1, txs), deliverTxResponses, nil, nil)
			assert.NoError(t, err)
		}()

		// 1. submit a bunch of txs
		// 2. update none
		_ = checkTxs(t, reactors[1].mempool, numTxs, UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[1].mempool.Lock()
			defer reactors[1].mempool.Unlock()
			err := reactors[1].mempool.Update(newTestBlock(1, []types.Tx{}),
				make([]*abci.ResponseDeliverTx, 0), nil, nil)
			assert.NoError(t, err)
		}()

		// 1. flush the mempool
		reactors[1].mempool.Flush()
	}

	wg.Wait()
}

// Send a bunch of txs to the first reactor's mempool, claiming they came from
// the peer, and ensure the peer gets no txs.
func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	const peerID = 1
	checkTxs(t, reactors[0].mempool, numTxs, peerID)
	ensureNoTxs(t, reactors[peerID], 100*time.Millisecond)
}

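// TestReactor_MaxTxBytes checks that a tx of exactly MaxTxBytes is accepted and
// broadcast to the peer, while a tx one byte larger is rejected by CheckTxSync.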
func TestReactor_MaxTxBytes(t *testing.T) {
	config := cfg.TestConfig()

	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// Broadcast a tx that has the max size
	// => ensure it's received by the second reactor.
	tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
	_, err := reactors[0].mempool.CheckTxSync(tx1, TxInfo{SenderID: UnknownPeerID})
	require.NoError(t, err)
	waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)

	reactors[0].mempool.Flush()
	reactors[1].mempool.Flush()

	// Broadcast a tx that exceeds the max size
	// => ensure it's not sent.
	tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
	_, err = reactors[0].mempool.CheckTxSync(tx2, TxInfo{SenderID: UnknownPeerID})
	require.Error(t, err)
}

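// Ensure broadcastTxRoutine terminates (no goroutine leak) when the peer it
// serves is stopped.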
func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

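// Ensure broadcastTxRoutine terminates (no goroutine leak) when the reactor
// itself is stopped.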
func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		if err := r.Stop(); err != nil {
			assert.NoError(t, err)
		}
	}

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

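// TestMempoolIDsBasic checks that peer IDs are handed out from an incrementing
// counter and that a reclaimed peer gets a fresh ID on re-reservation.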
func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

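// Reserving more than maxActiveIDs peer IDs should panic.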
func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	if testing.Short() {
		return
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < maxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

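// TestDontExhaustMaxActiveIDs checks that calling Receive/AddPeer for more than
// maxActiveIDs peers does not exhaust the ID space (i.e. does not panic).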
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	reactor := reactors[0]

	for i := 0; i < maxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
		reactor.AddPeer(peer)
	}
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		// so we don't start the consensus states
		reactors[i] = NewReactor(config.Mempool, config.P2P.RecvAsync, config.P2P.MempoolRecvBufSize, mempool)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch, config *cfg.P2PConfig) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

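// waitForTxsOnReactors blocks until every reactor's mempool holds txs (checked
// concurrently), failing the test if the global timeout elapses first.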
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

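// waitForTxsOnReactor polls a single reactor's mempool until it holds len(txs)
// txs, then checks that the reaped txs match the expected ones in order.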
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Second)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // give any broadcast txs time to arrive
	assert.Zero(t, reactor.mempool.Size())
}

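// TestMempoolVectors pins the protobuf wire encoding of mempool Txs messages
// against known hex vectors.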
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}