github.com/Finschia/ostracon@v1.1.5/mempool/v0/reactor_test.go

package v0

import (
	"encoding/hex"
	"errors"
	"net"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"

	"github.com/Finschia/ostracon/abci/example/kvstore"
	cfg "github.com/Finschia/ostracon/config"
	"github.com/Finschia/ostracon/libs/log"
	tmrand "github.com/Finschia/ostracon/libs/rand"
	"github.com/Finschia/ostracon/mempool"
	"github.com/Finschia/ostracon/p2p"
	"github.com/Finschia/ostracon/p2p/mock"
	"github.com/Finschia/ostracon/proxy"
	"github.com/Finschia/ostracon/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

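// peerState is a minimal stub stored under types.PeerStateKey so the reactor
// can query a peer's height in these tests.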
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// If there were more than two reactors, the order of transactions could not be
	// asserted in waitForTxsOnReactors (due to transaction gossiping). If we
	// replaced Connect2Switches (full mesh) with a func that connects the first
	// reactor to the others and nothing else, this test should also pass with >2 reactors.
	const N = 2

	// In this test, a reactor receives 1000 tx messages from a peer.
	// A reactor has N-1 peers, so up to (N-1)×1000 txs can be stacked in the mempool receive buffer.
	config.P2P.MempoolRecvBufSize = (N - 1) * 1000

	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}

// regression test for https://github.com/tendermint/tendermint/issues/5408
func TestReactorConcurrency(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	var wg sync.WaitGroup

	const numTxs = 5

	for i := 0; i < 1000; i++ {
		wg.Add(2)

		// 1. submit a bunch of txs
		// 2. update the whole mempool
		txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[0].mempool.Lock()
			defer reactors[0].mempool.Unlock()

			deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
			for i := range txs {
				deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
			}
			err := reactors[0].mempool.Update(newTestBlock(1, txs), deliverTxResponses, nil, nil)
			assert.NoError(t, err)
		}()

		// 1. submit a bunch of txs
		// 2. update none
		_ = checkTxs(t, reactors[1].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[1].mempool.Lock()
			defer reactors[1].mempool.Unlock()
			err := reactors[1].mempool.Update(newTestBlock(1, []types.Tx{}),
				make([]*abci.ResponseDeliverTx, 0), nil, nil)
			assert.NoError(t, err)
		}()

		// 1. flush the mempool
		reactors[1].mempool.Flush()
	}

	wg.Wait()
}

// Send a bunch of txs to the first reactor's mempool, claiming they came from
// the peer, and ensure the peer gets no txs.
func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	const peerID = 1
	checkTxs(t, reactors[0].mempool, numTxs, peerID)
	ensureNoTxs(t, reactors[peerID], 100*time.Millisecond)
}

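// TestReactor_MaxTxBytes checks that a tx of exactly MaxTxBytes is accepted and
// broadcast to the other reactor, while a tx one byte larger is rejected.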
func TestReactor_MaxTxBytes(t *testing.T) {
	config := cfg.TestConfig()

	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// Broadcast a tx, which has the max size
	// => ensure it's received by the second reactor.
	tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
	err := reactors[0].mempool.CheckTxSync(tx1, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	require.NoError(t, err)
	waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)

	reactors[0].mempool.Flush()
	reactors[1].mempool.Flush()

	// Broadcast a tx, which is beyond the max size
	// => ensure it's not sent
	tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
	err = reactors[0].mempool.CheckTxSync(tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	require.Error(t, err)
}

func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		if err := r.Stop(); err != nil {
			assert.NoError(t, err)
		}
	}

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

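// TestMempoolIDsBasic checks that peer IDs are reserved sequentially and that
// an ID can be reclaimed and a fresh one reserved for the same peer.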
func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	if testing.Short() {
		return
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < mempool.MaxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

// TODO: This test tests that we don't panic and are able to generate new
// PeerIDs for each peer we add. It seems as though we should be able to test
// this in a much more direct way.
// https://github.com/tendermint/tendermint/issues/9639
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	reactor := reactors[0]

	for i := 0; i < mempool.MaxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.ReceiveEnvelope(p2p.Envelope{
			ChannelID: mempool.MempoolChannel,
			Src:       peer,
			Message:   &memproto.Message{}, // The wrong message type is used on purpose so the reactor stops the peer for being in an error state.
		},
		)
		reactor.AddPeer(peer)
	}
}

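// TestLegacyReactorReceiveBasic ensures the legacy byte-slice Receive path does
// not panic when handed a marshaled Txs message.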
func TestLegacyReactorReceiveBasic(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
	)
	defer func() {
		err := reactor.Stop()
		assert.NoError(t, err)
	}()

	reactor.InitPeer(peer)
	reactor.AddPeer(peer)
	m := &memproto.Txs{}
	wm := m.Wrap()
	msg, err := proto.Marshal(wm)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(mempool.MempoolChannel, peer, msg)
	})
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		// so we don't start the consensus states
		reactors[i] = NewReactor(config.Mempool, config.P2P.RecvAsync, config.P2P.MempoolRecvBufSize, mempool)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch, config *cfg.P2PConfig) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

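// waitForTxsOnReactor polls the reactor's mempool until it holds len(txs)
// transactions, then checks that the reaped txs match txs in order.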
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 1000)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensureNoTxs checks that the reactor's mempool is still empty after the given timeout.
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // give any gossiped txs time to arrive
	assert.Zero(t, reactor.mempool.Size())
}

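// TestMempoolVectors checks proto encoding of mempool Txs messages against
// known hex test vectors.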
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}