bitbucket.org/number571/tendermint@v0.8.14/internal/mempool/v0/reactor_test.go

package v0

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"bitbucket.org/number571/tendermint/abci/example/kvstore"
	abci "bitbucket.org/number571/tendermint/abci/types"
	cfg "bitbucket.org/number571/tendermint/config"
	"bitbucket.org/number571/tendermint/internal/mempool"
	"bitbucket.org/number571/tendermint/internal/p2p"
	"bitbucket.org/number571/tendermint/internal/p2p/p2ptest"
	"bitbucket.org/number571/tendermint/libs/log"
	tmrand "bitbucket.org/number571/tendermint/libs/rand"
	protomem "bitbucket.org/number571/tendermint/proto/tendermint/mempool"
	"bitbucket.org/number571/tendermint/proxy"
	"bitbucket.org/number571/tendermint/types"
)

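// reactorTestSuite bundles the per-node reactors, mempools, channels, and
// peer-update plumbing for an in-memory test network of mempool reactors.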
type reactorTestSuite struct {
	network *p2ptest.Network
	logger  log.Logger

	reactors        map[types.NodeID]*Reactor
	mempoolChannels map[types.NodeID]*p2p.Channel
	mempools        map[types.NodeID]*CListMempool
	kvstores        map[types.NodeID]*kvstore.Application

	peerChans   map[types.NodeID]chan p2p.PeerUpdate
	peerUpdates map[types.NodeID]*p2p.PeerUpdates

	nodes []types.NodeID
}

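// setup creates a p2ptest network of numNodes nodes, attaches a kvstore-backed
// CListMempool and a mempool Reactor to each node, starts every reactor, and
// registers cleanup that stops any reactor still running at the end of the test.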
func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite {
	t.Helper()

	rts := &reactorTestSuite{
		logger:          log.TestingLogger().With("testCase", t.Name()),
		network:         p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
		reactors:        make(map[types.NodeID]*Reactor, numNodes),
		mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
		mempools:        make(map[types.NodeID]*CListMempool, numNodes),
		kvstores:        make(map[types.NodeID]*kvstore.Application, numNodes),
		peerChans:       make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
		peerUpdates:     make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
	}

	chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)}
	rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf))

	for nodeID := range rts.network.Nodes {
		rts.kvstores[nodeID] = kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(rts.kvstores[nodeID])

		mempool, memCleanup := newMempoolWithApp(cc)
		t.Cleanup(memCleanup)
		mempool.SetLogger(rts.logger)
		rts.mempools[nodeID] = mempool

		rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
		rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)
		rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID])

		rts.reactors[nodeID] = NewReactor(
			rts.logger.With("nodeID", nodeID),
			cfg,
			rts.network.Nodes[nodeID].PeerManager,
			mempool,
			rts.mempoolChannels[nodeID],
			rts.peerUpdates[nodeID],
		)

		rts.nodes = append(rts.nodes, nodeID)

		require.NoError(t, rts.reactors[nodeID].Start())
		require.True(t, rts.reactors[nodeID].IsRunning())
	}

	require.Len(t, rts.reactors, numNodes)

	t.Cleanup(func() {
		for nodeID := range rts.reactors {
			if rts.reactors[nodeID].IsRunning() {
				require.NoError(t, rts.reactors[nodeID].Stop())
				require.False(t, rts.reactors[nodeID].IsRunning())
			}
		}
	})

	return rts
}

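// start starts the underlying p2p test network and verifies that a node sees
// every other node as a connected peer.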
func (rts *reactorTestSuite) start(t *testing.T) {
	t.Helper()
	rts.network.Start(t)
	require.Len(t,
		rts.network.RandomNode().PeerManager.Peers(),
		len(rts.nodes)-1,
		"network does not have expected number of nodes")
}

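// assertMempoolChannelsDrained stops every reactor and then checks that no
// outbound envelopes remain queued on any mempool channel.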
func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) {
	t.Helper()

	for id, r := range rts.reactors {
		require.NoError(t, r.Stop(), "stopping reactor %s", id)
		r.Wait()
		require.False(t, r.IsRunning(), "reactor %s did not stop", id)
	}

	for _, mch := range rts.mempoolChannels {
		require.Empty(t, mch.Out, "checking channel %q (len=%d)", mch.ID, len(mch.Out))
	}
}

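// waitForTxns blocks until the mempools of the given nodes (or of every node,
// if no IDs are passed) contain all of txs, then verifies that the reaped
// transactions match the expected ones.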
func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) {
	t.Helper()

	fn := func(pool *CListMempool) {
		for pool.Size() < len(txs) {
			time.Sleep(50 * time.Millisecond)
		}

		reapedTxs := pool.ReapMaxTxs(len(txs))
		require.Equal(t, len(txs), len(reapedTxs))
		for i, tx := range txs {
			require.Equalf(t,
				tx,
				reapedTxs[i],
				"tx at index %d in reactor mempool mismatch; expected: %v, got: %v", i, tx, reapedTxs[i],
			)
		}
	}

	if len(ids) == 1 {
		fn(rts.reactors[ids[0]].mempool)
		return
	}

	wg := &sync.WaitGroup{}
	for id := range rts.mempools {
		if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) {
			continue
		}

		// Check each selected node concurrently; wg.Wait blocks until every
		// mempool has received and matched the expected transactions.
		wg.Add(1)
		go func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id)
	}

	wg.Wait()
}

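// TestReactorBroadcastTxs checks that transactions added to one node's mempool
// are gossiped to every other node in the network.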
func TestReactorBroadcastTxs(t *testing.T) {
	numTxs := 1000
	numNodes := 10
	config := cfg.TestConfig()

	rts := setup(t, config.Mempool, numNodes, 0)

	primary := rts.nodes[0]
	secondaries := rts.nodes[1:]

	txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID)

	// run the router
	rts.start(t)

	// Wait until all secondary suites (reactors) have received all of the
	// mempool txs from the primary suite (node).
	rts.waitForTxns(t, txs, secondaries...)

	for _, pool := range rts.mempools {
		require.Equal(t, len(txs), pool.Size())
	}

	rts.assertMempoolChannelsDrained(t)
}

// regression test for https://bitbucket.org/number571/tendermint/issues/5408
func TestReactorConcurrency(t *testing.T) {
	numTxs := 5
	numNodes := 2
	config := cfg.TestConfig()

	rts := setup(t, config.Mempool, numNodes, 0)

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	rts.start(t)

	var wg sync.WaitGroup

	for i := 0; i < 1000; i++ {
		wg.Add(2)

		// 1. submit a bunch of txs
		// 2. update the whole mempool

		txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			mempool := rts.mempools[primary]

			mempool.Lock()
			defer mempool.Unlock()

			deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
			for i := range txs {
				deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
			}

			require.NoError(t, mempool.Update(1, txs, deliverTxResponses, nil, nil))
		}()

		// 1. submit a bunch of txs
		// 2. update none
		_ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID)
		go func() {
			defer wg.Done()

			mempool := rts.mempools[secondary]

			mempool.Lock()
			defer mempool.Unlock()

			err := mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil)
			require.NoError(t, err)
		}()

		// flush the mempool
		rts.mempools[secondary].Flush()
	}

	wg.Wait()
}

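// TestReactorNoBroadcastToSender checks that transactions received from a peer
// are not gossiped back to that same peer.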
func TestReactorNoBroadcastToSender(t *testing.T) {
	numTxs := 1000
	numNodes := 2
	config := cfg.TestConfig()

	rts := setup(t, config.Mempool, numNodes, uint(numTxs))

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	// Mark the txs as having been received from peer ID 1, which is assigned to
	// the secondary node once it connects, so they must not be gossiped back.
	peerID := uint16(1)
	_ = checkTxs(t, rts.mempools[primary], numTxs, peerID)

	rts.start(t)

	time.Sleep(100 * time.Millisecond)

	require.Eventually(t, func() bool {
		return rts.mempools[secondary].Size() == 0
	}, time.Minute, 100*time.Millisecond)

	rts.assertMempoolChannelsDrained(t)
}

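// TestReactor_MaxTxBytes checks that a transaction of exactly MaxTxBytes is
// accepted and gossiped, while a transaction one byte over the limit is
// rejected by CheckTx.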
func TestReactor_MaxTxBytes(t *testing.T) {
	numNodes := 2
	config := cfg.TestConfig()

	rts := setup(t, config.Mempool, numNodes, 0)

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	// Broadcast a tx that has the max size and ensure it's received by the
	// second reactor.
	tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
	err := rts.reactors[primary].mempool.CheckTx(
		context.Background(),
		tx1,
		nil,
		mempool.TxInfo{
			SenderID: mempool.UnknownPeerID,
		},
	)
	require.NoError(t, err)

	rts.start(t)

	// Wait until the secondary suite (reactor) has received the tx from the
	// primary suite (node).
	rts.waitForTxns(t, []types.Tx{tx1}, secondary)

	rts.reactors[primary].mempool.Flush()
	rts.reactors[secondary].mempool.Flush()

	// Broadcast a tx that exceeds the max size and ensure it's rejected.
	tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
	err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	require.Error(t, err)

	rts.assertMempoolChannelsDrained(t)
}

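// TestDontExhaustMaxActiveIDs checks that repeatedly announcing the same peer
// and queueing messages to it does not exhaust the mempool's active peer IDs
// (i.e. the reactor does not panic).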
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()

	// Create a single-node network, but do not start it.
	rts := setup(t, config.Mempool, 1, mempool.MaxActiveIDs+1)

	nodeID := rts.nodes[0]

	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)

	// ensure the reactor does not panic (i.e. exhaust active IDs)
	for i := 0; i < mempool.MaxActiveIDs+1; i++ {
		rts.peerChans[nodeID] <- p2p.PeerUpdate{
			Status: p2p.PeerStatusUp,
			NodeID: peerID,
		}

		rts.mempoolChannels[nodeID].Out <- p2p.Envelope{
			To: peerID,
			Message: &protomem.Txs{
				Txs: [][]byte{},
			},
		}
	}

	require.Eventually(
		t,
		func() bool {
			for _, mch := range rts.mempoolChannels {
				if len(mch.Out) > 0 {
					return false
				}
			}

			return true
		},
		time.Minute,
		10*time.Millisecond,
	)

	rts.assertMempoolChannelsDrained(t)
}

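// TestMempoolIDsPanicsIfNodeRequestsOverMaxActiveIDs checks that reserving more
// than MaxActiveIDs peer IDs panics.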
func TestMempoolIDsPanicsIfNodeRequestsOverMaxActiveIDs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}

	// 0 is already reserved for UnknownPeerID
	ids := mempool.NewMempoolIDs()

	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	require.NoError(t, err)

	for i := 0; i < mempool.MaxActiveIDs-1; i++ {
		ids.ReserveForPeer(peerID)
	}

	require.Panics(t, func() {
		ids.ReserveForPeer(peerID)
	})
}

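// TestBroadcastTxForPeerStopsWhenPeerStops checks that the reactor handles a
// peer going down: the broadcast routine for that peer should stop, allowing
// the reactor to shut down cleanly during test cleanup.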
func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}

	config := cfg.TestConfig()

	rts := setup(t, config.Mempool, 2, 0)

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	rts.start(t)

	// disconnect peer
	rts.peerChans[primary] <- p2p.PeerUpdate{
		Status: p2p.PeerStatusDown,
		NodeID: secondary,
	}
}