gitlab.com/gpdionisio/tendermint@v0.34.19-dev2/mempool/reactor_test.go

package mempool

import (
	"encoding/hex"
	"errors"
	"net"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/mock"
	memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

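// peerState is a minimal implementation of the PeerState interface that the
// reactor expects to find under types.PeerStateKey: the broadcast routine
// reads the peer's height to decide whether the peer is ready to receive txs.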
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received by the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// If there were more than two reactors, the order of transactions could not
	// be asserted in waitForTxsOnReactors (due to transaction gossiping). If we
	// replaced Connect2Switches (full mesh) with a func that connects the first
	// reactor to the others and nothing else, this test should also pass with
	// >2 reactors; a sketch of such a connector follows.
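	// The following is only a sketch (it is not used in this file). It assumes,
	// as the call in makeAndConnectReactors suggests, that
	// p2p.MakeConnectedSwitches invokes its connect callback once per pair of
	// switches, so a connector that only acts when one endpoint is switch 0
	// yields a star topology centered on reactor 0:
	//
	//	connectFirstToOthers := func(switches []*p2p.Switch, i, j int) {
	//		if i == 0 || j == 0 {
	//			p2p.Connect2Switches(switches, i, j)
	//		}
	//	}
	//
	// connectFirstToOthers is a hypothetical name; it would be passed to
	// p2p.MakeConnectedSwitches in place of p2p.Connect2Switches.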
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}

// regression test for https://github.com/tendermint/tendermint/issues/5408
func TestReactorConcurrency(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}
	var wg sync.WaitGroup

	const numTxs = 5

	for i := 0; i < 1000; i++ {
		wg.Add(2)

		// 1. submit a bunch of txs
		// 2. update the whole mempool
		txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[0].mempool.Lock()
			defer reactors[0].mempool.Unlock()

			deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs))
			for i := range txs {
				deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0}
			}
			err := reactors[0].mempool.Update(1, txs, deliverTxResponses, nil, nil)
			assert.NoError(t, err)
		}()

		// 1. submit a bunch of txs
		// 2. update none
		_ = checkTxs(t, reactors[1].mempool, numTxs, UnknownPeerID)
		go func() {
			defer wg.Done()

			reactors[1].mempool.Lock()
			defer reactors[1].mempool.Unlock()
			err := reactors[1].mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil)
			assert.NoError(t, err)
		}()

		// flush the mempool
		reactors[1].mempool.Flush()
	}

	wg.Wait()
}

// Send a bunch of txs to the first reactor's mempool, claiming they came from
// the peer, and ensure that peer receives no txs.
func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	const peerID = 1
	checkTxs(t, reactors[0].mempool, numTxs, peerID)
	ensureNoTxs(t, reactors[peerID], 100*time.Millisecond)
}

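// TestReactor_MaxTxBytes checks that a tx of exactly MaxTxBytes is accepted
// and gossiped to the other reactor, while a tx one byte larger is rejected
// by CheckTx.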
func TestReactor_MaxTxBytes(t *testing.T) {
	config := cfg.TestConfig()

	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// Broadcast a tx of exactly the max size
	// => ensure it's received by the second reactor.
	tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
	err := reactors[0].mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID})
	require.NoError(t, err)
	waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)

	reactors[0].mempool.Flush()
	reactors[1].mempool.Flush()

	// Broadcast a tx that is one byte beyond the max size
	// => ensure CheckTx rejects it, so it is never sent.
	tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
	err = reactors[0].mempool.CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID})
	require.Error(t, err)
}

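// TestBroadcastTxForPeerStopsWhenPeerStops checks that stopping a peer also
// stops its mempool broadcast routine, so no goroutines are leaked.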
func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

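// TestBroadcastTxForPeerStopsWhenReactorStops checks that stopping the
// reactors themselves also terminates their broadcast routines, with no
// goroutine leaks.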
func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		if err := r.Stop(); err != nil {
			assert.NoError(t, err)
		}
	}

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

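// TestMempoolIDsBasic checks that a peer is assigned ID 1 and that, after the
// ID is reclaimed, a fresh reservation gets the next ID (2) rather than
// immediately reusing the old one.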
func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

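// TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs checks that reserving
// more than maxActiveIDs peer IDs panics (ID 0 is reserved for UnknownPeerID,
// hence the maxActiveIDs-1 loop below).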
func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	if testing.Short() {
		return
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < maxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

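// TestDontExhaustMaxActiveIDs pushes messages from more than maxActiveIDs
// distinct peers through a single reactor and checks that doing so does not
// panic by exhausting the peer ID space.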
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	reactor := reactors[0]

	for i := 0; i < maxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
		reactor.AddPeer(peer)
	}
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

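// waitForTxsOnReactors waits, up to the package-level timeout, for all the
// given txs to show up in every reactor's mempool in the same order.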
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

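// waitForTxsOnReactor polls a single reactor's mempool until it holds at
// least len(txs) transactions, then checks that they match txs in order.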
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // give any gossiped txs time to arrive
	assert.Zero(t, reactor.mempool.Size())
}

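// TestMempoolVectors pins the protobuf wire encoding of mempool Txs messages
// against known hex test vectors.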
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}