github.com/vipernet-xyz/tm@v0.34.24/mempool/v1/reactor_test.go

package v1

import (
	"encoding/hex"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log/term"
	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vipernet-xyz/tm/abci/example/kvstore"
	cfg "github.com/vipernet-xyz/tm/config"
	"github.com/vipernet-xyz/tm/libs/log"
	"github.com/vipernet-xyz/tm/mempool"
	"github.com/vipernet-xyz/tm/p2p"
	"github.com/vipernet-xyz/tm/p2p/mock"
	memproto "github.com/vipernet-xyz/tm/proto/tendermint/mempool"
	"github.com/vipernet-xyz/tm/proxy"
	"github.com/vipernet-xyz/tm/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

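// peerState is a minimal stub for the peer state the reactor looks up under
// types.PeerStateKey; only GetHeight is needed for gossiping in these tests.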
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// If there were more than two reactors, the order of transactions could not
	// be asserted in waitForTxsOnReactors (due to transaction gossiping). If we
	// replaced Connect2Switches (full mesh) with a func that connects the first
	// reactor to the others and nothing else, this test should also pass with
	// >2 reactors.
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			assert.NoError(t, r.Stop())
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	transactions := make(types.Txs, len(txs))
	for idx, tx := range txs {
		transactions[idx] = tx.tx
	}

	waitForTxsOnReactors(t, transactions, reactors)
}

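// TestMempoolVectors pins the protobuf wire format of mempool Txs messages to
// known hex vectors. As a worked example, "0a030a017b" is: tag 0x0a (outer
// Message field 1, the Txs sum), length 0x03, then tag 0x0a (Txs field 1),
// length 0x01, and the single tx byte 0x7b (123).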
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}

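// TestLegacyReactorReceiveBasic checks that the legacy (raw byte slice)
// Receive path does not panic when handed a well-formed Txs message from a
// mock peer.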
func TestLegacyReactorReceiveBasic(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
	)
	defer func() {
		assert.NoError(t, reactor.Stop())
	}()

	reactor.InitPeer(peer)
	reactor.AddPeer(peer)
	m := &memproto.Txs{}
	wm := m.Wrap()
	msg, err := proto.Marshal(wm)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(mempool.MempoolChannel, peer, msg)
	})
}

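// makeAndConnectReactors creates n mempool reactors, each backed by its own
// in-process kvstore ABCI app, registers each on a switch, and connects the
// switches in a full mesh via p2p.Connect2Switches.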
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

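// newMempoolWithApp builds a TxMempool on a fresh test root directory; the
// returned cleanup func removes that directory.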
func newMempoolWithApp(cc proxy.ClientCreator) (*TxMempool, func()) {
	conf := cfg.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfig(cc, conf)
	return mp, cu
}

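// newMempoolWithAppAndConfig starts an ABCI mempool connection for the given
// client creator and wraps it in a TxMempool at height 0.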
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxMempool, func()) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	if err := appConnMem.Start(); err != nil {
		panic(err)
	}

	mp := NewTxMempool(log.TestingLogger(), conf.Mempool, appConnMem, 0)

	return mp, func() { os.RemoveAll(conf.RootDir) }
}

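// waitForTxsOnReactors waits, in parallel, for every reactor's mempool to
// contain all of txs in order, failing the test after the global timeout.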
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

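// waitForTxsOnReactor polls a single reactor's mempool until it holds at
// least len(txs) transactions, then reaps them and asserts they match txs
// element by element.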
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}