github.com/Finschia/ostracon@v1.1.5/mempool/v1/reactor_test.go

//go:build deprecated

package v1

import (
	"encoding/hex"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log/term"
	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/p2p/mock"

	cfg "github.com/tendermint/tendermint/config"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

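// peerState is a minimal stub of the peer state stored under
// types.PeerStateKey; the reactor only needs its GetHeight method.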
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received by the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// With more than two reactors, the order of transactions could not be
	// asserted in waitForTxsOnReactors (txs are gossiped in arbitrary order).
	// If Connect2Switches (full mesh) were replaced with a func that connects
	// the first reactor to the others and nothing else, this test should also
	// pass with >2 reactors.
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			assert.NoError(t, r.Stop())
		}
	}()
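	// Advertise a height to every peer so the reactor's broadcast routine
	// has a peer height to consult when deciding to send txs.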
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

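	// checkTxs (a test helper defined elsewhere in this package) submits
	// numTxs transactions to the first reactor's mempool via CheckTx.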
	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	transactions := make(types.Txs, len(txs))
	for idx, tx := range txs {
		transactions[idx] = tx.tx
	}

	waitForTxsOnReactors(t, transactions, reactors)
}

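// TestMempoolVectors checks that the protobuf wire encoding of a mempool
// Txs message matches the expected hex test vectors.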
func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}

func TestLegacyReactorReceiveBasic(t *testing.T) {
	config := cfg.TestConfig()
	// A single reactor is enough here: this test only checks that a legacy
	// Receive call with a well-formed Txs message does not panic.
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
	)
	defer func() {
		err := reactor.Stop()
		assert.NoError(t, err)
	}()

	reactor.InitPeer(peer)
	reactor.AddPeer(peer)
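	// Wrap embeds the (empty) Txs payload in the memproto.Message envelope,
	// which is then marshaled into the raw bytes Receive expects.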
	m := &memproto.Txs{}
	wm := m.Wrap()
	msg, err := proto.Marshal(wm)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(mempool.MempoolChannel, peer, msg)
	})
}

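// makeAndConnectReactors creates n reactors, each backed by its own kvstore
// app and mempool, and connects their switches in a full mesh. Note that the
// deferred mempool cleanups run when this function returns.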
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

   149  
// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

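// newMempoolWithApp builds a TxMempool wired to the given ABCI client
// creator, using a fresh test config root.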
func newMempoolWithApp(cc proxy.ClientCreator) (*TxMempool, func()) {
	conf := cfg.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfig(cc, conf)
	return mp, cu
}

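// newMempoolWithAppAndConfig starts a mempool ABCI connection and returns a
// TxMempool at height 0, plus a cleanup func that removes the test root dir.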
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxMempool, func()) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewTxMempool(log.TestingLogger(), conf.Mempool, appConnMem, 0)

	return mp, func() { os.RemoveAll(conf.RootDir) }
}

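// waitForTxsOnReactors waits, under a global timeout, until every reactor's
// mempool holds all of txs, then checks that their order matches.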
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

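// waitForTxsOnReactor polls one reactor's mempool until it contains
// len(txs) transactions, then reaps them and asserts they match txs in order.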
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}