github.com/okex/exchain@v1.8.0/libs/tendermint/mempool/reactor_test.go

package mempool

import (
	"math/rand"
	"net"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/okex/exchain/libs/tendermint/crypto/ed25519"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/kit/log/term"
	"github.com/okex/exchain/libs/tendermint/abci/example/kvstore"
	cfg "github.com/okex/exchain/libs/tendermint/config"
	"github.com/okex/exchain/libs/tendermint/libs/log"
	rrand "github.com/okex/exchain/libs/tendermint/libs/rand"
	"github.com/okex/exchain/libs/tendermint/p2p"
	"github.com/okex/exchain/libs/tendermint/p2p/mock"
	"github.com/okex/exchain/libs/tendermint/proxy"
	"github.com/okex/exchain/libs/tendermint/types"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
)

type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		// note: these defers fire when makeAndConnectReactors returns,
		// i.e. while the reactors are still running; the tests tolerate this
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}
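
// The in-loop defer above means every mempool's cleanup runs as soon as the
// helper returns, while the reactors are still live. A minimal sketch of a
// variant that hands cleanup to the caller instead (the name and shape are
// ours, not part of the original file):
func makeAndConnectReactorsWithCleanup(config *cfg.Config, n int) ([]*Reactor, func()) {
	reactors := make([]*Reactor, n)
	cleanups := make([]func(), n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
		mempool, cleanup := newMempoolWithApp(cc)
		cleanups[i] = cleanup
		reactors[i] = NewReactor(config.Mempool, mempool)
		reactors[i].SetLogger(logger.With("validator", i))
	}
	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	// the returned closure stops the reactors before removing their state
	return reactors, func() {
		for _, r := range reactors {
			r.Stop()
		}
		for _, c := range cleanups {
			c()
		}
	}
}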

func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(Timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}
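
// waitForTxsOnReactors inlines the common "WaitGroup or timeout" pattern.
// A minimal reusable sketch of the same idea (the helper name is ours, for
// illustration only):
func waitTimeout(wg *sync.WaitGroup, d time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-time.After(d):
		return false // timed out before all goroutines finished
	case <-done:
		return true
	}
}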

func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 50)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // give any txs time to arrive before asserting the mempool is empty
	assert.Zero(t, reactor.mempool.Size())
}

const (
	NumTxs  = 1000
	Timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

// TODO: fix the random failure case; the lowercase name keeps this test
// from being run by `go test` until then
func testReactorBroadcastTxMessage(t *testing.T) {
	config := cfg.TestConfig()
	const N = 4
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// send a bunch of txs to the first reactor's mempool
	// and wait for them all to be received in the others
	txs := checkTxs(t, reactors[0].mempool, NumTxs, UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}

func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// send a bunch of txs to the first reactor's mempool, claiming they came from peer 1,
	// then ensure that peer gets no txs back
	checkTxs(t, reactors[0].mempool, NumTxs, 1)
	ensureNoTxs(t, reactors[1], 100*time.Millisecond)
}

func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		r.Stop()
	}

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < maxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

// TestDontExhaustMaxActiveIDs feeds the reactor messages from more peers than
// maxActiveIDs; handling them must not exhaust the peer-ID space
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()
	reactor := reactors[0]

	for i := 0; i < maxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
		reactor.AddPeer(peer)
	}
}

func TestVerifyWtx(t *testing.T) {
	nodeKey := &p2p.NodeKey{
		PrivKey: ed25519.GenPrivKey(),
	}
	memR := &Reactor{
		nodeKey: nodeKey,
	}

	wtx, err := memR.wrapTx([]byte("test-tx"), "test-from")
	assert.Nil(t, err)

	// verification fails while the signer is not in the whitelist
	nodeKeyWhitelist := make(map[string]struct{})
	err = wtx.verify(nodeKeyWhitelist)
	assert.NotNil(t, err)

	// and succeeds once the signer's node ID is whitelisted
	nodeKeyWhitelist[string(p2p.PubKeyToID(nodeKey.PubKey()))] = struct{}{}
	err = wtx.verify(nodeKeyWhitelist)
	assert.Nil(t, err)
}

func TestTxMessageAmino(t *testing.T) {
	testcases := []TxMessage{
		{},
		{[]byte{}, ""},
		{[]byte{1, 2, 3, 4, 5, 6, 7}, "From"},
		{[]byte{}, "f"},
	}

	var typePrefix = make([]byte, 8)
	tpLen, err := cdc.GetTypePrefix(TxMessage{}, typePrefix)
	require.NoError(t, err)
	typePrefix = typePrefix[:tpLen]
	reactor := Reactor{
		config: &cfg.MempoolConfig{
			MaxTxBytes: 1024 * 1024,
		},
	}

	for _, tx := range testcases {
		var m Message
		m = tx
		expectBz, err := cdc.MarshalBinaryBare(m)
		require.NoError(t, err)
		actualBz, err := tx.MarshalToAmino(cdc)
		require.NoError(t, err)

		require.Equal(t, expectBz, append(typePrefix, actualBz...))
		require.Equal(t, len(expectBz), tpLen+tx.AminoSize(cdc))

		actualBz, err = cdc.MarshalBinaryBareWithRegisteredMarshaller(tx)
		require.NoError(t, err)

		require.Equal(t, expectBz, actualBz)
		require.Equal(t, cdc.MustMarshalBinaryBare(m), reactor.encodeMsg(&tx))
		require.Equal(t, cdc.MustMarshalBinaryBare(m), reactor.encodeMsg(tx))

		var expectValue Message
		err = cdc.UnmarshalBinaryBare(expectBz, &expectValue)
		require.NoError(t, err)
		var actualValue Message
		actualValue, err = cdc.UnmarshalBinaryBareWithRegisteredUnmarshaller(expectBz, &actualValue)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)

		actualValue, err = reactor.decodeMsg(expectBz)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)
		actualValue.(*TxMessage).Tx = nil
		txMessageDeocdePool.Put(actualValue)
	}

	// special case: an explicitly encoded empty Tx field
	// (field 1, wire type 2, zero length)
	{
		var bz = []byte{1<<3 | 2, 0}
		bz = append(typePrefix, bz...)
		var expectValue Message
		err = cdc.UnmarshalBinaryBare(bz, &expectValue)
		require.NoError(t, err)
		var actualValue Message
		actualValue, err = cdc.UnmarshalBinaryBareWithRegisteredUnmarshaller(bz, &actualValue)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)

		actualValue, err = reactor.decodeMsg(bz)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)
	}
}
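
// BenchmarkTxMessageUnmarshal further down slices bz[4:] to skip the amino
// type prefix before calling UnmarshalFromAmino directly. A minimal sketch of
// that assumption (this helper is ours, for illustration): for a registered
// concrete type, MarshalBinaryBare output is <type prefix><amino-encoded
// struct>, and the prefix reported by cdc.GetTypePrefix is expected to be
// 4 bytes long.
func txMessageTypePrefixLen() int {
	buf := make([]byte, 8)
	n, err := cdc.GetTypePrefix(TxMessage{}, buf)
	if err != nil {
		panic(err)
	}
	return n // expected to be 4
}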

func BenchmarkTxMessageAminoMarshal(b *testing.B) {
	var bz = make([]byte, 256)
	rand.Read(bz)
	reactor := &Reactor{}
	var msg Message
	b.ResetTimer()

	b.Run("amino", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg = TxMessage{bz, ""}
			_, err := cdc.MarshalBinaryBare(&msg)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	b.Run("marshaller", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg = &TxMessage{bz, ""}
			_, err := cdc.MarshalBinaryBareWithRegisteredMarshaller(msg)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	b.Run("encodeMsgOld", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg = &TxMessage{bz, ""}
			reactor.encodeMsg(msg)
		}
	})
	b.Run("encodeMsg", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			txm := txMessageDeocdePool.Get().(*TxMessage)
			txm.Tx = bz
			msg = txm
			reactor.encodeMsg(msg)
			txMessageDeocdePool.Put(txm)
		}
	})
}

// decodeMsgOld is the pre-pooling decode path, kept only as a baseline for
// the benchmarks below
func decodeMsgOld(memR *Reactor, bz []byte) (msg Message, err error) {
	maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
	if l := len(bz); l > maxMsgSize {
		return msg, ErrTxTooLarge{maxMsgSize, l}
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}

func BenchmarkTxMessageUnmarshal(b *testing.B) {
	txMsg := TxMessage{
		Tx: make([]byte, 512),
	}
	rand.Read(txMsg.Tx)
	bz := cdc.MustMarshalBinaryBare(&txMsg)

	reactor := &Reactor{
		config: &cfg.MempoolConfig{
			MaxTxBytes: 1024 * 1024,
		},
	}

	var msg Message
	var err error

	b.ResetTimer()

	b.Run("decode", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg, err = reactor.decodeMsg(bz)
			if err != nil {
				b.Fatal(err)
			}
			msg.(*TxMessage).Tx = nil
			txMessageDeocdePool.Put(msg)
		}
	})
	b.Run("decode-old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg, err = decodeMsgOld(reactor, bz)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	b.Run("amino", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			var m TxMessage
			// bz[4:] skips the 4-byte amino type prefix
			err := m.UnmarshalFromAmino(cdc, bz[4:])
			msg = &m
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	_ = msg
}

func BenchmarkReactorLogReceive(b *testing.B) {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "benchmark")
	var options []log.Option
	options = append(options, log.AllowInfoWith("module", "benchmark"))
	logger = log.NewFilter(logger, options...)

	memR := &Reactor{}
	memR.Logger = logger

	chID := byte(10)
	var msg Message = &TxMessage{Tx: make([]byte, 512)}
	var src p2p.Peer

	b.Run("pool", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.logReceive(src, chID, msg)
		}
	})

	b.Run("logger", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
		}
	})
}

func BenchmarkReactorLogCheckTxError(b *testing.B) {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "benchmark")
	var options []log.Option
	options = append(options, log.AllowErrorWith("module", "benchmark"))
	logger = log.NewFilter(logger, options...)

	memR := &Reactor{}
	memR.Logger = logger
	memR.mempool = &CListMempool{height: 123456}

	var msg Message = &TxMessage{Tx: make([]byte, 512)}
	tx := msg.(*TxMessage).Tx
	err := errors.New("error")

	b.Run("pool", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.logCheckTxError(tx, memR.mempool.height, err)
		}
	})

	b.Run("logger", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.Logger.Info("Could not check tx", "tx", txIDStringer{tx, memR.mempool.height}, "err", err)
		}
	})
}

func BenchmarkGetRealTxFromWrapCMTx(b *testing.B) {
	N := b.N
	var data = make([][]byte, N)
	for i := 0; i < N; i++ {
		wtx := &types.WrapCMTx{Tx: rrand.Bytes(256), Nonce: uint64(i)}
		d, err := cdc.MarshalJSON(wtx)
		assert.NoError(b, err)
		data[i] = d
	}
	b.ResetTimer()
	// keep the results live so the calls cannot be optimized away
	var re = make([]int, N)
	for i := 0; i < N; i++ {
		res := GetRealTxFromWrapCMTx(data[i])
		re[i] = len(res)
	}
}