github.com/fibonacci-chain/fbc@v0.0.0-20231124064014-c7636198c1e9/libs/tendermint/mempool/reactor_test.go

package mempool

import (
	"math/rand"
	"net"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/kit/log/term"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/fibonacci-chain/fbc/libs/tendermint/abci/example/kvstore"
	cfg "github.com/fibonacci-chain/fbc/libs/tendermint/config"
	"github.com/fibonacci-chain/fbc/libs/tendermint/crypto/ed25519"
	"github.com/fibonacci-chain/fbc/libs/tendermint/libs/log"
	"github.com/fibonacci-chain/fbc/libs/tendermint/p2p"
	"github.com/fibonacci-chain/fbc/libs/tendermint/p2p/mock"
	"github.com/fibonacci-chain/fbc/libs/tendermint/proxy"
	"github.com/fibonacci-chain/fbc/libs/tendermint/types"
)

// peerState is the minimal peer state these tests install via
// peer.Set(types.PeerStateKey, ...); the reactor only needs GetHeight.
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// makeAndConnectReactors connects n mempool reactors through n switches.
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

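// Note that cleanup() in makeAndConnectReactors is deferred inside the helper,
// so it runs as soon as the helper returns rather than when the calling test
// finishes. A variant that hands cleanup back to the caller could look roughly
// like this sketch (illustrative only; not used by the tests in this file).
func makeAndConnectReactorsWithCleanup(config *cfg.Config, n int) ([]*Reactor, func()) {
	reactors := make([]*Reactor, n)
	cleanups := make([]func(), 0, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		cleanups = append(cleanups, cleanup)

		reactors[i] = NewReactor(config.Mempool, mempool)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)

	return reactors, func() {
		for _, c := range cleanups {
			c()
		}
	}
}
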
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(Timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 50)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

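// waitForTxsOnReactor polls without its own deadline and relies on the outer
// timer in waitForTxsOnReactors. A self-contained variant with a deadline
// could look roughly like this sketch (illustrative only; not used by the
// tests in this file). It reports failure with t.Errorf, which is safe to
// call from a helper goroutine.
func waitForTxsOnReactorWithDeadline(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int, deadline time.Duration) {
	end := time.Now().Add(deadline)
	for reactor.mempool.Size() < len(txs) {
		if time.Now().After(end) {
			t.Errorf("reactor %d: timed out waiting for %d txs", reactorIndex, len(txs))
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
}
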
// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // give any broadcast txs time to arrive
	assert.Zero(t, reactor.mempool.Size())
}

const (
	NumTxs  = 1000
	Timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

// TODO: fix the random failure case. Note the lowercase name: `go test` does
// not run this test until it is renamed TestReactorBroadcastTxMessage.
func testReactorBroadcastTxMessage(t *testing.T) {
	config := cfg.TestConfig()
	const N = 4
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// send a bunch of txs to the first reactor's mempool
	// and wait for them all to be received in the others
	txs := checkTxs(t, reactors[0].mempool, NumTxs, UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}

func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// send a bunch of txs to the first reactor's mempool, claiming they came from the peer,
	// and ensure the peer gets no txs back
	checkTxs(t, reactors[0].mempool, NumTxs, 1)
	ensureNoTxs(t, reactors[1], 100*time.Millisecond)
}

func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		r.Stop()
	}

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	// after Reclaim, the next reservation gets a fresh ID rather than
	// immediately reusing the reclaimed one
	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

func TestMempoolIDsPanicsIfNodeRequestsOverMaxActiveIDs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < maxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()
	reactor := reactors[0]

	// churning through more peers than maxActiveIDs must not exhaust the ID
	// space (which would panic)
	for i := 0; i < maxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
		reactor.AddPeer(peer)
	}
}

func TestVerifyWtx(t *testing.T) {
	nodeKey := &p2p.NodeKey{
		PrivKey: ed25519.GenPrivKey(),
	}
	memR := &Reactor{
		nodeKey: nodeKey,
	}

	wtx, err := memR.wrapTx([]byte("test-tx"), "test-from")
	assert.NoError(t, err)

	// verification fails while the signing node is not whitelisted
	nodeKeyWhitelist := make(map[string]struct{})
	err = wtx.verify(nodeKeyWhitelist)
	assert.Error(t, err)

	// and succeeds once the node's ID is added to the whitelist
	nodeKeyWhitelist[string(p2p.PubKeyToID(nodeKey.PubKey()))] = struct{}{}
	err = wtx.verify(nodeKeyWhitelist)
	assert.NoError(t, err)
}

func TestTxMessageAmino(t *testing.T) {
	testcases := []TxMessage{
		{},
		{[]byte{}, ""},
		{[]byte{1, 2, 3, 4, 5, 6, 7}, "From"},
		{[]byte{}, "f"},
	}

	var typePrefix = make([]byte, 8)
	tpLen, err := cdc.GetTypePrefix(TxMessage{}, typePrefix)
	require.NoError(t, err)
	typePrefix = typePrefix[:tpLen]
	reactor := Reactor{
		config: &cfg.MempoolConfig{
			MaxTxBytes: 1024 * 1024,
		},
	}

	for _, tx := range testcases {
		var m Message = tx
		expectBz, err := cdc.MarshalBinaryBare(m)
		require.NoError(t, err)
		actualBz, err := tx.MarshalToAmino(cdc)
		require.NoError(t, err)

		require.Equal(t, expectBz, append(typePrefix, actualBz...))
		require.Equal(t, len(expectBz), tpLen+tx.AminoSize(cdc))

		actualBz, err = cdc.MarshalBinaryBareWithRegisteredMarshaller(tx)
		require.NoError(t, err)

		require.Equal(t, expectBz, actualBz)
		require.Equal(t, cdc.MustMarshalBinaryBare(m), reactor.encodeMsg(&tx))
		require.Equal(t, cdc.MustMarshalBinaryBare(m), reactor.encodeMsg(tx))

		var expectValue Message
		err = cdc.UnmarshalBinaryBare(expectBz, &expectValue)
		require.NoError(t, err)
		var actualValue Message
		actualValue, err = cdc.UnmarshalBinaryBareWithRegisteredUnmarshaller(expectBz, &actualValue)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)

		actualValue, err = reactor.decodeMsg(expectBz)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)
		actualValue.(*TxMessage).Tx = nil
		txMessageDeocdePool.Put(actualValue)
	}

	// special case: the Tx field (field 1, wire type 2) explicitly encoded with
	// zero length, which a canonical encoder would normally omit
	{
		var bz = []byte{1<<3 | 2, 0}
		bz = append(typePrefix, bz...)
		var expectValue Message
		err = cdc.UnmarshalBinaryBare(bz, &expectValue)
		require.NoError(t, err)
		var actualValue Message
		actualValue, err = cdc.UnmarshalBinaryBareWithRegisteredUnmarshaller(bz, &actualValue)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)

		actualValue, err = reactor.decodeMsg(bz)
		require.NoError(t, err)
		require.Equal(t, expectValue, actualValue)
	}
}

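// For reference, assuming TxMessage keeps its field order (Tx = 1, From = 2),
// a message with non-empty fields encodes roughly as
//
//	<registered type prefix, tpLen bytes> | 0x0a <varint len> <Tx> | 0x12 <varint len> <From>
//
// with empty fields omitted by the canonical encoder, which is why the
// explicit zero-length Tx encoding above is exercised as a special case.
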
func BenchmarkTxMessageAminoMarshal(b *testing.B) {
	var bz = make([]byte, 256)
	rand.Read(bz)
	reactor := &Reactor{}
	var msg Message
	b.ResetTimer()

	// generic amino codec on an interface value
	b.Run("amino", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg = TxMessage{bz, ""}
			_, err := cdc.MarshalBinaryBare(&msg)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	// registered concrete marshaller
	b.Run("marshaller", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg = &TxMessage{bz, ""}
			_, err := cdc.MarshalBinaryBareWithRegisteredMarshaller(msg)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	// reactor.encodeMsg with a freshly allocated message each iteration
	b.Run("encodeMsgOld", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg = &TxMessage{bz, ""}
			reactor.encodeMsg(msg)
		}
	})
	// reactor.encodeMsg with a message taken from (and returned to) the pool
	b.Run("encodeMsg", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			txm := txMessageDeocdePool.Get().(*TxMessage)
			txm.Tx = bz
			msg = txm
			reactor.encodeMsg(msg)
			txMessageDeocdePool.Put(txm)
		}
	})
}

// decodeMsgOld decodes through the generic amino unmarshaller; it serves as
// the baseline for the decode-old benchmark below.
func decodeMsgOld(memR *Reactor, bz []byte) (msg Message, err error) {
	maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
	if l := len(bz); l > maxMsgSize {
		return msg, ErrTxTooLarge{maxMsgSize, l}
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}

func BenchmarkTxMessageUnmarshal(b *testing.B) {
	txMsg := TxMessage{
		Tx: make([]byte, 512),
	}
	rand.Read(txMsg.Tx)
	bz := cdc.MustMarshalBinaryBare(&txMsg)

	reactor := &Reactor{
		config: &cfg.MempoolConfig{
			MaxTxBytes: 1024 * 1024,
		},
	}

	var msg Message
	var err error

	b.ResetTimer()

	// reactor.decodeMsg, returning the decoded message to the pool
	b.Run("decode", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg, err = reactor.decodeMsg(bz)
			if err != nil {
				b.Fatal(err)
			}
			msg.(*TxMessage).Tx = nil
			txMessageDeocdePool.Put(msg)
		}
	})
	// the generic amino decode path, for comparison
	b.Run("decode-old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			msg, err = decodeMsgOld(reactor, bz)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	// TxMessage.UnmarshalFromAmino on the payload after the 4-byte type prefix
	b.Run("amino", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			var m TxMessage
			err := m.UnmarshalFromAmino(cdc, bz[4:])
			if err != nil {
				b.Fatal(err)
			}
			msg = &m
		}
	})
	_ = msg
}

func BenchmarkReactorLogReceive(b *testing.B) {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "benchmark")
	var options []log.Option
	options = append(options, log.AllowInfoWith("module", "benchmark"))
	logger = log.NewFilter(logger, options...)

	memR := &Reactor{}
	memR.Logger = logger

	chID := byte(10)
	var msg Message = &TxMessage{Tx: make([]byte, 512)}
	var src p2p.Peer

	b.Run("pool", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.logReceive(src, chID, msg)
		}
	})

	b.Run("logger", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
		}
	})
}

func BenchmarkReactorLogCheckTxError(b *testing.B) {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "benchmark")
	var options []log.Option
	options = append(options, log.AllowErrorWith("module", "benchmark"))
	logger = log.NewFilter(logger, options...)

	memR := &Reactor{}
	memR.Logger = logger
	memR.mempool = &CListMempool{height: 123456}

	var msg Message = &TxMessage{Tx: make([]byte, 512)}
	tx := msg.(*TxMessage).Tx
	err := errors.New("error")

	b.Run("pool", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.logCheckTxError(tx, memR.mempool.height, err)
		}
	})

	b.Run("logger", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			memR.Logger.Info("Could not check tx", "tx", txIDStringer{tx, memR.mempool.height}, "err", err)
		}
	})
}
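
// The benchmarks in this file can be run on their own with, for example:
//
//	go test -run '^$' -bench 'BenchmarkTxMessage|BenchmarkReactorLog' -benchmem ./libs/tendermint/mempool
//
// (path relative to the module root; adjust the -bench filter as needed).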