github.com/gnolang/gno@v0.0.0-20240520182011-228e9d0192ce/tm2/pkg/bft/mempool/reactor_test.go

package mempool

import (
	"net"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/assert"

	"github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore"
	memcfg "github.com/gnolang/gno/tm2/pkg/bft/mempool/config"
	"github.com/gnolang/gno/tm2/pkg/bft/proxy"
	"github.com/gnolang/gno/tm2/pkg/bft/types"
	"github.com/gnolang/gno/tm2/pkg/errors"
	"github.com/gnolang/gno/tm2/pkg/log"
	"github.com/gnolang/gno/tm2/pkg/p2p"
	p2pcfg "github.com/gnolang/gno/tm2/pkg/p2p/config"
	"github.com/gnolang/gno/tm2/pkg/p2p/mock"
	"github.com/gnolang/gno/tm2/pkg/testutils"
)

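// peerState is a minimal stand-in for the peer state stored under
// types.PeerStateKey; the mempool reactor only needs its height.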
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// connect N mempool reactors through N switches
func makeAndConnectReactors(mconfig *memcfg.MempoolConfig, pconfig *p2pcfg.P2PConfig, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := log.NewNoopLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewKVStoreApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(mconfig, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(pconfig, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

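// waitForTxsOnReactors waits, in parallel, for every reactor's mempool to
// contain all of txs, failing the test if the shared timeout elapses first.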
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	t.Helper()

	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

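// waitForTxsOnReactor polls a single reactor's mempool until it holds
// len(txs) transactions, then asserts that they match txs in order.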
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	t.Helper()

	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensure no txs on reactor after some timeout
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	t.Helper()

	time.Sleep(timeout) // give any txs in flight time to arrive (none should)
	assert.Zero(t, reactor.mempool.Size())
}

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

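// TestReactorBroadcastTxMessage checks that txs submitted to one reactor's
// mempool are broadcast to all connected peers.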
func TestReactorBroadcastTxMessage(t *testing.T) {
	t.Parallel()

	mconfig := memcfg.TestMempoolConfig()
	pconfig := p2pcfg.TestP2PConfig()
	const N = 4
	reactors := makeAndConnectReactors(mconfig, pconfig, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// send a bunch of txs to the first reactor's mempool
	// and wait for them all to be received in the others
	txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID, true)
	waitForTxsOnReactors(t, txs, reactors)
}

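// TestReactorNoBroadcastToSender checks that a tx is never relayed back to
// the peer it was received from.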
func TestReactorNoBroadcastToSender(t *testing.T) {
	t.Parallel()

	mconfig := memcfg.TestMempoolConfig()
	pconfig := p2pcfg.TestP2PConfig()
	const N = 2
	reactors := makeAndConnectReactors(mconfig, pconfig, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// send a bunch of txs to the first reactor's mempool, claiming they came
	// from peer 1, and ensure that peer gets no txs
	checkTxs(t, reactors[0].mempool, numTxs, 1, true)
	ensureNoTxs(t, reactors[1], 100*time.Millisecond)
}

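// TestFlappyBroadcastTxForPeerStopsWhenPeerStops checks that
// broadcastTxRoutine exits, without leaking a goroutine, when its peer stops.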
func TestFlappyBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	t.Parallel()

	testutils.FilterStability(t, testutils.Flappy)

	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	mconfig := memcfg.TestMempoolConfig()
	pconfig := p2pcfg.TestP2PConfig()
	const N = 2
	reactors := makeAndConnectReactors(mconfig, pconfig, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

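// TestFlappyBroadcastTxForPeerStopsWhenReactorStops checks that
// broadcastTxRoutine exits, without leaking a goroutine, when the reactor
// itself is stopped.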
func TestFlappyBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	t.Parallel()

	testutils.FilterStability(t, testutils.Flappy)

	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	mconfig := memcfg.TestMempoolConfig()
	pconfig := p2pcfg.TestP2PConfig()
	const N = 2
	reactors := makeAndConnectReactors(mconfig, pconfig, N)

	// stop reactors
	for _, r := range reactors {
		r.Stop()
	}

	// check that we are not leaking any goroutines,
	// i.e. broadcastTxRoutine finishes when the reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

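// TestMempoolIDsBasic checks that successive reservations hand out
// increasing peer IDs, even after an earlier ID is reclaimed.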
func TestMempoolIDsBasic(t *testing.T) {
	t.Parallel()

	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

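// TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs checks that reserving
// more than maxActiveIDs peer IDs panics.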
func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	t.Parallel()

	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < maxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}