github.com/pokt-network/tendermint@v0.32.11-0.20230426215212-59310158d3e9/mempool/reactor_test.go

package mempool

import (
	"net"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/kit/log/term"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/mock"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

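// peerState is a stub peer state that reports a fixed height, so the mempool
// reactor's broadcast routine treats the peer as caught up.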
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// makeAndConnectReactors connects n mempool reactors through n switches.
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}

// waitForTxsOnReactors waits until every reactor's mempool contains txs,
// failing the test if that doesn't happen within Timeout.
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(Timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

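// waitForTxsOnReactor polls until the reactor's mempool holds at least
// len(txs) transactions, then checks that the reaped txs match txs in order.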
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}

// ensureNoTxs asserts that the reactor's mempool is still empty after the
// given timeout has elapsed.
func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
	time.Sleep(timeout) // give any broadcast txs time to arrive
	assert.Zero(t, reactor.mempool.Size())
}

const (
	NumTxs  = 1000
	Timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

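// TestReactorBroadcastTxMessage checks that txs submitted to one reactor's
// mempool are gossiped to the mempools of all connected peers.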
func TestReactorBroadcastTxMessage(t *testing.T) {
	config := cfg.TestConfig()
	const N = 4
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()
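	// Set a peer state on every peer so the broadcast routine considers them
	// caught up; normally the consensus reactor sets this.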
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	// send a bunch of txs to the first reactor's mempool
	// and wait for them all to be received in the others
	txs := checkTxs(t, reactors[0].mempool, NumTxs, UnknownPeerID)
	waitForTxsOnReactors(t, txs, reactors)
}

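// TestReactorNoBroadcastToSender checks that a tx is not echoed back to the
// peer it was received from.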
func TestReactorNoBroadcastToSender(t *testing.T) {
	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// send a bunch of txs to the first reactor's mempool, claiming they came
	// from peer 1, and ensure that peer gets none of them back
	checkTxs(t, reactors[0].mempool, NumTxs, 1)
	ensureNoTxs(t, reactors[1], 100*time.Millisecond)
}

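// TestBroadcastTxForPeerStopsWhenPeerStops checks that broadcastTxRoutine
// exits once its peer is stopped, so no goroutines leak.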
func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()

	// stop peer
	sw := reactors[1].Switch
	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when peer is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

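// TestBroadcastTxForPeerStopsWhenReactorStops checks that broadcastTxRoutine
// exits once the reactor itself is stopped, so no goroutines leak.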
func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	config := cfg.TestConfig()
	const N = 2
	reactors := makeAndConnectReactors(config, N)

	// stop reactors
	for _, r := range reactors {
		r.Stop()
	}

	// check that we are not leaking any go-routines
	// i.e. broadcastTxRoutine finishes when reactor is stopped
	leaktest.CheckTimeout(t, 10*time.Second)()
}

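// TestMempoolIDsBasic exercises reserving and reclaiming a peer's mempool ID;
// IDs are handed out sequentially even after a reclaim.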
func TestMempoolIDsBasic(t *testing.T) {
	ids := newMempoolIDs()

	peer := mock.NewPeer(net.IP{127, 0, 0, 1})

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 1, ids.GetForPeer(peer))
	ids.Reclaim(peer)

	ids.ReserveForPeer(peer)
	assert.EqualValues(t, 2, ids.GetForPeer(peer))
	ids.Reclaim(peer)
}

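// TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs checks that reserving
// more than maxActiveIDs peer IDs panics.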
func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	// 0 is already reserved for UnknownPeerID
	ids := newMempoolIDs()

	for i := 0; i < maxActiveIDs-1; i++ {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	}

	assert.Panics(t, func() {
		peer := mock.NewPeer(net.IP{127, 0, 0, 1})
		ids.ReserveForPeer(peer)
	})
}

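// TestDontExhaustMaxActiveIDs exercises Receive and AddPeer with more than
// maxActiveIDs distinct peers; it passes as long as nothing panics from
// running out of peer IDs.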
func TestDontExhaustMaxActiveIDs(t *testing.T) {
	config := cfg.TestConfig()
	const N = 1
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			r.Stop()
		}
	}()
	reactor := reactors[0]

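	// Churn through more peers than there are available IDs; the test fails
	// (panics) if the reactor lets the pool of active peer IDs run out.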
	for i := 0; i < maxActiveIDs+1; i++ {
		peer := mock.NewPeer(nil)
		reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
		reactor.AddPeer(peer)
	}
}