github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/mempool/v1/reactor_test.go

package v1

import (
    "encoding/hex"
    "os"
    "sync"
    "testing"
    "time"

    "github.com/go-kit/log/term"
    "github.com/gogo/protobuf/proto"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/badrootd/celestia-core/abci/example/kvstore"
    "github.com/badrootd/celestia-core/p2p/mock"
    "github.com/badrootd/celestia-core/pkg/trace"

    cfg "github.com/badrootd/celestia-core/config"

    "github.com/badrootd/celestia-core/libs/log"
    "github.com/badrootd/celestia-core/mempool"
    "github.com/badrootd/celestia-core/p2p"
    memproto "github.com/badrootd/celestia-core/proto/tendermint/mempool"
    "github.com/badrootd/celestia-core/proxy"
    "github.com/badrootd/celestia-core/types"
)

const (
    numTxs  = 1000
    timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

// peerState implements the minimal peer state the mempool reactor reads via
// types.PeerStateKey: the peer's height.
type peerState struct {
    height int64
}

func (ps peerState) GetHeight() int64 {
    return ps.height
}

// TestReactorBroadcastTxsMessage sends a bunch of txs to the first reactor's
// mempool and waits for them all to be received by the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
    config := cfg.TestConfig()
    // With more than two reactors, the order of transactions could not be
    // asserted in waitForTxsOnReactors (due to transaction gossiping). If we
    // replaced Connect2Switches (full mesh) with a func that connects the
    // first reactor to the others and nothing else, this test should also
    // pass with more than two reactors.
    const N = 2
    reactors := makeAndConnectReactors(config, N)
    defer func() {
        for _, r := range reactors {
            assert.NoError(t, r.Stop())
        }
    }()
    for _, r := range reactors {
        for _, peer := range r.Switch.Peers().List() {
            peer.Set(types.PeerStateKey, peerState{1})
        }
    }

    txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
    transactions := make(types.Txs, len(txs))
    for idx, tx := range txs {
        transactions[idx] = tx.tx
    }

    waitForTxsOnReactors(t, transactions, reactors)
}

// TestMempoolVectors pins the proto wire encoding of mempool Txs messages by
// comparing marshaled bytes against known-good hex vectors.
func TestMempoolVectors(t *testing.T) {
    testCases := []struct {
        testName string
        tx       []byte
        expBytes string
    }{
        {"tx 1", []byte{123}, "0a030a017b"},
        {"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
    }

    for _, tc := range testCases {
        tc := tc

        msg := memproto.Message{
            Sum: &memproto.Message_Txs{
                Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
            },
        }
        bz, err := msg.Marshal()
        require.NoError(t, err, tc.testName)

        require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
    }
}

// TestReactorEventuallyRemovesExpiredTransaction checks that a transaction
// received from a peer is evicted from the mempool once its TTL elapses.
func TestReactorEventuallyRemovesExpiredTransaction(t *testing.T) {
    config := cfg.TestConfig()
    config.Mempool.TTLDuration = 100 * time.Millisecond
    const N = 1
    reactor := makeAndConnectReactors(config, N)[0]

    tx := types.Tx([]byte("test"))
    key := tx.Key()
    txMsg := &memproto.Message{
        Sum: &memproto.Message_Txs{Txs: &memproto.Txs{Txs: [][]byte{tx}}},
    }
    txMsgBytes, err := txMsg.Marshal()
    require.NoError(t, err)

    peer := mock.NewPeer(nil)
    reactor.InitPeer(peer)
    reactor.Receive(mempool.MempoolChannel, peer, txMsgBytes)
    reactor.mempool.Lock()
    _, has := reactor.mempool.txByKey[key]
    reactor.mempool.Unlock()
    require.True(t, has)

    // Wait for the transaction to expire.
    time.Sleep(reactor.mempool.config.TTLDuration * 2)
    reactor.mempool.Lock()
    _, has = reactor.mempool.txByKey[key]
    reactor.mempool.Unlock()
    require.False(t, has)
}

// TestLegacyReactorReceiveBasic checks that the reactor does not panic when
// a raw Txs message arrives over the legacy Receive API.
func TestLegacyReactorReceiveBasic(t *testing.T) {
    config := cfg.TestConfig()
    const N = 1
    reactors := makeAndConnectReactors(config, N)
    var (
        reactor = reactors[0]
        peer    = mock.NewPeer(nil)
    )
    defer func() {
        assert.NoError(t, reactor.Stop())
    }()

    reactor.InitPeer(peer)
    reactor.AddPeer(peer)
    m := &memproto.Txs{}
    wm := m.Wrap()
    msg, err := proto.Marshal(wm)
    assert.NoError(t, err)

    assert.NotPanics(t, func() {
        reactor.Receive(mempool.MempoolChannel, peer, msg)
    })
}

// makeAndConnectReactors creates n mempool reactors, each backed by an
// in-process kvstore application, and connects their switches in a full mesh.
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
    reactors := make([]*Reactor, n)
    logger := mempoolLogger()
    for i := 0; i < n; i++ {
        app := kvstore.NewApplication()
        cc := proxy.NewLocalClientCreator(app)
        mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
        defer cleanup()

        reactors[i] = NewReactor(config.Mempool, mempool, &trace.Client{}) // so we don't start the consensus states
        reactors[i].SetLogger(logger.With("validator", i))
    }

    p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
        s.AddReactor("MEMPOOL", reactors[i])
        return s
    }, p2p.Connect2Switches)
    return reactors
}

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
    return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
        for i := 0; i < len(keyvals)-1; i += 2 {
            if keyvals[i] == "validator" {
                return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
            }
        }
        return term.FgBgColor{}
    })
}

// newMempoolWithAppAndConfig builds a TxMempool wired to a fresh ABCI client
// and returns it along with a cleanup func that removes the config's root dir.
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxMempool, func()) {
    appConnMem, _ := cc.NewABCIClient()
    appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
    err := appConnMem.Start()
    if err != nil {
        panic(err)
    }

    mp := NewTxMempool(log.TestingLogger(), conf.Mempool, appConnMem, 0)

    return mp, func() { os.RemoveAll(conf.RootDir) }
}

// waitForTxsOnReactors waits, up to the global timeout, for all the given txs
// to appear in every reactor's mempool.
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
    // wait for the txs in all mempools
    wg := new(sync.WaitGroup)
    for i, reactor := range reactors {
        wg.Add(1)
        go func(r *Reactor, reactorIndex int) {
            defer wg.Done()
            waitForTxsOnReactor(t, txs, r, reactorIndex)
        }(reactor, i)
    }

    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()

    timer := time.After(timeout)
    select {
    case <-timer:
        t.Fatal("Timed out waiting for txs")
    case <-done:
    }
}

// waitForTxsOnReactor polls until the reactor's mempool holds at least
// len(txs) transactions, then reaps them and asserts they match in order.
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
    mempool := reactor.mempool
    for mempool.Size() < len(txs) {
        time.Sleep(time.Millisecond * 100)
    }

    reapedTxs := mempool.ReapMaxTxs(len(txs))
    for i, tx := range txs {
        assert.Equalf(t, tx, reapedTxs[i],
            "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
    }
}