github.com/MagHErmit/tendermint@v0.282.1/mempool/v1/reactor_test.go

package v1

import (
	"encoding/hex"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log/term"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/MagHErmit/tendermint/abci/example/kvstore"
	cfg "github.com/MagHErmit/tendermint/config"
	"github.com/MagHErmit/tendermint/libs/log"
	"github.com/MagHErmit/tendermint/mempool"
	"github.com/MagHErmit/tendermint/p2p"
	memproto "github.com/MagHErmit/tendermint/proto/tendermint/mempool"
	"github.com/MagHErmit/tendermint/proxy"
	"github.com/MagHErmit/tendermint/types"
)

const (
	numTxs  = 1000
	timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)

type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received by the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// With more than two reactors, the order of transactions could not be
	// asserted in waitForTxsOnReactors (due to transaction gossiping). If
	// Connect2Switches (full mesh) were replaced with a func that connects the
	// first reactor to the others and nothing else, this test should also pass
	// with >2 reactors; see the connectFirstToOthers sketch below.
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			assert.NoError(t, r.Stop())
		}
	}()
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	transactions := make(types.Txs, len(txs))
	for idx, tx := range txs {
		transactions[idx] = tx.tx
	}

	waitForTxsOnReactors(t, transactions, reactors)
}

func TestMempoolVectors(t *testing.T) {
	testCases := []struct {
		testName string
		tx       []byte
		expBytes string
	}{
		// expBytes is the protobuf wire encoding of the Message: 0x0a opens
		// field 1 (the nested Txs message) followed by its length, then 0x0a
		// and a length again for each tx's bytes. TestMempoolVectorsRoundTrip
		// at the end of this file sketches the decode direction.
		{"tx 1", []byte{123}, "0a030a017b"},
		{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
	}

	for _, tc := range testCases {
		tc := tc

		msg := memproto.Message{
			Sum: &memproto.Message_Txs{
				Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
			},
		}
		bz, err := msg.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}

func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s
	}, p2p.Connect2Switches)
	return reactors
}
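
// connectFirstToOthers is an illustrative sketch (not used by the tests in
// this file) of the star-topology alternative mentioned in
// TestReactorBroadcastTxsMessage: passed to p2p.MakeConnectedSwitches in
// place of p2p.Connect2Switches, it would connect the first switch to every
// other switch and nothing else, so txs only gossip outward from reactor 0.
// It assumes MakeConnectedSwitches invokes its connect func once per switch
// pair (i, j), as it does for the full-mesh Connect2Switches today.
func connectFirstToOthers(switches []*p2p.Switch, i, j int) {
	// Only wire pairs that include the first switch; skip all others.
	if i == 0 {
		p2p.Connect2Switches(switches, i, j)
	}
}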

// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// newMempoolWithApp builds a TxMempool on a fresh test root, returning a
// cleanup func that removes that root directory.
func newMempoolWithApp(cc proxy.ClientCreator) (*TxMempool, func()) {
	conf := cfg.ResetTestRoot("mempool_test")

	mp, cu := newMempoolWithAppAndConfig(cc, conf)
	return mp, cu
}

func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxMempool, func()) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}

	mp := NewTxMempool(log.TestingLogger(), conf.Mempool, appConnMem, 0)

	return mp, func() { os.RemoveAll(conf.RootDir) }
}

// waitForTxsOnReactors spawns one goroutine per reactor to wait for the txs
// and fails the test if they have not all arrived within the global timeout.
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
	// wait for the txs in all mempools
	wg := new(sync.WaitGroup)
	for i, reactor := range reactors {
		wg.Add(1)
		go func(r *Reactor, reactorIndex int) {
			defer wg.Done()
			waitForTxsOnReactor(t, txs, r, reactorIndex)
		}(reactor, i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for txs")
	case <-done:
	}
}

// waitForTxsOnReactor polls the reactor's mempool until it holds len(txs)
// transactions (the caller enforces the overall timeout), then checks that
// they arrived in the same order they were sent.
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
	mempool := reactor.mempool
	for mempool.Size() < len(txs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedTxs := mempool.ReapMaxTxs(len(txs))
	for i, tx := range txs {
		assert.Equalf(t, tx, reapedTxs[i],
			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
	}
}
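
// TestMempoolVectorsRoundTrip is an illustrative sketch added alongside the
// vectors in TestMempoolVectors above (not part of the original suite): it
// decodes the "tx 1" hex vector and unmarshals it back into a
// memproto.Message, showing that Unmarshal inverts Marshal. It assumes the
// gogoproto-generated Unmarshal and GetTxs methods on memproto.Message.
func TestMempoolVectorsRoundTrip(t *testing.T) {
	bz, err := hex.DecodeString("0a030a017b")
	require.NoError(t, err)

	var msg memproto.Message
	require.NoError(t, msg.Unmarshal(bz))

	// The oneof should hold the Txs variant containing the single tx {123}.
	txs := msg.GetTxs()
	require.NotNil(t, txs)
	require.Equal(t, [][]byte{{123}}, txs.Txs)
}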