github.com/Team-Kujira/tendermint@v0.34.24-indexer/evidence/reactor_test.go

package evidence_test

import (
	"encoding/hex"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/evidence/mocks"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	p2pmocks "github.com/tendermint/tendermint/p2p/mocks"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

var (
	numEvidence = 10
	timeout     = 120 * time.Second // ridiculously high because CircleCI is slow
)

// We have N evidence reactors connected to one another. The first reactor
// receives a number of evidence at varying heights. We test that all
// other reactors receive the evidence and add it to their own respective
// evidence pools.
func TestReactorBroadcastEvidence(t *testing.T) {
	config := cfg.TestConfig()
	N := 7

	// create statedb for everyone
	stateDBs := make([]sm.Store, N)
	val := types.NewMockPV()
	// we need validators saved for heights at least as high as we have evidence for
	height := int64(numEvidence) + 10
	for i := 0; i < N; i++ {
		stateDBs[i] = initializeValidatorState(val, height)
	}

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, stateDBs)

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// send a bunch of valid evidence to the first reactor's evpool
	// and wait for them all to be received in the others
	evList := sendEvidence(t, pools[0], val, numEvidence)
	waitForEvidence(t, evList, pools)
}

// We have two evidence reactors connected to one another but at different
// heights. Reactor 1, which is ahead, receives a number of evidence. It should
// only send evidence below the peer's height to that peer.
func TestReactorSelectiveBroadcast(t *testing.T) {
	config := cfg.TestConfig()

	val := types.NewMockPV()
	height1 := int64(numEvidence) + 10
	height2 := int64(numEvidence) / 2

	// DB1 is ahead of DB2
	stateDB1 := initializeValidatorState(val, height1)
	stateDB2 := initializeValidatorState(val, height2)

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height1}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// update the first reactor peer's height to be very small
	peer := reactors[0].Switch.Peers().List()[0]
	ps := peerState{height2}
	peer.Set(types.PeerStateKey, ps)

	// send a bunch of valid evidence to the first reactor's evpool
	evList := sendEvidence(t, pools[0], val, numEvidence)

	// only evidence below the peer's height should make it through
	waitForEvidence(t, evList[:numEvidence/2-1], []*evidence.Pool{pools[1]})

	// peers should still be connected
	peers := reactors[1].Switch.Peers().List()
	assert.Equal(t, 1, len(peers))
}

// This test aims to ensure that reactors don't send evidence that has already
// been committed, or that is not yet ready for the peer, through three scenarios.
// First, committed evidence to a newly connected peer.
// Second, evidence to a peer that is behind.
// Third, evidence that was pending and became committed just before the peer caught up.
func TestReactorsGossipNoCommittedEvidence(t *testing.T) {
	config := cfg.TestConfig()

	val := types.NewMockPV()
	var height int64 = 10

	// DB1 is ahead of DB2
	stateDB1 := initializeValidatorState(val, height-1)
	stateDB2 := initializeValidatorState(val, height-2)
	state, err := stateDB1.Load()
	require.NoError(t, err)
	state.LastBlockHeight++

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})

	evList := sendEvidence(t, pools[0], val, 2)
	pools[0].Update(state, evList)
	require.EqualValues(t, uint32(0), pools[0].Size())

	time.Sleep(100 * time.Millisecond)

	peer := reactors[0].Switch.Peers().List()[0]
	ps := peerState{height - 2}
	peer.Set(types.PeerStateKey, ps)

	peer = reactors[1].Switch.Peers().List()[0]
	ps = peerState{height}
	peer.Set(types.PeerStateKey, ps)

	// wait to see that no evidence comes through
	time.Sleep(300 * time.Millisecond)

	// the second pool should not have received any evidence because it has already been committed
	assert.Equal(t, uint32(0), pools[1].Size(), "second reactor should not have received evidence")

	// the first reactor receives three more evidence
	evList = make([]types.Evidence, 3)
	for i := 0; i < 3; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(height-3+int64(i),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, state.ChainID)
		err := pools[0].AddEvidence(ev)
		require.NoError(t, err)
		evList[i] = ev
	}

	// wait to see that only one evidence is sent
	time.Sleep(300 * time.Millisecond)

	// the second pool should only have received the first evidence because it is behind
	peerEv, _ := pools[1].PendingEvidence(10000)
	assert.EqualValues(t, []types.Evidence{evList[0]}, peerEv)

	// the last evidence is committed and the second reactor catches up in state to the first
	// reactor. We therefore expect that the second reactor only receives one more
	// evidence: the one that is still pending, not the evidence that has already
	// been committed.
	state.LastBlockHeight++
	pools[0].Update(state, []types.Evidence{evList[2]})
	// the first reactor should have the two remaining pending evidence
	require.EqualValues(t, uint32(2), pools[0].Size())

	// now update the state of the second reactor
	pools[1].Update(state, types.EvidenceList{})
	peer = reactors[0].Switch.Peers().List()[0]
	ps = peerState{height}
	peer.Set(types.PeerStateKey, ps)

	// wait to see that only two evidence are sent
	time.Sleep(300 * time.Millisecond)

	peerEv, _ = pools[1].PendingEvidence(1000)
	assert.EqualValues(t, []types.Evidence{evList[0], evList[1]}, peerEv)
}

func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) {
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	evidenceDB := dbm.NewMemDB()
	blockStore := &mocks.BlockStore{}
	blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
		&types.BlockMeta{Header: types.Header{Time: evidenceTime}},
	)
	val := types.NewMockPV()
	stateStore := initializeValidatorState(val, 1)
	pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
	require.NoError(t, err)

	p := &p2pmocks.Peer{}

	// IsRunning flips to false after the first call, which is what stops the
	// peer from the reactor's point of view
	p.On("IsRunning").Once().Return(true)
	p.On("IsRunning").Return(false)
	// check that we are not leaking any go-routines
	// i.e. broadcastEvidenceRoutine finishes when peer is stopped
	defer leaktest.CheckTimeout(t, 10*time.Second)()

	p.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool {
		e, ok := i.(p2p.Envelope)
		return ok && e.ChannelID == evidence.EvidenceChannel
	})).Return(false)
	// a receive-only channel that is never closed, so Quit never fires
	quitChan := make(<-chan struct{})
	p.On("Quit").Return(quitChan)
	ps := peerState{2}
	p.On("Get", types.PeerStateKey).Return(ps)
	p.On("ID").Return("ABC")
	p.On("String").Return("mock")

	r := evidence.NewReactor(pool)
	r.SetLogger(log.TestingLogger())
	r.AddPeer(p)

	_ = sendEvidence(t, pool, val, 2)
}

// evidenceLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func evidenceLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N evidence reactors through N switches
func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) ([]*evidence.Reactor,
	[]*evidence.Pool) {
	N := len(stateStores)

	reactors := make([]*evidence.Reactor, N)
	pools := make([]*evidence.Pool, N)
	logger := evidenceLogger()
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

	for i := 0; i < N; i++ {
		evidenceDB := dbm.NewMemDB()
		blockStore := &mocks.BlockStore{}
		blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
			&types.BlockMeta{Header: types.Header{Time: evidenceTime}},
		)
		pool, err := evidence.NewPool(evidenceDB, stateStores[i], blockStore)
		if err != nil {
			panic(err)
		}
		pools[i] = pool
		reactors[i] = evidence.NewReactor(pool)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("EVIDENCE", reactors[i])
		return s
	}, p2p.Connect2Switches)

	return reactors, pools
}

// wait for all evidence on all reactors
func waitForEvidence(t *testing.T, evs types.EvidenceList, pools []*evidence.Pool) {
	// wait for the evidence in all evpools
	wg := new(sync.WaitGroup)
	for i := 0; i < len(pools); i++ {
		wg.Add(1)
		go _waitForEvidence(t, wg, evs, i, pools)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for evidence")
	case <-done:
	}
}

// wait for all evidence on a single evpool
func _waitForEvidence(
	t *testing.T,
	wg *sync.WaitGroup,
	evs types.EvidenceList,
	poolIdx int,
	pools []*evidence.Pool,
) {
	evpool := pools[poolIdx]
	var evList []types.Evidence
	currentPoolSize := 0
	for currentPoolSize != len(evs) {
		evList, _ = evpool.PendingEvidence(int64(len(evs) * 500)) // each evidence should not be more than 500 bytes
		currentPoolSize = len(evList)
		time.Sleep(time.Millisecond * 100)
	}

	// put the reaped evidence in a map so we can quickly check we got everything
	evMap := make(map[string]types.Evidence)
	for _, e := range evList {
		evMap[string(e.Hash())] = e
	}
	for i, expectedEv := range evs {
		gotEv := evMap[string(expectedEv.Hash())]
		assert.Equal(t, expectedEv, gotEv,
			fmt.Sprintf("evidence at index %d on pool %d don't match: %v vs %v",
				i, poolIdx, expectedEv, gotEv))
	}

	wg.Done()
}

func sendEvidence(t *testing.T, evpool *evidence.Pool, val types.PrivValidator, n int) types.EvidenceList {
	evList := make([]types.Evidence, n)
	for i := 0; i < n; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(int64(i+1),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, evidenceChainID)
		err := evpool.AddEvidence(ev)
		require.NoError(t, err)
		evList[i] = ev
	}
	return evList
}

type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}
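// The tests above only ever hand the reactor a peerState through
// peer.Set(types.PeerStateKey, ...) and stub peer.Get(types.PeerStateKey) to
// return one, so the single capability it must expose is GetHeight. A
// compile-time sketch of that contract (the anonymous one-method interface is
// our own illustration, not an upstream type):
var _ interface{ GetHeight() int64 } = peerState{}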
func exampleVote(t byte) *types.Vote {
	var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z")
	if err != nil {
		panic(err)
	}

	return &types.Vote{
		Type:      tmproto.SignedMsgType(t),
		Height:    3,
		Round:     2,
		Timestamp: stamp,
		BlockID: types.BlockID{
			Hash: tmhash.Sum([]byte("blockID_hash")),
			PartSetHeader: types.PartSetHeader{
				Total: 1000000,
				Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
			},
		},
		ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
		ValidatorIndex:   56789,
	}
}

func TestLegacyReactorReceiveBasic(t *testing.T) {
	config := cfg.TestConfig()
	N := 1

	stateDBs := make([]sm.Store, N)
	val := types.NewMockPV()
	stateDBs[0] = initializeValidatorState(val, 1)

	reactors, _ := makeAndConnectReactorsAndPools(config, stateDBs)

	var (
		reactor = reactors[0]
		peer    = &p2pmocks.Peer{}
	)
	quitChan := make(<-chan struct{})
	peer.On("Quit").Return(quitChan)

	reactor.InitPeer(peer)
	reactor.AddPeer(peer)
	e := &tmproto.EvidenceList{}
	msg, err := proto.Marshal(e)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(evidence.EvidenceChannel, peer, msg)
	})
}

//nolint:lll // ignore line length for tests
func TestEvidenceVectors(t *testing.T) {
	val := &types.Validator{
		Address:     crypto.AddressHash([]byte("validator_address")),
		VotingPower: 10,
	}

	valSet := types.NewValidatorSet([]*types.Validator{val})

	dupl := types.NewDuplicateVoteEvidence(
		exampleVote(1),
		exampleVote(2),
		defaultEvidenceTime,
		valSet,
	)

	testCases := []struct {
		testName     string
		evidenceList []types.Evidence
		expBytes     string
	}{
		{"DuplicateVoteEvidence", []types.Evidence{dupl}, "0a85020a82020a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03180a200a2a060880dbaae105"},
	}

	for _, tc := range testCases {
		tc := tc

		evi := make([]tmproto.Evidence, len(tc.evidenceList))
		for i := 0; i < len(tc.evidenceList); i++ {
			ev, err := types.EvidenceToProto(tc.evidenceList[i])
			require.NoError(t, err, tc.testName)
			evi[i] = *ev
		}

		epl := tmproto.EvidenceList{
			Evidence: evi,
		}

		bz, err := epl.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}
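// A hypothetical companion to TestEvidenceVectors (this test is our own
// sketch, not part of the upstream suite): the marshaled EvidenceList should
// survive a decode/re-encode round trip byte-for-byte, which is a cheap check
// that the vector above is at least self-consistent. It relies only on the
// Marshal/Unmarshal pair that gogo/protobuf generates for tmproto types.
func TestEvidenceListRoundTripSketch(t *testing.T) {
	val := &types.Validator{
		Address:     crypto.AddressHash([]byte("validator_address")),
		VotingPower: 10,
	}
	valSet := types.NewValidatorSet([]*types.Validator{val})

	dupl := types.NewDuplicateVoteEvidence(exampleVote(1), exampleVote(2), defaultEvidenceTime, valSet)

	ev, err := types.EvidenceToProto(dupl)
	require.NoError(t, err)
	epl := tmproto.EvidenceList{Evidence: []tmproto.Evidence{*ev}}

	bz, err := epl.Marshal()
	require.NoError(t, err)

	// decode the wire bytes and re-encode; the output must match exactly
	var decoded tmproto.EvidenceList
	require.NoError(t, decoded.Unmarshal(bz))
	bz2, err := decoded.Marshal()
	require.NoError(t, err)
	require.Equal(t, bz, bz2)
}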