// github.com/DFWallet/tendermint-cosmos@v0.0.2/consensus/reactor_test.go

package consensus

import (
	"context"
	"fmt"
	"os"
	"path"
	"runtime"
	"runtime/pprof"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	abcicli "github.com/DFWallet/tendermint-cosmos/abci/client"
	"github.com/DFWallet/tendermint-cosmos/abci/example/kvstore"
	abci "github.com/DFWallet/tendermint-cosmos/abci/types"
	cfg "github.com/DFWallet/tendermint-cosmos/config"
	cstypes "github.com/DFWallet/tendermint-cosmos/consensus/types"
	cryptoenc "github.com/DFWallet/tendermint-cosmos/crypto/encoding"
	"github.com/DFWallet/tendermint-cosmos/crypto/tmhash"
	"github.com/DFWallet/tendermint-cosmos/libs/bits"
	"github.com/DFWallet/tendermint-cosmos/libs/bytes"
	"github.com/DFWallet/tendermint-cosmos/libs/log"
	tmsync "github.com/DFWallet/tendermint-cosmos/libs/sync"
	mempl "github.com/DFWallet/tendermint-cosmos/mempool"
	"github.com/DFWallet/tendermint-cosmos/p2p"
	p2pmock "github.com/DFWallet/tendermint-cosmos/p2p/mock"
	tmproto "github.com/DFWallet/tendermint-cosmos/proto/tendermint/types"
	sm "github.com/DFWallet/tendermint-cosmos/state"
	statemocks "github.com/DFWallet/tendermint-cosmos/state/mocks"
	"github.com/DFWallet/tendermint-cosmos/store"
	"github.com/DFWallet/tendermint-cosmos/types"
)

//----------------------------------------------
// in-process testnets

var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

func startConsensusNet(t *testing.T, css []*State, n int) (
	[]*Reactor,
	[]types.Subscription,
	[]*types.EventBus,
) {
	reactors := make([]*Reactor, n)
	blocksSubs := make([]types.Subscription, 0)
	eventBuses := make([]*types.EventBus, n)
	for i := 0; i < n; i++ {
		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
		if err != nil { t.Fatal(err)}*/
		reactors[i] = NewReactor(css[i], true) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// eventBus is already started with the cs
		eventBuses[i] = css[i].eventBus
		reactors[i].SetEventBus(eventBuses[i])

		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
		require.NoError(t, err)
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
			if err := css[i].blockExec.Store().Save(css[i].state); err != nil {
				t.Error(err)
			}
		}
	}
	// make connected switches and start all reactors
	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
	}, p2p.Connect2Switches)

	// Now that everyone is connected, start the state machines.
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors.
	// TODO: is this still true with the new pubsub?
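	// Assuming this fork tracks upstream Tendermint, the second argument to
	// SwitchToConsensus is the skipWAL flag; passing false keeps the normal
	// WAL catchup behavior.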
	for i := 0; i < n; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, false)
	}
	return reactors, blocksSubs, eventBuses
}

func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
	logger.Info("stopConsensusNet", "n", len(reactors))
	for i, r := range reactors {
		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
		if err := r.Switch.Stop(); err != nil {
			logger.Error("error trying to stop switch", "error", err)
		}
	}
	for i, b := range eventBuses {
		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
		if err := b.Stop(); err != nil {
			logger.Error("error trying to stop eventbus", "error", err)
		}
	}
	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
	nValidators := 4
	testName := "consensus_reactor_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter

	// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
	// to unroll unwieldy abstractions. Here we duplicate the code from:
	// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)

	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*State, nValidators)
	logger := consensusLogger()
	for i := 0; i < nValidators; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		stateStore := sm.NewStore(stateDB)
		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		pv := privVals[i]
		// duplicate code from:
		// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)

		blockDB := dbm.NewMemDB()
		blockStore := store.NewBlockStore(blockDB)

		// one for mempool, one for consensus
		mtx := new(tmsync.Mutex)
		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

		// Make Mempool
		mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		// mock the evidence pool
		// everyone includes evidence of another double signing
		vIdx := (i + 1) % nValidators
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
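		// The testify mock below stands in for a real evidence pool: it
		// accepts any CheckEvidence/Update call and reports the fabricated
		// duplicate-vote evidence as pending, so each proposer includes it
		// in a block without running real evidence verification.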
		evpool := &statemocks.EvidencePool{}
		evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
		evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
			ev}, int64(len(ev.Bytes())))
		evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()

		evpool2 := sm.EmptyEvidencePool{}

		// Make State
		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
		cs.SetPrivValidator(pv)

		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
		err := eventBus.Start()
		require.NoError(t, err)
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())
		cs.SetLogger(logger.With("validator", i, "module", "consensus"))

		css[i] = cs
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// we expect each validator, when it is the proposer, to propose one piece of evidence
	for i := 0; i < nValidators; i++ {
		timeoutWaitGroup(t, nValidators, func(j int) {
			msg := <-blocksSubs[j].Out()
			block := msg.Data().(types.EventDataNewBlock).Block
			assert.Len(t, block.Evidence.Evidence, 1)
		}, css)
	}
}

//------------------------------------

// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		})
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// send a tx
	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
		t.Error(err)
	}

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
		msg     = MustEncode(&HasVoteMessage{Height: 1,
			Round: 1, Index: 1, Type: tmproto.PrevoteType})
	)

	reactor.InitPeer(peer)

	// simulate switch calling Receive before AddPeer
	assert.NotPanics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
		reactor.AddPeer(peer)
	})
}
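// Receive looks up the PeerState that InitPeer installs on the peer; if
// InitPeer was never called there is nothing to look up, so a panic is the
// expected behavior (assuming this fork keeps the upstream peer lifecycle:
// InitPeer, then AddPeer, then Receive).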
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
		msg     = MustEncode(&HasVoteMessage{Height: 1,
			Round: 1, Index: 1, Type: tmproto.PrevoteType})
	)

	// we deliberately do not call InitPeer here

	// simulate switch calling Receive before InitPeer
	assert.Panics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
	})
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	// Get peer
	peer := reactors[1].Switch.Peers().List()[0]
	// Get peer state
	ps := peer.Get(types.PeerStateKey).(*PeerState)

	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
	assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestReactorVotingPowerChange(t *testing.T) {
	nVals := 4
	logger := log.TestingLogger()
	css, cleanup := randConsensusNet(
		nVals,
		"consensus_voting_power_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStore)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		addr := pubKey.Address()
		activeVals[string(addr)] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nVals, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey, err := css[0].privValidator.GetPubKey()
	require.NoError(t, err)

	val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
	require.NoError(t, err)
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
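	// Note: LastValidators is the set that signed the previous block's commit,
	// and validator updates take effect two heights after the block that
	// includes them (upstream Tendermint semantics, assumed to hold in this
	// fork), which is why several blocks are awaited before re-checking.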
	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}
}

func TestReactorValidatorSetChanges(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, _, _, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"consensus_val_set_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)

	defer cleanup()
	logger := log.TestingLogger()

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		activeVals[string(pubKey.Address())] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nPeers, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding one validator")

	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	assert.NoError(t, err)
	valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
	assert.NoError(t, err)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
	// send newValTx to change vals in block 3
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 3.
	// it includes the commit for block 2, which is by the original validator set
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)
	// wait till everyone makes block 4.
	// it includes the commit for block 3, which is by the original validator set
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

	// the commits for block 4 should be with the updated validator set
	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

	// wait till everyone makes block 5
	// it includes the commit for block 4, which should have the updated validator set
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
	require.NoError(t, err)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Errorf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[nVals].GetRoundState().LastValidators.TotalVotingPower())
	}

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding two validators at once")

	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
	require.NoError(t, err)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
	require.NoError(t, err)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	delete(activeVals, string(newValidatorPubKey2.Address()))
	delete(activeVals, string(newValidatorPubKey3.Address()))
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}

// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
	defer cleanup()
	// override the default SkipTimeoutCommit == true for tests
	for i := 0; i < N; i++ {
		css[i].config.SkipTimeoutCommit = false
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N-1, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func waitForAndValidateBlock(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		css[j].Logger.Debug("waitForAndValidateBlock")
		msg := <-blocksSubs[j].Out()
		newBlock := msg.Data().(types.EventDataNewBlock).Block
		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
		err := validateBlock(newBlock, activeVals)
		assert.Nil(t, err)
		for _, tx := range txs {
			err := assertMempool(css[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
			assert.Nil(t, err)
		}
	}, css)
}

func waitForAndValidateBlockWithTx(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		ntxs := 0
	BLOCK_TX_LOOP:
		for {
			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
			msg := <-blocksSubs[j].Out()
			newBlock := msg.Data().(types.EventDataNewBlock).Block
			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
			err := validateBlock(newBlock, activeVals)
			assert.Nil(t, err)

			// check that txs match the txs we're waiting for.
			// note they could be spread over multiple blocks,
			// but they should be in order.
			for _, tx := range newBlock.Data.Txs {
				assert.EqualValues(t, txs[ntxs], tx)
				ntxs++
			}

			if ntxs == len(txs) {
				break BLOCK_TX_LOOP
			}
		}
	}, css)
}

func waitForBlockWithUpdatedValsAndValidateIt(
	t *testing.T,
	n int,
	updatedVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
) {
	timeoutWaitGroup(t, n, func(j int) {
		var newBlock *types.Block
	LOOP:
		for {
			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
			msg := <-blocksSubs[j].Out()
			newBlock = msg.Data().(types.EventDataNewBlock).Block
			if newBlock.LastCommit.Size() == len(updatedVals) {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
				break LOOP
			} else {
				css[j].Logger.Debug(
					"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
					"height",
					newBlock.Height)
			}
		}

		err := validateBlock(newBlock, updatedVals)
		assert.Nil(t, err)
	}, css)
}

// expects high synchrony!
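// validateBlock requires that every active validator signed the block's
// LastCommit; that only holds when all nodes commit each height in lockstep,
// since in a less synchronous network some signatures would legitimately be
// absent.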
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
	if block.LastCommit.Size() != len(activeVals) {
		return fmt.Errorf(
			"commit size doesn't match number of active validators. Got %d, expected %d",
			block.LastCommit.Size(),
			len(activeVals))
	}

	for _, commitSig := range block.LastCommit.Signatures {
		if _, ok := activeVals[string(commitSig.ValidatorAddress)]; !ok {
			return fmt.Errorf("found vote for inactive validator %X", commitSig.ValidatorAddress)
		}
	}
	return nil
}

func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
	wg := new(sync.WaitGroup)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(j int) {
			f(j)
			wg.Done()
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// We're running many nodes in-process, possibly in a virtual machine,
	// and spewing debug messages, so making a block could take a while.
	timeout := time.Second * 120

	select {
	case <-done:
	case <-time.After(timeout):
		for i, cs := range css {
			t.Log("#################")
			t.Log("Validator", i)
			t.Log(cs.GetRoundState())
			t.Log("")
		}
		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		require.NoError(t, err)
		capture()
		panic("Timed out waiting for all validators to commit a block")
	}
}
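// capture dumps the stacks of all goroutines to stdout for post-mortem
// debugging. runtime.Stack truncates the trace if it does not fit, so the
// 10 MB buffer is just a generous upper bound.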
func capture() {
	trace := make([]byte, 10240000)
	count := runtime.Stack(trace, true)
	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
}

//-------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
	testCases := []struct { // nolint: maligned
		expectErr              bool
		messageRound           int32
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
		messageStep            cstypes.RoundStepType
	}{
		{false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight},
		{true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight},
		{true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight},
		{true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1},
		// The following cases will be handled by ValidateHeight
		{false, 0, 0, 1, "H == 1 but LCR != -1 ", cstypes.RoundStepNewHeight},
		{false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           tc.messageRound,
				Step:            tc.messageStep,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateBasic()
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNewRoundStepMessageValidateHeight(t *testing.T) {
	initialHeight := int64(10)
	testCases := []struct { // nolint: maligned
		expectErr              bool
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
	}{
		{false, 0, 11, "Valid Message"},
		{true, 0, -1, "Negative height"},
		{true, 0, 0, "Zero height"},
		{true, 0, 10, "Initial height but LCR != -1 "},
		{true, -1, 11, "Normal height but LCR < 0"},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           0,
				Step:            cstypes.RoundStepNewHeight,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateHeight(initialHeight)
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*NewValidBlockMessage)
		expErr     string
	}{
		{func(msg *NewValidBlockMessage) {}, ""},
		{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
		{
			func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 },
			"blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2",
		},
		{
			func(msg *NewValidBlockMessage) {
				msg.BlockPartSetHeader.Total = 0
				msg.BlockParts = bits.NewBitArray(0)
			},
			"empty blockParts",
		},
		{
			func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
			"blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &NewValidBlockMessage{
				Height: 1,
				Round:  0,
				BlockPartSetHeader: types.PartSetHeader{
					Total: 1,
				},
				BlockParts: bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*ProposalPOLMessage)
		expErr     string
	}{
		{func(msg *ProposalPOLMessage) {}, ""},
		{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
			"proposalPOL bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &ProposalPOLMessage{
				Height:           1,
				ProposalPOLRound: 1,
				ProposalPOL:      bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
	testPart := new(types.Part)
	testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int32
		messagePart   *types.Part
		expectErr     bool
	}{
		{"Valid Message", 0, 0, testPart, false},
		{"Invalid Message", -1, 0, testPart, true},
		{"Invalid Message", 0, -1, testPart, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := BlockPartMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Part:   tc.messagePart,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
	message.Part.Index = 1

	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	testCases := []struct { // nolint: maligned
		expectErr     bool
		messageRound  int32
		messageIndex  int32
		messageHeight int64
		testName      string
		messageType   tmproto.SignedMsgType
	}{
		{false, 0, 0, 0, "Valid Message", validSignedMsgType},
		{true, -1, 0, 0, "Invalid Message", validSignedMsgType},
		{true, 0, -1, 0, "Invalid Message", validSignedMsgType},
		{true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
		{true, 0, 0, -1, "Invalid Message", validSignedMsgType},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := HasVoteMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Type:   tc.messageType,
				Index:  tc.messageIndex,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	validBlockID := types.BlockID{}
	invalidBlockID := types.BlockID{
		Hash: bytes.HexBytes{},
		PartSetHeader: types.PartSetHeader{
			Total: 1,
			Hash:  []byte{0},
		},
	}

	testCases := []struct { // nolint: maligned
		expectErr      bool
		messageRound   int32
		messageHeight  int64
		testName       string
		messageType    tmproto.SignedMsgType
		messageBlockID types.BlockID
	}{
		{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
		{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := VoteSetMaj23Message{
				Height:  tc.messageHeight,
				Round:   tc.messageRound,
				Type:    tc.messageType,
				BlockID: tc.messageBlockID,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*VoteSetBitsMessage)
		expErr     string
	}{
		{func(msg *VoteSetBitsMessage) {}, ""},
		{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
		{func(msg *VoteSetBitsMessage) {
			msg.BlockID = types.BlockID{
				Hash: bytes.HexBytes{},
				PartSetHeader: types.PartSetHeader{
					Total: 1,
					Hash:  []byte{0},
				},
			}
		}, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
		{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
			"votes bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &VoteSetBitsMessage{
				Height:  1,
				Round:   0,
				Type:    0x01,
				Votes:   bits.NewBitArray(1),
				BlockID: types.BlockID{},
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}