github.com/Oyster-zx/tendermint@v0.34.24-fork/consensus/reactor_test.go

package consensus

import (
	"context"
	"fmt"
	"os"
	"path"
	"runtime"
	"runtime/pprof"
	"sync"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	cstypes "github.com/tendermint/tendermint/consensus/types"
	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/libs/bits"
	"github.com/tendermint/tendermint/libs/bytes"
	"github.com/tendermint/tendermint/libs/log"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	mempl "github.com/tendermint/tendermint/mempool"
	mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
	mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
	"github.com/tendermint/tendermint/p2p"
	p2pmock "github.com/tendermint/tendermint/p2p/mock"
	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	sm "github.com/tendermint/tendermint/state"
	statemocks "github.com/tendermint/tendermint/state/mocks"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

//----------------------------------------------
// in-process testnets

var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

func startConsensusNet(t *testing.T, css []*State, n int) (
	[]*Reactor,
	[]types.Subscription,
	[]*types.EventBus,
) {
	reactors := make([]*Reactor, n)
	blocksSubs := make([]types.Subscription, 0)
	eventBuses := make([]*types.EventBus, n)
	for i := 0; i < n; i++ {
		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
		if err != nil { t.Fatal(err)}*/
		reactors[i] = NewReactor(css[i], true) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// eventBus is already started with the cs
		eventBuses[i] = css[i].eventBus
		reactors[i].SetEventBus(eventBuses[i])

		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
		require.NoError(t, err)
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
			if err := css[i].blockExec.Store().Save(css[i].state); err != nil {
				t.Error(err)
			}
		}
	}
	// make connected switches and start all reactors
	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
	}, p2p.Connect2Switches)

	// now that everyone is connected, start the state machines
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
	// TODO: is this still true with new pubsub?
	for i := 0; i < n; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, false)
	}
	return reactors, blocksSubs, eventBuses
}

func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
	logger.Info("stopConsensusNet", "n", len(reactors))
	for i, r := range reactors {
		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
		if err := r.Switch.Stop(); err != nil {
			logger.Error("error trying to stop switch", "error", err)
		}
	}
	for i, b := range eventBuses {
		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
		if err := b.Stop(); err != nil {
			logger.Error("error trying to stop eventbus", "error", err)
		}
	}
	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
	nValidators := 4
	testName := "consensus_reactor_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter

	// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
	// to unroll unwieldy abstractions. Here we duplicate the code from:
	// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)

	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*State, nValidators)
	logger := consensusLogger()
	for i := 0; i < nValidators; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
			DiscardABCIResponses: false,
		})
		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		pv := privVals[i]
		// duplicate code from:
		// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)

		blockDB := dbm.NewMemDB()
		blockStore := store.NewBlockStore(blockDB)

		mtx := new(tmsync.Mutex)
		memplMetrics := mempl.NopMetrics()
		// one for mempool, one for consensus
		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
		proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)

		// Make Mempool
		var mempool mempl.Mempool

		switch config.Mempool.Version {
		case cfg.MempoolV0:
			mempool = mempoolv0.NewCListMempool(config.Mempool,
				proxyAppConnConMem,
				state.LastBlockHeight,
				mempoolv0.WithMetrics(memplMetrics),
				mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
				mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
		case cfg.MempoolV1:
			mempool = mempoolv1.NewTxMempool(logger,
				config.Mempool,
				proxyAppConnConMem,
				state.LastBlockHeight,
				mempoolv1.WithMetrics(memplMetrics),
				mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
				mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
			)
		}
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		// mock the evidence pool
		// everyone includes evidence of another double signing
		vIdx := (i + 1) % nValidators
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
		evpool := &statemocks.EvidencePool{}
		evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
		evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ev}, int64(len(ev.Bytes())))
		evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()

		evpool2 := sm.EmptyEvidencePool{}

		// Make State
		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
		cs.SetPrivValidator(pv)

		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
		err := eventBus.Start()
		require.NoError(t, err)
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())
		cs.SetLogger(logger.With("validator", i, "module", "consensus"))

		css[i] = cs
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// we expect each proposer to include one piece of evidence in its block.
	for i := 0; i < nValidators; i++ {
		timeoutWaitGroup(t, nValidators, func(j int) {
			msg := <-blocksSubs[j].Out()
			block := msg.Data().(types.EventDataNewBlock).Block
			assert.Len(t, block.Evidence.Evidence, 1)
		}, css)
	}
}

//------------------------------------

// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		})
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// send a tx
	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
		t.Error(err)
	}

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
	)

	reactor.InitPeer(peer)

	// simulate switch calling Receive before AddPeer
	assert.NotPanics(t, func() {
		reactor.ReceiveEnvelope(p2p.Envelope{
			ChannelID: StateChannel,
			Src:       peer,
			Message: &tmcons.HasVote{Height: 1,
				Round: 1, Index: 1, Type: tmproto.PrevoteType},
		})
		reactor.AddPeer(peer)
	})
}

func TestLegacyReactorReceiveBasic(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
	)

	reactor.InitPeer(peer)
	v := &tmcons.HasVote{
		Height: 1,
		Round:  1,
		Index:  1,
		Type:   tmproto.PrevoteType,
	}
	w := v.Wrap()
	msg, err := proto.Marshal(w)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
		reactor.AddPeer(peer)
	})
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
	)

	// we deliberately do not call InitPeer here

	// simulate switch calling Receive before AddPeer
	assert.Panics(t, func() {
		reactor.ReceiveEnvelope(p2p.Envelope{
			ChannelID: StateChannel,
			Src:       peer,
			Message: &tmcons.HasVote{Height: 1,
				Round: 1, Index: 1, Type: tmproto.PrevoteType},
		})
	})
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	// Get peer
	peer := reactors[1].Switch.Peers().List()[0]
	// Get peer state
	ps := peer.Get(types.PeerStateKey).(*PeerState)

	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
	assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestReactorVotingPowerChange(t *testing.T) {
	nVals := 4
	logger := log.TestingLogger()
	css, cleanup := randConsensusNet(
		nVals,
		"consensus_voting_power_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStore)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		addr := pubKey.Address()
		activeVals[string(addr)] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nVals, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey, err := css[0].privValidator.GetPubKey()
	require.NoError(t, err)

	val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
	require.NoError(t, err)
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}
}

func TestReactorValidatorSetChanges(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, _, _, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"consensus_val_set_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)

	defer cleanup()
	logger := log.TestingLogger()

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		activeVals[string(pubKey.Address())] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nPeers, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding one validator")

	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	assert.NoError(t, err)
	valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
	assert.NoError(t, err)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
	// send newValTx to change vals in block 3
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 3.
	// it includes the commit for block 2, which is by the original validator set
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 4.
	// it includes the commit for block 3, which is by the original validator set
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

	// the commits for block 4 should be with the updated validator set
	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

	// wait till everyone makes block 5
	// it includes the commit for block 4, which should have the updated validator set
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
	require.NoError(t, err)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Errorf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[nVals].GetRoundState().LastValidators.TotalVotingPower())
	}

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding two validators at once")

	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
	require.NoError(t, err)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
	require.NoError(t, err)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	delete(activeVals, string(newValidatorPubKey2.Address()))
	delete(activeVals, string(newValidatorPubKey3.Address()))
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}

// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
	defer cleanup()
	// override default SkipTimeoutCommit == true for tests
	for i := 0; i < N; i++ {
		css[i].config.SkipTimeoutCommit = false
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N-1, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func waitForAndValidateBlock(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		css[j].Logger.Debug("waitForAndValidateBlock")
		msg := <-blocksSubs[j].Out()
		newBlock := msg.Data().(types.EventDataNewBlock).Block
		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
		err := validateBlock(newBlock, activeVals)
		assert.Nil(t, err)
		for _, tx := range txs {
			err := assertMempool(css[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
			assert.Nil(t, err)
		}
	}, css)
}

func waitForAndValidateBlockWithTx(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		ntxs := 0
	BLOCK_TX_LOOP:
		for {
			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
			msg := <-blocksSubs[j].Out()
			newBlock := msg.Data().(types.EventDataNewBlock).Block
			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
			err := validateBlock(newBlock, activeVals)
			assert.Nil(t, err)

			// check that txs match the txs we're waiting for.
			// note they could be spread over multiple blocks,
			// but they should be in order.
			for _, tx := range newBlock.Data.Txs {
				assert.EqualValues(t, txs[ntxs], tx)
				ntxs++
			}

			if ntxs == len(txs) {
				break BLOCK_TX_LOOP
			}
		}
	}, css)
}

func waitForBlockWithUpdatedValsAndValidateIt(
	t *testing.T,
	n int,
	updatedVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
) {
	timeoutWaitGroup(t, n, func(j int) {
		var newBlock *types.Block
	LOOP:
		for {
			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
			msg := <-blocksSubs[j].Out()
			newBlock = msg.Data().(types.EventDataNewBlock).Block
			if newBlock.LastCommit.Size() == len(updatedVals) {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
				break LOOP
			} else {
				css[j].Logger.Debug(
					"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
					"height",
					newBlock.Height)
			}
		}

		err := validateBlock(newBlock, updatedVals)
		assert.Nil(t, err)
	}, css)
}

// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
	if block.LastCommit.Size() != len(activeVals) {
		return fmt.Errorf(
			"commit size doesn't match number of active validators. Got %d, expected %d",
			block.LastCommit.Size(),
			len(activeVals))
	}

	for _, commitSig := range block.LastCommit.Signatures {
		if _, ok := activeVals[string(commitSig.ValidatorAddress)]; !ok {
			return fmt.Errorf("found vote for inactive validator %X", commitSig.ValidatorAddress)
		}
	}
	return nil
}

func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
	wg := new(sync.WaitGroup)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(j int) {
			f(j)
			wg.Done()
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// we're running many nodes in-process, possibly in a virtual machine,
	// and spewing debug messages - making a block could take a while.
	timeout := time.Second * 120

	select {
	case <-done:
	case <-time.After(timeout):
		for i, cs := range css {
			t.Log("#################")
			t.Log("Validator", i)
			t.Log(cs.GetRoundState())
			t.Log("")
		}
		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		require.NoError(t, err)
		capture()
		panic("Timed out waiting for all validators to commit a block")
	}
}

func capture() {
	trace := make([]byte, 10240000)
	count := runtime.Stack(trace, true)
	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
}

//-------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		expectErr              bool
		messageRound           int32
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
		messageStep            cstypes.RoundStepType
	}{
		{false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight},
		{true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight},
		{true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight},
		{true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1},
		// The following cases will be handled by ValidateHeight
		{false, 0, 0, 1, "H == 1 but LCR != -1 ", cstypes.RoundStepNewHeight},
		{false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           tc.messageRound,
				Step:            tc.messageStep,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateBasic()
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNewRoundStepMessageValidateHeight(t *testing.T) {
	initialHeight := int64(10)
	testCases := []struct { //nolint: maligned
		expectErr              bool
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
	}{
		{false, 0, 11, "Valid Message"},
		{true, 0, -1, "Negative height"},
		{true, 0, 0, "Zero height"},
		{true, 0, 10, "Initial height but LCR != -1 "},
		{true, -1, 11, "Normal height but LCR < 0"},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           0,
				Step:            cstypes.RoundStepNewHeight,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateHeight(initialHeight)
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*NewValidBlockMessage)
		expErr     string
	}{
		{func(msg *NewValidBlockMessage) {}, ""},
		{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
		{
			func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 },
			"blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2",
		},
		{
			func(msg *NewValidBlockMessage) {
				msg.BlockPartSetHeader.Total = 0
				msg.BlockParts = bits.NewBitArray(0)
			},
			"empty blockParts",
		},
		{
			func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
			"blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &NewValidBlockMessage{
				Height: 1,
				Round:  0,
				BlockPartSetHeader: types.PartSetHeader{
					Total: 1,
				},
				BlockParts: bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*ProposalPOLMessage)
		expErr     string
	}{
		{func(msg *ProposalPOLMessage) {}, ""},
		{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
			"proposalPOL bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &ProposalPOLMessage{
				Height:           1,
				ProposalPOLRound: 1,
				ProposalPOL:      bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
	testPart := new(types.Part)
	testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int32
		messagePart   *types.Part
		expectErr     bool
	}{
		{"Valid Message", 0, 0, testPart, false},
		{"Invalid Message", -1, 0, testPart, true},
		{"Invalid Message", 0, -1, testPart, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := BlockPartMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Part:   tc.messagePart,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}

	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
	message.Part.Index = 1

	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	testCases := []struct { //nolint: maligned
		expectErr     bool
		messageRound  int32
		messageIndex  int32
		messageHeight int64
		testName      string
		messageType   tmproto.SignedMsgType
	}{
		{false, 0, 0, 0, "Valid Message", validSignedMsgType},
		{true, -1, 0, 0, "Invalid Message", validSignedMsgType},
		{true, 0, -1, 0, "Invalid Message", validSignedMsgType},
		{true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
		{true, 0, 0, -1, "Invalid Message", validSignedMsgType},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := HasVoteMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Type:   tc.messageType,
				Index:  tc.messageIndex,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	validBlockID := types.BlockID{}
	invalidBlockID := types.BlockID{
		Hash: bytes.HexBytes{},
		PartSetHeader: types.PartSetHeader{
			Total: 1,
			Hash:  []byte{0},
		},
	}

	testCases := []struct { //nolint: maligned
		expectErr      bool
		messageRound   int32
		messageHeight  int64
		testName       string
		messageType    tmproto.SignedMsgType
		messageBlockID types.BlockID
	}{
		{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
		{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := VoteSetMaj23Message{
				Height:  tc.messageHeight,
				Round:   tc.messageRound,
				Type:    tc.messageType,
				BlockID: tc.messageBlockID,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*VoteSetBitsMessage)
		expErr     string
	}{
		{func(msg *VoteSetBitsMessage) {}, ""},
		{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
		{func(msg *VoteSetBitsMessage) {
			msg.BlockID = types.BlockID{
				Hash: bytes.HexBytes{},
				PartSetHeader: types.PartSetHeader{
					Total: 1,
					Hash:  []byte{0},
				},
			}
		}, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
		{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
			"votes bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &VoteSetBitsMessage{
				Height:  1,
				Round:   0,
				Type:    0x01,
				Votes:   bits.NewBitArray(1),
				BlockID: types.BlockID{},
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}