github.com/badrootd/nibiru-cometbft@v0.37.5-0.20240307173500-2a75559eee9b/consensus/reactor_test.go

package consensus

import (
    "context"
    "fmt"
    "os"
    "path"
    "runtime"
    "runtime/pprof"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"

    dbm "github.com/badrootd/nibiru-db"

    abcicli "github.com/badrootd/nibiru-cometbft/abci/client"
    "github.com/badrootd/nibiru-cometbft/abci/example/kvstore"
    abci "github.com/badrootd/nibiru-cometbft/abci/types"
    cfg "github.com/badrootd/nibiru-cometbft/config"
    cstypes "github.com/badrootd/nibiru-cometbft/consensus/types"
    cryptoenc "github.com/badrootd/nibiru-cometbft/crypto/encoding"
    "github.com/badrootd/nibiru-cometbft/crypto/tmhash"
    "github.com/badrootd/nibiru-cometbft/libs/bits"
    "github.com/badrootd/nibiru-cometbft/libs/bytes"
    "github.com/badrootd/nibiru-cometbft/libs/json"
    "github.com/badrootd/nibiru-cometbft/libs/log"
    cmtsync "github.com/badrootd/nibiru-cometbft/libs/sync"
    mempl "github.com/badrootd/nibiru-cometbft/mempool"
    mempoolv0 "github.com/badrootd/nibiru-cometbft/mempool/v0"
    mempoolv1 "github.com/badrootd/nibiru-cometbft/mempool/v1" //nolint:staticcheck // SA1019 Priority mempool deprecated but still supported in this release.
    "github.com/badrootd/nibiru-cometbft/p2p"
    p2pmock "github.com/badrootd/nibiru-cometbft/p2p/mock"
    cmtcons "github.com/badrootd/nibiru-cometbft/proto/tendermint/consensus"
    cmtproto "github.com/badrootd/nibiru-cometbft/proto/tendermint/types"
    sm "github.com/badrootd/nibiru-cometbft/state"
    statemocks "github.com/badrootd/nibiru-cometbft/state/mocks"
    "github.com/badrootd/nibiru-cometbft/store"
    "github.com/badrootd/nibiru-cometbft/types"
)

//----------------------------------------------
// in-process testnets

var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

func startConsensusNet(t *testing.T, css []*State, n int) (
    []*Reactor,
    []types.Subscription,
    []*types.EventBus,
) {
    reactors := make([]*Reactor, n)
    blocksSubs := make([]types.Subscription, 0)
    eventBuses := make([]*types.EventBus, n)
    for i := 0; i < n; i++ {
        /*logger, err := cmtflags.ParseLogLevel("consensus:info,*:error", logger, "info")
        if err != nil { t.Fatal(err)}*/
        reactors[i] = NewReactor(css[i], true) // so we don't start the consensus states
        reactors[i].SetLogger(css[i].Logger)

        // eventBus is already started with the cs
        eventBuses[i] = css[i].eventBus
        reactors[i].SetEventBus(eventBuses[i])

        blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
        require.NoError(t, err)
        blocksSubs = append(blocksSubs, blocksSub)

        if css[i].state.LastBlockHeight == 0 { // simulate handle initChain in handshake
            if err := css[i].blockExec.Store().Save(css[i].state); err != nil {
                t.Error(err)
            }
        }
    }
    // make connected switches and start all reactors
    p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
        s.AddReactor("CONSENSUS", reactors[i])
        s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
        return s
    }, p2p.Connect2Switches)

    // now that everyone is connected, start the state machines
    // If we started the state machines before everyone was connected,
    // we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
    // TODO: is this still true with new pubsub?
    for i := 0; i < n; i++ {
        s := reactors[i].conS.GetState()
        reactors[i].SwitchToConsensus(s, false)
    }
    return reactors, blocksSubs, eventBuses
}

func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
    logger.Info("stopConsensusNet", "n", len(reactors))
    for i, r := range reactors {
        logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
        if err := r.Switch.Stop(); err != nil {
            logger.Error("error trying to stop switch", "error", err)
        }
    }
    for i, b := range eventBuses {
        logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
        if err := b.Stop(); err != nil {
            logger.Error("error trying to stop eventbus", "error", err)
        }
    }
    logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
    N := 4
    css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
    defer cleanup()
    reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
    defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
    // wait till everyone makes the first new block
    timeoutWaitGroup(t, N, func(j int) {
        <-blocksSubs[j].Out()
    }, css)
}

// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
    nValidators := 4
    testName := "consensus_reactor_test"
    tickerFunc := newMockTickerFunc(true)
    appFunc := newKVStore

    // heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
    // to unroll unwieldy abstractions. Here we duplicate the code from:
    // css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)

    genDoc, privVals := randGenesisDoc(nValidators, false, 30)
    css := make([]*State, nValidators)
    logger := consensusLogger()
    for i := 0; i < nValidators; i++ {
        stateDB := dbm.NewMemDB() // each state needs its own db
        stateStore := sm.NewStore(stateDB, sm.StoreOptions{
            DiscardABCIResponses: false,
        })
        state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
        thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
        defer os.RemoveAll(thisConfig.RootDir)
        ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal
        app := appFunc()
        vals := types.TM2PB.ValidatorUpdates(state.Validators)
        app.InitChain(abci.RequestInitChain{Validators: vals})

        pv := privVals[i]
        // duplicate code from:
        // css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)

        blockDB := dbm.NewMemDB()
        blockStore := store.NewBlockStore(blockDB)

        mtx := new(cmtsync.Mutex)
        memplMetrics := mempl.NopMetrics()
        // one for mempool, one for consensus
        proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
        proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)

        // Make Mempool
        var mempool mempl.Mempool

        switch config.Mempool.Version {
        case cfg.MempoolV0:
            mempool = mempoolv0.NewCListMempool(config.Mempool,
                proxyAppConnConMem,
                state.LastBlockHeight,
                mempoolv0.WithMetrics(memplMetrics),
                mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
                mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
        case cfg.MempoolV1:
            mempool = mempoolv1.NewTxMempool(logger,
                config.Mempool,
                proxyAppConnConMem,
                state.LastBlockHeight,
                mempoolv1.WithMetrics(memplMetrics),
                mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
                mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
            )
        }
        if thisConfig.Consensus.WaitForTxs() {
            mempool.EnableTxsAvailable()
        }

        // mock the evidence pool
        // everyone includes evidence of another double signing
        vIdx := (i + 1) % nValidators
        ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
        require.NoError(t, err)
        evpool := &statemocks.EvidencePool{}
        evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
        evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
            ev,
        }, int64(len(ev.Bytes())))
        evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()

        evpool2 := sm.EmptyEvidencePool{}

        // Make State
        blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
        cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
        cs.SetLogger(log.TestingLogger().With("module", "consensus"))
        cs.SetPrivValidator(pv)

        eventBus := types.NewEventBus()
        eventBus.SetLogger(log.TestingLogger().With("module", "events"))
        err = eventBus.Start()
        require.NoError(t, err)
        cs.SetEventBus(eventBus)

        cs.SetTimeoutTicker(tickerFunc())
        cs.SetLogger(logger.With("validator", i, "module", "consensus"))

        css[i] = cs
    }

    reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
    defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

    // we expect each validator that is the proposer to propose one piece of evidence.
    for i := 0; i < nValidators; i++ {
        timeoutWaitGroup(t, nValidators, func(j int) {
            msg := <-blocksSubs[j].Out()
            block := msg.Data().(types.EventDataNewBlock).Block
            assert.Len(t, block.Evidence.Evidence, 1)
        }, css)
    }
}

//------------------------------------

// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
    N := 4
    css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
        func(c *cfg.Config) {
            c.Consensus.CreateEmptyBlocks = false
        })
    defer cleanup()
    reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
    defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

    // send a tx
    if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
        t.Error(err)
    }

    // wait till everyone makes the first new block
    timeoutWaitGroup(t, N, func(j int) {
        <-blocksSubs[j].Out()
    }, css)
}

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
    N := 1
    css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
    defer cleanup()
    reactors, _, eventBuses := startConsensusNet(t, css, N)
    defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

    var (
        reactor = reactors[0]
        peer    = p2pmock.NewPeer(nil)
    )

    reactor.InitPeer(peer)

    // simulate switch calling Receive before AddPeer
    assert.NotPanics(t, func() {
        reactor.ReceiveEnvelope(p2p.Envelope{
            ChannelID: StateChannel,
            Src:       peer,
            Message: &cmtcons.HasVote{Height: 1,
                Round: 1, Index: 1, Type: cmtproto.PrevoteType},
        })
        reactor.AddPeer(peer)
    })
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
    N := 1
    css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
    defer cleanup()
    reactors, _, eventBuses := startConsensusNet(t, css, N)
    defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

    var (
        reactor = reactors[0]
        peer    = p2pmock.NewPeer(nil)
    )

    // we should call InitPeer here

    // simulate switch calling Receive before AddPeer
    assert.Panics(t, func() {
        reactor.ReceiveEnvelope(p2p.Envelope{
            ChannelID: StateChannel,
            Src:       peer,
            Message: &cmtcons.HasVote{Height: 1,
                Round: 1, Index: 1, Type: cmtproto.PrevoteType},
        })
    })
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
    N := 4
    css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
    defer cleanup()
    reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
    defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

    // wait till everyone makes the first new block
    timeoutWaitGroup(t, N, func(j int) {
        <-blocksSubs[j].Out()
    }, css)

    // Get peer
    peer := reactors[1].Switch.Peers().List()[0]
    // Get peer state
    ps := peer.Get(types.PeerStateKey).(*PeerState)

    assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
    assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestReactorVotingPowerChange(t *testing.T) {
    nVals := 4
    logger := log.TestingLogger()
    css, cleanup := randConsensusNet(
        nVals,
        "consensus_voting_power_changes_test",
        newMockTickerFunc(true),
        newPersistentKVStore)
    defer cleanup()
    reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
    defer stopConsensusNet(logger, reactors, eventBuses)

    // map of active validators
    activeVals := make(map[string]struct{})
    for i := 0; i < nVals; i++ {
        pubKey, err := css[i].privValidator.GetPubKey()
        require.NoError(t, err)
        addr := pubKey.Address()
        activeVals[string(addr)] = struct{}{}
    }

    // wait till everyone makes block 1
    timeoutWaitGroup(t, nVals, func(j int) {
        <-blocksSubs[j].Out()
    }, css)

    //---------------------------------------------------------------------------
    logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

    val1PubKey, err := css[0].privValidator.GetPubKey()
    require.NoError(t, err)

    val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
    require.NoError(t, err)
    updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
    previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
    waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

    if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
        t.Fatalf(
            "expected voting power to change (before: %d, after: %d)",
            previousTotalVotingPower,
            css[0].GetRoundState().LastValidators.TotalVotingPower())
    }

    updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
    previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
    waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

    if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
        t.Fatalf(
            "expected voting power to change (before: %d, after: %d)",
            previousTotalVotingPower,
            css[0].GetRoundState().LastValidators.TotalVotingPower())
    }

    updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
    previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
    waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
    waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

    if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
        t.Fatalf(
            "expected voting power to change (before: %d, after: %d)",
            previousTotalVotingPower,
            css[0].GetRoundState().LastValidators.TotalVotingPower())
    }
}

func TestReactorValidatorSetChanges(t *testing.T) {
    nPeers := 7
    nVals := 4
    css, _, _, cleanup := randConsensusNetWithPeers(
        nVals,
        nPeers,
        "consensus_val_set_changes_test",
        newMockTickerFunc(true),
        newPersistentKVStoreWithPath)

    defer cleanup()
    logger := log.TestingLogger()

    reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
    defer stopConsensusNet(logger, reactors, eventBuses)

    // map of active validators
    activeVals := make(map[string]struct{})
    for i := 0; i < nVals; i++ {
        pubKey, err := css[i].privValidator.GetPubKey()
        require.NoError(t, err)
        activeVals[string(pubKey.Address())] = struct{}{}
    }

    // wait till everyone makes block 1
    timeoutWaitGroup(t, nPeers, func(j int) {
        <-blocksSubs[j].Out()
    }, css)

    //---------------------------------------------------------------------------
    logger.Info("---------------------------- Testing adding one validator")

    newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
    assert.NoError(t, err)
    valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
    assert.NoError(t, err)
    newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

    // wait till everyone makes block 2
    // ensure the commit includes all validators
    // send newValTx to change vals in block 3
    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

    // wait till everyone makes block 3.
    // it includes the commit for block 2, which is by the original validator set
    waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

    // wait till everyone makes block 4.
    // it includes the commit for block 3, which is by the original validator set
    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

    // the commits for block 4 should be with the updated validator set
    activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

    // wait till everyone makes block 5
    // it includes the commit for block 4, which should have the updated validator set
    waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

    //---------------------------------------------------------------------------
    logger.Info("---------------------------- Testing changing the voting power of one validator")

    updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
    require.NoError(t, err)
    updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
    require.NoError(t, err)
    updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
    previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
    waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
    waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

    if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
        t.Errorf(
            "expected voting power to change (before: %d, after: %d)",
            previousTotalVotingPower,
            css[nVals].GetRoundState().LastValidators.TotalVotingPower())
    }

    //---------------------------------------------------------------------------
    logger.Info("---------------------------- Testing adding two validators at once")

    newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
    require.NoError(t, err)
    newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
    require.NoError(t, err)
    newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

    newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
    require.NoError(t, err)
    newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
    require.NoError(t, err)
    newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
    waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
    activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
    activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
    waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

    //---------------------------------------------------------------------------
    logger.Info("---------------------------- Testing removing two validators at once")

    removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
    removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
    waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
    waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
    delete(activeVals, string(newValidatorPubKey2.Address()))
    delete(activeVals, string(newValidatorPubKey3.Address()))
    waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}

// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
    N := 4
    css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
    defer cleanup()
    // override default SkipTimeoutCommit == true for tests
    for i := 0; i < N; i++ {
        css[i].config.SkipTimeoutCommit = false
    }

    reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
    defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

    // wait till everyone makes the first new block
    timeoutWaitGroup(t, N-1, func(j int) {
        <-blocksSubs[j].Out()
    }, css)
}

func waitForAndValidateBlock(
    t *testing.T,
    n int,
    activeVals map[string]struct{},
    blocksSubs []types.Subscription,
    css []*State,
    txs ...[]byte,
) {
    timeoutWaitGroup(t, n, func(j int) {
        css[j].Logger.Debug("waitForAndValidateBlock")
        msg := <-blocksSubs[j].Out()
        newBlock := msg.Data().(types.EventDataNewBlock).Block
        css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
        err := validateBlock(newBlock, activeVals)
        assert.Nil(t, err)
        for _, tx := range txs {
            err := assertMempool(css[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
            assert.Nil(t, err)
        }
    }, css)
}

func waitForAndValidateBlockWithTx(
    t *testing.T,
    n int,
    activeVals map[string]struct{},
    blocksSubs []types.Subscription,
    css []*State,
    txs ...[]byte,
) {
    timeoutWaitGroup(t, n, func(j int) {
        ntxs := 0
    BLOCK_TX_LOOP:
        for {
            css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
            msg := <-blocksSubs[j].Out()
            newBlock := msg.Data().(types.EventDataNewBlock).Block
            css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
            err := validateBlock(newBlock, activeVals)
            assert.Nil(t, err)

            // check that txs match the txs we're waiting for.
            // note they could be spread over multiple blocks,
            // but they should be in order.
            for _, tx := range newBlock.Data.Txs {
                assert.EqualValues(t, txs[ntxs], tx)
                ntxs++
            }

            if ntxs == len(txs) {
                break BLOCK_TX_LOOP
            }
        }
    }, css)
}

func waitForBlockWithUpdatedValsAndValidateIt(
    t *testing.T,
    n int,
    updatedVals map[string]struct{},
    blocksSubs []types.Subscription,
    css []*State,
) {
    timeoutWaitGroup(t, n, func(j int) {
        var newBlock *types.Block
    LOOP:
        for {
            css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
            msg := <-blocksSubs[j].Out()
            newBlock = msg.Data().(types.EventDataNewBlock).Block
            if newBlock.LastCommit.Size() == len(updatedVals) {
                css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
                break LOOP
            } else {
                css[j].Logger.Debug(
                    "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
                    "height",
                    newBlock.Height)
            }
        }

        err := validateBlock(newBlock, updatedVals)
        assert.Nil(t, err)
    }, css)
}

// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
    if block.LastCommit.Size() != len(activeVals) {
        return fmt.Errorf(
            "commit size doesn't match number of active validators. Got %d, expected %d",
            block.LastCommit.Size(),
            len(activeVals))
    }

    for _, commitSig := range block.LastCommit.Signatures {
        if _, ok := activeVals[string(commitSig.ValidatorAddress)]; !ok {
            return fmt.Errorf("found vote for inactive validator %X", commitSig.ValidatorAddress)
        }
    }
    return nil
}

func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
    wg := new(sync.WaitGroup)
    wg.Add(n)
    for i := 0; i < n; i++ {
        go func(j int) {
            f(j)
            wg.Done()
        }(i)
    }

    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()

    // we're running many nodes in-process, possibly in a virtual machine,
    // and spewing debug messages - making a block could take a while
    timeout := time.Second * 120

    select {
    case <-done:
    case <-time.After(timeout):
        for i, cs := range css {
            t.Log("#################")
            t.Log("Validator", i)
            t.Log(cs.GetRoundState())
            t.Log("")
        }
        os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
        err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
        require.NoError(t, err)
        capture()
        panic("Timed out waiting for all validators to commit a block")
    }
}

func capture() {
    trace := make([]byte, 10240000)
    count := runtime.Stack(trace, true)
    fmt.Printf("Stack of %d bytes: %s\n", count, trace)
}

//-------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
    testCases := []struct { //nolint: maligned
        expectErr              bool
        messageRound           int32
        messageLastCommitRound int32
        messageHeight          int64
        testName               string
        messageStep            cstypes.RoundStepType
    }{
        {false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight},
        {true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight},
        {true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight},
        {true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1},
        // The following cases will be handled by ValidateHeight
        {false, 0, 0, 1, "H == 1 but LCR != -1 ", cstypes.RoundStepNewHeight},
        {false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight},
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.testName, func(t *testing.T) {
            message := NewRoundStepMessage{
                Height:          tc.messageHeight,
                Round:           tc.messageRound,
                Step:            tc.messageStep,
                LastCommitRound: tc.messageLastCommitRound,
            }

            err := message.ValidateBasic()
            if tc.expectErr {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
        })
    }
}

func TestNewRoundStepMessageValidateHeight(t *testing.T) {
    initialHeight := int64(10)
    testCases := []struct { //nolint: maligned
        expectErr              bool
        messageLastCommitRound int32
        messageHeight          int64
        testName               string
    }{
        {false, 0, 11, "Valid Message"},
        {true, 0, -1, "Negative height"},
        {true, 0, 0, "Zero height"},
        {true, 0, 10, "Initial height but LCR != -1 "},
        {true, -1, 11, "Normal height but LCR < 0"},
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.testName, func(t *testing.T) {
            message := NewRoundStepMessage{
                Height:          tc.messageHeight,
                Round:           0,
                Step:            cstypes.RoundStepNewHeight,
                LastCommitRound: tc.messageLastCommitRound,
            }

            err := message.ValidateHeight(initialHeight)
            if tc.expectErr {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
        })
    }
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
    testCases := []struct {
        malleateFn func(*NewValidBlockMessage)
        expErr     string
    }{
        {func(msg *NewValidBlockMessage) {}, ""},
        {func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
        {func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
        {
            func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 },
            "blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2",
        },
        {
            func(msg *NewValidBlockMessage) {
                msg.BlockPartSetHeader.Total = 0
                msg.BlockParts = bits.NewBitArray(0)
            },
            "empty blockParts",
        },
        {
            func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
            "blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
        },
    }

    for i, tc := range testCases {
        tc := tc
        t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
            msg := &NewValidBlockMessage{
                Height: 1,
                Round:  0,
                BlockPartSetHeader: types.PartSetHeader{
                    Total: 1,
                },
                BlockParts: bits.NewBitArray(1),
            }

            tc.malleateFn(msg)
            err := msg.ValidateBasic()
            if tc.expErr != "" && assert.Error(t, err) {
                assert.Contains(t, err.Error(), tc.expErr)
            }
        })
    }
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
    testCases := []struct {
        malleateFn func(*ProposalPOLMessage)
        expErr     string
    }{
        {func(msg *ProposalPOLMessage) {}, ""},
        {func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
        {func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
        {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
        {
            func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
            "proposalPOL bit array is too big: 10001, max: 10000",
        },
    }

    for i, tc := range testCases {
        tc := tc
        t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
            msg := &ProposalPOLMessage{
                Height:           1,
                ProposalPOLRound: 1,
                ProposalPOL:      bits.NewBitArray(1),
            }

            tc.malleateFn(msg)
            err := msg.ValidateBasic()
            if tc.expErr != "" && assert.Error(t, err) {
                assert.Contains(t, err.Error(), tc.expErr)
            }
        })
    }
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
    testPart := new(types.Part)
    testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
    testCases := []struct {
        testName      string
        messageHeight int64
        messageRound  int32
        messagePart   *types.Part
        expectErr     bool
    }{
        {"Valid Message", 0, 0, testPart, false},
        {"Invalid Message", -1, 0, testPart, true},
        {"Invalid Message", 0, -1, testPart, true},
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.testName, func(t *testing.T) {
            message := BlockPartMessage{
                Height: tc.messageHeight,
                Round:  tc.messageRound,
                Part:   tc.messagePart,
            }

            assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
        })
    }

    message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
    message.Part.Index = 1

    assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
    const (
        validSignedMsgType   cmtproto.SignedMsgType = 0x01
        invalidSignedMsgType cmtproto.SignedMsgType = 0x03
    )

    testCases := []struct { //nolint: maligned
        expectErr     bool
        messageRound  int32
        messageIndex  int32
        messageHeight int64
        testName      string
        messageType   cmtproto.SignedMsgType
    }{
        {false, 0, 0, 0, "Valid Message", validSignedMsgType},
        {true, -1, 0, 0, "Invalid Message", validSignedMsgType},
        {true, 0, -1, 0, "Invalid Message", validSignedMsgType},
        {true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
        {true, 0, 0, -1, "Invalid Message", validSignedMsgType},
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.testName, func(t *testing.T) {
            message := HasVoteMessage{
                Height: tc.messageHeight,
                Round:  tc.messageRound,
                Type:   tc.messageType,
                Index:  tc.messageIndex,
            }

            assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
        })
    }
}

func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
    const (
        validSignedMsgType   cmtproto.SignedMsgType = 0x01
        invalidSignedMsgType cmtproto.SignedMsgType = 0x03
    )

    validBlockID := types.BlockID{}
    invalidBlockID := types.BlockID{
        Hash: bytes.HexBytes{},
        PartSetHeader: types.PartSetHeader{
            Total: 1,
            Hash:  []byte{0},
        },
    }

    testCases := []struct { //nolint: maligned
        expectErr      bool
        messageRound   int32
        messageHeight  int64
        testName       string
        messageType    cmtproto.SignedMsgType
        messageBlockID types.BlockID
    }{
        {false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
        {true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
        {true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
        {true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
        {true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.testName, func(t *testing.T) {
            message := VoteSetMaj23Message{
                Height:  tc.messageHeight,
                Round:   tc.messageRound,
                Type:    tc.messageType,
                BlockID: tc.messageBlockID,
            }

            assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
        })
    }
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
    testCases := []struct {
        malleateFn func(*VoteSetBitsMessage)
        expErr     string
    }{
        {func(msg *VoteSetBitsMessage) {}, ""},
        {func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
        {func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
        {func(msg *VoteSetBitsMessage) {
            msg.BlockID = types.BlockID{
                Hash: bytes.HexBytes{},
                PartSetHeader: types.PartSetHeader{
                    Total: 1,
                    Hash:  []byte{0},
                },
            }
        }, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
        {
            func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
            "votes bit array is too big: 10001, max: 10000",
        },
    }

    for i, tc := range testCases {
        tc := tc
        t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
            msg := &VoteSetBitsMessage{
                Height:  1,
                Round:   0,
                Type:    0x01,
                Votes:   bits.NewBitArray(1),
                BlockID: types.BlockID{},
            }

            tc.malleateFn(msg)
            err := msg.ValidateBasic()
            if tc.expErr != "" && assert.Error(t, err) {
                assert.Contains(t, err.Error(), tc.expErr)
            }
        })
    }
}

func TestMarshalJSONPeerState(t *testing.T) {
    ps := NewPeerState(nil)
    data, err := json.Marshal(ps)
    require.NoError(t, err)
    require.JSONEq(t, `{
        "round_state":{
            "height": "0",
            "round": -1,
            "step": 0,
            "start_time": "0001-01-01T00:00:00Z",
            "proposal": false,
            "proposal_block_part_set_header":
                {"total":0, "hash":""},
            "proposal_block_parts": null,
            "proposal_pol_round": -1,
            "proposal_pol": null,
            "prevotes": null,
            "precommits": null,
            "last_commit_round": -1,
            "last_commit": null,
            "catchup_commit_round": -1,
            "catchup_commit": null
        },
        "stats":{
            "votes":"0",
            "block_parts":"0"}
    }`, string(data))
}