github.com/pokt-network/tendermint@v0.32.11-0.20230426215212-59310158d3e9/consensus/reactor_test.go

package consensus

import (
	"context"
	"fmt"
	"os"
	"path"
	"runtime"
	"runtime/pprof"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	cstypes "github.com/tendermint/tendermint/consensus/types"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/libs/bits"
	"github.com/tendermint/tendermint/libs/bytes"
	"github.com/tendermint/tendermint/libs/log"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/mock"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

//----------------------------------------------
// in-process testnets

func startConsensusNet(t *testing.T, css []*State, n int) (
	[]*Reactor,
	[]types.Subscription,
	[]*types.EventBus,
) {
	reactors := make([]*Reactor, n)
	blocksSubs := make([]types.Subscription, 0)
	eventBuses := make([]*types.EventBus, n)
	for i := 0; i < n; i++ {
		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
		if err != nil { t.Fatal(err)}*/
		reactors[i] = NewReactor(css[i], true) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// eventBus is already started with the cs
		eventBuses[i] = css[i].eventBus
		reactors[i].SetEventBus(eventBuses[i])

		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
		require.NoError(t, err)
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in handshake
			sm.SaveState(css[i].blockExec.DB(), css[i].state)
		}
	}
	// make connected switches and start all reactors
	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
	}, p2p.Connect2Switches)

	// now that everyone is connected, start the state machines.
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors.
	// TODO: is this still true with the new pubsub?
	for i := 0; i < n; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, 0)
	}
	return reactors, blocksSubs, eventBuses
}
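// Typical usage of startConsensusNet (above) and stopConsensusNet (below),
// exactly the pattern TestReactorBasic follows: build the states, start the
// network, and defer shutdown so the switches and event buses are stopped
// even when the test fails.
//
//	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
//	defer cleanup()
//	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
//	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)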
func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
	logger.Info("stopConsensusNet", "n", len(reactors))
	for i, r := range reactors {
		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
		r.Switch.Stop()
	}
	for i, b := range eventBuses {
		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
		b.Stop()
	}
	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}
// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
	types.RegisterMockEvidences(cdc)
	types.RegisterMockEvidences(types.GetCodec())

	nValidators := 4
	testName := "consensus_reactor_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter

	// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
	// to unroll unwieldy abstractions. Here we duplicate the code from:
	// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)

	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*State, nValidators)
	logger := consensusLogger()
	for i := 0; i < nValidators; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		pv := privVals[i]
		// duplicate code from:
		// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)

		blockDB := dbm.NewMemDB()
		blockStore := store.NewBlockStore(blockDB)

		// one for mempool, one for consensus
		mtx := new(sync.Mutex)
		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

		// Make Mempool
		mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		// mock the evidence pool:
		// everyone includes evidence of another validator double signing
		vIdx := (i + 1) % nValidators
		pubKey, err := privVals[vIdx].GetPubKey()
		require.NoError(t, err)
		evpool := newMockEvidencePool(pubKey.Address())

		// Make State
		blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool, nil)
		cs := NewState(thisConfig.Consensus, state, 0, blockExec, blockStore, mempool, evpool)
		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
		cs.SetPrivValidator(pv)

		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
		eventBus.Start()
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())
		cs.SetLogger(logger.With("validator", i, "module", "consensus"))

		css[i] = cs
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block, which carries no evidence
	timeoutWaitGroup(t, nValidators, func(j int) {
		msg := <-blocksSubs[j].Out()
		block := msg.Data().(types.EventDataNewBlock).Block
		assert.True(t, len(block.Evidence.Evidence) == 0)
	}, css)

	// the second block should have evidence
	timeoutWaitGroup(t, nValidators, func(j int) {
		msg := <-blocksSubs[j].Out()
		block := msg.Data().(types.EventDataNewBlock).Block
		assert.True(t, len(block.Evidence.Evidence) > 0)
	}, css)
}

// mockEvidencePool returns no evidence for block 1,
// and returns one piece for all higher blocks. The one piece
// is for a given validator at block 1.
type mockEvidencePool struct {
	height int
	ev     []types.Evidence
}

func (m *mockEvidencePool) RollbackEvidence(height int64, latestHeight int64) {
	panic("implement me")
}

func newMockEvidencePool(val []byte) *mockEvidencePool {
	return &mockEvidencePool{
		ev: []types.Evidence{types.NewMockEvidence(1, time.Now().UTC(), 1, val)},
	}
}

// NOTE: maxBytes is ignored
func (m *mockEvidencePool) PendingEvidence(maxBytes int64) []types.Evidence {
	if m.height > 0 {
		return m.ev
	}
	return nil
}
func (m *mockEvidencePool) AddEvidence(types.Evidence) error { return nil }
func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
	if m.height > 0 {
		if len(block.Evidence.Evidence) == 0 {
			panic("block has no evidence")
		}
	}
	m.height++
}
func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false }
func (m *mockEvidencePool) IsPending(types.Evidence) bool   { return false }
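// Sanity check: mockEvidencePool is handed to sm.NewBlockExecutor as the
// evidence pool in TestReactorWithEvidence above, so it must satisfy
// sm.EvidencePool; this compile-time assertion just makes that contract
// explicit.
var _ sm.EvidencePool = (*mockEvidencePool)(nil)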
//------------------------------------

// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		})
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// send a tx
	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
		t.Error(err)
	}

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
		msg     = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
	)

	reactor.InitPeer(peer)

	// simulate the switch calling Receive before AddPeer
	assert.NotPanics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
		reactor.AddPeer(peer)
	})
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
		msg     = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
	)

	// InitPeer is deliberately NOT called here

	// simulate the switch calling Receive before InitPeer
	assert.Panics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
	})
}
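// Together, the two tests above pin down the reactor's peer lifecycle
// contract with the p2p switch: InitPeer must run before Receive (Receive
// panics if the peer's state was never initialized), while AddPeer may
// legitimately arrive after the first Receive, so Receive has to tolerate
// that ordering.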
// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	// Get peer
	peer := reactors[1].Switch.Peers().List()[0]
	// Get peer state
	ps := peer.Get(types.PeerStateKey).(*PeerState)

	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
	assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestReactorVotingPowerChange(t *testing.T) {
	nVals := 4
	logger := log.TestingLogger()
	css, cleanup := randConsensusNet(
		nVals,
		"consensus_voting_power_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStore)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		addr := pubKey.Address()
		activeVals[string(addr)] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nVals, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey, err := css[0].privValidator.GetPubKey()
	require.NoError(t, err)
	val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey)
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}
}
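// Note on the wait choreography in the test above and in
// TestReactorValidatorSetChanges below: a validator-set change tx included in
// block H is returned by the app's EndBlock at H and only takes effect for
// the validator set at height H+2. Each scenario therefore waits for two
// further blocks after the tx is committed before asserting on
// LastValidators or on commit membership.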
func TestReactorValidatorSetChanges(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, _, _, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"consensus_val_set_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)

	defer cleanup()
	logger := log.TestingLogger()

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		activeVals[string(pubKey.Address())] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nPeers, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding one validator")

	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
	// send newValTx to change vals in block 3
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 3.
	// it includes the commit for block 2, which is by the original validator set
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 4.
	// it includes the commit for block 3, which is by the original validator set
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

	// the commits for block 4 should be with the updated validator set
	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

	// wait till everyone makes block 5
	// it includes the commit for block 4, which should have the updated validator set
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Errorf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[nVals].GetRoundState().LastValidators.TotalVotingPower())
	}

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding two validators at once")

	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	delete(activeVals, string(newValidatorPubKey2.Address()))
	delete(activeVals, string(newValidatorPubKey3.Address()))
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
	defer cleanup()
	// override the default SkipTimeoutCommit == true for tests
	for i := 0; i < N; i++ {
		css[i].config.SkipTimeoutCommit = false
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N-1, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func waitForAndValidateBlock(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		css[j].Logger.Debug("waitForAndValidateBlock")
		msg := <-blocksSubs[j].Out()
		newBlock := msg.Data().(types.EventDataNewBlock).Block
		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
		err := validateBlock(newBlock, activeVals)
		assert.Nil(t, err)
		for _, tx := range txs {
			err := assertMempool(css[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
			assert.Nil(t, err)
		}
	}, css)
}

func waitForAndValidateBlockWithTx(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		ntxs := 0
	BLOCK_TX_LOOP:
		for {
			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
			msg := <-blocksSubs[j].Out()
			newBlock := msg.Data().(types.EventDataNewBlock).Block
			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
			err := validateBlock(newBlock, activeVals)
			assert.Nil(t, err)

			// check that txs match the txs we're waiting for.
			// note they could be spread over multiple blocks,
			// but they should be in order.
			for _, tx := range newBlock.Data.Txs {
				assert.EqualValues(t, txs[ntxs], tx)
				ntxs++
			}

			if ntxs == len(txs) {
				break BLOCK_TX_LOOP
			}
		}
	}, css)
}

func waitForBlockWithUpdatedValsAndValidateIt(
	t *testing.T,
	n int,
	updatedVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
) {
	timeoutWaitGroup(t, n, func(j int) {

		var newBlock *types.Block
	LOOP:
		for {
			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
			msg := <-blocksSubs[j].Out()
			newBlock = msg.Data().(types.EventDataNewBlock).Block
			if newBlock.LastCommit.Size() == len(updatedVals) {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
				break LOOP
			} else {
				css[j].Logger.Debug(
					"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
					"height",
					newBlock.Height)
			}
		}

		err := validateBlock(newBlock, updatedVals)
		assert.Nil(t, err)
	}, css)
}

// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
	if block.LastCommit.Size() != len(activeVals) {
		return fmt.Errorf(
			"commit size doesn't match number of active validators. Got %d, expected %d",
			block.LastCommit.Size(),
			len(activeVals))
	}

	for _, vote := range block.LastCommit.Precommits {
		if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok {
			return fmt.Errorf("found vote for inactive validator %X", vote.ValidatorAddress)
		}
	}
	return nil
}
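// "Expects high synchrony" above means validateBlock assumes every active
// validator's precommit made it into the block's LastCommit. If a node lags
// and a commit is produced without one of the expected signatures, the size
// check fails even though consensus itself is healthy; the helper is only
// reliable in the fast, fully connected in-process setting of these tests.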
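// timeoutWaitGroup usage sketch, mirroring the calls made throughout this
// file: run one closure per node, and fail loudly (round states, goroutine
// dump, stack capture) if any node never delivers.
//
//	timeoutWaitGroup(t, N, func(j int) {
//		<-blocksSubs[j].Out() // block until node j reports a new block
//	}, css)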
//-------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
	testCases := []struct { // nolint: maligned
		expectErr              bool
		messageRound           int
		messageLastCommitRound int
		messageHeight          int64
		testName               string
		messageStep            cstypes.RoundStepType
	}{
		{false, 0, 0, 0, "Valid Message", 0x01},
		{true, -1, 0, 0, "Invalid Message", 0x01},
		{true, 0, 0, -1, "Invalid Message", 0x01},
		{true, 0, 0, 1, "Invalid Message", 0x00},
		{true, 0, 0, 1, "Invalid Message", 0x00},
		{true, 0, -2, 2, "Invalid Message", 0x01},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           tc.messageRound,
				Step:            tc.messageStep,
				LastCommitRound: tc.messageLastCommitRound,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*NewValidBlockMessage)
		expErr     string
	}{
		{func(msg *NewValidBlockMessage) {}, ""},
		{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
		{
			func(msg *NewValidBlockMessage) { msg.BlockPartsHeader.Total = 2 },
			"blockParts bit array size 1 not equal to BlockPartsHeader.Total 2",
		},
		{
			func(msg *NewValidBlockMessage) { msg.BlockPartsHeader.Total = 0; msg.BlockParts = bits.NewBitArray(0) },
			"empty blockParts",
		},
		{
			func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(types.MaxBlockPartsCount + 1) },
			"blockParts bit array size 1602 not equal to BlockPartsHeader.Total 1",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &NewValidBlockMessage{
				Height: 1,
				Round:  0,
				BlockPartsHeader: types.PartSetHeader{
					Total: 1,
				},
				BlockParts: bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*ProposalPOLMessage)
		expErr     string
	}{
		{func(msg *ProposalPOLMessage) {}, ""},
		{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
			"ProposalPOL bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &ProposalPOLMessage{
				Height:           1,
				ProposalPOLRound: 1,
				ProposalPOL:      bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
	testPart := new(types.Part)
	testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int
		messagePart   *types.Part
		expectErr     bool
	}{
		{"Valid Message", 0, 0, testPart, false},
		{"Invalid Message", -1, 0, testPart, true},
		{"Invalid Message", 0, -1, testPart, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := BlockPartMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Part:   tc.messagePart,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}

	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
	message.Part.Index = -1

	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   types.SignedMsgType = 0x01
		invalidSignedMsgType types.SignedMsgType = 0x03
	)

	testCases := []struct { // nolint: maligned
		expectErr     bool
		messageRound  int
		messageIndex  int
		messageHeight int64
		testName      string
		messageType   types.SignedMsgType
	}{
		{false, 0, 0, 0, "Valid Message", validSignedMsgType},
		{true, -1, 0, 0, "Invalid Message", validSignedMsgType},
		{true, 0, -1, 0, "Invalid Message", validSignedMsgType},
		{true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
		{true, 0, 0, -1, "Invalid Message", validSignedMsgType},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := HasVoteMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Type:   tc.messageType,
				Index:  tc.messageIndex,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}
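// In the vote-type tables above and below, validSignedMsgType (0x01) lines up
// with types.PrevoteType in this codebase, while 0x03 is not a defined
// SignedMsgType value, so messages carrying it must fail ValidateBasic.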
func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   types.SignedMsgType = 0x01
		invalidSignedMsgType types.SignedMsgType = 0x03
	)

	validBlockID := types.BlockID{}
	invalidBlockID := types.BlockID{
		Hash: bytes.HexBytes{},
		PartsHeader: types.PartSetHeader{
			Total: -1,
			Hash:  bytes.HexBytes{},
		},
	}

	testCases := []struct { // nolint: maligned
		expectErr      bool
		messageRound   int
		messageHeight  int64
		testName       string
		messageType    types.SignedMsgType
		messageBlockID types.BlockID
	}{
		{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
		{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := VoteSetMaj23Message{
				Height:  tc.messageHeight,
				Round:   tc.messageRound,
				Type:    tc.messageType,
				BlockID: tc.messageBlockID,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
	testCases := []struct { // nolint: maligned
		malleateFn func(*VoteSetBitsMessage)
		expErr     string
	}{
		{func(msg *VoteSetBitsMessage) {}, ""},
		{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *VoteSetBitsMessage) { msg.Round = -1 }, "negative Round"},
		{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
		{func(msg *VoteSetBitsMessage) {
			msg.BlockID = types.BlockID{
				Hash: bytes.HexBytes{},
				PartsHeader: types.PartSetHeader{
					Total: -1,
					Hash:  bytes.HexBytes{},
				},
			}
		}, "wrong BlockID: wrong PartsHeader: negative Total"},
		{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
			"votes bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &VoteSetBitsMessage{
				Height:  1,
				Round:   0,
				Type:    0x01,
				Votes:   bits.NewBitArray(1),
				BlockID: types.BlockID{},
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}