// github.com/Finschia/ostracon@v1.1.5/consensus/reactor_test.go

package consensus

import (
	"context"
	"fmt"
	"os"
	"path"
	"runtime"
	"runtime/pprof"
	"sync"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	dbm "github.com/tendermint/tm-db"

	abcicli "github.com/Finschia/ostracon/abci/client"
	"github.com/Finschia/ostracon/abci/example/kvstore"
	cfg "github.com/Finschia/ostracon/config"
	cstypes "github.com/Finschia/ostracon/consensus/types"
	cryptoenc "github.com/Finschia/ostracon/crypto/encoding"
	"github.com/Finschia/ostracon/crypto/tmhash"
	"github.com/Finschia/ostracon/libs/bits"
	"github.com/Finschia/ostracon/libs/bytes"
	"github.com/Finschia/ostracon/libs/log"
	tmsync "github.com/Finschia/ostracon/libs/sync"
	mempl "github.com/Finschia/ostracon/mempool"
	mempoolv0 "github.com/Finschia/ostracon/mempool/v0"

	//mempoolv1 "github.com/Finschia/ostracon/mempool/v1"
	"github.com/Finschia/ostracon/p2p"
	p2pmock "github.com/Finschia/ostracon/p2p/mock"
	sm "github.com/Finschia/ostracon/state"
	statemocks "github.com/Finschia/ostracon/state/mocks"
	"github.com/Finschia/ostracon/store"
	"github.com/Finschia/ostracon/types"
)

//----------------------------------------------
// in-process testnets

var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
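
// startConsensusNet wires up n consensus reactors over in-process switches,
// subscribes each node's event bus to NewBlock events, and only then starts
// the state machines. A typical caller (sketch, mirroring the tests below)
// consumes one block per node from the returned subscriptions:
//
//	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, n)
//	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
//	timeoutWaitGroup(t, n, func(j int) {
//		msg := <-blocksSubs[j].Out()
//		block := msg.Data().(types.EventDataNewBlock).Block
//		_ = block
//	}, css)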
func startConsensusNet(t *testing.T, css []*State, n int) (
	[]*Reactor,
	[]types.Subscription,
	[]*types.EventBus,
) {
	reactors := make([]*Reactor, n)
	blocksSubs := make([]types.Subscription, 0)
	eventBuses := make([]*types.EventBus, n)
	for i := 0; i < n; i++ {
		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
		if err != nil { t.Fatal(err)}*/
		reactors[i] = NewReactor(css[i], true, true, 1000) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// eventBus is already started with the cs
		eventBuses[i] = css[i].eventBus
		reactors[i].SetEventBus(eventBuses[i])

		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
		require.NoError(t, err)
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
			if err := css[i].blockExec.Store().Save(css[i].state); err != nil {
				t.Error(err)
			}
		}
	}
	// make connected switches and start all reactors
	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch, config *cfg.P2PConfig) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
	}, p2p.Connect2Switches)

	// Now that everyone is connected, start the state machines.
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors.
	// TODO: is this still true with new pubsub?
	for i := 0; i < n; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, false)
	}
	return reactors, blocksSubs, eventBuses
}

func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
	logger.Info("stopConsensusNet", "n", len(reactors))
	for i, r := range reactors {
		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
		if err := r.Switch.Stop(); err != nil {
			logger.Error("error trying to stop switch", "error", err)
		}
	}
	for i, b := range eventBuses {
		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
		if err := b.Stop(); err != nil {
			logger.Error("error trying to stop eventbus", "error", err)
		}
	}
	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}
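
// TestReactorWithEvidence builds each node by hand (rather than via
// randConsensusNet) so it can install a mocked evidence pool whose
// PendingEvidence call always reports one piece of duplicate-vote evidence;
// every proposer is then expected to include exactly one piece of evidence
// in its proposed block.
//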
// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
	nValidators := 4
	testName := "consensus_reactor_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter

	// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
	// to unroll unwieldy abstractions. Here we duplicate the code from:
	// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)

	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*State, nValidators)
	logger := consensusLogger()
	for i := 0; i < nValidators; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
			DiscardABCIResponses: false,
		})
		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.OC2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		pv := privVals[i]
		// duplicate code from:
		// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)

		blockDB := dbm.NewMemDB()
		blockStore := store.NewBlockStore(blockDB)

		mtx := new(tmsync.Mutex)
		memplMetrics := mempl.NopMetrics()
		// one for mempool, one for consensus
		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
		proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)

		// Make Mempool
		var mempool mempl.Mempool

		switch config.Mempool.Version {
		case cfg.MempoolV0:
			mempool = mempoolv0.NewCListMempool(config.Mempool,
				proxyAppConnConMem,
				state.LastBlockHeight,
				mempoolv0.WithMetrics(memplMetrics),
				mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
				mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
		case cfg.MempoolV1: // XXX Deprecated MempoolV1
			panic("Deprecated MempoolV1")
			/*
				mempool = mempoolv1.NewTxMempool(logger,
					config.Mempool,
					proxyAppConnConMem,
					state.LastBlockHeight,
					mempoolv1.WithMetrics(memplMetrics),
					mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
					mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
				)
			*/
		}
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		// mock the evidence pool
		// everyone includes evidence of another double signing
		vIdx := (i + 1) % nValidators
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
		evpool := &statemocks.EvidencePool{}
		evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
		evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ev}, int64(len(ev.Bytes())))
		evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()

		evpool2 := sm.EmptyEvidencePool{}

		// Make State
		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
		cs.SetPrivValidator(pv)

		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
		err := eventBus.Start()
		require.NoError(t, err)
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())
		cs.SetLogger(logger.With("validator", i, "module", "consensus"))

		css[i] = cs
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// we expect each proposer to include one piece of evidence in its block.
	for i := 0; i < nValidators; i++ {
		timeoutWaitGroup(t, nValidators, func(j int) {
			msg := <-blocksSubs[j].Out()
			block := msg.Data().(types.EventDataNewBlock).Block
			assert.Len(t, block.Evidence.Evidence, 1)
		}, css)
	}
}

//------------------------------------

// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		})
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// send a tx
	if err := assertMempool(css[3].txNotifier).CheckTxSync([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
		t.Error(err)
	}

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}
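
// The next three tests pin down the reactor's peer lifecycle contract:
// InitPeer must be called before Receive/ReceiveEnvelope may be used, while
// AddPeer may legitimately arrive after the first message (the switch can
// deliver a message between InitPeer and AddPeer).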
func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
	)

	reactor.InitPeer(peer)

	// simulate switch calling Receive before AddPeer
	assert.NotPanics(t, func() {
		reactor.ReceiveEnvelope(p2p.Envelope{
			ChannelID: StateChannel,
			Src:       peer,
			Message: &tmcons.HasVote{Height: 1,
				Round: 1, Index: 1, Type: tmproto.PrevoteType},
		})
		reactor.AddPeer(peer)
	})
}

func TestLegacyReactorReceiveBasic(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
	)

	reactor.InitPeer(peer)
	v := &tmcons.HasVote{
		Height: 1,
		Round:  1,
		Index:  1,
		Type:   tmproto.PrevoteType,
	}
	w := v.Wrap()
	msg, err := proto.Marshal(w)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
		reactor.AddPeer(peer)
	})
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
	)

	// we deliberately do NOT call InitPeer here

	// simulate switch calling Receive before AddPeer
	assert.Panics(t, func() {
		reactor.ReceiveEnvelope(p2p.Envelope{
			ChannelID: StateChannel,
			Src:       peer,
			Message: &tmcons.HasVote{Height: 1,
				Round: 1, Index: 1, Type: tmproto.PrevoteType},
		})
	})
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// the proposer idx is always 0, because the LastProofHash is []byte{2}
	proposerIdx := int32(0)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	// look up the proposer's index as seen by a peer that is not the proposer.
	// each reactor's peer list omits itself: 0:[1,2,3], 1:[0,2,3], 2:[0,1,3], 3:[0,1,2]
	var otherIdx int
	var proposerIdxInOtherPeer int32
	if proposerIdx == 0 {
		otherIdx = 1
		proposerIdxInOtherPeer = 0
	} else {
		otherIdx = 0
		proposerIdxInOtherPeer = proposerIdx - 1
	}

	// Get peer
	peer := reactors[otherIdx].Switch.Peers().List()[proposerIdxInOtherPeer]

	// Get peer state
	ps := peer.Get(types.PeerStateKey).(*PeerState)

	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
	assert.Equal(t, true, ps.BlockPartsSent() > 0,
		fmt.Sprintf("number of block parts sent should have increased: %d", ps.BlockPartsSent()))
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set
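
// kvstore.MakeValSetChangeTx builds a kvstore-app transaction that sets a
// validator's voting power (the persistent kvstore app applies it via its
// EndBlock validator updates). The tests below use it both to re-weight
// validators and, with power 0, to remove them, e.g.:
//
//	pk, _ := cryptoenc.PubKeyToProto(pubKey)
//	tx := kvstore.MakeValSetChangeTx(pk, 25) // set power to 25
//	rm := kvstore.MakeValSetChangeTx(pk, 0)  // power 0 removes the validator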
func TestReactorVotingPowerChange(t *testing.T) {
	nVals := 4
	logger := log.TestingLogger()
	css, cleanup := randConsensusNet(
		nVals,
		"consensus_voting_power_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStore)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		addr := pubKey.Address()
		activeVals[string(addr)] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nVals, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey, err := css[0].privValidator.GetPubKey()
	require.NoError(t, err)

	val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
	require.NoError(t, err)
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}
}
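
// TestReactorValidatorSetChanges runs nPeers = 7 nodes of which only the
// first nVals = 4 start out as validators; the remaining peers' private
// validators (css[nVals], css[nVals+1], css[nVals+2]) supply the pubkeys
// that are added to, and later removed from, the validator set. A
// valset-change tx committed at some height only takes effect a couple of
// heights later (see the inline comments below), which is why each change is
// followed by several wait calls before
// waitForBlockWithUpdatedValsAndValidateIt checks the new commit size.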
func TestReactorValidatorSetChanges(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, _, _, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"consensus_val_set_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)

	defer cleanup()
	logger := log.TestingLogger()

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		activeVals[string(pubKey.Address())] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nPeers, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding one validator")

	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	assert.NoError(t, err)
	valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
	assert.NoError(t, err)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
	// send newValTx to change vals in block 3
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 3.
	// it includes the commit for block 2, which is by the original validator set
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 4.
	// it includes the commit for block 3, which is by the original validator set
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

	// the commits for block 4 should be with the updated validator set
	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

	// wait till everyone makes block 5
	// it includes the commit for block 4, which should have the updated validator set
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
	require.NoError(t, err)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Errorf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[nVals].GetRoundState().LastValidators.TotalVotingPower())
	}

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding two validators at once")

	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
	require.NoError(t, err)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
	require.NoError(t, err)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	delete(activeVals, string(newValidatorPubKey2.Address()))
	delete(activeVals, string(newValidatorPubKey3.Address()))
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}
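
// With skip_timeout_commit=false every node waits the full timeout_commit
// before starting the next height, so block production is paced by that
// timeout rather than by instant commits. Note that only N-1 of the N
// validators are started below; 3 of 4 equal-power validators is still more
// than 2/3 of the voting power, so the network keeps committing blocks.
//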
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
	defer cleanup()
	// override default SkipTimeoutCommit == true for tests
	for i := 0; i < N; i++ {
		css[i].config.SkipTimeoutCommit = false
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N-1, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func waitForAndValidateBlock(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		css[j].Logger.Debug("waitForAndValidateBlock")
		msg := <-blocksSubs[j].Out()
		newBlock := msg.Data().(types.EventDataNewBlock).Block
		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
		err := validateBlock(newBlock, activeVals)
		assert.Nil(t, err)
		for _, tx := range txs {
			err := assertMempool(css[j].txNotifier).CheckTxSync(tx, nil, mempl.TxInfo{})
			assert.Nil(t, err)
		}
	}, css)
}
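
// waitForAndValidateBlockWithTx keeps consuming blocks on every node until
// all of txs have been committed; the txs may be spread over several blocks
// but must appear in the order given. Contrast with waitForAndValidateBlock
// above, which validates exactly one block and submits txs afterwards.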
func waitForAndValidateBlockWithTx(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		ntxs := 0
	BLOCK_TX_LOOP:
		for {
			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
			msg := <-blocksSubs[j].Out()
			newBlock := msg.Data().(types.EventDataNewBlock).Block
			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
			err := validateBlock(newBlock, activeVals)
			assert.Nil(t, err)

			// check that txs match the txs we're waiting for.
			// note they could be spread over multiple blocks,
			// but they should be in order.
			for _, tx := range newBlock.Data.Txs {
				assert.EqualValues(t, txs[ntxs], tx)
				ntxs++
			}

			if ntxs == len(txs) {
				break BLOCK_TX_LOOP
			}
		}
	}, css)
}

func waitForBlockWithUpdatedValsAndValidateIt(
	t *testing.T,
	n int,
	updatedVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
) {
	timeoutWaitGroup(t, n, func(j int) {
		var newBlock *types.Block
	LOOP:
		for {
			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
			msg := <-blocksSubs[j].Out()
			newBlock = msg.Data().(types.EventDataNewBlock).Block
			if newBlock.LastCommit.Size() == len(updatedVals) {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
				break LOOP
			} else {
				css[j].Logger.Debug(
					"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
					"height",
					newBlock.Height)
			}
		}

		err := validateBlock(newBlock, updatedVals)
		assert.Nil(t, err)
	}, css)
}

// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
	if block.LastCommit.Size() != len(activeVals) {
		return fmt.Errorf(
			"commit size doesn't match number of active validators. Got %d, expected %d",
			block.LastCommit.Size(),
			len(activeVals))
	}

	for _, commitSig := range block.LastCommit.Signatures {
		if _, ok := activeVals[string(commitSig.ValidatorAddress)]; !ok {
			return fmt.Errorf("found vote for inactive validator %X", commitSig.ValidatorAddress)
		}
	}
	return nil
}
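
// timeoutWaitGroup runs f(j) concurrently for each j in [0, n) and waits for
// all of them to return, dumping every node's round state plus goroutine and
// stack traces before panicking if they don't finish in time. Typical use
// (sketch, as in the tests above):
//
//	timeoutWaitGroup(t, n, func(j int) {
//		<-blocksSubs[j].Out() // block until node j commits a block
//	}, css)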
func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
	wg := new(sync.WaitGroup)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(j int) {
			f(j)
			wg.Done()
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// we're running many nodes in-process, possibly in a virtual machine,
	// and spewing debug messages - making a block could take a while.
	timeout := time.Second * 120

	select {
	case <-done:
	case <-time.After(timeout):
		for i, cs := range css {
			t.Log("#################")
			t.Log("Validator", i)
			t.Log(cs.GetRoundState())
			t.Log("")
		}
		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		require.NoError(t, err)
		capture()
		panic("Timed out waiting for all validators to commit a block")
	}
}

func capture() {
	trace := make([]byte, 10240000)
	count := runtime.Stack(trace, true)
	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
}

//-------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
	testCases := []struct { // nolint: maligned
		expectErr              bool
		messageRound           int32
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
		messageStep            cstypes.RoundStepType
	}{
		{false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight},
		{true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight},
		{true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight},
		{true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1},
		// The following cases will be handled by ValidateHeight
		{false, 0, 0, 1, "H == 1 but LCR != -1 ", cstypes.RoundStepNewHeight},
		{false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           tc.messageRound,
				Step:            tc.messageStep,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateBasic()
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
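
// ValidateBasic is stateless and deliberately leaves Height/LastCommitRound
// consistency to ValidateHeight, which knows the chain's initial height. A
// message can therefore pass ValidateBasic and still fail ValidateHeight
// (sketch, using the cases exercised in these two tests):
//
//	msg := NewRoundStepMessage{Height: 10, Round: 0, Step: cstypes.RoundStepNewHeight, LastCommitRound: 0}
//	_ = msg.ValidateBasic()    // nil: LCR consistency is out of scope here
//	_ = msg.ValidateHeight(10) // error: initial height but LCR != -1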
func TestNewRoundStepMessageValidateHeight(t *testing.T) {
	initialHeight := int64(10)
	testCases := []struct { // nolint: maligned
		expectErr              bool
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
	}{
		{false, 0, 11, "Valid Message"},
		{true, 0, -1, "Negative height"},
		{true, 0, 0, "Zero height"},
		{true, 0, 10, "Initial height but LCR != -1 "},
		{true, -1, 11, "Normal height but LCR < 0"},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           0,
				Step:            cstypes.RoundStepNewHeight,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateHeight(initialHeight)
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*NewValidBlockMessage)
		expErr     string
	}{
		{func(msg *NewValidBlockMessage) {}, ""},
		{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
		{
			func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 },
			"blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2",
		},
		{
			func(msg *NewValidBlockMessage) {
				msg.BlockPartSetHeader.Total = 0
				msg.BlockParts = bits.NewBitArray(0)
			},
			"empty blockParts",
		},
		{
			func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
			"blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &NewValidBlockMessage{
				Height: 1,
				Round:  0,
				BlockPartSetHeader: types.PartSetHeader{
					Total: 1,
				},
				BlockParts: bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*ProposalPOLMessage)
		expErr     string
	}{
		{func(msg *ProposalPOLMessage) {}, ""},
		{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
			"proposalPOL bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &ProposalPOLMessage{
				Height:           1,
				ProposalPOLRound: 1,
				ProposalPOL:      bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
	testPart := new(types.Part)
	testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int32
		messagePart   *types.Part
		expectErr     bool
	}{
		{"Valid Message", 0, 0, testPart, false},
		{"Invalid Message", -1, 0, testPart, true},
		{"Invalid Message", 0, -1, testPart, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := BlockPartMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Part:   tc.messagePart,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}

	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
	message.Part.Index = 1

	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	testCases := []struct { // nolint: maligned
		expectErr     bool
		messageRound  int32
		messageIndex  int32
		messageHeight int64
		testName      string
		messageType   tmproto.SignedMsgType
	}{
		{false, 0, 0, 0, "Valid Message", validSignedMsgType},
		{true, -1, 0, 0, "Invalid Message", validSignedMsgType},
		{true, 0, -1, 0, "Invalid Message", validSignedMsgType},
		{true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
		{true, 0, 0, -1, "Invalid Message", validSignedMsgType},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := HasVoteMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Type:   tc.messageType,
				Index:  tc.messageIndex,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	validBlockID := types.BlockID{}
	invalidBlockID := types.BlockID{
		Hash: bytes.HexBytes{},
		PartSetHeader: types.PartSetHeader{
			Total: 1,
			Hash:  []byte{0},
		},
	}

	testCases := []struct { // nolint: maligned
		expectErr      bool
		messageRound   int32
		messageHeight  int64
		testName       string
		messageType    tmproto.SignedMsgType
		messageBlockID types.BlockID
	}{
		{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
		{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := VoteSetMaj23Message{
				Height:  tc.messageHeight,
				Round:   tc.messageRound,
				Type:    tc.messageType,
				BlockID: tc.messageBlockID,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*VoteSetBitsMessage)
		expErr     string
	}{
		{func(msg *VoteSetBitsMessage) {}, ""},
		{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
		{func(msg *VoteSetBitsMessage) {
			msg.BlockID = types.BlockID{
				Hash: bytes.HexBytes{},
				PartSetHeader: types.PartSetHeader{
					Total: 1,
					Hash:  []byte{0},
				},
			}
		}, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
		{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
			"votes bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &VoteSetBitsMessage{
				Height:  1,
				Round:   0,
				Type:    0x01,
				Votes:   bits.NewBitArray(1),
				BlockID: types.BlockID{},
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}