github.com/516108736/tendermint@v0.36.0/consensus/byzantine_test.go (about) 1 package consensus 2 3 import ( 4 "context" 5 "fmt" 6 "os" 7 "path" 8 "sync" 9 "testing" 10 "time" 11 12 "github.com/stretchr/testify/assert" 13 "github.com/stretchr/testify/require" 14 15 dbm "github.com/tendermint/tm-db" 16 17 abcicli "github.com/tendermint/tendermint/abci/client" 18 abci "github.com/tendermint/tendermint/abci/types" 19 "github.com/tendermint/tendermint/evidence" 20 "github.com/tendermint/tendermint/libs/log" 21 "github.com/tendermint/tendermint/libs/service" 22 tmsync "github.com/tendermint/tendermint/libs/sync" 23 mempl "github.com/tendermint/tendermint/mempool" 24 "github.com/tendermint/tendermint/p2p" 25 tmproto "github.com/tendermint/tendermint/proto/tendermint/types" 26 sm "github.com/tendermint/tendermint/state" 27 "github.com/tendermint/tendermint/store" 28 "github.com/tendermint/tendermint/types" 29 ) 30 31 //---------------------------------------------- 32 // byzantine failures 33 34 // Byzantine node sends two different prevotes (nil and blockID) to the same validator 35 func TestByzantinePrevoteEquivocation(t *testing.T) { 36 const nValidators = 4 37 const byzantineNode = 0 38 const prevoteHeight = int64(2) 39 testName := "consensus_byzantine_test" 40 tickerFunc := newMockTickerFunc(true) 41 appFunc := newCounter 42 43 genDoc, privVals := randGenesisDoc(nValidators, false, 30) 44 css := make([]*State, nValidators) 45 46 for i := 0; i < nValidators; i++ { 47 logger := consensusLogger().With("test", "byzantine", "validator", i) 48 stateDB := dbm.NewMemDB() // each state needs its own db 49 stateStore := sm.NewStore(stateDB) 50 state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) 51 thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) 52 defer os.RemoveAll(thisConfig.RootDir) 53 ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal 54 app := appFunc() 55 vals := types.TM2PB.ValidatorUpdates(state.Validators) 56 app.InitChain(abci.RequestInitChain{Validators: vals}) 57 58 blockDB := dbm.NewMemDB() 59 blockStore := store.NewBlockStore(blockDB) 60 61 // one for mempool, one for consensus 62 mtx := new(tmsync.Mutex) 63 proxyAppConnMem := abcicli.NewLocalClient(mtx, app) 64 proxyAppConnCon := abcicli.NewLocalClient(mtx, app) 65 66 // Make Mempool 67 mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) 68 mempool.SetLogger(log.TestingLogger().With("module", "mempool")) 69 if thisConfig.Consensus.WaitForTxs() { 70 mempool.EnableTxsAvailable() 71 } 72 73 // Make a full instance of the evidence pool 74 evidenceDB := dbm.NewMemDB() 75 evpool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) 76 require.NoError(t, err) 77 evpool.SetLogger(logger.With("module", "evidence")) 78 79 // Make State 80 blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) 81 cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) 82 cs.SetLogger(cs.Logger) 83 // set private validator 84 pv := privVals[i] 85 cs.SetPrivValidator(pv) 86 87 eventBus := types.NewEventBus() 88 eventBus.SetLogger(log.TestingLogger().With("module", "events")) 89 err = eventBus.Start() 90 require.NoError(t, err) 91 cs.SetEventBus(eventBus) 92 93 cs.SetTimeoutTicker(tickerFunc()) 94 cs.SetLogger(logger) 95 96 css[i] = cs 97 } 98 99 // initialize the reactors for each of the validators 100 reactors := make([]*Reactor, nValidators) 101 blocksSubs := make([]types.Subscription, 0) 102 eventBuses := make([]*types.EventBus, nValidators) 103 for i := 0; i < nValidators; i++ { 104 reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states 105 reactors[i].SetLogger(css[i].Logger) 106 107 // eventBus is already started with the cs 108 eventBuses[i] = css[i].eventBus 109 reactors[i].SetEventBus(eventBuses[i]) 110 111 blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, 100) 112 require.NoError(t, err) 113 blocksSubs = append(blocksSubs, blocksSub) 114 115 if css[i].state.LastBlockHeight == 0 { // simulate handle initChain in handshake 116 err = css[i].blockExec.Store().Save(css[i].state) 117 require.NoError(t, err) 118 } 119 } 120 // make connected switches and start all reactors 121 p2p.MakeConnectedSwitches(config.P2P, nValidators, func(i int, s *p2p.Switch) *p2p.Switch { 122 s.AddReactor("CONSENSUS", reactors[i]) 123 s.SetLogger(reactors[i].conS.Logger.With("module", "p2p")) 124 return s 125 }, p2p.Connect2Switches) 126 127 // create byzantine validator 128 bcs := css[byzantineNode] 129 130 // alter prevote so that the byzantine node double votes when height is 2 131 bcs.doPrevote = func(height int64, round int32) { 132 // allow first height to happen normally so that byzantine validator is no longer proposer 133 if height == prevoteHeight { 134 bcs.Logger.Info("Sending two votes") 135 prevote1, err := bcs.signVote(tmproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header()) 136 require.NoError(t, err) 137 prevote2, err := bcs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) 138 require.NoError(t, err) 139 peerList := reactors[byzantineNode].Switch.Peers().List() 140 bcs.Logger.Info("Getting peer list", "peers", peerList) 141 // send two votes to all peers (1st to one half, 2nd to another half) 142 for i, peer := range peerList { 143 if i < len(peerList)/2 { 144 bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer) 145 peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1})) 146 } else { 147 bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer) 148 peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2})) 149 } 150 } 151 } else { 152 bcs.Logger.Info("Behaving normally") 153 bcs.defaultDoPrevote(height, round) 154 } 155 } 156 157 // introducing a lazy proposer means that the time of the block committed is different to the 158 // timestamp that the other nodes have. This tests to ensure that the evidence that finally gets 159 // proposed will have a valid timestamp 160 lazyProposer := css[1] 161 162 lazyProposer.decideProposal = func(height int64, round int32) { 163 lazyProposer.Logger.Info("Lazy Proposer proposing condensed commit") 164 if lazyProposer.privValidator == nil { 165 panic("entered createProposalBlock with privValidator being nil") 166 } 167 168 var commit *types.Commit 169 switch { 170 case lazyProposer.Height == lazyProposer.state.InitialHeight: 171 // We're creating a proposal for the first block. 172 // The commit is empty, but not nil. 173 commit = types.NewCommit(0, 0, types.BlockID{}, nil) 174 case lazyProposer.LastCommit.HasTwoThirdsMajority(): 175 // Make the commit from LastCommit 176 commit = lazyProposer.LastCommit.MakeCommit() 177 default: // This shouldn't happen. 178 lazyProposer.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") 179 return 180 } 181 182 // omit the last signature in the commit 183 commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent() 184 185 if lazyProposer.privValidatorPubKey == nil { 186 // If this node is a validator & proposer in the current round, it will 187 // miss the opportunity to create a block. 188 lazyProposer.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet)) 189 return 190 } 191 proposerAddr := lazyProposer.privValidatorPubKey.Address() 192 193 block, blockParts := lazyProposer.blockExec.CreateProposalBlock( 194 lazyProposer.Height, lazyProposer.state, commit, proposerAddr, 195 ) 196 197 // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, 198 // and the privValidator will refuse to sign anything. 199 if err := lazyProposer.wal.FlushAndSync(); err != nil { 200 lazyProposer.Logger.Error("Error flushing to disk") 201 } 202 203 // Make proposal 204 propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} 205 proposal := types.NewProposal(height, round, lazyProposer.ValidRound, propBlockID) 206 p := proposal.ToProto() 207 if err := lazyProposer.privValidator.SignProposal(lazyProposer.state.ChainID, p); err == nil { 208 proposal.Signature = p.Signature 209 210 // send proposal and block parts on internal msg queue 211 lazyProposer.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) 212 for i := 0; i < int(blockParts.Total()); i++ { 213 part := blockParts.GetPart(i) 214 lazyProposer.sendInternalMessage(msgInfo{&BlockPartMessage{lazyProposer.Height, lazyProposer.Round, part}, ""}) 215 } 216 lazyProposer.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) 217 lazyProposer.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) 218 } else if !lazyProposer.replayMode { 219 lazyProposer.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) 220 } 221 } 222 223 // start the consensus reactors 224 for i := 0; i < nValidators; i++ { 225 s := reactors[i].conS.GetState() 226 reactors[i].SwitchToConsensus(s, false) 227 } 228 defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) 229 230 // Evidence should be submitted and committed at the third height but 231 // we will check the first six just in case 232 evidenceFromEachValidator := make([]types.Evidence, nValidators) 233 234 wg := new(sync.WaitGroup) 235 for i := 0; i < nValidators; i++ { 236 wg.Add(1) 237 go func(i int) { 238 defer wg.Done() 239 for msg := range blocksSubs[i].Out() { 240 block := msg.Data().(types.EventDataNewBlock).Block 241 if len(block.Evidence.Evidence) != 0 { 242 evidenceFromEachValidator[i] = block.Evidence.Evidence[0] 243 return 244 } 245 } 246 }(i) 247 } 248 249 done := make(chan struct{}) 250 go func() { 251 wg.Wait() 252 close(done) 253 }() 254 255 pubkey, err := bcs.privValidator.GetPubKey() 256 require.NoError(t, err) 257 258 select { 259 case <-done: 260 for idx, ev := range evidenceFromEachValidator { 261 if assert.NotNil(t, ev, idx) { 262 ev, ok := ev.(*types.DuplicateVoteEvidence) 263 assert.True(t, ok) 264 assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress) 265 assert.Equal(t, prevoteHeight, ev.Height()) 266 } 267 } 268 case <-time.After(20 * time.Second): 269 for i, reactor := range reactors { 270 t.Logf("Consensus Reactor %d\n%v", i, reactor) 271 } 272 t.Fatalf("Timed out waiting for validators to commit evidence") 273 } 274 } 275 276 // 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals). 277 // byzantine validator sends conflicting proposals into A and B, 278 // and prevotes/precommits on both of them. 279 // B sees a commit, A doesn't. 280 // Heal partition and ensure A sees the commit 281 func TestByzantineConflictingProposalsWithPartition(t *testing.T) { 282 N := 4 283 logger := consensusLogger().With("test", "byzantine") 284 app := newCounter 285 css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app) 286 defer cleanup() 287 288 // give the byzantine validator a normal ticker 289 ticker := NewTimeoutTicker() 290 ticker.SetLogger(css[0].Logger) 291 css[0].SetTimeoutTicker(ticker) 292 293 switches := make([]*p2p.Switch, N) 294 p2pLogger := logger.With("module", "p2p") 295 for i := 0; i < N; i++ { 296 switches[i] = p2p.MakeSwitch( 297 config.P2P, 298 i, 299 "foo", "1.0.0", 300 func(i int, sw *p2p.Switch) *p2p.Switch { 301 return sw 302 }) 303 switches[i].SetLogger(p2pLogger.With("validator", i)) 304 } 305 306 blocksSubs := make([]types.Subscription, N) 307 reactors := make([]p2p.Reactor, N) 308 for i := 0; i < N; i++ { 309 310 // enable txs so we can create different proposals 311 assertMempool(css[i].txNotifier).EnableTxsAvailable() 312 // make first val byzantine 313 if i == 0 { 314 // NOTE: Now, test validators are MockPV, which by default doesn't 315 // do any safety checks. 316 css[i].privValidator.(types.MockPV).DisableChecks() 317 css[i].decideProposal = func(j int32) func(int64, int32) { 318 return func(height int64, round int32) { 319 byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) 320 } 321 }(int32(i)) 322 // We are setting the prevote function to do nothing because the prevoting 323 // and precommitting are done alongside the proposal. 324 css[i].doPrevote = func(height int64, round int32) {} 325 } 326 327 eventBus := css[i].eventBus 328 eventBus.SetLogger(logger.With("module", "events", "validator", i)) 329 330 var err error 331 blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) 332 require.NoError(t, err) 333 334 conR := NewReactor(css[i], true) // so we don't start the consensus states 335 conR.SetLogger(logger.With("validator", i)) 336 conR.SetEventBus(eventBus) 337 338 var conRI p2p.Reactor = conR 339 340 // make first val byzantine 341 if i == 0 { 342 conRI = NewByzantineReactor(conR) 343 } 344 345 reactors[i] = conRI 346 err = css[i].blockExec.Store().Save(css[i].state) // for save height 1's validators info 347 require.NoError(t, err) 348 } 349 350 defer func() { 351 for _, r := range reactors { 352 if rr, ok := r.(*ByzantineReactor); ok { 353 err := rr.reactor.Switch.Stop() 354 require.NoError(t, err) 355 } else { 356 err := r.(*Reactor).Switch.Stop() 357 require.NoError(t, err) 358 } 359 } 360 }() 361 362 p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { 363 // ignore new switch s, we already made ours 364 switches[i].AddReactor("CONSENSUS", reactors[i]) 365 return switches[i] 366 }, func(sws []*p2p.Switch, i, j int) { 367 // the network starts partitioned with globally active adversary 368 if i != 0 { 369 return 370 } 371 p2p.Connect2Switches(sws, i, j) 372 }) 373 374 // start the non-byz state machines. 375 // note these must be started before the byz 376 for i := 1; i < N; i++ { 377 cr := reactors[i].(*Reactor) 378 cr.SwitchToConsensus(cr.conS.GetState(), false) 379 } 380 381 // start the byzantine state machine 382 byzR := reactors[0].(*ByzantineReactor) 383 s := byzR.reactor.conS.GetState() 384 byzR.reactor.SwitchToConsensus(s, false) 385 386 // byz proposer sends one block to peers[0] 387 // and the other block to peers[1] and peers[2]. 388 // note peers and switches order don't match. 389 peers := switches[0].Peers().List() 390 391 // partition A 392 ind0 := getSwitchIndex(switches, peers[0]) 393 394 // partition B 395 ind1 := getSwitchIndex(switches, peers[1]) 396 ind2 := getSwitchIndex(switches, peers[2]) 397 p2p.Connect2Switches(switches, ind1, ind2) 398 399 // wait for someone in the big partition (B) to make a block 400 <-blocksSubs[ind2].Out() 401 402 t.Log("A block has been committed. Healing partition") 403 p2p.Connect2Switches(switches, ind0, ind1) 404 p2p.Connect2Switches(switches, ind0, ind2) 405 406 // wait till everyone makes the first new block 407 // (one of them already has) 408 wg := new(sync.WaitGroup) 409 for i := 1; i < N-1; i++ { 410 wg.Add(1) 411 go func(j int) { 412 <-blocksSubs[j].Out() 413 wg.Done() 414 }(i) 415 } 416 417 done := make(chan struct{}) 418 go func() { 419 wg.Wait() 420 close(done) 421 }() 422 423 tick := time.NewTicker(time.Second * 10) 424 select { 425 case <-done: 426 case <-tick.C: 427 for i, reactor := range reactors { 428 t.Log(fmt.Sprintf("Consensus Reactor %v", i)) 429 t.Log(fmt.Sprintf("%v", reactor)) 430 } 431 t.Fatalf("Timed out waiting for all validators to commit first block") 432 } 433 } 434 435 //------------------------------- 436 // byzantine consensus functions 437 438 func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) { 439 // byzantine user should create two proposals and try to split the vote. 440 // Avoid sending on internalMsgQueue and running consensus state. 441 442 // Create a new proposal block from state/txs from the mempool. 443 block1, blockParts1 := cs.createProposalBlock() 444 polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()} 445 proposal1 := types.NewProposal(height, round, polRound, propBlockID) 446 p1 := proposal1.ToProto() 447 if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil { 448 t.Error(err) 449 } 450 451 proposal1.Signature = p1.Signature 452 453 // some new transactions come in (this ensures that the proposals are different) 454 deliverTxsRange(cs, 0, 1) 455 456 // Create a new proposal block from state/txs from the mempool. 457 block2, blockParts2 := cs.createProposalBlock() 458 polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()} 459 proposal2 := types.NewProposal(height, round, polRound, propBlockID) 460 p2 := proposal2.ToProto() 461 if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil { 462 t.Error(err) 463 } 464 465 proposal2.Signature = p2.Signature 466 467 block1Hash := block1.Hash() 468 block2Hash := block2.Hash() 469 470 // broadcast conflicting proposals/block parts to peers 471 peers := sw.Peers().List() 472 t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers)) 473 for i, peer := range peers { 474 if i < len(peers)/2 { 475 go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1) 476 } else { 477 go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2) 478 } 479 } 480 } 481 482 func sendProposalAndParts( 483 height int64, 484 round int32, 485 cs *State, 486 peer p2p.Peer, 487 proposal *types.Proposal, 488 blockHash []byte, 489 parts *types.PartSet, 490 ) { 491 // proposal 492 msg := &ProposalMessage{Proposal: proposal} 493 peer.Send(DataChannel, MustEncode(msg)) 494 495 // parts 496 for i := 0; i < int(parts.Total()); i++ { 497 part := parts.GetPart(i) 498 msg := &BlockPartMessage{ 499 Height: height, // This tells peer that this part applies to us. 500 Round: round, // This tells peer that this part applies to us. 501 Part: part, 502 } 503 peer.Send(DataChannel, MustEncode(msg)) 504 } 505 506 // votes 507 cs.mtx.Lock() 508 prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header()) 509 precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header()) 510 cs.mtx.Unlock() 511 512 peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote})) 513 peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) 514 } 515 516 //---------------------------------------- 517 // byzantine consensus reactor 518 519 type ByzantineReactor struct { 520 service.Service 521 reactor *Reactor 522 } 523 524 func NewByzantineReactor(conR *Reactor) *ByzantineReactor { 525 return &ByzantineReactor{ 526 Service: conR, 527 reactor: conR, 528 } 529 } 530 531 func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } 532 func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() } 533 func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { 534 if !br.reactor.IsRunning() { 535 return 536 } 537 538 // Create peerState for peer 539 peerState := NewPeerState(peer).SetLogger(br.reactor.Logger) 540 peer.Set(types.PeerStateKey, peerState) 541 542 // Send our state to peer. 543 // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). 544 if !br.reactor.waitSync { 545 br.reactor.sendNewRoundStepMessage(peer) 546 } 547 } 548 func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { 549 br.reactor.RemovePeer(peer, reason) 550 } 551 func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { 552 br.reactor.Receive(chID, peer, msgBytes) 553 } 554 func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }