github.com/vipernet-xyz/tm@v0.34.24/consensus/byzantine_test.go

     1  package consensus
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"os"
     7  	"path"
     8  	"sync"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/stretchr/testify/assert"
    13  	"github.com/stretchr/testify/require"
    14  
    15  	dbm "github.com/tendermint/tm-db"
    16  
    17  	abcicli "github.com/vipernet-xyz/tm/abci/client"
    18  	abci "github.com/vipernet-xyz/tm/abci/types"
    19  	"github.com/vipernet-xyz/tm/evidence"
    20  	"github.com/vipernet-xyz/tm/libs/log"
    21  	"github.com/vipernet-xyz/tm/libs/service"
    22  	tmsync "github.com/vipernet-xyz/tm/libs/sync"
    23  	mempl "github.com/vipernet-xyz/tm/mempool"
    24  
    25  	cfg "github.com/vipernet-xyz/tm/config"
    26  	mempoolv0 "github.com/vipernet-xyz/tm/mempool/v0"
    27  	mempoolv1 "github.com/vipernet-xyz/tm/mempool/v1"
    28  	"github.com/vipernet-xyz/tm/p2p"
    29  	tmcons "github.com/vipernet-xyz/tm/proto/tendermint/consensus"
    30  	tmproto "github.com/vipernet-xyz/tm/proto/tendermint/types"
    31  	sm "github.com/vipernet-xyz/tm/state"
    32  	"github.com/vipernet-xyz/tm/store"
    33  	"github.com/vipernet-xyz/tm/types"
    34  )
    35  
    36  //----------------------------------------------
    37  // byzantine failures
    38  
     39  // The byzantine node signs two different prevotes (nil and blockID) for the same height and round, sending one to each half of its peers
    40  func TestByzantinePrevoteEquivocation(t *testing.T) {
    41  	const nValidators = 4
    42  	const byzantineNode = 0
    43  	const prevoteHeight = int64(2)
    44  	testName := "consensus_byzantine_test"
    45  	tickerFunc := newMockTickerFunc(true)
    46  	appFunc := newCounter
    47  
    48  	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
    49  	css := make([]*State, nValidators)
    50  
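         	// Build one consensus State per validator, each with its own in-memory
         	// state DB, block store, mempool, and evidence pool.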
    51  	for i := 0; i < nValidators; i++ {
    52  		logger := consensusLogger().With("test", "byzantine", "validator", i)
    53  		stateDB := dbm.NewMemDB() // each state needs its own db
    54  		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
    55  			DiscardABCIResponses: false,
    56  		})
    57  		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    58  		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
    59  		defer os.RemoveAll(thisConfig.RootDir)
    60  		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
    61  		app := appFunc()
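         		// initialize the app with the genesis validator set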
    62  		vals := types.TM2PB.ValidatorUpdates(state.Validators)
    63  		app.InitChain(abci.RequestInitChain{Validators: vals})
    64  
    65  		blockDB := dbm.NewMemDB()
    66  		blockStore := store.NewBlockStore(blockDB)
    67  
    68  		mtx := new(tmsync.Mutex)
    69  		// one for mempool, one for consensus
    70  		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
    71  		proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
    72  
    73  		// Make Mempool
    74  		var mempool mempl.Mempool
    75  
    76  		switch thisConfig.Mempool.Version {
    77  		case cfg.MempoolV0:
    78  			mempool = mempoolv0.NewCListMempool(config.Mempool,
    79  				proxyAppConnConMem,
    80  				state.LastBlockHeight,
    81  				mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
    82  				mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
    83  		case cfg.MempoolV1:
    84  			mempool = mempoolv1.NewTxMempool(logger,
    85  				config.Mempool,
    86  				proxyAppConnConMem,
    87  				state.LastBlockHeight,
    88  				mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
    89  				mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
    90  			)
    91  		}
    92  
    93  		if thisConfig.Consensus.WaitForTxs() {
    94  			mempool.EnableTxsAvailable()
    95  		}
    96  
    97  		// Make a full instance of the evidence pool
    98  		evidenceDB := dbm.NewMemDB()
    99  		evpool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
   100  		require.NoError(t, err)
   101  		evpool.SetLogger(logger.With("module", "evidence"))
   102  
   103  		// Make State
   104  		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
   105  		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
   106  		cs.SetLogger(cs.Logger)
   107  		// set private validator
   108  		pv := privVals[i]
   109  		cs.SetPrivValidator(pv)
   110  
   111  		eventBus := types.NewEventBus()
   112  		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
   113  		err = eventBus.Start()
   114  		require.NoError(t, err)
   115  		cs.SetEventBus(eventBus)
   116  
   117  		cs.SetTimeoutTicker(tickerFunc())
   118  		cs.SetLogger(logger)
   119  
   120  		css[i] = cs
   121  	}
   122  
   123  	// initialize the reactors for each of the validators
   124  	reactors := make([]*Reactor, nValidators)
   125  	blocksSubs := make([]types.Subscription, 0)
   126  	eventBuses := make([]*types.EventBus, nValidators)
   127  	for i := 0; i < nValidators; i++ {
    128  		reactors[i] = NewReactor(css[i], true) // so we don't start the consensus states
   129  		reactors[i].SetLogger(css[i].Logger)
   130  
   131  		// eventBus is already started with the cs
   132  		eventBuses[i] = css[i].eventBus
   133  		reactors[i].SetEventBus(eventBuses[i])
   134  
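         		// subscribe to new-block events so the test can watch for committed evidence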
   135  		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, 100)
   136  		require.NoError(t, err)
   137  		blocksSubs = append(blocksSubs, blocksSub)
   138  
    139  		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
   140  			err = css[i].blockExec.Store().Save(css[i].state)
   141  			require.NoError(t, err)
   142  		}
   143  	}
   144  	// make connected switches and start all reactors
   145  	p2p.MakeConnectedSwitches(config.P2P, nValidators, func(i int, s *p2p.Switch) *p2p.Switch {
   146  		s.AddReactor("CONSENSUS", reactors[i])
   147  		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
   148  		return s
   149  	}, p2p.Connect2Switches)
   150  
   151  	// create byzantine validator
   152  	bcs := css[byzantineNode]
   153  
   154  	// alter prevote so that the byzantine node double votes when height is 2
   155  	bcs.doPrevote = func(height int64, round int32) {
    156  		// allow the first height to proceed normally so that the byzantine validator is no longer the proposer
   157  		if height == prevoteHeight {
   158  			bcs.Logger.Info("Sending two votes")
   159  			prevote1, err := bcs.signVote(tmproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header())
   160  			require.NoError(t, err)
   161  			prevote2, err := bcs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
   162  			require.NoError(t, err)
   163  			peerList := reactors[byzantineNode].Switch.Peers().List()
   164  			bcs.Logger.Info("Getting peer list", "peers", peerList)
    165  			// send the two votes to all peers (the 1st to one half, the 2nd to the other half)
   166  			for i, peer := range peerList {
   167  				if i < len(peerList)/2 {
   168  					bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
   169  					p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   170  						Message:   &tmcons.Vote{Vote: prevote1.ToProto()},
   171  						ChannelID: VoteChannel,
   172  					}, bcs.Logger)
   173  				} else {
   174  					bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
   175  					p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   176  						Message:   &tmcons.Vote{Vote: prevote2.ToProto()},
   177  						ChannelID: VoteChannel,
   178  					}, bcs.Logger)
   179  				}
   180  			}
   181  		} else {
   182  			bcs.Logger.Info("Behaving normally")
   183  			bcs.defaultDoPrevote(height, round)
   184  		}
   185  	}
   186  
    187  	// Introducing a lazy proposer means that the timestamp of the committed block differs from the
    188  	// timestamps that the other nodes have. This checks that the evidence that eventually gets
    189  	// proposed still carries a valid timestamp.
   190  	lazyProposer := css[1]
   191  
   192  	lazyProposer.decideProposal = func(height int64, round int32) {
   193  		lazyProposer.Logger.Info("Lazy Proposer proposing condensed commit")
   194  		if lazyProposer.privValidator == nil {
   195  			panic("entered createProposalBlock with privValidator being nil")
   196  		}
   197  
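         		// build the commit for the previous block that the proposal will be based on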
   198  		var commit *types.Commit
   199  		switch {
   200  		case lazyProposer.Height == lazyProposer.state.InitialHeight:
   201  			// We're creating a proposal for the first block.
   202  			// The commit is empty, but not nil.
   203  			commit = types.NewCommit(0, 0, types.BlockID{}, nil)
   204  		case lazyProposer.LastCommit.HasTwoThirdsMajority():
   205  			// Make the commit from LastCommit
   206  			commit = lazyProposer.LastCommit.MakeCommit()
   207  		default: // This shouldn't happen.
   208  			lazyProposer.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
   209  			return
   210  		}
   211  
   212  		// omit the last signature in the commit
   213  		commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent()
   214  
   215  		if lazyProposer.privValidatorPubKey == nil {
   216  			// If this node is a validator & proposer in the current round, it will
   217  			// miss the opportunity to create a block.
   218  			lazyProposer.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
   219  			return
   220  		}
   221  		proposerAddr := lazyProposer.privValidatorPubKey.Address()
   222  
   223  		block, blockParts := lazyProposer.blockExec.CreateProposalBlock(
   224  			lazyProposer.Height, lazyProposer.state, commit, proposerAddr,
   225  		)
   226  
   227  		// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
   228  		// and the privValidator will refuse to sign anything.
   229  		if err := lazyProposer.wal.FlushAndSync(); err != nil {
   230  			lazyProposer.Logger.Error("Error flushing to disk")
   231  		}
   232  
   233  		// Make proposal
   234  		propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
   235  		proposal := types.NewProposal(height, round, lazyProposer.ValidRound, propBlockID)
   236  		p := proposal.ToProto()
   237  		if err := lazyProposer.privValidator.SignProposal(lazyProposer.state.ChainID, p); err == nil {
   238  			proposal.Signature = p.Signature
   239  
   240  			// send proposal and block parts on internal msg queue
   241  			lazyProposer.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""})
   242  			for i := 0; i < int(blockParts.Total()); i++ {
   243  				part := blockParts.GetPart(i)
   244  				lazyProposer.sendInternalMessage(msgInfo{&BlockPartMessage{lazyProposer.Height, lazyProposer.Round, part}, ""})
   245  			}
   246  			lazyProposer.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
   247  			lazyProposer.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
   248  		} else if !lazyProposer.replayMode {
   249  			lazyProposer.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
   250  		}
   251  	}
   252  
   253  	// start the consensus reactors
   254  	for i := 0; i < nValidators; i++ {
   255  		s := reactors[i].conS.GetState()
   256  		reactors[i].SwitchToConsensus(s, false)
   257  	}
   258  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   259  
    260  	// Evidence should be submitted and committed at the third height, but
    261  	// we will check the first six heights just in case
   262  	evidenceFromEachValidator := make([]types.Evidence, nValidators)
   263  
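         	// one goroutine per validator waits for the first block that contains evidence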
   264  	wg := new(sync.WaitGroup)
   265  	for i := 0; i < nValidators; i++ {
   266  		wg.Add(1)
   267  		go func(i int) {
   268  			defer wg.Done()
   269  			for msg := range blocksSubs[i].Out() {
   270  				block := msg.Data().(types.EventDataNewBlock).Block
   271  				if len(block.Evidence.Evidence) != 0 {
   272  					evidenceFromEachValidator[i] = block.Evidence.Evidence[0]
   273  					return
   274  				}
   275  			}
   276  		}(i)
   277  	}
   278  
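         	// signal once every validator has observed evidence in a committed block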
   279  	done := make(chan struct{})
   280  	go func() {
   281  		wg.Wait()
   282  		close(done)
   283  	}()
   284  
   285  	pubkey, err := bcs.privValidator.GetPubKey()
   286  	require.NoError(t, err)
   287  
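         	// every piece of committed evidence should be duplicate-vote evidence from the
         	// byzantine validator at the equivocation height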
   288  	select {
   289  	case <-done:
   290  		for idx, ev := range evidenceFromEachValidator {
   291  			if assert.NotNil(t, ev, idx) {
   292  				ev, ok := ev.(*types.DuplicateVoteEvidence)
   293  				assert.True(t, ok)
   294  				assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
   295  				assert.Equal(t, prevoteHeight, ev.Height())
   296  			}
   297  		}
   298  	case <-time.After(20 * time.Second):
   299  		for i, reactor := range reactors {
   300  			t.Logf("Consensus Reactor %d\n%v", i, reactor)
   301  		}
   302  		t.Fatalf("Timed out waiting for validators to commit evidence")
   303  	}
   304  }
   305  
   306  // 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
   307  // byzantine validator sends conflicting proposals into A and B,
   308  // and prevotes/precommits on both of them.
   309  // B sees a commit, A doesn't.
   310  // Heal partition and ensure A sees the commit
   311  func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
   312  	N := 4
   313  	logger := consensusLogger().With("test", "byzantine")
   314  	app := newCounter
   315  	css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
   316  	defer cleanup()
   317  
   318  	// give the byzantine validator a normal ticker
   319  	ticker := NewTimeoutTicker()
   320  	ticker.SetLogger(css[0].Logger)
   321  	css[0].SetTimeoutTicker(ticker)
   322  
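         	// create one p2p switch per validator; the consensus reactors are attached
         	// to them later, once they have been built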
   323  	switches := make([]*p2p.Switch, N)
   324  	p2pLogger := logger.With("module", "p2p")
   325  	for i := 0; i < N; i++ {
   326  		switches[i] = p2p.MakeSwitch(
   327  			config.P2P,
   328  			i,
   329  			"foo", "1.0.0",
   330  			func(i int, sw *p2p.Switch) *p2p.Switch {
   331  				return sw
   332  			})
   333  		switches[i].SetLogger(p2pLogger.With("validator", i))
   334  	}
   335  
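         	// build a consensus reactor per validator, making the first one byzantine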
   336  	blocksSubs := make([]types.Subscription, N)
   337  	reactors := make([]p2p.Reactor, N)
   338  	for i := 0; i < N; i++ {
   339  
   340  		// enable txs so we can create different proposals
   341  		assertMempool(css[i].txNotifier).EnableTxsAvailable()
   342  		// make first val byzantine
   343  		if i == 0 {
   344  			// NOTE: Now, test validators are MockPV, which by default doesn't
   345  			// do any safety checks.
   346  			css[i].privValidator.(types.MockPV).DisableChecks()
   347  			css[i].decideProposal = func(j int32) func(int64, int32) {
   348  				return func(height int64, round int32) {
   349  					byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
   350  				}
   351  			}(int32(i))
   352  			// We are setting the prevote function to do nothing because the prevoting
   353  			// and precommitting are done alongside the proposal.
   354  			css[i].doPrevote = func(height int64, round int32) {}
   355  		}
   356  
   357  		eventBus := css[i].eventBus
   358  		eventBus.SetLogger(logger.With("module", "events", "validator", i))
   359  
   360  		var err error
   361  		blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
   362  		require.NoError(t, err)
   363  
   364  		conR := NewReactor(css[i], true) // so we don't start the consensus states
   365  		conR.SetLogger(logger.With("validator", i))
   366  		conR.SetEventBus(eventBus)
   367  
   368  		var conRI p2p.Reactor = conR
   369  
   370  		// make first val byzantine
   371  		if i == 0 {
   372  			conRI = NewByzantineReactor(conR)
   373  		}
   374  
   375  		reactors[i] = conRI
    376  		err = css[i].blockExec.Store().Save(css[i].state) // save height 1's validator set info
   377  		require.NoError(t, err)
   378  	}
   379  
   380  	defer func() {
   381  		for _, r := range reactors {
   382  			if rr, ok := r.(*ByzantineReactor); ok {
   383  				err := rr.reactor.Switch.Stop()
   384  				require.NoError(t, err)
   385  			} else {
   386  				err := r.(*Reactor).Switch.Stop()
   387  				require.NoError(t, err)
   388  			}
   389  		}
   390  	}()
   391  
   392  	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
   393  		// ignore new switch s, we already made ours
   394  		switches[i].AddReactor("CONSENSUS", reactors[i])
   395  		return switches[i]
   396  	}, func(sws []*p2p.Switch, i, j int) {
    397  		// the network starts partitioned: only the byzantine node (0) is connected to every other validator
   398  		if i != 0 {
   399  			return
   400  		}
   401  		p2p.Connect2Switches(sws, i, j)
   402  	})
   403  
   404  	// start the non-byz state machines.
    405  	// note these must be started before the byzantine one
   406  	for i := 1; i < N; i++ {
   407  		cr := reactors[i].(*Reactor)
   408  		cr.SwitchToConsensus(cr.conS.GetState(), false)
   409  	}
   410  
   411  	// start the byzantine state machine
   412  	byzR := reactors[0].(*ByzantineReactor)
   413  	s := byzR.reactor.conS.GetState()
   414  	byzR.reactor.SwitchToConsensus(s, false)
   415  
   416  	// byz proposer sends one block to peers[0]
   417  	// and the other block to peers[1] and peers[2].
    418  	// note that the order of peers does not match the order of switches.
   419  	peers := switches[0].Peers().List()
   420  
   421  	// partition A
   422  	ind0 := getSwitchIndex(switches, peers[0])
   423  
   424  	// partition B
   425  	ind1 := getSwitchIndex(switches, peers[1])
   426  	ind2 := getSwitchIndex(switches, peers[2])
   427  	p2p.Connect2Switches(switches, ind1, ind2)
   428  
   429  	// wait for someone in the big partition (B) to make a block
   430  	<-blocksSubs[ind2].Out()
   431  
   432  	t.Logf("A block has been committed. Healing partition")
   433  	p2p.Connect2Switches(switches, ind0, ind1)
   434  	p2p.Connect2Switches(switches, ind0, ind2)
   435  
   436  	// wait till everyone makes the first new block
   437  	// (one of them already has)
   438  	wg := new(sync.WaitGroup)
   439  	for i := 1; i < N-1; i++ {
   440  		wg.Add(1)
   441  		go func(j int) {
   442  			<-blocksSubs[j].Out()
   443  			wg.Done()
   444  		}(i)
   445  	}
   446  
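         	// signal once each of the watched validators has committed a block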
   447  	done := make(chan struct{})
   448  	go func() {
   449  		wg.Wait()
   450  		close(done)
   451  	}()
   452  
   453  	tick := time.NewTicker(time.Second * 10)
   454  	select {
   455  	case <-done:
   456  	case <-tick.C:
   457  		for i, reactor := range reactors {
    458  			t.Logf("Consensus Reactor %v", i)
    459  			t.Logf("%v", reactor)
   460  		}
   461  		t.Fatalf("Timed out waiting for all validators to commit first block")
   462  	}
   463  }
   464  
   465  //-------------------------------
   466  // byzantine consensus functions
   467  
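         // byzantineDecideProposalFunc creates and signs two different proposals for the
         // same height and round, then sends each proposal, its block parts, and matching
         // votes to a different half of the peers.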
   468  func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) {
   469  	// byzantine user should create two proposals and try to split the vote.
   470  	// Avoid sending on internalMsgQueue and running consensus state.
   471  
   472  	// Create a new proposal block from state/txs from the mempool.
   473  	block1, blockParts1 := cs.createProposalBlock()
   474  	polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
   475  	proposal1 := types.NewProposal(height, round, polRound, propBlockID)
   476  	p1 := proposal1.ToProto()
   477  	if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
   478  		t.Error(err)
   479  	}
   480  
   481  	proposal1.Signature = p1.Signature
   482  
   483  	// some new transactions come in (this ensures that the proposals are different)
   484  	deliverTxsRange(cs, 0, 1)
   485  
   486  	// Create a new proposal block from state/txs from the mempool.
   487  	block2, blockParts2 := cs.createProposalBlock()
   488  	polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
   489  	proposal2 := types.NewProposal(height, round, polRound, propBlockID)
   490  	p2 := proposal2.ToProto()
   491  	if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
   492  		t.Error(err)
   493  	}
   494  
   495  	proposal2.Signature = p2.Signature
   496  
   497  	block1Hash := block1.Hash()
   498  	block2Hash := block2.Hash()
   499  
   500  	// broadcast conflicting proposals/block parts to peers
   501  	peers := sw.Peers().List()
   502  	t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
   503  	for i, peer := range peers {
   504  		if i < len(peers)/2 {
   505  			go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
   506  		} else {
   507  			go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
   508  		}
   509  	}
   510  }
   511  
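         // sendProposalAndParts sends a proposal, all of its block parts, and a signed
         // prevote and precommit for the block to a single peer.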
   512  func sendProposalAndParts(
   513  	height int64,
   514  	round int32,
   515  	cs *State,
   516  	peer p2p.Peer,
   517  	proposal *types.Proposal,
   518  	blockHash []byte,
   519  	parts *types.PartSet,
   520  ) {
   521  	// proposal
   522  	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   523  		ChannelID: DataChannel,
   524  		Message:   &tmcons.Proposal{Proposal: *proposal.ToProto()},
   525  	}, cs.Logger)
   526  
   527  	// parts
   528  	for i := 0; i < int(parts.Total()); i++ {
   529  		part := parts.GetPart(i)
   530  		pp, err := part.ToProto()
   531  		if err != nil {
   532  			panic(err) // TODO: wbanfield better error handling
   533  		}
   534  		p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   535  			ChannelID: DataChannel,
   536  			Message: &tmcons.BlockPart{
    537  				Height: height, // tells the peer which height this part is for
    538  				Round:  round,  // tells the peer which round this part is for
   539  				Part:   *pp,
   540  			},
   541  		}, cs.Logger)
   542  	}
   543  
   544  	// votes
   545  	cs.mtx.Lock()
   546  	prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
   547  	precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
   548  	cs.mtx.Unlock()
   549  	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   550  		ChannelID: VoteChannel,
   551  		Message:   &tmcons.Vote{Vote: prevote.ToProto()},
   552  	}, cs.Logger)
   553  	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   554  		ChannelID: VoteChannel,
   555  		Message:   &tmcons.Vote{Vote: precommit.ToProto()},
   556  	}, cs.Logger)
   557  }
   558  
   559  //----------------------------------------
   560  // byzantine consensus reactor
   561  
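         // ByzantineReactor wraps a consensus Reactor, satisfying p2p.Reactor by
         // delegating to it while providing its own AddPeer behavior.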
   562  type ByzantineReactor struct {
   563  	service.Service
   564  	reactor *Reactor
   565  }
   566  
   567  func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
   568  	return &ByzantineReactor{
   569  		Service: conR,
   570  		reactor: conR,
   571  	}
   572  }
   573  
   574  func (br *ByzantineReactor) SetSwitch(s *p2p.Switch)               { br.reactor.SetSwitch(s) }
   575  func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
   576  func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
   577  	if !br.reactor.IsRunning() {
   578  		return
   579  	}
   580  
   581  	// Create peerState for peer
   582  	peerState := NewPeerState(peer).SetLogger(br.reactor.Logger)
   583  	peer.Set(types.PeerStateKey, peerState)
   584  
   585  	// Send our state to peer.
   586  	// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
   587  	if !br.reactor.waitSync {
   588  		br.reactor.sendNewRoundStepMessage(peer)
   589  	}
   590  }
   591  func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
   592  	br.reactor.RemovePeer(peer, reason)
   593  }
   594  func (br *ByzantineReactor) ReceiveEnvelope(e p2p.Envelope) {
   595  	br.reactor.ReceiveEnvelope(e)
   596  }
   597  func (br *ByzantineReactor) Receive(chID byte, p p2p.Peer, m []byte) {
   598  	br.reactor.Receive(chID, p, m)
   599  }
   600  func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }