github.com/Finschia/ostracon@v1.1.5/consensus/byzantine_test.go

     1  package consensus
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"os"
     7  	"path"
     8  	"sync"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/stretchr/testify/assert"
    13  	"github.com/stretchr/testify/require"
    14  
    15  	abci "github.com/tendermint/tendermint/abci/types"
    16  	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
    17  	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    18  	dbm "github.com/tendermint/tm-db"
    19  
    20  	abcicli "github.com/Finschia/ostracon/abci/client"
    21  	"github.com/Finschia/ostracon/evidence"
    22  	"github.com/Finschia/ostracon/libs/log"
    23  	"github.com/Finschia/ostracon/libs/service"
    24  	tmsync "github.com/Finschia/ostracon/libs/sync"
    25  	mempl "github.com/Finschia/ostracon/mempool"
    26  
    27  	cfg "github.com/Finschia/ostracon/config"
    28  	mempoolv0 "github.com/Finschia/ostracon/mempool/v0"
    29  
    30  	//mempoolv1 "github.com/Finschia/ostracon/mempool/v1"
    31  	"github.com/Finschia/ostracon/p2p"
    32  	sm "github.com/Finschia/ostracon/state"
    33  	"github.com/Finschia/ostracon/store"
    34  	"github.com/Finschia/ostracon/types"
    35  )
    36  
    37  //----------------------------------------------
    38  // byzantine failures
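        //
        // Both tests in this file inject misbehavior by swapping out callbacks on a single
        // consensus State (doPrevote / decideProposal) instead of running a separate byzantine
        // implementation; the remaining validators run the normal code paths.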
    39  
    40  // Byzantine node signs two conflicting prevotes (nil and the proposal blockID) for the same height/round and sends a different one to each half of its peers
    41  func TestByzantinePrevoteEquivocation(t *testing.T) {
    42  	const nValidators = 4
    43  	const byzantineNode = 0
    44  	const prevoteHeight = int64(2)
    45  	testName := "consensus_byzantine_test"
    46  	tickerFunc := newMockTickerFunc(true)
    47  	appFunc := newCounter
    48  
    49  	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
    50  	css := make([]*State, nValidators)
    51  
    52  	for i := 0; i < nValidators; i++ {
    53  		logger := consensusLogger().With("test", "byzantine", "validator", i)
    54  		stateDB := dbm.NewMemDB() // each state needs its own db
    55  		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
    56  			DiscardABCIResponses: false,
    57  		})
    58  		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    59  		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
    60  		defer os.RemoveAll(thisConfig.RootDir)
    61  		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
    62  		app := appFunc()
    63  		vals := types.OC2PB.ValidatorUpdates(state.Validators)
    64  		app.InitChain(abci.RequestInitChain{Validators: vals})
    65  
    66  		blockDB := dbm.NewMemDB()
    67  		blockStore := store.NewBlockStore(blockDB)
    68  
    69  		mtx := new(tmsync.Mutex)
    70  		// one connection for consensus, one for the mempool
    71  		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
    72  		proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
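        		// Both local clients share the same mutex, so ABCI calls from the consensus
        		// and mempool connections are serialized against the single app instance.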
    73  
    74  		// Make Mempool
    75  		var mempool mempl.Mempool
    76  
    77  		switch thisConfig.Mempool.Version {
    78  		case cfg.MempoolV0:
    79  			mempool = mempoolv0.NewCListMempool(config.Mempool,
    80  				proxyAppConnConMem,
    81  				state.LastBlockHeight,
    82  				mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
    83  				mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
    84  		case cfg.MempoolV1: // XXX Deprecated
    85  			panic("Deprecated MempoolV1")
    86  			/*
    87  				mempool = mempoolv1.NewTxMempool(logger,
    88  					config.Mempool,
    89  					proxyAppConnConMem,
    90  					state.LastBlockHeight,
    91  					mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
    92  					mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
    93  				)
    94  			*/
    95  		}
    96  
    97  		if thisConfig.Consensus.WaitForTxs() {
    98  			mempool.EnableTxsAvailable()
    99  		}
   100  
   101  		// Make a full instance of the evidence pool
   102  		evidenceDB := dbm.NewMemDB()
   103  		evpool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
   104  		require.NoError(t, err)
   105  		evpool.SetLogger(logger.With("module", "evidence"))
   106  
   107  		// Make State
   108  		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
   109  		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
   110  		// set private validator
   111  		pv := privVals[i]
   112  		cs.SetPrivValidator(pv)
   113  
   114  		eventBus := types.NewEventBus()
   115  		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
   116  		err = eventBus.Start()
   117  		require.NoError(t, err)
   118  		cs.SetEventBus(eventBus)
   119  
   120  		cs.SetTimeoutTicker(tickerFunc())
   121  		cs.SetLogger(logger)
   122  
   123  		css[i] = cs
   124  	}
   125  
   126  	// initialize the reactors for each of the validators
   127  	reactors := make([]*Reactor, nValidators)
   128  	blocksSubs := make([]types.Subscription, 0)
   129  	eventBuses := make([]*types.EventBus, nValidators)
   130  	for i := 0; i < nValidators; i++ {
   131  		reactors[i] = NewReactor(
   132  			css[i],
   133  			true,
   134  			config.P2P.RecvAsync,
   135  			config.P2P.ConsensusRecvBufSize,
   136  		) // so we don't start the consensus states
   137  		reactors[i].SetLogger(css[i].Logger)
   138  
   139  		// eventBus is already started with the cs
   140  		eventBuses[i] = css[i].eventBus
   141  		reactors[i].SetEventBus(eventBuses[i])
   142  
   143  		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, 100)
   144  		require.NoError(t, err)
   145  		blocksSubs = append(blocksSubs, blocksSub)
   146  
   147  		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
   148  			err = css[i].blockExec.Store().Save(css[i].state)
   149  			require.NoError(t, err)
   150  		}
   151  	}
   152  	// make connected switches and start all reactors
   153  	p2p.MakeConnectedSwitches(config.P2P, nValidators, func(i int, s *p2p.Switch, c *cfg.P2PConfig) *p2p.Switch {
   154  		s.AddReactor("CONSENSUS", reactors[i])
   155  		s.SetLogger(log.NewNopLogger().With("module", "p2p")) // Switch log is noisy for this test
   156  		return s
   157  	}, p2p.Connect2Switches)
   158  
   159  	// create byzantine validator
   160  	bcs := css[byzantineNode]
   161  
   162  	// alter prevote so that the byzantine node double votes when height is 2
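        	// Both prevotes below are signed for the same height/round but different block IDs;
        	// once honest peers see the pair, the evidence pool records DuplicateVoteEvidence,
        	// which the assertions at the end of this test look for.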
   163  	bcs.doPrevote = func(height int64, round int32) {
   164  		// allow first height to happen normally so that byzantine validator is no longer proposer
   165  		if height == prevoteHeight {
   166  			bcs.Logger.Info("Sending two votes")
   167  			prevote1, err := bcs.signVote(tmproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header())
   168  			require.NoError(t, err)
   169  			prevote2, err := bcs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
   170  			require.NoError(t, err)
   171  			peerList := reactors[byzantineNode].Switch.Peers().List()
   172  			bcs.Logger.Info("Getting peer list", "peers", peerList)
   173  			// send the two votes to all peers (the 1st to one half, the 2nd to the other half)
   174  			for i, peer := range peerList {
   175  				if i < len(peerList)/2 {
   176  					bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
   177  					p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   178  						Message:   &tmcons.Vote{Vote: prevote1.ToProto()},
   179  						ChannelID: VoteChannel,
   180  					}, bcs.Logger)
   181  				} else {
   182  					bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
   183  					p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   184  						Message:   &tmcons.Vote{Vote: prevote2.ToProto()},
   185  						ChannelID: VoteChannel,
   186  					}, bcs.Logger)
   187  				}
   188  			}
   189  		} else {
   190  			bcs.Logger.Info("Behaving normally")
   191  			bcs.defaultDoPrevote(height, round)
   192  		}
   193  	}
   194  
   195  	// Introducing a lazy proposer means that the time of the committed block differs from the
   196  	// timestamp that the other nodes have. This checks that the evidence that finally gets
   197  	// proposed still carries a valid timestamp.
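        	// Block time is derived from the vote timestamps in the previous commit, so omitting
        	// one of those signatures (below) shifts the time this proposer computes for the block
        	// relative to what its peers expect.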
   198  	lazyProposer := css[1]
   199  
   200  	lazyProposer.decideProposal = func(height int64, round int32) {
   201  		lazyProposer.Logger.Info("Lazy Proposer proposing condensed commit")
   202  		if lazyProposer.privValidator == nil {
   203  			panic("entered createProposalBlock with privValidator being nil")
   204  		}
   205  
   206  		var commit *types.Commit
   207  		switch {
   208  		case lazyProposer.Height == lazyProposer.state.InitialHeight:
   209  			// We're creating a proposal for the first block.
   210  			// The commit is empty, but not nil.
   211  			commit = types.NewCommit(0, 0, types.BlockID{}, nil)
   212  		case lazyProposer.LastCommit.HasTwoThirdsMajority():
   213  			// Make the commit from LastCommit
   214  			commit = lazyProposer.LastCommit.MakeCommit()
   215  		default: // This shouldn't happen.
   216  			lazyProposer.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
   217  			return
   218  		}
   219  
   220  		// omit the last signature in the commit,
   221  		// except when proposing the first block, whose commit has no signatures
   222  		if commit.Signatures != nil {
   223  			commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent()
   224  		}
   225  
   226  		if lazyProposer.privValidatorPubKey == nil {
   227  			// If this node is a validator & proposer in the current round, it will
   228  			// miss the opportunity to create a block.
   229  			lazyProposer.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
   230  			return
   231  		}
   232  		proposerAddr := lazyProposer.privValidatorPubKey.Address()
   233  
   234  		message := lazyProposer.state.MakeHashMessage(lazyProposer.Round)
   235  		proof, _ := lazyProposer.privValidator.GenerateVRFProof(message)
   236  		block, blockParts := lazyProposer.blockExec.CreateProposalBlock(
   237  			lazyProposer.Height, lazyProposer.state, commit, proposerAddr, lazyProposer.Round, proof, 0,
   238  		)
   239  
   240  		// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
   241  		// and the privValidator will refuse to sign anything.
   242  		if err := lazyProposer.wal.FlushAndSync(); err != nil {
   243  			lazyProposer.Logger.Error("Error flushing to disk")
   244  		}
   245  
   246  		// Make proposal
   247  		propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
   248  		proposal := types.NewProposal(height, round, lazyProposer.ValidRound, propBlockID)
   249  		p := proposal.ToProto()
   250  		if err := lazyProposer.privValidator.SignProposal(lazyProposer.state.ChainID, p); err == nil {
   251  			proposal.Signature = p.Signature
   252  
   253  			// send proposal and block parts on internal msg queue
   254  			lazyProposer.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""})
   255  			for i := 0; i < int(blockParts.Total()); i++ {
   256  				part := blockParts.GetPart(i)
   257  				lazyProposer.sendInternalMessage(msgInfo{&BlockPartMessage{lazyProposer.Height, lazyProposer.Round, part}, ""})
   258  			}
   259  			lazyProposer.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
   260  			lazyProposer.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
   261  		} else if !lazyProposer.replayMode {
   262  			lazyProposer.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
   263  		}
   264  	}
   265  
   266  	// start the consensus reactors
   267  	for i := 0; i < nValidators; i++ {
   268  		s := reactors[i].conS.GetState()
   269  		reactors[i].SwitchToConsensus(s, false)
   270  	}
   271  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   272  
   273  	// Evidence should be submitted and committed at the third height, but
   274  	// we simply wait until every validator sees a block that contains evidence
   275  	evidenceFromEachValidator := make([]types.Evidence, nValidators)
   276  
   277  	wg := new(sync.WaitGroup)
   278  	for i := 0; i < nValidators; i++ {
   279  		wg.Add(1)
   280  		go func(i int) {
   281  			defer wg.Done()
   282  			for msg := range blocksSubs[i].Out() {
   283  				block := msg.Data().(types.EventDataNewBlock).Block
   284  				if len(block.Evidence.Evidence) != 0 {
   285  					evidenceFromEachValidator[i] = block.Evidence.Evidence[0]
   286  					return
   287  				}
   288  			}
   289  		}(i)
   290  	}
   291  
   292  	done := make(chan struct{})
   293  	go func() {
   294  		wg.Wait()
   295  		close(done)
   296  	}()
   297  
   298  	pubkey, err := bcs.privValidator.GetPubKey()
   299  	require.NoError(t, err)
   300  
   301  	select {
   302  	case <-done:
   303  		for idx, ev := range evidenceFromEachValidator {
   304  			if assert.NotNil(t, ev, idx) {
   305  				ev, ok := ev.(*types.DuplicateVoteEvidence)
   306  				assert.True(t, ok)
   307  				assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
   308  				assert.Equal(t, prevoteHeight, ev.Height())
   309  			}
   310  		}
   311  	case <-time.After(10 * time.Second): // XXX 20 seconds is too long, so we reduced it to 10 seconds
   312  		for i, reactor := range reactors {
   313  			t.Logf("Consensus Reactor %d\n%v", i, reactor)
   314  		}
   315  		t.Fatalf("Timed out waiting for validators to commit evidence")
   316  	}
   317  }
   318  
   319  // 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
   320  // The byzantine validator sends conflicting proposals into A and B,
   321  // and prevotes/precommits on both of them.
   322  // B sees a commit, A doesn't.
   323  // Heal the partition and ensure A sees the commit.
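        // Only the byzantine proposer is connected to everyone at startup; the honest validators
        // begin disconnected from one another, so B (2 vals) plus the byzantine node can reach
        // 2/3+ of the voting power and commit, while A (1 val) cannot.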
   324  func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
   325  	N := 4
   326  	logger := consensusLogger().With("test", "byzantine")
   327  	app := newCounter
   328  	css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
   329  	defer cleanup()
   330  
   331  	// get proposer of first block
   332  	proposerIdx, _ := findProposer(css[0])
   333  
   334  	// give the byzantine validator a normal ticker
   335  	ticker := NewTimeoutTicker()
   336  	ticker.SetLogger(css[0].Logger)
   337  	css[0].SetTimeoutTicker(ticker)
   338  
   339  	switches := make([]*p2p.Switch, N)
   340  	p2pLogger := logger.With("module", "p2p")
   341  	for i := 0; i < N; i++ {
   342  		switches[i] = p2p.MakeSwitch(
   343  			config.P2P,
   344  			i,
   345  			"foo", "1.0.0",
   346  			func(i int, sw *p2p.Switch, config *cfg.P2PConfig) *p2p.Switch {
   347  				return sw
   348  			})
   349  		switches[i].SetLogger(p2pLogger.With("validator", i))
   350  	}
   351  
   352  	blocksSubs := make([]types.Subscription, N)
   353  	reactors := make([]p2p.Reactor, N)
   354  	for i := 0; i < N; i++ {
   355  
   356  		// enable txs so we can create different proposals
   357  		assertMempool(css[i].txNotifier).EnableTxsAvailable()
   358  		// make the proposer of the first block byzantine
   359  		if int32(i) == proposerIdx {
   360  			// NOTE: test validators are MockPV, which by default doesn't
   361  			// do any safety checks.
   362  			css[i].privValidator.(types.MockPV).DisableChecks()
   363  			css[i].decideProposal = func(j int32) func(int64, int32) {
   364  				return func(height int64, round int32) {
   365  					byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
   366  				}
   367  			}(int32(i))
   368  			// We are setting the prevote function to do nothing because the prevoting
   369  			// and precommitting are done alongside the proposal.
   370  			css[i].doPrevote = func(height int64, round int32) {}
   371  		}
   372  
   373  		eventBus := css[i].eventBus
   374  		eventBus.SetLogger(logger.With("module", "events", "validator", i))
   375  
   376  		var err error
   377  		blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
   378  		require.NoError(t, err)
   379  
   380  		conR := NewReactor(css[i], true, true, 1000) // so we don't start the consensus states
   381  		conR.SetLogger(logger.With("validator", i))
   382  		conR.SetEventBus(eventBus)
   383  
   384  		var conRI p2p.Reactor = conR
   385  
   386  		// wrap the byzantine proposer's reactor
   387  		if int32(i) == proposerIdx {
   388  			conRI = NewByzantineReactor(conR)
   389  		}
   390  
   391  		reactors[i] = conRI
   392  		err = css[i].blockExec.Store().Save(css[i].state) // save height 1's validator info, as the handshake would
   393  		require.NoError(t, err)
   394  	}
   395  
   396  	defer func() {
   397  		for _, r := range reactors {
   398  			if rr, ok := r.(*ByzantineReactor); ok {
   399  				err := rr.reactor.Switch.Stop()
   400  				require.NoError(t, err)
   401  			} else {
   402  				err := r.(*Reactor).Switch.Stop()
   403  				require.NoError(t, err)
   404  			}
   405  		}
   406  	}()
   407  
   408  	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch, config *cfg.P2PConfig) *p2p.Switch {
   409  		// ignore new switch s, we already made ours
   410  		switches[i].AddReactor("CONSENSUS", reactors[i])
   411  		return switches[i]
   412  	}, func(sws []*p2p.Switch, i, j int) {
   413  		// the network starts partitioned, with a globally active adversary
   414  		if int32(i) != proposerIdx && int32(j) != proposerIdx {
   415  			return
   416  		}
   417  		p2p.Connect2Switches(sws, i, j)
   418  	})
   419  
   420  	// start the non-byz state machines.
   421  	// note these must be started before the byzantine one
   422  	for i := 0; i < N; i++ {
   423  		if int32(i) != proposerIdx {
   424  			cr := reactors[i].(*Reactor)
   425  			cr.SwitchToConsensus(cr.conS.GetState(), false)
   426  		}
   427  	}
   428  
   429  	// start the byzantine state machine
   430  	byzR := reactors[proposerIdx].(*ByzantineReactor)
   431  	s := byzR.reactor.conS.GetState()
   432  	byzR.reactor.SwitchToConsensus(s, false)
   433  
   434  	// byz proposer sends one block to peers[0]
   435  	// and the other block to peers[1] and peers[2].
   436  	// note the ordering of peers and switches doesn't match.
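        	// getSwitchIndex maps each peer back to its index in the switches slice so the
        	// partitions can be described in terms of validator indices.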
   437  	peers := switches[proposerIdx].Peers().List()
   438  
   439  	// partition A
   440  	ind0 := getSwitchIndex(switches, peers[0])
   441  
   442  	// partition B
   443  	ind1 := getSwitchIndex(switches, peers[1])
   444  	ind2 := getSwitchIndex(switches, peers[2])
   445  	p2p.Connect2Switches(switches, ind1, ind2)
   446  
   447  	// FIXME: test stops at the following step after the introduction of VRF elections
   448  	// wait for someone in the big partition (B) to make a block
   449  	<-blocksSubs[ind2].Out()
   450  
   451  	t.Log("A block has been committed. Healing partition")
   452  	p2p.Connect2Switches(switches, ind0, ind1)
   453  	p2p.Connect2Switches(switches, ind0, ind2)
   454  
   455  	// wait till everyone makes the first new block
   456  	// (one of them already has)
   457  	wg := new(sync.WaitGroup)
   458  	for i := 0; i < N-1; i++ {
   459  		if int32(i) != proposerIdx {
   460  			wg.Add(1)
   461  			go func(j int) {
   462  				<-blocksSubs[j].Out()
   463  				wg.Done()
   464  			}(i)
   465  		}
   466  	}
   467  
   468  	done := make(chan struct{})
   469  	go func() {
   470  		wg.Wait()
   471  		close(done)
   472  	}()
   473  
   474  	tick := time.NewTicker(time.Second * 10)
   475  	select {
   476  	case <-done:
   477  	case <-tick.C:
   478  		for i, reactor := range reactors {
   479  			t.Logf("Consensus Reactor %v", i)
   480  			t.Logf("%v", reactor)
   481  		}
   482  		t.Fatalf("Timed out waiting for all validators to commit first block")
   483  	}
   484  }
   485  
   486  // find proposer of current height and round from State
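        // Ostracon picks proposers with a VRF (SelectProposer over the last proof hash), so the
        // byzantine tests look the first proposer up at runtime instead of assuming a fixed index.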
   487  func findProposer(state *State) (int32, *types.Validator) {
   488  	proposer := state.Validators.SelectProposer(state.state.LastProofHash, state.Height, state.Round)
   489  	return state.Validators.GetByAddress(proposer.PubKey.Address())
   490  }
   491  
   492  //-------------------------------
   493  // byzantine consensus functions
   494  
   495  func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) {
   496  	// The byzantine validator creates two proposals and tries to split the vote.
   497  	// Avoid sending on internalMsgQueue and running consensus state.
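        	// The proposals are made to differ by delivering an extra tx to the mempool in between
        	// (deliverTxsRange below); each half of the peer set then receives a different proposal
        	// along with matching prevotes and precommits.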
   498  
   499  	// Create a new proposal block from state/txs from the mempool.
   500  	block1, blockParts1 := cs.createProposalBlock(round)
   501  	polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
   502  	proposal1 := types.NewProposal(height, round, polRound, propBlockID)
   503  	p1 := proposal1.ToProto()
   504  	if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
   505  		t.Error(err)
   506  	}
   507  
   508  	proposal1.Signature = p1.Signature
   509  
   510  	// some new transactions come in (this ensures that the proposals are different)
   511  	deliverTxsRange(cs, 0, 1)
   512  
   513  	// Create a new proposal block from state/txs from the mempool.
   514  	block2, blockParts2 := cs.createProposalBlock(round)
   515  	polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
   516  	proposal2 := types.NewProposal(height, round, polRound, propBlockID)
   517  	p2 := proposal2.ToProto()
   518  	if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
   519  		t.Error(err)
   520  	}
   521  
   522  	proposal2.Signature = p2.Signature
   523  
   524  	block1Hash := block1.Hash()
   525  	block2Hash := block2.Hash()
   526  
   527  	// broadcast conflicting proposals/block parts to peers
   528  	peers := sw.Peers().List()
   529  	t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
   530  	for i, peer := range peers {
   531  		if i < len(peers)/2 {
   532  			go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
   533  		} else {
   534  			go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
   535  		}
   536  	}
   537  }
   538  
   539  func sendProposalAndParts(
   540  	height int64,
   541  	round int32,
   542  	cs *State,
   543  	peer p2p.Peer,
   544  	proposal *types.Proposal,
   545  	blockHash []byte,
   546  	parts *types.PartSet,
   547  ) {
   548  	// proposal
   549  	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   550  		ChannelID: DataChannel,
   551  		Message:   &tmcons.Proposal{Proposal: *proposal.ToProto()},
   552  	}, cs.Logger)
   553  
   554  	// parts
   555  	for i := 0; i < int(parts.Total()); i++ {
   556  		part := parts.GetPart(i)
   557  		pp, err := part.ToProto()
   558  		if err != nil {
   559  			panic(err) // TODO: wbanfield better error handling
   560  		}
   561  		p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   562  			ChannelID: DataChannel,
   563  			Message: &tmcons.BlockPart{
   564  				Height: height, // This tells the peer which height the part belongs to.
   565  				Round:  round,  // This tells the peer which round the part belongs to.
   566  				Part:   *pp,
   567  			},
   568  		}, cs.Logger)
   569  	}
   570  
   571  	// votes
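        	// signVote reads consensus state and this helper runs in its own goroutine,
        	// so hold the state mutex while signing.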
   572  	cs.mtx.Lock()
   573  	prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
   574  	precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
   575  	cs.mtx.Unlock()
   576  	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   577  		ChannelID: VoteChannel,
   578  		Message:   &tmcons.Vote{Vote: prevote.ToProto()},
   579  	}, cs.Logger)
   580  	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   581  		ChannelID: VoteChannel,
   582  		Message:   &tmcons.Vote{Vote: precommit.ToProto()},
   583  	}, cs.Logger)
   584  }
   585  
   586  //----------------------------------------
   587  // byzantine consensus reactor
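        //
        // ByzantineReactor wraps a normal Reactor but provides its own AddPeer: it registers
        // PeerState and announces its round step, yet it does not start the usual per-peer gossip
        // routines, so the byzantine node only sends what the test pushes to peers explicitly.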
   588  
   589  type ByzantineReactor struct {
   590  	service.Service
   591  	reactor *Reactor
   592  }
   593  
   594  func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
   595  	return &ByzantineReactor{
   596  		Service: conR,
   597  		reactor: conR,
   598  	}
   599  }
   600  
   601  func (br *ByzantineReactor) SetSwitch(s *p2p.Switch)               { br.reactor.SetSwitch(s) }
   602  func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
   603  func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
   604  	if !br.reactor.IsRunning() {
   605  		return
   606  	}
   607  
   608  	// Create peerState for peer
   609  	peerState := NewPeerState(peer).SetLogger(br.reactor.Logger)
   610  	peer.Set(types.PeerStateKey, peerState)
   611  
   612  	// Send our state to peer.
   613  	// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
   614  	if !br.reactor.waitSync {
   615  		br.reactor.sendNewRoundStepMessage(peer)
   616  	}
   617  }
   618  func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
   619  	br.reactor.RemovePeer(peer, reason)
   620  }
   621  func (br *ByzantineReactor) ReceiveEnvelope(e p2p.Envelope) {
   622  	br.reactor.ReceiveEnvelope(e)
   623  }
   624  func (br *ByzantineReactor) Receive(chID byte, p p2p.Peer, m []byte) {
   625  	br.reactor.Receive(chID, p, m)
   626  }
   627  func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }
   628  func (br *ByzantineReactor) RecvRoutine() {
   629  	br.reactor.RecvRoutine()
   630  }
   631  
   632  func (br *ByzantineReactor) GetRecvChan() chan *p2p.BufferedMsg {
   633  	return br.reactor.GetRecvChan()
   634  }