github.com/line/ostracon@v1.0.10-0.20230328032236-7f20145f065d/consensus/byzantine_test.go

package consensus

import (
	"context"
	"fmt"
	"os"
	"path"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	dbm "github.com/tendermint/tm-db"

	config2 "github.com/line/ostracon/config"

	abcicli "github.com/line/ostracon/abci/client"
	"github.com/line/ostracon/evidence"
	"github.com/line/ostracon/libs/log"
	"github.com/line/ostracon/libs/service"
	tmsync "github.com/line/ostracon/libs/sync"
	mempl "github.com/line/ostracon/mempool"
	"github.com/line/ostracon/p2p"
	sm "github.com/line/ostracon/state"
	"github.com/line/ostracon/store"
	"github.com/line/ostracon/types"
)

//----------------------------------------------
// byzantine failures

// Byzantine node signs two different prevotes (nil and blockID) for the same
// height/round and sends each to a different half of its peers
func TestByzantinePrevoteEquivocation(t *testing.T) {
	const nValidators = 4
	const byzantineNode = 0
	const prevoteHeight = int64(2)
	testName := "consensus_byzantine_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter

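	// All validators share one genesis doc; each State below gets its own
	// in-memory databases and config directory.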
	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*State, nValidators)

	for i := 0; i < nValidators; i++ {
		logger := consensusLogger().With("test", "byzantine", "validator", i)
		stateDB := dbm.NewMemDB() // each state needs its own db
		stateStore := sm.NewStore(stateDB)
		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.OC2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		blockDB := dbm.NewMemDB()
		blockStore := store.NewBlockStore(blockDB)

		// one local ABCI client for the mempool and one for consensus; the
		// shared mutex serializes their requests to the app
		mtx := new(tmsync.Mutex)
		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

		// Make Mempool
		mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		// Make a full instance of the evidence pool
		evidenceDB := dbm.NewMemDB()
		evpool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
		require.NoError(t, err)
		evpool.SetLogger(logger.With("module", "evidence"))

		// Make State
		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
		// set private validator
		pv := privVals[i]
		cs.SetPrivValidator(pv)

		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
		err = eventBus.Start()
		require.NoError(t, err)
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())
		cs.SetLogger(logger)

		css[i] = cs
	}

	// initialize the reactors for each of the validators
	reactors := make([]*Reactor, nValidators)
	blocksSubs := make([]types.Subscription, 0)
	eventBuses := make([]*types.EventBus, nValidators)
	for i := 0; i < nValidators; i++ {
		reactors[i] = NewReactor(
			css[i],
			true,
			config.P2P.RecvAsync,
			config.P2P.ConsensusRecvBufSize,
		) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// eventBus is already started with the cs
		eventBuses[i] = css[i].eventBus
		reactors[i].SetEventBus(eventBuses[i])

		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, 100)
		require.NoError(t, err)
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
			err = css[i].blockExec.Store().Save(css[i].state)
			require.NoError(t, err)
		}
	}
	// make connected switches and start all reactors
	p2p.MakeConnectedSwitches(config.P2P, nValidators, func(i int, s *p2p.Switch, c *config2.P2PConfig) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(log.NewNopLogger().With("module", "p2p")) // Switch log is noisy for this test
		return s
	}, p2p.Connect2Switches)
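	// p2p.Connect2Switches links every pair of switches, so the validators
	// start out in a fully connected mesh.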

	// create byzantine validator
	bcs := css[byzantineNode]

	// alter prevote so that the byzantine node double votes when height is 2
	bcs.doPrevote = func(height int64, round int32) {
		// allow the first height to happen normally so that the byzantine validator is no longer the proposer
		if height == prevoteHeight {
			bcs.Logger.Info("Sending two votes")
			prevote1, err := bcs.signVote(tmproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header())
			require.NoError(t, err)
			prevote2, err := bcs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
			require.NoError(t, err)
			peerList := reactors[byzantineNode].Switch.Peers().List()
			bcs.Logger.Info("Getting peer list", "peers", peerList)
			// send the first vote to one half of the peers and the second vote to the other half
			for i, peer := range peerList {
				if i < len(peerList)/2 {
					bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
					peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
				} else {
					bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
					peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
				}
			}
		} else {
			bcs.Logger.Info("Behaving normally")
			bcs.defaultDoPrevote(height, round)
		}
	}
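
	// The conflicting prevotes gossip through the honest nodes; once a node
	// sees both, it creates DuplicateVoteEvidence against the byzantine
	// validator, which is what the checks below wait for.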

	// Introducing a lazy proposer means that the timestamp of the committed
	// block differs from the one the other nodes computed. This ensures that
	// the evidence that finally gets proposed still has a valid timestamp.
	lazyProposer := css[1]

	lazyProposer.decideProposal = func(height int64, round int32) {
		lazyProposer.Logger.Info("Lazy Proposer proposing condensed commit")
		if lazyProposer.privValidator == nil {
			panic("entered createProposalBlock with privValidator being nil")
		}

		var commit *types.Commit
		switch {
		case lazyProposer.Height == lazyProposer.state.InitialHeight:
			// We're creating a proposal for the first block.
			// The commit is empty, but not nil.
			commit = types.NewCommit(0, 0, types.BlockID{}, nil)
		case lazyProposer.LastCommit.HasTwoThirdsMajority():
			// Make the commit from LastCommit
			commit = lazyProposer.LastCommit.MakeCommit()
		default: // This shouldn't happen.
			lazyProposer.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
			return
		}

		// omit the last signature in the commit,
		// except for a proposal for the first block
		if commit.Signatures != nil {
			commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent()
		}
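		// With one signature replaced by an absent CommitSig, the block time
		// this proposer derives from the commit can differ from what the
		// other validators computed, exercising the evidence timestamp check.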

		if lazyProposer.privValidatorPubKey == nil {
			// If this node is a validator & proposer in the current round, it will
			// miss the opportunity to create a block.
			lazyProposer.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
			return
		}
		proposerAddr := lazyProposer.privValidatorPubKey.Address()

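		// Ostracon extension: the proposer attaches a VRF proof generated from
		// the current round's hash message; the proof feeds the VRF-based
		// proposer election.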
		message := lazyProposer.state.MakeHashMessage(lazyProposer.Round)
		proof, _ := lazyProposer.privValidator.GenerateVRFProof(message)
		block, blockParts := lazyProposer.blockExec.CreateProposalBlock(
			lazyProposer.Height, lazyProposer.state, commit, proposerAddr, lazyProposer.Round, proof, 0,
		)

		// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
		// and the privValidator will refuse to sign anything.
		if err := lazyProposer.wal.FlushAndSync(); err != nil {
			lazyProposer.Logger.Error("Error flushing to disk")
		}

		// Make proposal
		propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
		proposal := types.NewProposal(height, round, lazyProposer.ValidRound, propBlockID)
		p := proposal.ToProto()
		if err := lazyProposer.privValidator.SignProposal(lazyProposer.state.ChainID, p); err == nil {
			proposal.Signature = p.Signature

			// send proposal and block parts on internal msg queue
			lazyProposer.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""})
			for i := 0; i < int(blockParts.Total()); i++ {
				part := blockParts.GetPart(i)
				lazyProposer.sendInternalMessage(msgInfo{&BlockPartMessage{lazyProposer.Height, lazyProposer.Round, part}, ""})
			}
			lazyProposer.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
			lazyProposer.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
		} else if !lazyProposer.replayMode {
			lazyProposer.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
		}
	}

	// start the consensus reactors
	for i := 0; i < nValidators; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, false)
	}
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// Evidence should be submitted and committed at the third height, but
	// we will check the first six heights just in case
	evidenceFromEachValidator := make([]types.Evidence, nValidators)

	wg := new(sync.WaitGroup)
	for i := 0; i < nValidators; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for msg := range blocksSubs[i].Out() {
				block := msg.Data().(types.EventDataNewBlock).Block
				if len(block.Evidence.Evidence) != 0 {
					evidenceFromEachValidator[i] = block.Evidence.Evidence[0]
					return
				}
			}
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
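	// Closing done once the WaitGroup drains lets the select below race the
	// evidence collection against a timeout.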

	pubkey, err := bcs.privValidator.GetPubKey()
	require.NoError(t, err)

	select {
	case <-done:
		for idx, ev := range evidenceFromEachValidator {
			if assert.NotNil(t, ev, idx) {
				ev, ok := ev.(*types.DuplicateVoteEvidence)
				assert.True(t, ok)
				assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
				assert.Equal(t, prevoteHeight, ev.Height())
			}
		}
	case <-time.After(10 * time.Second): // XXX 20 seconds was too long, so we changed it to 10 seconds
		for i, reactor := range reactors {
			t.Logf("Consensus Reactor %d\n%v", i, reactor)
		}
		t.Fatalf("Timed out waiting for validators to commit evidence")
	}
}

// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
// The byzantine validator sends conflicting proposals into A and B,
// and prevotes/precommits on both of them.
// B sees a commit, A doesn't.
// Heal the partition and ensure A sees the commit.
func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
	N := 4
	logger := consensusLogger().With("test", "byzantine")
	app := newCounter
	css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
	defer cleanup()

	// get the proposer of the first block
	proposerIdx, _ := findProposer(css[0])
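	// the proposer of the first block is the validator made byzantine below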

	// give the byzantine validator a normal ticker
	ticker := NewTimeoutTicker()
	ticker.SetLogger(css[0].Logger)
	css[0].SetTimeoutTicker(ticker)

	switches := make([]*p2p.Switch, N)
	p2pLogger := logger.With("module", "p2p")
	for i := 0; i < N; i++ {
		switches[i] = p2p.MakeSwitch(
			config.P2P,
			i,
			"foo", "1.0.0",
			func(i int, sw *p2p.Switch, config *config2.P2PConfig) *p2p.Switch {
				return sw
			})
		switches[i].SetLogger(p2pLogger.With("validator", i))
	}

	blocksSubs := make([]types.Subscription, N)
	reactors := make([]p2p.Reactor, N)
	for i := 0; i < N; i++ {

		// enable txs so we can create different proposals
		assertMempool(css[i].txNotifier).EnableTxsAvailable()
		// make first val byzantine
		if int32(i) == proposerIdx {
			// NOTE: Now, test validators are MockPV, which by default doesn't
			// do any safety checks.
			css[i].privValidator.(types.MockPV).DisableChecks()
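			// The immediately-invoked wrapper copies the loop variable i into
			// j, so the closure below stays bound to this validator's state
			// and switch.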
			css[i].decideProposal = func(j int32) func(int64, int32) {
				return func(height int64, round int32) {
					byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
				}
			}(int32(i))
			// We are setting the prevote function to do nothing because the prevoting
			// and precommitting are done alongside the proposal.
			css[i].doPrevote = func(height int64, round int32) {}
		}

		eventBus := css[i].eventBus
		eventBus.SetLogger(logger.With("module", "events", "validator", i))

		var err error
		blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
		require.NoError(t, err)

		conR := NewReactor(css[i], true, true, 1000) // so we don't start the consensus states
		conR.SetLogger(logger.With("validator", i))
		conR.SetEventBus(eventBus)

		var conRI p2p.Reactor = conR

		// make first val byzantine
		if int32(i) == proposerIdx {
			conRI = NewByzantineReactor(conR)
		}

		reactors[i] = conRI
		err = css[i].blockExec.Store().Save(css[i].state) // to save height 1's validators info
		require.NoError(t, err)
	}

	defer func() {
		for _, r := range reactors {
			if rr, ok := r.(*ByzantineReactor); ok {
				err := rr.reactor.Switch.Stop()
				require.NoError(t, err)
			} else {
				err := r.(*Reactor).Switch.Stop()
				require.NoError(t, err)
			}
		}
	}()

	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch, config *config2.P2PConfig) *p2p.Switch {
		// ignore new switch s, we already made ours
		switches[i].AddReactor("CONSENSUS", reactors[i])
		return switches[i]
	}, func(sws []*p2p.Switch, i, j int) {
		// the network starts partitioned with a globally active adversary
		if int32(i) != proposerIdx && int32(j) != proposerIdx {
			return
		}
		p2p.Connect2Switches(sws, i, j)
	})
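	// Only edges that touch the byzantine proposer were created above, so the
	// honest validators initially communicate only through it.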

	// start the non-byz state machines.
	// note these must be started before the byz
	for i := 0; i < N; i++ {
		if int32(i) != proposerIdx {
			cr := reactors[i].(*Reactor)
			cr.SwitchToConsensus(cr.conS.GetState(), false)
		}
	}

	// start the byzantine state machine
	byzR := reactors[proposerIdx].(*ByzantineReactor)
	s := byzR.reactor.conS.GetState()
	byzR.reactor.SwitchToConsensus(s, false)

	// byz proposer sends one block to peers[0]
	// and the other block to peers[1] and peers[2].
	// note that the order of peers and switches doesn't match.
	peers := switches[proposerIdx].Peers().List()

	// partition A
	ind0 := getSwitchIndex(switches, peers[0])

	// partition B
	ind1 := getSwitchIndex(switches, peers[1])
	ind2 := getSwitchIndex(switches, peers[2])
	p2p.Connect2Switches(switches, ind1, ind2)
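	// B (ind1, ind2) is now internally connected; A (ind0) still reaches the
	// rest of the network only through the byzantine proposer.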

	// FIXME: test stops at the following step after the introduction of VRF elections
	// wait for someone in the big partition (B) to make a block
	<-blocksSubs[ind2].Out()

	t.Log("A block has been committed. Healing partition")
	p2p.Connect2Switches(switches, ind0, ind1)
	p2p.Connect2Switches(switches, ind0, ind2)

	// wait till everyone makes the first new block
	// (one of them already has)
	wg := new(sync.WaitGroup)
	for i := 0; i < N-1; i++ {
		if int32(i) != proposerIdx {
			wg.Add(1)
			go func(j int) {
				<-blocksSubs[j].Out()
				wg.Done()
			}(i)
		}
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	tick := time.NewTicker(time.Second * 10)
	defer tick.Stop() // release the ticker when the test returns
	select {
	case <-done:
	case <-tick.C:
		for i, reactor := range reactors {
			t.Logf("Consensus Reactor %v", i)
			t.Logf("%v", reactor)
		}
		t.Fatalf("Timed out waiting for all validators to commit first block")
	}
}

// find proposer of current height and round from State
func findProposer(state *State) (int32, *types.Validator) {
	proposer := state.Validators.SelectProposer(state.state.LastProofHash, state.Height, state.Round)
	return state.Validators.GetByAddress(proposer.PubKey.Address())
}

//-------------------------------
// byzantine consensus functions

func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) {
	// The byzantine proposer creates two proposals and tries to split the vote.
	// Avoid sending on internalMsgQueue and running consensus state.

	// Create a new proposal block from state/txs from the mempool.
	block1, blockParts1 := cs.createProposalBlock(round)
	polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
	proposal1 := types.NewProposal(height, round, polRound, propBlockID)
	p1 := proposal1.ToProto()
	if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
		t.Error(err)
	}

	proposal1.Signature = p1.Signature

	// some new transactions come in (this ensures that the proposals are different)
	deliverTxsRange(cs, 0, 1)

	// Create a new proposal block from state/txs from the mempool.
	block2, blockParts2 := cs.createProposalBlock(round)
	polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
	proposal2 := types.NewProposal(height, round, polRound, propBlockID)
	p2 := proposal2.ToProto()
	if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
		t.Error(err)
	}

	proposal2.Signature = p2.Signature

	block1Hash := block1.Hash()
	block2Hash := block2.Hash()

	// broadcast conflicting proposals/block parts to peers
	peers := sw.Peers().List()
	t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
	for i, peer := range peers {
		if i < len(peers)/2 {
			go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
		} else {
			go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
		}
	}
}

func sendProposalAndParts(
	height int64,
	round int32,
	cs *State,
	peer p2p.Peer,
	proposal *types.Proposal,
	blockHash []byte,
	parts *types.PartSet,
) {
	// proposal
	msg := &ProposalMessage{Proposal: proposal}
	peer.Send(DataChannel, MustEncode(msg))

	// parts
	for i := 0; i < int(parts.Total()); i++ {
		part := parts.GetPart(i)
		msg := &BlockPartMessage{
			Height: height, // This tells peer that this part applies to us.
			Round:  round,  // This tells peer that this part applies to us.
			Part:   part,
		}
		peer.Send(DataChannel, MustEncode(msg))
	}

	// votes
	cs.mtx.Lock()
	prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
	precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
	cs.mtx.Unlock()

	peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
	peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
}

//----------------------------------------
// byzantine consensus reactor

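// ByzantineReactor wraps a regular consensus Reactor and delegates most of the
// p2p.Reactor interface to it. Its own AddPeer only registers peer state and
// (when not syncing) announces the current round step; unlike the wrapped
// reactor's AddPeer, it does not start the per-peer gossip routines.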
type ByzantineReactor struct {
	service.Service
	reactor *Reactor
}

func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
	return &ByzantineReactor{
		Service: conR,
		reactor: conR,
	}
}

func (br *ByzantineReactor) SetSwitch(s *p2p.Switch)               { br.reactor.SetSwitch(s) }
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
	if !br.reactor.IsRunning() {
		return
	}

	// Create peerState for peer
	peerState := NewPeerState(peer).SetLogger(br.reactor.Logger)
	peer.Set(types.PeerStateKey, peerState)

	// Send our state to peer.
	// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
	if !br.reactor.waitSync {
		br.reactor.sendNewRoundStepMessage(peer)
	}
}
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	br.reactor.Receive(chID, peer, msgBytes)
}
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }
func (br *ByzantineReactor) RecvRoutine() {
	br.reactor.RecvRoutine()
}

func (br *ByzantineReactor) GetRecvChan() chan *p2p.BufferedMsg {
	return br.reactor.GetRecvChan()
}