github.com/noirx94/tendermintmp@v0.0.1/test/maverick/consensus/reactor.go

package consensus

import (
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/gogo/protobuf/proto"

	tmcon "github.com/tendermint/tendermint/consensus"
	cstypes "github.com/tendermint/tendermint/consensus/types"
	"github.com/tendermint/tendermint/libs/bits"
	tmevents "github.com/tendermint/tendermint/libs/events"
	tmjson "github.com/tendermint/tendermint/libs/json"
	"github.com/tendermint/tendermint/libs/log"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p"
	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

const (
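	// Channel IDs used by the consensus reactor. Each ID must be unique
	// across all reactors registered on the p2p switch.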
	StateChannel       = byte(0x20)
	DataChannel        = byte(0x21)
	VoteChannel        = byte(0x22)
	VoteSetBitsChannel = byte(0x23)

	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.

	blocksToContributeToBecomeGoodPeer = 10000
	votesToContributeToBecomeGoodPeer  = 10000
)

//-----------------------------------------------------------------------------

// Reactor defines a reactor for the consensus service.
type Reactor struct {
	p2p.BaseReactor // BaseService + p2p.Switch

	conS *State

	mtx      tmsync.RWMutex
	waitSync bool
	eventBus *types.EventBus

	Metrics *tmcon.Metrics
}

type ReactorOption func(*Reactor)

// NewReactor returns a new Reactor with the given consensusState.
func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor {
	conR := &Reactor{
		conS:     consensusState,
		waitSync: waitSync,
		Metrics:  tmcon.NopMetrics(),
	}
	conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)

	for _, option := range options {
		option(conR)
	}

	return conR
}

// OnStart implements BaseService by subscribing to events, which will later be
// broadcast to other peers, and by starting state if we're not in fast sync.
func (conR *Reactor) OnStart() error {
	conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())

	// start routine that computes peer statistics for evaluating peer quality
	go conR.peerStatsRoutine()

	conR.subscribeToBroadcastEvents()

	if !conR.WaitSync() {
		conR.conS.SetSwitch(conR.Switch)
		err := conR.conS.Start()
		if err != nil {
			return err
		}
	}

	return nil
}

// OnStop implements BaseService by unsubscribing from events and stopping
// state.
func (conR *Reactor) OnStop() {
	conR.unsubscribeFromBroadcastEvents()
	if err := conR.conS.Stop(); err != nil {
		conR.Logger.Error("Error stopping consensus state", "err", err)
	}
	if !conR.WaitSync() {
		conR.conS.Wait()
	}
}

// SwitchToConsensus switches from fast_sync mode to consensus mode.
// It resets the state, turns off fast_sync, and starts the consensus state-machine.
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
	conR.Logger.Info("SwitchToConsensus")

	// We have no votes, so reconstruct LastCommit from SeenCommit.
	if state.LastBlockHeight > 0 {
		conR.conS.reconstructLastCommit(state)
	}

	// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
	// NewRoundStepMessage.
	conR.conS.updateToState(state)

	conR.mtx.Lock()
	conR.waitSync = false
	conR.mtx.Unlock()
	conR.Metrics.FastSyncing.Set(0)
	conR.Metrics.StateSyncing.Set(0)

	if skipWAL {
		conR.conS.doWALCatchup = false
	}
	conR.conS.SetSwitch(conR.Switch)
	err := conR.conS.Start()
	if err != nil {
		panic(fmt.Sprintf(`Failed to start consensus state: %v

conS:
%+v

conR:
%+v`, err, conR.conS, conR))
	}
}

// GetChannels implements Reactor
func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	// TODO optimize
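	// Four channels, each with its own priority and buffering: DataChannel
	// (block parts and proposals) has the highest priority, followed by
	// VoteChannel, then StateChannel (round-step and vote-summary messages),
	// with VoteSetBitsChannel lowest since it only answers maj23 queries.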
	return []*p2p.ChannelDescriptor{
		{
			ID:                  StateChannel,
			Priority:            6,
			SendQueueCapacity:   100,
			RecvMessageCapacity: maxMsgSize,
		},
		{
			ID: DataChannel, // maybe split between gossiping current block and catchup stuff
			// once we gossip the whole block there's nothing left to send until next height or round
			Priority:            10,
			SendQueueCapacity:   100,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: maxMsgSize,
		},
		{
			ID:                  VoteChannel,
			Priority:            7,
			SendQueueCapacity:   100,
			RecvBufferCapacity:  100 * 100,
			RecvMessageCapacity: maxMsgSize,
		},
		{
			ID:                  VoteSetBitsChannel,
			Priority:            1,
			SendQueueCapacity:   2,
			RecvBufferCapacity:  1024,
			RecvMessageCapacity: maxMsgSize,
		},
	}
}

// InitPeer implements Reactor by creating a state for the peer.
func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	peerState := NewPeerState(peer).SetLogger(conR.Logger)
	peer.Set(types.PeerStateKey, peerState)
	return peer
}

// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
// peer.
func (conR *Reactor) AddPeer(peer p2p.Peer) {
	if !conR.IsRunning() {
		return
	}

	peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("peer %v has no state", peer))
	}
	// Begin routines for this peer.
	go conR.gossipDataRoutine(peer, peerState)
	go conR.gossipVotesRoutine(peer, peerState)
	go conR.queryMaj23Routine(peer, peerState)

	// Send our state to peer.
	// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
	if !conR.WaitSync() {
		conR.sendNewRoundStepMessage(peer)
	}
}

// RemovePeer is a noop.
func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	if !conR.IsRunning() {
		return
	}
	// TODO
	// ps, ok := peer.Get(PeerStateKey).(*PeerState)
	// if !ok {
	// 	panic(fmt.Sprintf("Peer %v has no state", peer))
	// }
	// ps.Disconnect()
}

// Receive implements Reactor.
// NOTE: We process these messages even when we're fast_syncing.
// Messages affect either a peer state or the consensus state.
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes is ordered by the receiveRoutine.
// NOTE: blocks on consensus state for proposals, block parts, and votes.
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	if !conR.IsRunning() {
		conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
		return
	}

	msg, err := decodeMsg(msgBytes)
	if err != nil {
		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		conR.Switch.StopPeerForError(src, err)
		return
	}

	if err = msg.ValidateBasic(); err != nil {
		conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		conR.Switch.StopPeerForError(src, err)
		return
	}

	conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	// Get peer states
	ps, ok := src.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("Peer %v has no state", src))
	}

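	// Dispatch on the channel the message arrived on; each channel carries a
	// disjoint set of message types.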
	switch chID {
	case StateChannel:
		switch msg := msg.(type) {
		case *tmcon.NewRoundStepMessage:
			conR.conS.mtx.Lock()
			initialHeight := conR.conS.state.InitialHeight
			conR.conS.mtx.Unlock()
			if err = msg.ValidateHeight(initialHeight); err != nil {
				conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
				conR.Switch.StopPeerForError(src, err)
				return
			}
			ps.ApplyNewRoundStepMessage(msg)
		case *tmcon.NewValidBlockMessage:
			ps.ApplyNewValidBlockMessage(msg)
		case *tmcon.HasVoteMessage:
			ps.ApplyHasVoteMessage(msg)
		case *tmcon.VoteSetMaj23Message:
			cs := conR.conS
			cs.mtx.Lock()
			height, votes := cs.Height, cs.Votes
			cs.mtx.Unlock()
			if height != msg.Height {
				return
			}
			// Peer claims to have a maj23 for some BlockID at H,R,S,
			err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
			if err != nil {
				conR.Switch.StopPeerForError(src, err)
				return
			}
			// Respond with a VoteSetBitsMessage showing which votes we have.
			// (and consequently shows which we don't have)
			var ourVotes *bits.BitArray
			switch msg.Type {
			case tmproto.PrevoteType:
				ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
			case tmproto.PrecommitType:
				ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
			default:
				panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
			}
			src.TrySend(VoteSetBitsChannel, tmcon.MustEncode(&tmcon.VoteSetBitsMessage{
				Height:  msg.Height,
				Round:   msg.Round,
				Type:    msg.Type,
				BlockID: msg.BlockID,
				Votes:   ourVotes,
			}))
		default:
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case DataChannel:
		if conR.WaitSync() {
			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *tmcon.ProposalMessage:
			ps.SetHasProposal(msg.Proposal)
			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
		case *tmcon.ProposalPOLMessage:
			ps.ApplyProposalPOLMessage(msg)
		case *tmcon.BlockPartMessage:
			ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
			conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
		default:
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case VoteChannel:
		if conR.WaitSync() {
			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *tmcon.VoteMessage:
			cs := conR.conS
			cs.mtx.RLock()
			height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size()
			cs.mtx.RUnlock()
			ps.EnsureVoteBitArrays(height, valSize)
			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
			ps.SetHasVote(msg.Vote)

			cs.peerMsgQueue <- msgInfo{msg, src.ID()}

		default:
			// don't punish (leave room for soft upgrades)
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case VoteSetBitsChannel:
		if conR.WaitSync() {
			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *tmcon.VoteSetBitsMessage:
			cs := conR.conS
			cs.mtx.Lock()
			height, votes := cs.Height, cs.Votes
			cs.mtx.Unlock()

			if height == msg.Height {
				var ourVotes *bits.BitArray
				switch msg.Type {
				case tmproto.PrevoteType:
					ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
				case tmproto.PrecommitType:
					ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
				default:
					panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
				}
				ps.ApplyVoteSetBitsMessage(msg, ourVotes)
			} else {
				ps.ApplyVoteSetBitsMessage(msg, nil)
			}
		default:
			// don't punish (leave room for soft upgrades)
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	default:
		conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
	}
}

// SetEventBus sets the event bus.
func (conR *Reactor) SetEventBus(b *types.EventBus) {
	conR.eventBus = b
	conR.conS.SetEventBus(b)
}

// WaitSync returns whether the consensus reactor is waiting for state/fast sync.
func (conR *Reactor) WaitSync() bool {
	conR.mtx.RLock()
	defer conR.mtx.RUnlock()
	return conR.waitSync
}

//--------------------------------------

// subscribeToBroadcastEvents subscribes to new round steps, valid blocks, and
// votes using the internal pubsub defined on the consensus state, so they can
// be broadcast to peers upon receipt.
func (conR *Reactor) subscribeToBroadcastEvents() {
	const subscriber = "consensus-reactor"
	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
		func(data tmevents.EventData) {
			conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
		}); err != nil {
		conR.Logger.Error("Error adding listener for events", "err", err)
	}

	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
		func(data tmevents.EventData) {
			conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
		}); err != nil {
		conR.Logger.Error("Error adding listener for events", "err", err)
	}

	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
		func(data tmevents.EventData) {
			conR.broadcastHasVoteMessage(data.(*types.Vote))
		}); err != nil {
		conR.Logger.Error("Error adding listener for events", "err", err)
	}
}

func (conR *Reactor) unsubscribeFromBroadcastEvents() {
	const subscriber = "consensus-reactor"
	conR.conS.evsw.RemoveListener(subscriber)
}

func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
	nrsMsg := makeRoundStepMessage(rs)
	conR.Switch.Broadcast(StateChannel, tmcon.MustEncode(nrsMsg))
}

func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
	csMsg := &tmcon.NewValidBlockMessage{
		Height:             rs.Height,
		Round:              rs.Round,
		BlockPartSetHeader: rs.ProposalBlockParts.Header(),
		BlockParts:         rs.ProposalBlockParts.BitArray(),
		IsCommit:           rs.Step == cstypes.RoundStepCommit,
	}
	conR.Switch.Broadcast(StateChannel, tmcon.MustEncode(csMsg))
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
	msg := &tmcon.HasVoteMessage{
		Height: vote.Height,
		Round:  vote.Round,
		Type:   vote.Type,
		Index:  vote.ValidatorIndex,
	}
	conR.Switch.Broadcast(StateChannel, tmcon.MustEncode(msg))
	/*
		// TODO: Make this broadcast more selective.
		for _, peer := range conR.Switch.Peers().List() {
			ps, ok := peer.Get(PeerStateKey).(*PeerState)
			if !ok {
				panic(fmt.Sprintf("Peer %v has no state", peer))
			}
			prs := ps.GetRoundState()
			if prs.Height == vote.Height {
				// TODO: Also filter on round?
				peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
			} else {
				// Height doesn't match
				// TODO: check a field, maybe CatchupCommitRound?
				// TODO: But that requires changing the struct field comment.
			}
		}
	*/
}

func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcon.NewRoundStepMessage) {
	nrsMsg = &tmcon.NewRoundStepMessage{
		Height:                rs.Height,
		Round:                 rs.Round,
		Step:                  rs.Step,
		SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
		LastCommitRound:       rs.LastCommit.GetRound(),
	}
	return
}

func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
	rs := conR.conS.GetRoundState()
	nrsMsg := makeRoundStepMessage(rs)
	peer.Send(StateChannel, tmcon.MustEncode(nrsMsg))
}

func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

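	// Each iteration sends at most one message to the peer: a block part for
	// the current height, catchup data for an earlier height, or the Proposal
	// and ProposalPOL; otherwise it sleeps for PeerGossipSleepDuration.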
OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Info("Stopping gossipDataRoutine for peer")
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		// Send proposal Block parts?
		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
				part := rs.ProposalBlockParts.GetPart(index)
				msg := &tmcon.BlockPartMessage{
					Height: rs.Height, // This tells peer that this part applies to us.
					Round:  rs.Round,  // This tells peer that this part applies to us.
					Part:   part,
				}
				logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
				if peer.Send(DataChannel, tmcon.MustEncode(msg)) {
					ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
				}
				continue OUTER_LOOP
			}
		}

		// If the peer is on a previous height that we have, help catch up.
		if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) {
			heightLogger := logger.With("height", prs.Height)

			// If we never received the commit message from the peer, the block parts won't be initialized.
			if prs.ProposalBlockParts == nil {
				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
				if blockMeta == nil {
					heightLogger.Error("Failed to load block meta",
						"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
					time.Sleep(conR.conS.config.PeerGossipSleepDuration)
				} else {
					ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader)
				}
				// Continue the loop since prs is a copy and is not affected by this initialization.
				continue OUTER_LOOP
			}
			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
			continue OUTER_LOOP
		}

		// If height and round don't match, sleep.
		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			continue OUTER_LOOP
		}

		// By here, height and round match.
		// Proposal block parts were already matched and sent if any were wanted.
		// (These can match on hash so the round doesn't matter)
		// Now consider sending other things, like the Proposal itself.

		// Send Proposal && ProposalPOL BitArray?
		if rs.Proposal != nil && !prs.Proposal {
			// Proposal: share the proposal metadata with peer.
			{
				msg := &tmcon.ProposalMessage{Proposal: rs.Proposal}
				logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
				if peer.Send(DataChannel, tmcon.MustEncode(msg)) {
					// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
					ps.SetHasProposal(rs.Proposal)
				}
			}
			// ProposalPOL: lets peer know which POL votes we have so far.
			// Peer must receive ProposalMessage first.
			// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
			// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
			if 0 <= rs.Proposal.POLRound {
				msg := &tmcon.ProposalPOLMessage{
					Height:           rs.Height,
					ProposalPOLRound: rs.Proposal.POLRound,
					ProposalPOL:      rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
				}
				logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
				peer.Send(DataChannel, tmcon.MustEncode(msg))
			}
			continue OUTER_LOOP
		}

		// Nothing to do. Sleep.
		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
		continue OUTER_LOOP
	}
}

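// gossipDataForCatchup sends the peer a single random block part that it is
// missing for its (earlier) height, after checking the peer's claimed
// PartSetHeader against the block meta in our store.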
func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
	prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) {

	if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
		// Ensure that the peer's PartSetHeader is correct
		blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
		if blockMeta == nil {
			logger.Error("Failed to load block meta", "ourHeight", rs.Height,
				"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		} else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) {
			logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping",
				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		}
		// Load the part
		part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
		if part == nil {
			logger.Error("Could not load part", "index", index,
				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		}
		// Send the part
		msg := &tmcon.BlockPartMessage{
			Height: prs.Height, // Not our height, so it doesn't matter.
			Round:  prs.Round,  // Not our height, so it doesn't matter.
			Part:   part,
		}
		logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
		if peer.Send(DataChannel, tmcon.MustEncode(msg)) {
			ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
		} else {
			logger.Debug("Sending block part for catchup failed")
		}
		return
	}
	time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}

func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

	// Simple hack to throttle logs upon sleep.
	var sleeping = 0
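	// sleeping throttles the "No votes to send" log line: 0 while active,
	// 1 on the first sleeping iteration (which logs), and 2 on subsequent
	// sleeping iterations (which stay quiet).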

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Info("Stopping gossipVotesRoutine for peer")
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		switch sleeping {
		case 1: // First sleep
			sleeping = 2
		case 2: // No more sleep
			sleeping = 0
		}

		// If height matches, then send LastCommit, Prevotes, Precommits.
		if rs.Height == prs.Height {
			heightLogger := logger.With("height", prs.Height)
			if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
				continue OUTER_LOOP
			}
		}

		// Special catchup logic.
		// If peer is lagging by height 1, send LastCommit.
		if prs.Height != 0 && rs.Height == prs.Height+1 {
			if ps.PickSendVote(rs.LastCommit) {
				logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
				continue OUTER_LOOP
			}
		}

		// Catchup logic
		// If peer is lagging by more than 1, send Commit.
		if prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= conR.conS.blockStore.Base() {
			// Load the block commit for prs.Height,
			// which contains precommit signatures for prs.Height.
			if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
				if ps.PickSendVote(commit) {
					logger.Debug("Picked Catchup commit to send", "height", prs.Height)
					continue OUTER_LOOP
				}
			}
		}

		if sleeping == 0 {
			// We sent nothing. Sleep...
			sleeping = 1
			logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height,
				"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
				"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
		} else if sleeping == 2 {
			// Continued sleep...
			sleeping = 1
		}

		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
		continue OUTER_LOOP
	}
}

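// gossipVotesForHeight sends the peer at most one pick from the vote sets for
// the current height, trying LastCommit, POL prevotes, round prevotes, and
// round precommits in order. It returns true if a vote was sent.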
func (conR *Reactor) gossipVotesForHeight(
	logger log.Logger,
	rs *cstypes.RoundState,
	prs *cstypes.PeerRoundState,
	ps *PeerState,
) bool {

	// If there are lastCommits to send...
	if prs.Step == cstypes.RoundStepNewHeight {
		if ps.PickSendVote(rs.LastCommit) {
			logger.Debug("Picked rs.LastCommit to send")
			return true
		}
	}
	// If there are POL prevotes to send...
	if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 {
		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
			if ps.PickSendVote(polPrevotes) {
				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
					"round", prs.ProposalPOLRound)
				return true
			}
		}
	}
	// If there are prevotes to send...
	if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are precommits to send...
	if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
			logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are prevotes to send... (needed because of the validBlock mechanism)
	if prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are POLPrevotes to send...
	if prs.ProposalPOLRound != -1 {
		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
			if ps.PickSendVote(polPrevotes) {
				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
					"round", prs.ProposalPOLRound)
				return true
			}
		}
	}

	return false
}

// NOTE: `queryMaj23Routine` has a simple, crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Info("Stopping queryMaj23Routine for peer")
			return
		}

		// Maybe send Height/Round/Prevotes
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height {
				if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
					peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   prs.Round,
						Type:    tmproto.PrevoteType,
						BlockID: maj23,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// Maybe send Height/Round/Precommits
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height {
				if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
					peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   prs.Round,
						Type:    tmproto.PrecommitType,
						BlockID: maj23,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// Maybe send Height/Round/ProposalPOL
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
				if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
					peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   prs.ProposalPOLRound,
						Type:    tmproto.PrevoteType,
						BlockID: maj23,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// Little point sending LastCommitRound/LastCommit;
		// these are fleeting and non-blocking.

		// Maybe send Height/CatchupCommitRound/CatchupCommit.
		{
			prs := ps.GetRoundState()
			if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
				prs.Height >= conR.conS.blockStore.Base() {
				if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
					peer.TrySend(StateChannel, tmcon.MustEncode(&tmcon.VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   commit.Round,
						Type:    tmproto.PrecommitType,
						BlockID: commit.BlockID,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)

		continue OUTER_LOOP
	}
}

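// peerStatsRoutine drains the consensus state's statsMsgQueue and marks a peer
// as good with the switch each time it crosses a votes or block-parts
// contribution threshold.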
func (conR *Reactor) peerStatsRoutine() {
	for {
		if !conR.IsRunning() {
			conR.Logger.Info("Stopping peerStatsRoutine")
			return
		}

		select {
		case msg := <-conR.conS.statsMsgQueue:
			// Get peer
			peer := conR.Switch.Peers().Get(msg.PeerID)
			if peer == nil {
				conR.Logger.Debug("Attempt to update stats for non-existent peer",
					"peer", msg.PeerID)
				continue
			}
			// Get peer state
			ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
			if !ok {
				panic(fmt.Sprintf("Peer %v has no state", peer))
			}
			switch msg.Msg.(type) {
			case *tmcon.VoteMessage:
				if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
					conR.Switch.MarkPeerAsGood(peer)
				}
			case *tmcon.BlockPartMessage:
				if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
					conR.Switch.MarkPeerAsGood(peer)
				}
			}
		case <-conR.conS.Quit():
			return

		case <-conR.Quit():
			return
		}
	}
}

// String returns a string representation of the Reactor.
// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
// TODO: improve!
func (conR *Reactor) String() string {
	// better not to access shared variables
	return "ConsensusReactor" // conR.StringIndented("")
}

// StringIndented returns an indented string representation of the Reactor
func (conR *Reactor) StringIndented(indent string) string {
	s := "ConsensusReactor{\n"
	s += indent + "  " + conR.conS.StringIndented(indent+"  ") + "\n"
	for _, peer := range conR.Switch.Peers().List() {
		ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
		if !ok {
			panic(fmt.Sprintf("Peer %v has no state", peer))
		}
		s += indent + "  " + ps.StringIndented(indent+"  ") + "\n"
	}
	s += indent + "}"
	return s
}

// ReactorMetrics sets the metrics
func ReactorMetrics(metrics *tmcon.Metrics) ReactorOption {
	return func(conR *Reactor) { conR.Metrics = metrics }
}

//-----------------------------------------------------------------------------

var (
	ErrPeerStateHeightRegression = errors.New("error peer state height regression")
	ErrPeerStateInvalidStartTime = errors.New("error peer state invalid startTime")
)

// PeerState contains the known state of a peer, including its connection and
// thread-safe access to its PeerRoundState.
// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
// Be mindful of what you Expose.
type PeerState struct {
	peer   p2p.Peer
	logger log.Logger

	mtx   sync.Mutex             // NOTE: Modify below using setters, never directly.
	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
	Stats *peerStateStats        `json:"stats"`       // Exposed.
}

// peerStateStats holds internal statistics for a peer.
type peerStateStats struct {
	Votes      int `json:"votes"`
	BlockParts int `json:"block_parts"`
}

func (pss peerStateStats) String() string {
	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
		pss.Votes, pss.BlockParts)
}

// NewPeerState returns a new PeerState for the given Peer
func NewPeerState(peer p2p.Peer) *PeerState {
	return &PeerState{
		peer:   peer,
		logger: log.NewNopLogger(),
		PRS: cstypes.PeerRoundState{
			Round:              -1,
			ProposalPOLRound:   -1,
			LastCommitRound:    -1,
			CatchupCommitRound: -1,
		},
		Stats: &peerStateStats{},
	}
}

// SetLogger allows setting a logger on the peer state. Returns the peer state
// itself.
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
	ps.logger = logger
	return ps
}

// GetRoundState returns a shallow copy of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	prs := ps.PRS // copy
	return &prs
}

// ToJSON returns the JSON representation of the PeerState.
func (ps *PeerState) ToJSON() ([]byte, error) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return tmjson.Marshal(ps)
}

// GetHeight returns an atomic snapshot of the PeerRoundState's height,
// used by the mempool to ensure peers are caught up before broadcasting new txs.
func (ps *PeerState) GetHeight() int64 {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return ps.PRS.Height
}

// SetHasProposal sets the given proposal as known for the peer.
func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
		return
	}

	if ps.PRS.Proposal {
		return
	}

	ps.PRS.Proposal = true

	// ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage
	if ps.PRS.ProposalBlockParts != nil {
		return
	}

	ps.PRS.ProposalBlockPartSetHeader = proposal.BlockID.PartSetHeader
	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(proposal.BlockID.PartSetHeader.Total))
	ps.PRS.ProposalPOLRound = proposal.POLRound
	ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
}

// InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.ProposalBlockParts != nil {
		return
	}

	ps.PRS.ProposalBlockPartSetHeader = partSetHeader
	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(partSetHeader.Total))
}

// SetHasProposalBlockPart sets the given block part index as known for the peer.
func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != height || ps.PRS.Round != round {
		return
	}

	ps.PRS.ProposalBlockParts.SetIndex(index, true)
}

// PickSendVote picks a vote and sends it to the peer.
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
	if vote, ok := ps.PickVoteToSend(votes); ok {
		msg := &tmcon.VoteMessage{Vote: vote}
		ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
		if ps.peer.Send(VoteChannel, tmcon.MustEncode(msg)) {
			ps.SetHasVote(vote)
			return true
		}
		return false
	}
	return false
}

// PickVoteToSend picks a vote to send to the peer.
// Returns true if a vote was picked.
// NOTE: `votes` must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if votes.Size() == 0 {
		return nil, false
	}

	height, round, votesType, size :=
		votes.GetHeight(), votes.GetRound(), tmproto.SignedMsgType(votes.Type()), votes.Size()

	// Lazily set data using 'votes'.
	if votes.IsCommit() {
		ps.ensureCatchupCommitRound(height, round, size)
	}
	ps.ensureVoteBitArrays(height, size)

	psVotes := ps.getVoteBitArray(height, round, votesType)
	if psVotes == nil {
		return nil, false // Not something worth sending
	}
	if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
		return votes.GetByIndex(int32(index)), true
	}
	return nil, false
}

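// getVoteBitArray returns the bit-array this peer state tracks for the given
// height/round/type, or nil if nothing relevant is tracked: for the current
// height this may be the round's Prevotes/Precommits, the CatchupCommit, or
// the ProposalPOL; for height+1 it may be the LastCommit.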
func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray {
	if !types.IsVoteTypeValid(votesType) {
		return nil
	}

	if ps.PRS.Height == height {
		if ps.PRS.Round == round {
			switch votesType {
			case tmproto.PrevoteType:
				return ps.PRS.Prevotes
			case tmproto.PrecommitType:
				return ps.PRS.Precommits
			}
		}
		if ps.PRS.CatchupCommitRound == round {
			switch votesType {
			case tmproto.PrevoteType:
				return nil
			case tmproto.PrecommitType:
				return ps.PRS.CatchupCommit
			}
		}
		if ps.PRS.ProposalPOLRound == round {
			switch votesType {
			case tmproto.PrevoteType:
				return ps.PRS.ProposalPOL
			case tmproto.PrecommitType:
				return nil
			}
		}
		return nil
	}
	if ps.PRS.Height == height+1 {
		if ps.PRS.LastCommitRound == round {
			switch votesType {
			case tmproto.PrevoteType:
				return nil
			case tmproto.PrecommitType:
				return ps.PRS.LastCommit
			}
		}
		return nil
	}
	return nil
}

// 'round': A round for which we have a +2/3 commit.
func (ps *PeerState) ensureCatchupCommitRound(height int64, round int32, numValidators int) {
	if ps.PRS.Height != height {
		return
	}
	/*
		NOTE: This is wrong, 'round' could change.
		e.g. if orig round is not the same as block LastCommit round.
		if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
			panic(fmt.Sprintf(
				"Conflicting CatchupCommitRound. Height: %v,
				Orig: %v,
				New: %v",
				height,
				ps.CatchupCommitRound,
				round))
		}
	*/
	if ps.PRS.CatchupCommitRound == round {
		return // Nothing to do!
	}
	ps.PRS.CatchupCommitRound = round
	if round == ps.PRS.Round {
		ps.PRS.CatchupCommit = ps.PRS.Precommits
	} else {
		ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
	}
}

// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
// what votes this peer has received.
// NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height.
func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.ensureVoteBitArrays(height, numValidators)
}

func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
	if ps.PRS.Height == height {
		if ps.PRS.Prevotes == nil {
			ps.PRS.Prevotes = bits.NewBitArray(numValidators)
		}
		if ps.PRS.Precommits == nil {
			ps.PRS.Precommits = bits.NewBitArray(numValidators)
		}
		if ps.PRS.CatchupCommit == nil {
			ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
		}
		if ps.PRS.ProposalPOL == nil {
			ps.PRS.ProposalPOL = bits.NewBitArray(numValidators)
		}
	} else if ps.PRS.Height == height+1 {
		if ps.PRS.LastCommit == nil {
			ps.PRS.LastCommit = bits.NewBitArray(numValidators)
		}
	}
}

// RecordVote increments internal vote-related statistics for this peer.
// It returns the total number of added votes.
func (ps *PeerState) RecordVote() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.Stats.Votes++

	return ps.Stats.Votes
}

// VotesSent returns the number of votes the peer has sent us.
func (ps *PeerState) VotesSent() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return ps.Stats.Votes
}

// RecordBlockPart increments internal block-part-related statistics for this peer.
// It returns the total number of added block parts.
func (ps *PeerState) RecordBlockPart() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.Stats.BlockParts++
	return ps.Stats.BlockParts
}

// BlockPartsSent returns the number of useful block parts the peer has sent us.
func (ps *PeerState) BlockPartsSent() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return ps.Stats.BlockParts
}

// SetHasVote sets the given vote as known by the peer
func (ps *PeerState) SetHasVote(vote *types.Vote) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}

func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) {
	logger := ps.logger.With(
		"peerH/R",
		fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
		"H/R",
		fmt.Sprintf("%d/%d", height, round))
	logger.Debug("setHasVote", "type", voteType, "index", index)

	// NOTE: some may be nil BitArrays -> no side effects.
	psVotes := ps.getVoteBitArray(height, round, voteType)
	if psVotes != nil {
		psVotes.SetIndex(int(index), true)
	}
}

// ApplyNewRoundStepMessage updates the peer state for the new round.
func (ps *PeerState) ApplyNewRoundStepMessage(msg *tmcon.NewRoundStepMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	// Ignore duplicates or decreases
	if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 {
		return
	}

	// Just remember these values.
	psHeight := ps.PRS.Height
	psRound := ps.PRS.Round
	psCatchupCommitRound := ps.PRS.CatchupCommitRound
	psCatchupCommit := ps.PRS.CatchupCommit

	startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
	ps.PRS.Height = msg.Height
	ps.PRS.Round = msg.Round
	ps.PRS.Step = msg.Step
	ps.PRS.StartTime = startTime
	if psHeight != msg.Height || psRound != msg.Round {
		ps.PRS.Proposal = false
		ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{}
		ps.PRS.ProposalBlockParts = nil
		ps.PRS.ProposalPOLRound = -1
		ps.PRS.ProposalPOL = nil
		// We'll update the BitArray capacity later.
		ps.PRS.Prevotes = nil
		ps.PRS.Precommits = nil
	}
	if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
		// Peer caught up to CatchupCommitRound.
		// Preserve psCatchupCommit!
		// NOTE: We prefer to use prs.Precommits if
		// pr.Round matches pr.CatchupCommitRound.
		ps.PRS.Precommits = psCatchupCommit
	}
	if psHeight != msg.Height {
		// Shift Precommits to LastCommit.
		if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
			ps.PRS.LastCommitRound = msg.LastCommitRound
			ps.PRS.LastCommit = ps.PRS.Precommits
		} else {
			ps.PRS.LastCommitRound = msg.LastCommitRound
			ps.PRS.LastCommit = nil
		}
		// We'll update the BitArray capacity later.
		ps.PRS.CatchupCommitRound = -1
		ps.PRS.CatchupCommit = nil
	}
}

// ApplyNewValidBlockMessage updates the peer state for the new valid block.
func (ps *PeerState) ApplyNewValidBlockMessage(msg *tmcon.NewValidBlockMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}

	if ps.PRS.Round != msg.Round && !msg.IsCommit {
		return
	}

	ps.PRS.ProposalBlockPartSetHeader = msg.BlockPartSetHeader
	ps.PRS.ProposalBlockParts = msg.BlockParts
}

// ApplyProposalPOLMessage updates the peer state for the new proposal POL.
func (ps *PeerState) ApplyProposalPOLMessage(msg *tmcon.ProposalPOLMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}
	if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound {
		return
	}

	// TODO: Merge onto existing ps.PRS.ProposalPOL?
	// We might have sent some prevotes in the meantime.
	ps.PRS.ProposalPOL = msg.ProposalPOL
}

// ApplyHasVoteMessage updates the peer state for the new vote.
func (ps *PeerState) ApplyHasVoteMessage(msg *tmcon.HasVoteMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}

	ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}

// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes
// it claims to have for the corresponding BlockID.
// `ourVotes` is a BitArray of votes we have for msg.BlockID
// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
// we conservatively overwrite ps's votes w/ msg.Votes.
func (ps *PeerState) ApplyVoteSetBitsMessage(msg *tmcon.VoteSetBitsMessage, ourVotes *bits.BitArray) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
	if votes != nil {
		if ourVotes == nil {
			votes.Update(msg.Votes)
		} else {
			otherVotes := votes.Sub(ourVotes)
			hasVotes := otherVotes.Or(msg.Votes)
			votes.Update(hasVotes)
		}
	}
}

// String returns a string representation of the PeerState
func (ps *PeerState) String() string {
	return ps.StringIndented("")
}

// StringIndented returns a string representation of the PeerState
func (ps *PeerState) StringIndented(indent string) string {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return fmt.Sprintf(`PeerState{
%s  Key        %v
%s  RoundState %v
%s  Stats      %v
%s}`,
		indent, ps.peer.ID(),
		indent, ps.PRS.StringIndented(indent+"  "),
		indent, ps.Stats,
		indent)
}

//-----------------------------------------------------------------------------

// func init() {
// 	tmjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage")
// 	tmjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage")
// 	tmjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal")
// 	tmjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL")
// 	tmjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart")
// 	tmjson.RegisterType(&VoteMessage{}, "tendermint/Vote")
// 	tmjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote")
// 	tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23")
// 	tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
// }

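// decodeMsg unmarshals the wire bytes into a tmcons.Message proto and converts
// it to the internal tmcon.Message type.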
func decodeMsg(bz []byte) (msg tmcon.Message, err error) {
	pb := &tmcons.Message{}
	if err = proto.Unmarshal(bz, pb); err != nil {
		return msg, err
	}

	return tmcon.MsgFromProto(pb)
}