github.com/badrootd/nibiru-cometbft@v0.37.5-0.20240307173500-2a75559eee9b/consensus/reactor.go

     1  package consensus
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"reflect"
     7  	"sync"
     8  	"time"
     9  
    10  	cstypes "github.com/badrootd/nibiru-cometbft/consensus/types"
    11  	"github.com/badrootd/nibiru-cometbft/libs/bits"
    12  	cmtevents "github.com/badrootd/nibiru-cometbft/libs/events"
    13  	cmtjson "github.com/badrootd/nibiru-cometbft/libs/json"
    14  	"github.com/badrootd/nibiru-cometbft/libs/log"
    15  	cmtsync "github.com/badrootd/nibiru-cometbft/libs/sync"
    16  	"github.com/badrootd/nibiru-cometbft/p2p"
    17  	cmtcons "github.com/badrootd/nibiru-cometbft/proto/tendermint/consensus"
    18  	cmtproto "github.com/badrootd/nibiru-cometbft/proto/tendermint/types"
    19  	sm "github.com/badrootd/nibiru-cometbft/state"
    20  	"github.com/badrootd/nibiru-cometbft/types"
    21  	cmttime "github.com/badrootd/nibiru-cometbft/types/time"
    22  )
    23  
    24  const (
    25  	StateChannel       = byte(0x20)
    26  	DataChannel        = byte(0x21)
    27  	VoteChannel        = byte(0x22)
    28  	VoteSetBitsChannel = byte(0x23)
    29  
    30  	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
    31  
    32  	blocksToContributeToBecomeGoodPeer = 10000
    33  	votesToContributeToBecomeGoodPeer  = 10000
    34  )
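
        // These four channel IDs are registered with the p2p switch in GetChannels
        // below: StateChannel carries NewRoundStep, NewValidBlock, HasVote and
        // VoteSetMaj23 messages, DataChannel carries Proposal, ProposalPOL and
        // BlockPart messages, VoteChannel carries Vote messages, and
        // VoteSetBitsChannel carries VoteSetBits messages (see ReceiveEnvelope).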
    35  
    36  //-----------------------------------------------------------------------------
    37  
    38  // Reactor defines a reactor for the consensus service.
    39  type Reactor struct {
    40  	p2p.BaseReactor // BaseService + p2p.Switch
    41  
    42  	conS *State
    43  
    44  	mtx      cmtsync.RWMutex
    45  	waitSync bool
    46  	eventBus *types.EventBus
    47  	rs       *cstypes.RoundState
    48  
    49  	Metrics *Metrics
    50  }
    51  
    52  type ReactorOption func(*Reactor)
    53  
    54  // NewReactor returns a new Reactor with the given
    55  // consensusState.
    56  func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor {
    57  	conR := &Reactor{
    58  		conS:     consensusState,
    59  		waitSync: waitSync,
    60  		rs:       consensusState.GetRoundState(),
    61  		Metrics:  NopMetrics(),
    62  	}
    63  	conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)
    64  
    65  	for _, option := range options {
    66  		option(conR)
    67  	}
    68  
    69  	return conR
    70  }
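
        // Usage sketch (illustrative only; `consensusState`, `metrics`, `logger`
        // and `sw` are assumed to exist in the caller):
        //
        //	conR := NewReactor(consensusState, true, ReactorMetrics(metrics))
        //	conR.SetLogger(logger.With("module", "consensus"))
        //	sw.AddReactor("CONSENSUS", conR)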
    71  
    72  // OnStart implements BaseService by subscribing to events (which will later be
    73  // broadcast to other peers) and starting the consensus state if we're not in block sync.
    74  func (conR *Reactor) OnStart() error {
    75  	conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())
    76  
    77  	// start routine that computes peer statistics for evaluating peer quality
    78  	go conR.peerStatsRoutine()
    79  
    80  	conR.subscribeToBroadcastEvents()
    81  	go conR.updateRoundStateRoutine()
    82  
    83  	if !conR.WaitSync() {
    84  		err := conR.conS.Start()
    85  		if err != nil {
    86  			return err
    87  		}
    88  	}
    89  
    90  	return nil
    91  }
    92  
    93  // OnStop implements BaseService by unsubscribing from events and stopping
    94  // state.
    95  func (conR *Reactor) OnStop() {
    96  	conR.unsubscribeFromBroadcastEvents()
    97  	if err := conR.conS.Stop(); err != nil {
    98  		conR.Logger.Error("Error stopping consensus state", "err", err)
    99  	}
   100  	if !conR.WaitSync() {
   101  		conR.conS.Wait()
   102  	}
   103  }
   104  
   105  // SwitchToConsensus switches from block_sync mode to consensus mode.
   106  // It resets the state, turns off block_sync, and starts the consensus state machine.
   107  func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
   108  	conR.Logger.Info("SwitchToConsensus")
   109  
   110  	func() {
   111  		// We need to lock, as we are not entering consensus state from State's `handleMsg` or `handleTimeout`
   112  		conR.conS.mtx.Lock()
   113  		defer conR.conS.mtx.Unlock()
   114  		// We have no votes, so reconstruct LastCommit from SeenCommit
   115  		if state.LastBlockHeight > 0 {
   116  			conR.conS.reconstructLastCommit(state)
   117  		}
   118  
   119  		// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
   120  		// NewRoundStepMessage.
   121  		conR.conS.updateToState(state)
   122  	}()
   123  
   124  	conR.mtx.Lock()
   125  	conR.waitSync = false
   126  	conR.mtx.Unlock()
   127  	conR.Metrics.BlockSyncing.Set(0)
   128  	conR.Metrics.StateSyncing.Set(0)
   129  
   130  	if skipWAL {
   131  		conR.conS.doWALCatchup = false
   132  	}
   133  	err := conR.conS.Start()
   134  	if err != nil {
   135  		panic(fmt.Sprintf(`Failed to start consensus state: %v
   136  
   137  conS:
   138  %+v
   139  
   140  conR:
   141  %+v`, err, conR.conS, conR))
   142  	}
   143  }
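
        // Illustrative call site (an assumption about the caller, not part of this
        // file): the block-sync reactor is expected to hand control over once it has
        // caught up to the network, roughly:
        //
        //	conR.SwitchToConsensus(state, blocksSynced > 0) // state is the latest sm.State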
   144  
   145  // GetChannels implements Reactor
   146  func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
   147  	// TODO optimize
   148  	return []*p2p.ChannelDescriptor{
   149  		{
   150  			ID:                  StateChannel,
   151  			Priority:            6,
   152  			SendQueueCapacity:   100,
   153  			RecvMessageCapacity: maxMsgSize,
   154  			MessageType:         &cmtcons.Message{},
   155  		},
   156  		{
   157  			ID: DataChannel, // maybe split between gossiping current block and catchup stuff
   158  			// once we gossip the whole block there's nothing left to send until next height or round
   159  			Priority:            10,
   160  			SendQueueCapacity:   100,
   161  			RecvBufferCapacity:  50 * 4096,
   162  			RecvMessageCapacity: maxMsgSize,
   163  			MessageType:         &cmtcons.Message{},
   164  		},
   165  		{
   166  			ID:                  VoteChannel,
   167  			Priority:            7,
   168  			SendQueueCapacity:   100,
   169  			RecvBufferCapacity:  100 * 100,
   170  			RecvMessageCapacity: maxMsgSize,
   171  			MessageType:         &cmtcons.Message{},
   172  		},
   173  		{
   174  			ID:                  VoteSetBitsChannel,
   175  			Priority:            1,
   176  			SendQueueCapacity:   2,
   177  			RecvBufferCapacity:  1024,
   178  			RecvMessageCapacity: maxMsgSize,
   179  			MessageType:         &cmtcons.Message{},
   180  		},
   181  	}
   182  }
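
        // Note on the priorities above: DataChannel (10) outranks VoteChannel (7),
        // StateChannel (6) and VoteSetBitsChannel (1), so when several channels have
        // queued messages the connection tends to flush proposals and block parts
        // first, then votes, then round-step summaries.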
   183  
   184  // InitPeer implements Reactor by creating a state for the peer.
   185  func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
   186  	peerState := NewPeerState(peer).SetLogger(conR.Logger)
   187  	peer.Set(types.PeerStateKey, peerState)
   188  	return peer
   189  }
   190  
   191  // AddPeer implements Reactor by spawning multiple gossiping goroutines for the
   192  // peer.
   193  func (conR *Reactor) AddPeer(peer p2p.Peer) {
   194  	if !conR.IsRunning() {
   195  		return
   196  	}
   197  
   198  	peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
   199  	if !ok {
   200  		panic(fmt.Sprintf("peer %v has no state", peer))
   201  	}
   202  	// Begin routines for this peer.
   203  	go conR.gossipDataRoutine(peer, peerState)
   204  	go conR.gossipVotesRoutine(peer, peerState)
   205  	go conR.queryMaj23Routine(peer, peerState)
   206  
   207  	// Send our state to peer.
   208  	// If we're block_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
   209  	if !conR.WaitSync() {
   210  		conR.sendNewRoundStepMessage(peer)
   211  	}
   212  }
   213  
   214  // RemovePeer is a noop.
   215  func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
   216  	if !conR.IsRunning() {
   217  		return
   218  	}
   219  	// TODO
   220  	// ps, ok := peer.Get(PeerStateKey).(*PeerState)
   221  	// if !ok {
   222  	// 	panic(fmt.Sprintf("Peer %v has no state", peer))
   223  	// }
   224  	// ps.Disconnect()
   225  }
   226  
   227  // ReceiveEnvelope implements Reactor.
   228  // NOTE: We process these messages even when we're block_syncing.
   229  // Messages affect either a peer state or the consensus state.
   230  // Peer state updates can happen in parallel, but processing of
   231  // proposals, block parts, and votes is ordered by the receiveRoutine.
   232  // NOTE: it blocks on the consensus state for proposals, block parts, and votes.
   233  func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
   234  	if !conR.IsRunning() {
   235  		conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
   236  		return
   237  	}
   238  	msg, err := MsgFromProto(e.Message)
   239  	if err != nil {
   240  		conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
   241  		conR.Switch.StopPeerForError(e.Src, err)
   242  		return
   243  	}
   244  
   245  	if err = msg.ValidateBasic(); err != nil {
   246  		conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
   247  		conR.Switch.StopPeerForError(e.Src, err)
   248  		return
   249  	}
   250  
   251  	conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)
   252  
   253  	// Get peer states
   254  	ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
   255  	if !ok {
   256  		panic(fmt.Sprintf("Peer %v has no state", e.Src))
   257  	}
   258  
   259  	switch e.ChannelID {
   260  	case StateChannel:
   261  		switch msg := msg.(type) {
   262  		case *NewRoundStepMessage:
   263  			conR.conS.mtx.Lock()
   264  			initialHeight := conR.conS.state.InitialHeight
   265  			conR.conS.mtx.Unlock()
   266  			if err = msg.ValidateHeight(initialHeight); err != nil {
   267  				conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
   268  				conR.Switch.StopPeerForError(e.Src, err)
   269  				return
   270  			}
   271  			ps.ApplyNewRoundStepMessage(msg)
   272  		case *NewValidBlockMessage:
   273  			ps.ApplyNewValidBlockMessage(msg)
   274  		case *HasVoteMessage:
   275  			ps.ApplyHasVoteMessage(msg)
   276  		case *VoteSetMaj23Message:
   277  			cs := conR.conS
   278  			cs.mtx.Lock()
   279  			height, votes := cs.Height, cs.Votes
   280  			cs.mtx.Unlock()
   281  			if height != msg.Height {
   282  				return
   283  			}
   284  			// Peer claims to have a maj23 for some BlockID at H,R,S.
   285  			err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
   286  			if err != nil {
   287  				conR.Switch.StopPeerForError(e.Src, err)
   288  				return
   289  			}
   290  			// Respond with a VoteSetBitsMessage showing which votes we have.
   291  			// (and consequently shows which we don't have)
   292  			var ourVotes *bits.BitArray
   293  			switch msg.Type {
   294  			case cmtproto.PrevoteType:
   295  				ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
   296  			case cmtproto.PrecommitType:
   297  				ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
   298  			default:
   299  				panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
   300  			}
   301  			eMsg := &cmtcons.VoteSetBits{
   302  				Height:  msg.Height,
   303  				Round:   msg.Round,
   304  				Type:    msg.Type,
   305  				BlockID: msg.BlockID.ToProto(),
   306  			}
   307  			if votes := ourVotes.ToProto(); votes != nil {
   308  				eMsg.Votes = *votes
   309  			}
   310  			e.Src.TrySendEnvelope(p2p.Envelope{
   311  				ChannelID: VoteSetBitsChannel,
   312  				Message:   eMsg,
   313  			})
   314  		default:
   315  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   316  		}
   317  
   318  	case DataChannel:
   319  		if conR.WaitSync() {
   320  			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
   321  			return
   322  		}
   323  		switch msg := msg.(type) {
   324  		case *ProposalMessage:
   325  			ps.SetHasProposal(msg.Proposal)
   326  			conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
   327  		case *ProposalPOLMessage:
   328  			ps.ApplyProposalPOLMessage(msg)
   329  		case *BlockPartMessage:
   330  			ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
   331  			conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
   332  			conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
   333  		default:
   334  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   335  		}
   336  
   337  	case VoteChannel:
   338  		if conR.WaitSync() {
   339  			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
   340  			return
   341  		}
   342  		switch msg := msg.(type) {
   343  		case *VoteMessage:
   344  			cs := conR.conS
   345  			cs.mtx.RLock()
   346  			height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size()
   347  			cs.mtx.RUnlock()
   348  			ps.EnsureVoteBitArrays(height, valSize)
   349  			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
   350  			ps.SetHasVote(msg.Vote)
   351  
   352  			cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
   353  
   354  		default:
   355  			// don't punish (leave room for soft upgrades)
   356  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   357  		}
   358  
   359  	case VoteSetBitsChannel:
   360  		if conR.WaitSync() {
   361  			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
   362  			return
   363  		}
   364  		switch msg := msg.(type) {
   365  		case *VoteSetBitsMessage:
   366  			cs := conR.conS
   367  			cs.mtx.Lock()
   368  			height, votes := cs.Height, cs.Votes
   369  			cs.mtx.Unlock()
   370  
   371  			if height == msg.Height {
   372  				var ourVotes *bits.BitArray
   373  				switch msg.Type {
   374  				case cmtproto.PrevoteType:
   375  					ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
   376  				case cmtproto.PrecommitType:
   377  					ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
   378  				default:
   379  					panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
   380  				}
   381  				ps.ApplyVoteSetBitsMessage(msg, ourVotes)
   382  			} else {
   383  				ps.ApplyVoteSetBitsMessage(msg, nil)
   384  			}
   385  		default:
   386  			// don't punish (leave room for soft upgrades)
   387  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   388  		}
   389  
   390  	default:
   391  		conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
   392  	}
   393  }
   394  
   395  // SetEventBus sets event bus.
   396  func (conR *Reactor) SetEventBus(b *types.EventBus) {
   397  	conR.eventBus = b
   398  	conR.conS.SetEventBus(b)
   399  }
   400  
   401  // WaitSync returns whether the consensus reactor is waiting for state/block sync.
   402  func (conR *Reactor) WaitSync() bool {
   403  	conR.mtx.RLock()
   404  	defer conR.mtx.RUnlock()
   405  	return conR.waitSync
   406  }
   407  
   408  //--------------------------------------
   409  
   410  // subscribeToBroadcastEvents subscribes to new round steps, valid blocks, and
   411  // votes via the internal event switch defined on the consensus state, and
   412  // broadcasts them to peers as they are received.
   413  func (conR *Reactor) subscribeToBroadcastEvents() {
   414  	const subscriber = "consensus-reactor"
   415  	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
   416  		func(data cmtevents.EventData) {
   417  			conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
   418  		}); err != nil {
   419  		conR.Logger.Error("Error adding listener for events", "err", err)
   420  	}
   421  
   422  	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
   423  		func(data cmtevents.EventData) {
   424  			conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
   425  		}); err != nil {
   426  		conR.Logger.Error("Error adding listener for events", "err", err)
   427  	}
   428  
   429  	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
   430  		func(data cmtevents.EventData) {
   431  			conR.broadcastHasVoteMessage(data.(*types.Vote))
   432  		}); err != nil {
   433  		conR.Logger.Error("Error adding listener for events", "err", err)
   434  	}
   435  
   436  }
   437  
   438  func (conR *Reactor) unsubscribeFromBroadcastEvents() {
   439  	const subscriber = "consensus-reactor"
   440  	conR.conS.evsw.RemoveListener(subscriber)
   441  }
   442  
   443  func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
   444  	nrsMsg := makeRoundStepMessage(rs)
   445  	conR.Switch.BroadcastEnvelope(p2p.Envelope{
   446  		ChannelID: StateChannel,
   447  		Message:   nrsMsg,
   448  	})
   449  }
   450  
   451  func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
   452  	psh := rs.ProposalBlockParts.Header()
   453  	csMsg := &cmtcons.NewValidBlock{
   454  		Height:             rs.Height,
   455  		Round:              rs.Round,
   456  		BlockPartSetHeader: psh.ToProto(),
   457  		BlockParts:         rs.ProposalBlockParts.BitArray().ToProto(),
   458  		IsCommit:           rs.Step == cstypes.RoundStepCommit,
   459  	}
   460  	conR.Switch.BroadcastEnvelope(p2p.Envelope{
   461  		ChannelID: StateChannel,
   462  		Message:   csMsg,
   463  	})
   464  }
   465  
   466  // broadcastHasVoteMessage broadcasts a HasVoteMessage to peers that care.
   467  func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
   468  	msg := &cmtcons.HasVote{
   469  		Height: vote.Height,
   470  		Round:  vote.Round,
   471  		Type:   vote.Type,
   472  		Index:  vote.ValidatorIndex,
   473  	}
   474  	conR.Switch.BroadcastEnvelope(p2p.Envelope{
   475  		ChannelID: StateChannel,
   476  		Message:   msg,
   477  	})
   478  	/*
   479  		// TODO: Make this broadcast more selective.
   480  		for _, peer := range conR.Switch.Peers().List() {
   481  			ps, ok := peer.Get(PeerStateKey).(*PeerState)
   482  			if !ok {
   483  				panic(fmt.Sprintf("Peer %v has no state", peer))
   484  			}
   485  			prs := ps.GetRoundState()
   486  			if prs.Height == vote.Height {
   487  				// TODO: Also filter on round?
   488  				e := p2p.Envelope{
   489  					ChannelID: StateChannel,
   490  					Message:   msg,
   491  				}
   492  				peer.TrySendEnvelope(e)
   493  			} else {
   494  				// Height doesn't match
   495  				// TODO: check a field, maybe CatchupCommitRound?
   496  				// TODO: But that requires changing the struct field comment.
   497  			}
   498  		}
   499  	*/
   500  }
   501  
   502  func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *cmtcons.NewRoundStep) {
   503  	nrsMsg = &cmtcons.NewRoundStep{
   504  		Height:                rs.Height,
   505  		Round:                 rs.Round,
   506  		Step:                  uint32(rs.Step),
   507  		SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
   508  		LastCommitRound:       rs.LastCommit.GetRound(),
   509  	}
   510  	return
   511  }
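
        // Note: SecondsSinceStartTime is truncated toward zero, e.g. 2.9s since
        // rs.StartTime is sent as 2; receivers reconstruct an approximate StartTime
        // from it in ApplyNewRoundStepMessage.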
   512  
   513  func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
   514  	rs := conR.getRoundState()
   515  	nrsMsg := makeRoundStepMessage(rs)
   516  	peer.SendEnvelope(p2p.Envelope{
   517  		ChannelID: StateChannel,
   518  		Message:   nrsMsg,
   519  	})
   520  }
   521  
   522  func (conR *Reactor) updateRoundStateRoutine() {
   523  	t := time.NewTicker(100 * time.Microsecond)
   524  	defer t.Stop()
   525  	for range t.C {
   526  		if !conR.IsRunning() {
   527  			return
   528  		}
   529  		rs := conR.conS.GetRoundState()
   530  		conR.mtx.Lock()
   531  		conR.rs = rs
   532  		conR.mtx.Unlock()
   533  	}
   534  }
   535  
   536  func (conR *Reactor) getRoundState() *cstypes.RoundState {
   537  	conR.mtx.RLock()
   538  	defer conR.mtx.RUnlock()
   539  	return conR.rs
   540  }
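
        // The rs copy cached by updateRoundStateRoutine lets the gossip and
        // queryMaj23 routines read the latest RoundState through getRoundState()
        // without contending on the consensus state's own mutex on every iteration.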
   541  
   542  func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
   543  	logger := conR.Logger.With("peer", peer)
   544  
   545  OUTER_LOOP:
   546  	for {
   547  		// Manage disconnects from self or peer.
   548  		if !peer.IsRunning() || !conR.IsRunning() {
   549  			return
   550  		}
   551  		rs := conR.getRoundState()
   552  		prs := ps.GetRoundState()
   553  
   554  		// Send proposal Block parts?
   555  		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
   556  			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
   557  				part := rs.ProposalBlockParts.GetPart(index)
   558  				parts, err := part.ToProto()
   559  				if err != nil {
   560  					panic(err)
   561  				}
   562  				logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
   563  				if peer.SendEnvelope(p2p.Envelope{
   564  					ChannelID: DataChannel,
   565  					Message: &cmtcons.BlockPart{
   566  						Height: rs.Height, // This tells peer that this part applies to us.
   567  						Round:  rs.Round,  // This tells peer that this part applies to us.
   568  						Part:   *parts,
   569  					},
   570  				}) {
   571  					ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
   572  				}
   573  				continue OUTER_LOOP
   574  			}
   575  		}
   576  
   577  		// If the peer is on a previous height that we have, help catch up.
   578  		blockStoreBase := conR.conS.blockStore.Base()
   579  		if blockStoreBase > 0 && 0 < prs.Height && prs.Height < rs.Height && prs.Height >= blockStoreBase {
   580  			heightLogger := logger.With("height", prs.Height)
   581  
   582  			// if we never received the commit message from the peer, the block parts won't be initialized
   583  			if prs.ProposalBlockParts == nil {
   584  				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
   585  				if blockMeta == nil {
   586  					heightLogger.Error("Failed to load block meta",
   587  						"blockstoreBase", blockStoreBase, "blockstoreHeight", conR.conS.blockStore.Height())
   588  					time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   589  				} else {
   590  					ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader)
   591  				}
   592  				// continue the loop since prs is a copy and not affected by this initialization
   593  				continue OUTER_LOOP
   594  			}
   595  			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
   596  			continue OUTER_LOOP
   597  		}
   598  
   599  		// If height and round don't match, sleep.
   600  		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
   601  			// logger.Info("Peer Height|Round mismatch, sleeping",
   602  			// "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
   603  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   604  			continue OUTER_LOOP
   605  		}
   606  
   607  		// By here, height and round match.
   608  		// Proposal block parts were already matched and sent if any were wanted.
   609  		// (These can match on hash so the round doesn't matter)
   610  		// Now consider sending other things, like the Proposal itself.
   611  
   612  		// Send Proposal && ProposalPOL BitArray?
   613  		if rs.Proposal != nil && !prs.Proposal {
   614  			// Proposal: share the proposal metadata with peer.
   615  			{
   616  				logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
   617  				if peer.SendEnvelope(p2p.Envelope{
   618  					ChannelID: DataChannel,
   619  					Message:   &cmtcons.Proposal{Proposal: *rs.Proposal.ToProto()},
   620  				}) {
   621  					// NOTE[ZM]: A peer might have received a different proposal msg, so this Proposal msg will be rejected!
   622  					ps.SetHasProposal(rs.Proposal)
   623  				}
   624  			}
   625  			// ProposalPOL: lets peer know which POL votes we have so far.
   626  			// Peer must receive ProposalMessage first.
   627  			// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
   628  			// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
   629  			if 0 <= rs.Proposal.POLRound {
   630  				logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
   631  				peer.SendEnvelope(p2p.Envelope{
   632  					ChannelID: DataChannel,
   633  					Message: &cmtcons.ProposalPOL{
   634  						Height:           rs.Height,
   635  						ProposalPolRound: rs.Proposal.POLRound,
   636  						ProposalPol:      *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
   637  					},
   638  				})
   639  			}
   640  			continue OUTER_LOOP
   641  		}
   642  
   643  		// Nothing to do. Sleep.
   644  		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   645  		continue OUTER_LOOP
   646  	}
   647  }
   648  
   649  func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
   650  	prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) {
   651  
   652  	if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
   653  		// Ensure that the peer's PartSetHeader is correct
   654  		blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
   655  		if blockMeta == nil {
   656  			logger.Error("Failed to load block meta", "ourHeight", rs.Height,
   657  				"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
   658  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   659  			return
   660  		} else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) {
   661  			logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping",
   662  				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
   663  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   664  			return
   665  		}
   666  		// Load the part
   667  		part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
   668  		if part == nil {
   669  			logger.Error("Could not load part", "index", index,
   670  				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
   671  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   672  			return
   673  		}
   674  		// Send the part
   675  		logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
   676  		pp, err := part.ToProto()
   677  		if err != nil {
   678  			logger.Error("Could not convert part to proto", "index", index, "error", err)
   679  			return
   680  		}
   681  		if peer.SendEnvelope(p2p.Envelope{
   682  			ChannelID: DataChannel,
   683  			Message: &cmtcons.BlockPart{
   684  				Height: prs.Height, // Not our height, so it doesn't matter.
   685  				Round:  prs.Round,  // Not our height, so it doesn't matter.
   686  				Part:   *pp,
   687  			},
   688  		}) {
   689  			ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
   690  		} else {
   691  			logger.Debug("Sending block part for catchup failed")
   692  			// sleep to avoid retrying too fast
   693  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   694  		}
   695  		return
   696  	}
   697  	//  logger.Info("No parts to send in catch-up, sleeping")
   698  	time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   699  }
   700  
   701  func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
   702  	logger := conR.Logger.With("peer", peer)
   703  
   704  	// Simple hack to throttle logs upon sleep.
   705  	var sleeping = 0
   706  
   707  OUTER_LOOP:
   708  	for {
   709  		// Manage disconnects from self or peer.
   710  		if !peer.IsRunning() || !conR.IsRunning() {
   711  			return
   712  		}
   713  		rs := conR.getRoundState()
   714  		prs := ps.GetRoundState()
   715  
   716  		switch sleeping {
   717  		case 1: // First sleep
   718  			sleeping = 2
   719  		case 2: // No more sleep
   720  			sleeping = 0
   721  		}
   722  
   723  		// logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
   724  		// "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)
   725  
   726  		// If height matches, then send LastCommit, Prevotes, Precommits.
   727  		if rs.Height == prs.Height {
   728  			heightLogger := logger.With("height", prs.Height)
   729  			if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
   730  				continue OUTER_LOOP
   731  			}
   732  		}
   733  
   734  		// Special catchup logic.
   735  		// If peer is lagging by height 1, send LastCommit.
   736  		if prs.Height != 0 && rs.Height == prs.Height+1 {
   737  			if ps.PickSendVote(rs.LastCommit) {
   738  				logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
   739  				continue OUTER_LOOP
   740  			}
   741  		}
   742  
   743  		// Catchup logic
   744  		// If peer is lagging by more than 1, send Commit.
   745  		blockStoreBase := conR.conS.blockStore.Base()
   746  		if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase {
   747  			// Load the block commit for prs.Height,
   748  			// which contains precommit signatures for prs.Height.
   749  			if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
   750  				if ps.PickSendVote(commit) {
   751  					logger.Debug("Picked Catchup commit to send", "height", prs.Height)
   752  					continue OUTER_LOOP
   753  				}
   754  			}
   755  		}
   756  
   757  		if sleeping == 0 {
   758  			// We sent nothing. Sleep...
   759  			sleeping = 1
   760  			logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height,
   761  				"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
   762  				"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
   763  		} else if sleeping == 2 {
   764  			// Continued sleep...
   765  			sleeping = 1
   766  		}
   767  
   768  		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   769  		continue OUTER_LOOP
   770  	}
   771  }
   772  
   773  func (conR *Reactor) gossipVotesForHeight(
   774  	logger log.Logger,
   775  	rs *cstypes.RoundState,
   776  	prs *cstypes.PeerRoundState,
   777  	ps *PeerState,
   778  ) bool {
   779  
   780  	// If there are lastCommits to send...
   781  	if prs.Step == cstypes.RoundStepNewHeight {
   782  		if ps.PickSendVote(rs.LastCommit) {
   783  			logger.Debug("Picked rs.LastCommit to send")
   784  			return true
   785  		}
   786  	}
   787  	// If there are POL prevotes to send...
   788  	if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 {
   789  		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
   790  			if ps.PickSendVote(polPrevotes) {
   791  				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
   792  					"round", prs.ProposalPOLRound)
   793  				return true
   794  			}
   795  		}
   796  	}
   797  	// If there are prevotes to send...
   798  	if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round {
   799  		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
   800  			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
   801  			return true
   802  		}
   803  	}
   804  	// If there are precommits to send...
   805  	if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
   806  		if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
   807  			logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
   808  			return true
   809  		}
   810  	}
   811  	// If there are prevotes to send...Needed because of validBlock mechanism
   812  	if prs.Round != -1 && prs.Round <= rs.Round {
   813  		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
   814  			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
   815  			return true
   816  		}
   817  	}
   818  	// If there are POLPrevotes to send...
   819  	if prs.ProposalPOLRound != -1 {
   820  		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
   821  			if ps.PickSendVote(polPrevotes) {
   822  				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
   823  					"round", prs.ProposalPOLRound)
   824  				return true
   825  			}
   826  		}
   827  	}
   828  
   829  	return false
   830  }
   831  
   832  // NOTE: `queryMaj23Routine` has a simple, crude design, since it only comes
   833  // into play for liveness when there's a signature DDoS attack happening.
   834  func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
   835  
   836  OUTER_LOOP:
   837  	for {
   838  		// Manage disconnects from self or peer.
   839  		if !peer.IsRunning() || !conR.IsRunning() {
   840  			return
   841  		}
   842  
   843  		// Maybe send Height/Round/Prevotes
   844  		{
   845  			rs := conR.getRoundState()
   846  			prs := ps.GetRoundState()
   847  			if rs.Height == prs.Height {
   848  				if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
   849  
   850  					peer.TrySendEnvelope(p2p.Envelope{
   851  						ChannelID: StateChannel,
   852  						Message: &cmtcons.VoteSetMaj23{
   853  							Height:  prs.Height,
   854  							Round:   prs.Round,
   855  							Type:    cmtproto.PrevoteType,
   856  							BlockID: maj23.ToProto(),
   857  						},
   858  					})
   859  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   860  				}
   861  			}
   862  		}
   863  
   864  		// Maybe send Height/Round/Precommits
   865  		{
   866  			rs := conR.getRoundState()
   867  			prs := ps.GetRoundState()
   868  			if rs.Height == prs.Height {
   869  				if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
   870  					peer.TrySendEnvelope(p2p.Envelope{
   871  						ChannelID: StateChannel,
   872  						Message: &cmtcons.VoteSetMaj23{
   873  							Height:  prs.Height,
   874  							Round:   prs.Round,
   875  							Type:    cmtproto.PrecommitType,
   876  							BlockID: maj23.ToProto(),
   877  						},
   878  					})
   879  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   880  				}
   881  			}
   882  		}
   883  
   884  		// Maybe send Height/Round/ProposalPOL
   885  		{
   886  			rs := conR.getRoundState()
   887  			prs := ps.GetRoundState()
   888  			if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
   889  				if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
   890  
   891  					peer.TrySendEnvelope(p2p.Envelope{
   892  						ChannelID: StateChannel,
   893  						Message: &cmtcons.VoteSetMaj23{
   894  							Height:  prs.Height,
   895  							Round:   prs.ProposalPOLRound,
   896  							Type:    cmtproto.PrevoteType,
   897  							BlockID: maj23.ToProto(),
   898  						},
   899  					})
   900  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   901  				}
   902  			}
   903  		}
   904  
   905  		// There is little point sending LastCommitRound/LastCommit;
   906  		// these are fleeting and non-blocking.
   907  
   908  		// Maybe send Height/CatchupCommitRound/CatchupCommit.
   909  		{
   910  			prs := ps.GetRoundState()
   911  			if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
   912  				prs.Height >= conR.conS.blockStore.Base() {
   913  				if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
   914  					peer.TrySendEnvelope(p2p.Envelope{
   915  						ChannelID: StateChannel,
   916  						Message: &cmtcons.VoteSetMaj23{
   917  							Height:  prs.Height,
   918  							Round:   commit.Round,
   919  							Type:    cmtproto.PrecommitType,
   920  							BlockID: commit.BlockID.ToProto(),
   921  						},
   922  					})
   923  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   924  				}
   925  			}
   926  		}
   927  
   928  		time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   929  
   930  		continue OUTER_LOOP
   931  	}
   932  }
   933  
   934  func (conR *Reactor) peerStatsRoutine() {
   935  	for {
   936  		if !conR.IsRunning() {
   937  			conR.Logger.Info("Stopping peerStatsRoutine")
   938  			return
   939  		}
   940  
   941  		select {
   942  		case msg := <-conR.conS.statsMsgQueue:
   943  			// Get peer
   944  			peer := conR.Switch.Peers().Get(msg.PeerID)
   945  			if peer == nil {
   946  				conR.Logger.Debug("Attempt to update stats for non-existent peer",
   947  					"peer", msg.PeerID)
   948  				continue
   949  			}
   950  			// Get peer state
   951  			ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
   952  			if !ok {
   953  				panic(fmt.Sprintf("Peer %v has no state", peer))
   954  			}
   955  			switch msg.Msg.(type) {
   956  			case *VoteMessage:
   957  				if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
   958  					conR.Switch.MarkPeerAsGood(peer)
   959  				}
   960  			case *BlockPartMessage:
   961  				if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
   962  					conR.Switch.MarkPeerAsGood(peer)
   963  				}
   964  			}
   965  		case <-conR.conS.Quit():
   966  			return
   967  
   968  		case <-conR.Quit():
   969  			return
   970  		}
   971  	}
   972  }
   973  
   974  // String returns a string representation of the Reactor.
   975  // NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
   976  // TODO: improve!
   977  func (conR *Reactor) String() string {
   978  	// better not to access shared variables
   979  	return "ConsensusReactor" // conR.StringIndented("")
   980  }
   981  
   982  // StringIndented returns an indented string representation of the Reactor
   983  func (conR *Reactor) StringIndented(indent string) string {
   984  	s := "ConsensusReactor{\n"
   985  	s += indent + "  " + conR.conS.StringIndented(indent+"  ") + "\n"
   986  	for _, peer := range conR.Switch.Peers().List() {
   987  		ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
   988  		if !ok {
   989  			panic(fmt.Sprintf("Peer %v has no state", peer))
   990  		}
   991  		s += indent + "  " + ps.StringIndented(indent+"  ") + "\n"
   992  	}
   993  	s += indent + "}"
   994  	return s
   995  }
   996  
   997  // ReactorMetrics sets the metrics
   998  func ReactorMetrics(metrics *Metrics) ReactorOption {
   999  	return func(conR *Reactor) { conR.Metrics = metrics }
  1000  }
  1001  
  1002  //-----------------------------------------------------------------------------
  1003  
  1004  var (
  1005  	ErrPeerStateHeightRegression = errors.New("error peer state height regression")
  1006  	ErrPeerStateInvalidStartTime = errors.New("error peer state invalid startTime")
  1007  )
  1008  
  1009  // PeerState contains the known state of a peer, including its connection and
  1010  // threadsafe access to its PeerRoundState.
  1011  // NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
  1012  // Be mindful of what you Expose.
  1013  type PeerState struct {
  1014  	peer   p2p.Peer
  1015  	logger log.Logger
  1016  
  1017  	mtx   sync.Mutex             // NOTE: Modify below using setters, never directly.
  1018  	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
  1019  	Stats *peerStateStats        `json:"stats"`       // Exposed.
  1020  }
  1021  
  1022  // peerStateStats holds internal statistics for a peer.
  1023  type peerStateStats struct {
  1024  	Votes      int `json:"votes"`
  1025  	BlockParts int `json:"block_parts"`
  1026  }
  1027  
  1028  func (pss peerStateStats) String() string {
  1029  	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
  1030  		pss.Votes, pss.BlockParts)
  1031  }
  1032  
  1033  // NewPeerState returns a new PeerState for the given Peer
  1034  func NewPeerState(peer p2p.Peer) *PeerState {
  1035  	return &PeerState{
  1036  		peer:   peer,
  1037  		logger: log.NewNopLogger(),
  1038  		PRS: cstypes.PeerRoundState{
  1039  			Round:              -1,
  1040  			ProposalPOLRound:   -1,
  1041  			LastCommitRound:    -1,
  1042  			CatchupCommitRound: -1,
  1043  		},
  1044  		Stats: &peerStateStats{},
  1045  	}
  1046  }
  1047  
  1048  // SetLogger sets the logger on the peer state. It returns the peer state
  1049  // itself.
  1050  func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
  1051  	ps.logger = logger
  1052  	return ps
  1053  }
  1054  
  1055  // GetRoundState returns a shallow copy of the PeerRoundState.
  1056  // There's no point in mutating it since it won't change PeerState.
  1057  func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
  1058  	ps.mtx.Lock()
  1059  	defer ps.mtx.Unlock()
  1060  
  1061  	prs := ps.PRS // copy
  1062  	return &prs
  1063  }
  1064  
  1065  // MarshalJSON implements the json.Marshaler interface.
  1066  func (ps *PeerState) MarshalJSON() ([]byte, error) {
  1067  	ps.mtx.Lock()
  1068  	defer ps.mtx.Unlock()
  1069  
  1070  	type jsonPeerState PeerState
  1071  	return cmtjson.Marshal((*jsonPeerState)(ps))
  1072  }
  1073  
  1074  // GetHeight returns an atomic snapshot of the PeerRoundState's height.
  1075  // It is used by the mempool to ensure peers are caught up before broadcasting new txs.
  1076  func (ps *PeerState) GetHeight() int64 {
  1077  	ps.mtx.Lock()
  1078  	defer ps.mtx.Unlock()
  1079  	return ps.PRS.Height
  1080  }
  1081  
  1082  // SetHasProposal sets the given proposal as known for the peer.
  1083  func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
  1084  	ps.mtx.Lock()
  1085  	defer ps.mtx.Unlock()
  1086  
  1087  	if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
  1088  		return
  1089  	}
  1090  
  1091  	if ps.PRS.Proposal {
  1092  		return
  1093  	}
  1094  
  1095  	ps.PRS.Proposal = true
  1096  
  1097  	// ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage
  1098  	if ps.PRS.ProposalBlockParts != nil {
  1099  		return
  1100  	}
  1101  
  1102  	ps.PRS.ProposalBlockPartSetHeader = proposal.BlockID.PartSetHeader
  1103  	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(proposal.BlockID.PartSetHeader.Total))
  1104  	ps.PRS.ProposalPOLRound = proposal.POLRound
  1105  	ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
  1106  }
  1107  
  1108  // InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
  1109  func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) {
  1110  	ps.mtx.Lock()
  1111  	defer ps.mtx.Unlock()
  1112  
  1113  	if ps.PRS.ProposalBlockParts != nil {
  1114  		return
  1115  	}
  1116  
  1117  	ps.PRS.ProposalBlockPartSetHeader = partSetHeader
  1118  	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(partSetHeader.Total))
  1119  }
  1120  
  1121  // SetHasProposalBlockPart sets the given block part index as known for the peer.
  1122  func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index int) {
  1123  	ps.mtx.Lock()
  1124  	defer ps.mtx.Unlock()
  1125  
  1126  	if ps.PRS.Height != height || ps.PRS.Round != round {
  1127  		return
  1128  	}
  1129  
  1130  	ps.PRS.ProposalBlockParts.SetIndex(index, true)
  1131  }
  1132  
  1133  // PickSendVote picks a vote and sends it to the peer.
  1134  // Returns true if vote was sent.
  1135  func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
  1136  	if vote, ok := ps.PickVoteToSend(votes); ok {
  1137  		ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
  1138  		if ps.peer.SendEnvelope(p2p.Envelope{
  1139  			ChannelID: VoteChannel,
  1140  			Message: &cmtcons.Vote{
  1141  				Vote: vote.ToProto(),
  1142  			},
  1143  		}) {
  1144  			ps.SetHasVote(vote)
  1145  			return true
  1146  		}
  1147  		return false
  1148  	}
  1149  	return false
  1150  }
  1151  
  1152  // PickVoteToSend picks a vote to send to the peer.
  1153  // Returns true if a vote was picked.
  1154  // NOTE: `votes` must be the correct Size() for the Height().
  1155  func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
  1156  	ps.mtx.Lock()
  1157  	defer ps.mtx.Unlock()
  1158  
  1159  	if votes.Size() == 0 {
  1160  		return nil, false
  1161  	}
  1162  
  1163  	height, round, votesType, size :=
  1164  		votes.GetHeight(), votes.GetRound(), cmtproto.SignedMsgType(votes.Type()), votes.Size()
  1165  
  1166  	// Lazily set data using 'votes'.
  1167  	if votes.IsCommit() {
  1168  		ps.ensureCatchupCommitRound(height, round, size)
  1169  	}
  1170  	ps.ensureVoteBitArrays(height, size)
  1171  
  1172  	psVotes := ps.getVoteBitArray(height, round, votesType)
  1173  	if psVotes == nil {
  1174  		return nil, false // Not something worth sending
  1175  	}
  1176  	if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
  1177  		return votes.GetByIndex(int32(index)), true
  1178  	}
  1179  	return nil, false
  1180  }
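
        // In other words, a vote is picked uniformly at random from the set
        // difference votes \ psVotes: votes we hold that the peer, as far as we
        // know, does not.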
  1181  
  1182  func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType cmtproto.SignedMsgType) *bits.BitArray {
  1183  	if !types.IsVoteTypeValid(votesType) {
  1184  		return nil
  1185  	}
  1186  
  1187  	if ps.PRS.Height == height {
  1188  		if ps.PRS.Round == round {
  1189  			switch votesType {
  1190  			case cmtproto.PrevoteType:
  1191  				return ps.PRS.Prevotes
  1192  			case cmtproto.PrecommitType:
  1193  				return ps.PRS.Precommits
  1194  			}
  1195  		}
  1196  		if ps.PRS.CatchupCommitRound == round {
  1197  			switch votesType {
  1198  			case cmtproto.PrevoteType:
  1199  				return nil
  1200  			case cmtproto.PrecommitType:
  1201  				return ps.PRS.CatchupCommit
  1202  			}
  1203  		}
  1204  		if ps.PRS.ProposalPOLRound == round {
  1205  			switch votesType {
  1206  			case cmtproto.PrevoteType:
  1207  				return ps.PRS.ProposalPOL
  1208  			case cmtproto.PrecommitType:
  1209  				return nil
  1210  			}
  1211  		}
  1212  		return nil
  1213  	}
  1214  	if ps.PRS.Height == height+1 {
  1215  		if ps.PRS.LastCommitRound == round {
  1216  			switch votesType {
  1217  			case cmtproto.PrevoteType:
  1218  				return nil
  1219  			case cmtproto.PrecommitType:
  1220  				return ps.PRS.LastCommit
  1221  			}
  1222  		}
  1223  		return nil
  1224  	}
  1225  	return nil
  1226  }
  1227  
  1228  // 'round': A round for which we have a +2/3 commit.
  1229  func (ps *PeerState) ensureCatchupCommitRound(height int64, round int32, numValidators int) {
  1230  	if ps.PRS.Height != height {
  1231  		return
  1232  	}
  1233  	/*
  1234  		NOTE: This is wrong, 'round' could change.
  1235  		e.g. if orig round is not the same as block LastCommit round.
  1236  		if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
  1237  			panic(fmt.Sprintf(
  1238  				"Conflicting CatchupCommitRound. Height: %v,
  1239  				Orig: %v,
  1240  				New: %v",
  1241  				height,
  1242  				ps.CatchupCommitRound,
  1243  				round))
  1244  		}
  1245  	*/
  1246  	if ps.PRS.CatchupCommitRound == round {
  1247  		return // Nothing to do!
  1248  	}
  1249  	ps.PRS.CatchupCommitRound = round
  1250  	if round == ps.PRS.Round {
  1251  		ps.PRS.CatchupCommit = ps.PRS.Precommits
  1252  	} else {
  1253  		ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
  1254  	}
  1255  }
  1256  
  1257  // EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
  1258  // what votes this peer has received.
  1259  // NOTE: It's important to make sure that numValidators actually matches
  1260  // what the node sees as the number of validators for height.
  1261  func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
  1262  	ps.mtx.Lock()
  1263  	defer ps.mtx.Unlock()
  1264  	ps.ensureVoteBitArrays(height, numValidators)
  1265  }
  1266  
  1267  func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
  1268  	if ps.PRS.Height == height {
  1269  		if ps.PRS.Prevotes == nil {
  1270  			ps.PRS.Prevotes = bits.NewBitArray(numValidators)
  1271  		}
  1272  		if ps.PRS.Precommits == nil {
  1273  			ps.PRS.Precommits = bits.NewBitArray(numValidators)
  1274  		}
  1275  		if ps.PRS.CatchupCommit == nil {
  1276  			ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
  1277  		}
  1278  		if ps.PRS.ProposalPOL == nil {
  1279  			ps.PRS.ProposalPOL = bits.NewBitArray(numValidators)
  1280  		}
  1281  	} else if ps.PRS.Height == height+1 {
  1282  		if ps.PRS.LastCommit == nil {
  1283  			ps.PRS.LastCommit = bits.NewBitArray(numValidators)
  1284  		}
  1285  	}
  1286  }
  1287  
  1288  // RecordVote increments internal vote-related statistics for this peer.
  1289  // It returns the total number of added votes.
  1290  func (ps *PeerState) RecordVote() int {
  1291  	ps.mtx.Lock()
  1292  	defer ps.mtx.Unlock()
  1293  
  1294  	ps.Stats.Votes++
  1295  
  1296  	return ps.Stats.Votes
  1297  }
  1298  
  1299  // VotesSent returns the number of votes the peer has sent us, as recorded by
  1300  // RecordVote.
  1301  func (ps *PeerState) VotesSent() int {
  1302  	ps.mtx.Lock()
  1303  	defer ps.mtx.Unlock()
  1304  
  1305  	return ps.Stats.Votes
  1306  }
  1307  
  1308  // RecordBlockPart increments internal block-part-related statistics for this peer.
  1309  // It returns the total number of added block parts.
  1310  func (ps *PeerState) RecordBlockPart() int {
  1311  	ps.mtx.Lock()
  1312  	defer ps.mtx.Unlock()
  1313  
  1314  	ps.Stats.BlockParts++
  1315  	return ps.Stats.BlockParts
  1316  }
  1317  
  1318  // BlockPartsSent returns the number of useful block parts the peer has sent us.
  1319  func (ps *PeerState) BlockPartsSent() int {
  1320  	ps.mtx.Lock()
  1321  	defer ps.mtx.Unlock()
  1322  
  1323  	return ps.Stats.BlockParts
  1324  }
  1325  
  1326  // SetHasVote sets the given vote as known by the peer
  1327  func (ps *PeerState) SetHasVote(vote *types.Vote) {
  1328  	ps.mtx.Lock()
  1329  	defer ps.mtx.Unlock()
  1330  
  1331  	ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
  1332  }
  1333  
  1334  func (ps *PeerState) setHasVote(height int64, round int32, voteType cmtproto.SignedMsgType, index int32) {
  1335  	ps.logger.Debug("setHasVote",
  1336  		"peerH/R",
  1337  		log.NewLazySprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
  1338  		"H/R",
  1339  		log.NewLazySprintf("%d/%d", height, round),
  1340  		"type", voteType, "index", index)
  1341  
  1342  	// NOTE: some may be nil BitArrays -> no side effects.
  1343  	psVotes := ps.getVoteBitArray(height, round, voteType)
  1344  	if psVotes != nil {
  1345  		psVotes.SetIndex(int(index), true)
  1346  	}
  1347  }
  1348  
  1349  // ApplyNewRoundStepMessage updates the peer state for the new round.
  1350  func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
  1351  	ps.mtx.Lock()
  1352  	defer ps.mtx.Unlock()
  1353  
  1354  	// Ignore duplicates or decreases
  1355  	if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 {
  1356  		return
  1357  	}
  1358  
  1359  	// Just remember these values.
  1360  	psHeight := ps.PRS.Height
  1361  	psRound := ps.PRS.Round
  1362  	psCatchupCommitRound := ps.PRS.CatchupCommitRound
  1363  	psCatchupCommit := ps.PRS.CatchupCommit
  1364  	lastPrecommits := ps.PRS.Precommits
  1365  
  1366  	startTime := cmttime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
  1367  	ps.PRS.Height = msg.Height
  1368  	ps.PRS.Round = msg.Round
  1369  	ps.PRS.Step = msg.Step
  1370  	ps.PRS.StartTime = startTime
  1371  	if psHeight != msg.Height || psRound != msg.Round {
  1372  		ps.PRS.Proposal = false
  1373  		ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{}
  1374  		ps.PRS.ProposalBlockParts = nil
  1375  		ps.PRS.ProposalPOLRound = -1
  1376  		ps.PRS.ProposalPOL = nil
  1377  		// We'll update the BitArray capacity later.
  1378  		ps.PRS.Prevotes = nil
  1379  		ps.PRS.Precommits = nil
  1380  	}
  1381  	if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
  1382  		// Peer caught up to CatchupCommitRound.
  1383  		// Preserve psCatchupCommit!
  1384  		// NOTE: We prefer to use prs.Precommits if
  1385  		// prs.Round matches prs.CatchupCommitRound.
  1386  		ps.PRS.Precommits = psCatchupCommit
  1387  	}
  1388  	if psHeight != msg.Height {
  1389  		// Shift Precommits to LastCommit.
  1390  		if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
  1391  			ps.PRS.LastCommitRound = msg.LastCommitRound
  1392  			ps.PRS.LastCommit = lastPrecommits
  1393  		} else {
  1394  			ps.PRS.LastCommitRound = msg.LastCommitRound
  1395  			ps.PRS.LastCommit = nil
  1396  		}
  1397  		// We'll update the BitArray capacity later.
  1398  		ps.PRS.CatchupCommitRound = -1
  1399  		ps.PRS.CatchupCommit = nil
  1400  	}
  1401  }
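
        // Worked example (illustrative): if the peer was at (H=10, R=0) and sends
        // NewRoundStep{Height: 11, Round: 0, LastCommitRound: 0}, then its height-10
        // precommits are preserved as ps.PRS.LastCommit, since psHeight+1 == 11 and
        // psRound == LastCommitRound.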
  1402  
  1403  // ApplyNewValidBlockMessage updates the peer state for the new valid block.
  1404  func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) {
  1405  	ps.mtx.Lock()
  1406  	defer ps.mtx.Unlock()
  1407  
  1408  	if ps.PRS.Height != msg.Height {
  1409  		return
  1410  	}
  1411  
  1412  	if ps.PRS.Round != msg.Round && !msg.IsCommit {
  1413  		return
  1414  	}
  1415  
  1416  	ps.PRS.ProposalBlockPartSetHeader = msg.BlockPartSetHeader
  1417  	ps.PRS.ProposalBlockParts = msg.BlockParts
  1418  }
  1419  
  1420  // ApplyProposalPOLMessage updates the peer state for the new proposal POL.
  1421  func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
  1422  	ps.mtx.Lock()
  1423  	defer ps.mtx.Unlock()
  1424  
  1425  	if ps.PRS.Height != msg.Height {
  1426  		return
  1427  	}
  1428  	if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound {
  1429  		return
  1430  	}
  1431  
  1432  	// TODO: Merge onto existing ps.PRS.ProposalPOL?
  1433  	// We might have sent some prevotes in the meantime.
  1434  	ps.PRS.ProposalPOL = msg.ProposalPOL
  1435  }
  1436  
  1437  // ApplyHasVoteMessage updates the peer state for the new vote.
  1438  func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
  1439  	ps.mtx.Lock()
  1440  	defer ps.mtx.Unlock()
  1441  
  1442  	if ps.PRS.Height != msg.Height {
  1443  		return
  1444  	}
  1445  
  1446  	ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
  1447  }
  1448  
  1449  // ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes
  1450  // it claims to have for the corresponding BlockID.
  1451  // `ourVotes` is a BitArray of votes we have for msg.BlockID
  1452  // NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
  1453  // we conservatively overwrite ps's votes w/ msg.Votes.
  1454  func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *bits.BitArray) {
  1455  	ps.mtx.Lock()
  1456  	defer ps.mtx.Unlock()
  1457  
  1458  	votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
  1459  	if votes != nil {
  1460  		if ourVotes == nil {
  1461  			votes.Update(msg.Votes)
  1462  		} else {
  1463  			otherVotes := votes.Sub(ourVotes)
  1464  			hasVotes := otherVotes.Or(msg.Votes)
  1465  			votes.Update(hasVotes)
  1466  		}
  1467  	}
  1468  }
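
        // Worked example (illustrative, with 4 validators): if the peer's known
        // bit-array is {0,1}, ourVotes is {0,2} and msg.Votes is {2,3}, then
        // votes.Sub(ourVotes) = {1}, {1}.Or(msg.Votes) = {1,2,3}, and the peer's
        // bit-array is updated to {1,2,3}.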
  1469  
  1470  // String returns a string representation of the PeerState
  1471  func (ps *PeerState) String() string {
  1472  	return ps.StringIndented("")
  1473  }
  1474  
  1475  // StringIndented returns a string representation of the PeerState
  1476  func (ps *PeerState) StringIndented(indent string) string {
  1477  	ps.mtx.Lock()
  1478  	defer ps.mtx.Unlock()
  1479  	return fmt.Sprintf(`PeerState{
  1480  %s  Key        %v
  1481  %s  RoundState %v
  1482  %s  Stats      %v
  1483  %s}`,
  1484  		indent, ps.peer.ID(),
  1485  		indent, ps.PRS.StringIndented(indent+"  "),
  1486  		indent, ps.Stats,
  1487  		indent)
  1488  }
  1489  
  1490  //-----------------------------------------------------------------------------
  1491  // Messages
  1492  
  1493  // Message is a message that can be sent and received on the Reactor
  1494  type Message interface {
  1495  	ValidateBasic() error
  1496  }
  1497  
  1498  func init() {
  1499  	cmtjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage")
  1500  	cmtjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage")
  1501  	cmtjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal")
  1502  	cmtjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL")
  1503  	cmtjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart")
  1504  	cmtjson.RegisterType(&VoteMessage{}, "tendermint/Vote")
  1505  	cmtjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote")
  1506  	cmtjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23")
  1507  	cmtjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
  1508  }
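
// Illustrative sketch (not part of the upstream code): the registrations above let
// cmtjson encode these messages together with their registered type name (roughly
// {"type": "tendermint/HasVote", "value": {...}}), so they can later be decoded back
// through the Message interface. The field values below are hypothetical.
//
//	msg := &HasVoteMessage{Height: 1, Round: 0, Type: cmtproto.PrevoteType, Index: 0}
//	bz, err := cmtjson.Marshal(msg)
//	if err != nil {
//		// handle the encoding error
//	}
//	_ = bz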
  1509  
  1510  //-------------------------------------
  1511  
  1512  // NewRoundStepMessage is sent for every step taken in the ConsensusState,
  1513  // i.e., for every height/round/step transition.
  1514  type NewRoundStepMessage struct {
  1515  	Height                int64
  1516  	Round                 int32
  1517  	Step                  cstypes.RoundStepType
  1518  	SecondsSinceStartTime int64
  1519  	LastCommitRound       int32
  1520  }
  1521  
  1522  // ValidateBasic performs basic validation.
  1523  func (m *NewRoundStepMessage) ValidateBasic() error {
  1524  	if m.Height < 0 {
  1525  		return errors.New("negative Height")
  1526  	}
  1527  	if m.Round < 0 {
  1528  		return errors.New("negative Round")
  1529  	}
  1530  	if !m.Step.IsValid() {
  1531  		return errors.New("invalid Step")
  1532  	}
  1533  
  1534  	// NOTE: SecondsSinceStartTime may be negative
  1535  
  1536  	// LastCommitRound will be -1 for the initial height, but we don't know what the initial
  1537  	// height is here, since it can be specified in genesis. The reactor has to validate this
  1538  	// via ValidateHeight().
  1539  	if m.LastCommitRound < -1 {
  1540  		return errors.New("invalid LastCommitRound (cannot be < -1)")
  1541  	}
  1542  
  1543  	return nil
  1544  }
  1545  
  1546  // ValidateHeight validates the height given the chain's initial height.
  1547  func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error {
  1548  	if m.Height < initialHeight {
  1549  		return fmt.Errorf("invalid Height %v (lower than initial height %v)",
  1550  			m.Height, initialHeight)
  1551  	}
  1552  	if m.Height == initialHeight && m.LastCommitRound != -1 {
  1553  		return fmt.Errorf("invalid LastCommitRound %v (must be -1 for initial height %v)",
  1554  			m.LastCommitRound, initialHeight)
  1555  	}
  1556  	if m.Height > initialHeight && m.LastCommitRound < 0 {
  1557  		return fmt.Errorf("LastCommitRound can only be negative for initial height %v",
  1558  			initialHeight)
  1559  	}
  1560  	return nil
  1561  }
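
// Illustrative sketch (not part of the upstream reactor): combining ValidateBasic with
// ValidateHeight on receipt of a NewRoundStepMessage. The initial height and the field
// values here are hypothetical.
//
//	msg := &NewRoundStepMessage{
//		Height:          12,
//		Round:           0,
//		Step:            cstypes.RoundStepPropose,
//		LastCommitRound: 0,
//	}
//	if err := msg.ValidateBasic(); err != nil {
//		// reject the message
//	}
//	if err := msg.ValidateHeight(int64(1)); err != nil {
//		// reject the message: inconsistent with the chain's initial height
//	}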
  1562  
  1563  // String returns a string representation.
  1564  func (m *NewRoundStepMessage) String() string {
  1565  	return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]",
  1566  		m.Height, m.Round, m.Step, m.LastCommitRound)
  1567  }
  1568  
  1569  //-------------------------------------
  1570  
  1571  // NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
  1572  // i.e., there is a Proposal for block B and +2/3 prevotes for block B in round r.
  1573  // If the block is also committed, the IsCommit flag is set to true.
  1574  type NewValidBlockMessage struct {
  1575  	Height             int64
  1576  	Round              int32
  1577  	BlockPartSetHeader types.PartSetHeader
  1578  	BlockParts         *bits.BitArray
  1579  	IsCommit           bool
  1580  }
  1581  
  1582  // ValidateBasic performs basic validation.
  1583  func (m *NewValidBlockMessage) ValidateBasic() error {
  1584  	if m.Height < 0 {
  1585  		return errors.New("negative Height")
  1586  	}
  1587  	if m.Round < 0 {
  1588  		return errors.New("negative Round")
  1589  	}
  1590  	if err := m.BlockPartSetHeader.ValidateBasic(); err != nil {
  1591  		return fmt.Errorf("wrong BlockPartSetHeader: %v", err)
  1592  	}
  1593  	if m.BlockParts.Size() == 0 {
  1594  		return errors.New("empty blockParts")
  1595  	}
  1596  	if m.BlockParts.Size() != int(m.BlockPartSetHeader.Total) {
  1597  		return fmt.Errorf("blockParts bit array size %d not equal to BlockPartSetHeader.Total %d",
  1598  			m.BlockParts.Size(),
  1599  			m.BlockPartSetHeader.Total)
  1600  	}
  1601  	if m.BlockParts.Size() > int(types.MaxBlockPartsCount) {
  1602  		return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount)
  1603  	}
  1604  	return nil
  1605  }
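
// Illustrative sketch (not part of the upstream reactor): a message that passes the
// size checks above. The part-set hash is omitted for brevity and all values are
// hypothetical; a real message carries the header of the proposed block's part set.
//
//	header := types.PartSetHeader{Total: 2}
//	parts := bits.NewBitArray(int(header.Total)) // size must equal header.Total
//	parts.SetIndex(0, true)                      // the peer has part 0 so far
//
//	msg := &NewValidBlockMessage{
//		Height:             11,
//		Round:              0,
//		BlockPartSetHeader: header,
//		BlockParts:         parts,
//		IsCommit:           false,
//	}
//	if err := msg.ValidateBasic(); err != nil {
//		// reject the message
//	}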
  1606  
  1607  // String returns a string representation.
  1608  func (m *NewValidBlockMessage) String() string {
  1609  	return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]",
  1610  		m.Height, m.Round, m.BlockPartSetHeader, m.BlockParts, m.IsCommit)
  1611  }
  1612  
  1613  //-------------------------------------
  1614  
  1615  // ProposalMessage is sent when a new block is proposed.
  1616  type ProposalMessage struct {
  1617  	Proposal *types.Proposal
  1618  }
  1619  
  1620  // ValidateBasic performs basic validation.
  1621  func (m *ProposalMessage) ValidateBasic() error {
  1622  	return m.Proposal.ValidateBasic()
  1623  }
  1624  
  1625  // String returns a string representation.
  1626  func (m *ProposalMessage) String() string {
  1627  	return fmt.Sprintf("[Proposal %v]", m.Proposal)
  1628  }
  1629  
  1630  //-------------------------------------
  1631  
  1632  // ProposalPOLMessage is sent when a previous proposal is re-proposed.
  1633  type ProposalPOLMessage struct {
  1634  	Height           int64
  1635  	ProposalPOLRound int32
  1636  	ProposalPOL      *bits.BitArray
  1637  }
  1638  
  1639  // ValidateBasic performs basic validation.
  1640  func (m *ProposalPOLMessage) ValidateBasic() error {
  1641  	if m.Height < 0 {
  1642  		return errors.New("negative Height")
  1643  	}
  1644  	if m.ProposalPOLRound < 0 {
  1645  		return errors.New("negative ProposalPOLRound")
  1646  	}
  1647  	if m.ProposalPOL.Size() == 0 {
  1648  		return errors.New("empty ProposalPOL bit array")
  1649  	}
  1650  	if m.ProposalPOL.Size() > types.MaxVotesCount {
  1651  		return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount)
  1652  	}
  1653  	return nil
  1654  }
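
// Illustrative sketch (not part of the upstream reactor): a ProposalPOL message whose
// bit array marks which validators' prevotes the sender has for the POL round. The
// validator-set size and field values here are hypothetical.
//
//	pol := bits.NewBitArray(3) // one bit per validator
//	pol.SetIndex(0, true)
//
//	msg := &ProposalPOLMessage{Height: 11, ProposalPOLRound: 0, ProposalPOL: pol}
//	if err := msg.ValidateBasic(); err != nil {
//		// reject the message
//	}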
  1655  
  1656  // String returns a string representation.
  1657  func (m *ProposalPOLMessage) String() string {
  1658  	return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL)
  1659  }
  1660  
  1661  //-------------------------------------
  1662  
  1663  // BlockPartMessage is sent when gossiping a piece of the proposed block.
  1664  type BlockPartMessage struct {
  1665  	Height int64
  1666  	Round  int32
  1667  	Part   *types.Part
  1668  }
  1669  
  1670  // ValidateBasic performs basic validation.
  1671  func (m *BlockPartMessage) ValidateBasic() error {
  1672  	if m.Height < 0 {
  1673  		return errors.New("negative Height")
  1674  	}
  1675  	if m.Round < 0 {
  1676  		return errors.New("negative Round")
  1677  	}
  1678  	if err := m.Part.ValidateBasic(); err != nil {
  1679  		return fmt.Errorf("wrong Part: %v", err)
  1680  	}
  1681  	return nil
  1682  }
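
// Illustrative sketch (not part of the upstream reactor): block parts normally come from
// a types.PartSet built over the serialized proposed block, so each Part carries a valid
// Merkle proof. The block bytes and heights here are hypothetical.
//
//	partSet := types.NewPartSetFromData([]byte("serialized block bytes"), types.BlockPartSizeBytes)
//	msg := &BlockPartMessage{Height: 11, Round: 0, Part: partSet.GetPart(0)}
//	if err := msg.ValidateBasic(); err != nil {
//		// reject the message
//	}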
  1683  
  1684  // String returns a string representation.
  1685  func (m *BlockPartMessage) String() string {
  1686  	return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part)
  1687  }
  1688  
  1689  //-------------------------------------
  1690  
  1691  // VoteMessage is sent when voting for a proposal (or lack thereof).
  1692  type VoteMessage struct {
  1693  	Vote *types.Vote
  1694  }
  1695  
  1696  // ValidateBasic performs basic validation.
  1697  func (m *VoteMessage) ValidateBasic() error {
  1698  	return m.Vote.ValidateBasic()
  1699  }
  1700  
  1701  // String returns a string representation.
  1702  func (m *VoteMessage) String() string {
  1703  	return fmt.Sprintf("[Vote %v]", m.Vote)
  1704  }
  1705  
  1706  //-------------------------------------
  1707  
  1708  // HasVoteMessage is sent to indicate that a particular vote has been received.
  1709  type HasVoteMessage struct {
  1710  	Height int64
  1711  	Round  int32
  1712  	Type   cmtproto.SignedMsgType
  1713  	Index  int32
  1714  }
  1715  
  1716  // ValidateBasic performs basic validation.
  1717  func (m *HasVoteMessage) ValidateBasic() error {
  1718  	if m.Height < 0 {
  1719  		return errors.New("negative Height")
  1720  	}
  1721  	if m.Round < 0 {
  1722  		return errors.New("negative Round")
  1723  	}
  1724  	if !types.IsVoteTypeValid(m.Type) {
  1725  		return errors.New("invalid Type")
  1726  	}
  1727  	if m.Index < 0 {
  1728  		return errors.New("negative Index")
  1729  	}
  1730  	return nil
  1731  }
  1732  
  1733  // String returns a string representation.
  1734  func (m *HasVoteMessage) String() string {
  1735  	return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type)
  1736  }
  1737  
  1738  //-------------------------------------
  1739  
  1740  // VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes.
  1741  type VoteSetMaj23Message struct {
  1742  	Height  int64
  1743  	Round   int32
  1744  	Type    cmtproto.SignedMsgType
  1745  	BlockID types.BlockID
  1746  }
  1747  
  1748  // ValidateBasic performs basic validation.
  1749  func (m *VoteSetMaj23Message) ValidateBasic() error {
  1750  	if m.Height < 0 {
  1751  		return errors.New("negative Height")
  1752  	}
  1753  	if m.Round < 0 {
  1754  		return errors.New("negative Round")
  1755  	}
  1756  	if !types.IsVoteTypeValid(m.Type) {
  1757  		return errors.New("invalid Type")
  1758  	}
  1759  	if err := m.BlockID.ValidateBasic(); err != nil {
  1760  		return fmt.Errorf("wrong BlockID: %v", err)
  1761  	}
  1762  	return nil
  1763  }
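
// Illustrative sketch (not part of the upstream reactor): announcing that +2/3 precommits
// have been seen for a particular BlockID. The 32-byte hashes here are placeholders and
// the other values are hypothetical.
//
//	blockID := types.BlockID{
//		Hash:          make([]byte, 32),
//		PartSetHeader: types.PartSetHeader{Total: 1, Hash: make([]byte, 32)},
//	}
//	msg := &VoteSetMaj23Message{Height: 11, Round: 0, Type: cmtproto.PrecommitType, BlockID: blockID}
//	if err := msg.ValidateBasic(); err != nil {
//		// reject the message
//	}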
  1764  
  1765  // String returns a string representation.
  1766  func (m *VoteSetMaj23Message) String() string {
  1767  	return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
  1768  }
  1769  
  1770  //-------------------------------------
  1771  
  1772  // VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID.
  1773  type VoteSetBitsMessage struct {
  1774  	Height  int64
  1775  	Round   int32
  1776  	Type    cmtproto.SignedMsgType
  1777  	BlockID types.BlockID
  1778  	Votes   *bits.BitArray
  1779  }
  1780  
  1781  // ValidateBasic performs basic validation.
  1782  func (m *VoteSetBitsMessage) ValidateBasic() error {
  1783  	if m.Height < 0 {
  1784  		return errors.New("negative Height")
  1785  	}
  1786  	if !types.IsVoteTypeValid(m.Type) {
  1787  		return errors.New("invalid Type")
  1788  	}
  1789  	if err := m.BlockID.ValidateBasic(); err != nil {
  1790  		return fmt.Errorf("wrong BlockID: %v", err)
  1791  	}
  1792  	// NOTE: Votes.Size() can be zero if the node does not have any votes.
  1793  	if m.Votes.Size() > types.MaxVotesCount {
  1794  		return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount)
  1795  	}
  1796  	return nil
  1797  }
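
// Illustrative sketch (not part of the upstream reactor): a VoteSetBits message carrying
// the bit array of votes the sender has for the queried BlockID. The values here are
// hypothetical; note that a zero BlockID also passes basic validation.
//
//	votes := bits.NewBitArray(3)
//	votes.SetIndex(2, true)
//
//	msg := &VoteSetBitsMessage{
//		Height:  11,
//		Round:   0,
//		Type:    cmtproto.PrecommitType,
//		BlockID: types.BlockID{}, // zero BlockID, e.g. for nil precommits
//		Votes:   votes,
//	}
//	if err := msg.ValidateBasic(); err != nil {
//		// reject the message
//	}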
  1798  
  1799  // String returns a string representation.
  1800  func (m *VoteSetBitsMessage) String() string {
  1801  	return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
  1802  }
  1803  
  1804  //-------------------------------------