github.com/Finschia/ostracon@v1.1.5/consensus/reactor.go

     1  package consensus
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"reflect"
     7  	"sync"
     8  	"time"
     9  
    10  	"github.com/gogo/protobuf/proto"
    11  	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
    12  	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    13  
    14  	cstypes "github.com/Finschia/ostracon/consensus/types"
    15  	"github.com/Finschia/ostracon/libs/bits"
    16  	tmevents "github.com/Finschia/ostracon/libs/events"
    17  	tmjson "github.com/Finschia/ostracon/libs/json"
    18  	"github.com/Finschia/ostracon/libs/log"
    19  	tmsync "github.com/Finschia/ostracon/libs/sync"
    20  	"github.com/Finschia/ostracon/p2p"
    21  	sm "github.com/Finschia/ostracon/state"
    22  	"github.com/Finschia/ostracon/types"
    23  	tmtime "github.com/Finschia/ostracon/types/time"
    24  )
    25  
    26  const (
    27  	StateChannel       = byte(0x20)
    28  	DataChannel        = byte(0x21)
    29  	VoteChannel        = byte(0x22)
    30  	VoteSetBitsChannel = byte(0x23)
    31  
    32  	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
    33  
    34  	blocksToContributeToBecomeGoodPeer = 10000
    35  	votesToContributeToBecomeGoodPeer  = 10000
    36  )
    37  
    38  //-----------------------------------------------------------------------------
    39  
    40  // Reactor defines a reactor for the consensus service.
    41  type Reactor struct {
    42  	p2p.BaseReactor // BaseService + p2p.Switch
    43  
    44  	conS *State
    45  
    46  	mtx      tmsync.RWMutex
    47  	waitSync bool
    48  	eventBus *types.EventBus
    49  	rs       *cstypes.RoundState
    50  
    51  	Metrics *Metrics
    52  }
    53  
    54  type ReactorOption func(*Reactor)
    55  
    56  // NewReactor returns a new Reactor with the given
    57  // consensusState.
    58  func NewReactor(consensusState *State, waitSync bool, async bool, recvBufSize int, options ...ReactorOption) *Reactor {
    59  	conR := &Reactor{
    60  		conS:     consensusState,
    61  		waitSync: waitSync,
    62  		rs:       consensusState.GetRoundState(),
    63  		Metrics:  NopMetrics(),
    64  	}
    65  	conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR, async, recvBufSize)
    66  
    67  	for _, option := range options {
    68  		option(conR)
    69  	}
    70  
    71  	return conR
    72  }
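
// A minimal usage sketch (not part of the original file): wiring the reactor into
// a node. The consensusState, recvAsync, recvBufSize, metrics, eventBus, logger,
// and sw values are assumptions standing in for whatever the embedding node provides.
//
//	conR := NewReactor(consensusState, true /* waitSync */, recvAsync, recvBufSize,
//		ReactorMetrics(metrics))
//	conR.SetLogger(logger.With("module", "consensus"))
//	conR.SetEventBus(eventBus)
//	sw.AddReactor("CONSENSUS", conR)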
    73  
     74  // OnStart implements BaseService by subscribing to events (which will later be
     75  // broadcast to other peers) and starting the consensus state if we're not in fast sync.
    76  func (conR *Reactor) OnStart() error {
    77  	conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())
    78  
    79  	// call BaseReactor's OnStart()
    80  	err := conR.BaseReactor.OnStart()
    81  	if err != nil {
    82  		return err
    83  	}
    84  
    85  	// start routine that computes peer statistics for evaluating peer quality
    86  	go conR.peerStatsRoutine()
    87  
    88  	conR.subscribeToBroadcastEvents()
    89  	go conR.updateRoundStateRoutine()
    90  
    91  	if !conR.WaitSync() {
    92  		err := conR.conS.Start()
    93  		if err != nil {
    94  			return err
    95  		}
    96  	}
    97  
    98  	return nil
    99  }
   100  
    101  // OnStop implements BaseService by unsubscribing from events and stopping
    102  // the consensus state.
   103  func (conR *Reactor) OnStop() {
   104  	conR.unsubscribeFromBroadcastEvents()
   105  	if err := conR.conS.Stop(); err != nil {
   106  		conR.Logger.Error("Error stopping consensus state", "err", err)
   107  	}
   108  	if !conR.WaitSync() {
   109  		conR.conS.Wait()
   110  	}
   111  }
   112  
   113  // SwitchToConsensus switches from fast_sync mode to consensus mode.
    114  // It resets the state, turns off fast_sync, and starts the consensus state-machine.
   115  func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
   116  	conR.Logger.Info("SwitchToConsensus")
   117  
   118  	// We have no votes, so reconstruct LastCommit from SeenCommit.
   119  	if state.LastBlockHeight > 0 {
   120  		conR.conS.reconstructLastCommit(state)
   121  	}
   122  
   123  	// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
   124  	// NewRoundStepMessage.
   125  	conR.conS.updateToState(state)
   126  
   127  	conR.mtx.Lock()
   128  	conR.waitSync = false
   129  	conR.mtx.Unlock()
   130  	conR.Metrics.FastSyncing.Set(0)
   131  	conR.Metrics.StateSyncing.Set(0)
   132  
   133  	if skipWAL {
   134  		conR.conS.doWALCatchup = false
   135  	}
   136  	err := conR.conS.Start()
   137  	if err != nil {
   138  		panic(fmt.Sprintf(`Failed to start consensus state: %v
   139  
   140  conS:
   141  %+v
   142  
   143  conR:
   144  %+v`, err, conR.conS, conR))
   145  	}
   146  }
   147  
   148  // GetChannels implements Reactor
   149  func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
   150  	// TODO optimize
   151  	return []*p2p.ChannelDescriptor{
   152  		{
   153  			ID:                  StateChannel,
   154  			Priority:            6,
   155  			SendQueueCapacity:   100,
   156  			RecvMessageCapacity: maxMsgSize,
   157  			MessageType:         &tmcons.Message{},
   158  		},
   159  		{
   160  			ID: DataChannel, // maybe split between gossiping current block and catchup stuff
   161  			// once we gossip the whole block there's nothing left to send until next height or round
   162  			Priority:            10,
   163  			SendQueueCapacity:   100,
   164  			RecvBufferCapacity:  50 * 4096,
   165  			RecvMessageCapacity: maxMsgSize,
   166  			MessageType:         &tmcons.Message{},
   167  		},
   168  		{
   169  			ID:                  VoteChannel,
   170  			Priority:            7,
   171  			SendQueueCapacity:   100,
   172  			RecvBufferCapacity:  100 * 100,
   173  			RecvMessageCapacity: maxMsgSize,
   174  			MessageType:         &tmcons.Message{},
   175  		},
   176  		{
   177  			ID:                  VoteSetBitsChannel,
   178  			Priority:            1,
   179  			SendQueueCapacity:   2,
   180  			RecvBufferCapacity:  1024,
   181  			RecvMessageCapacity: maxMsgSize,
   182  			MessageType:         &tmcons.Message{},
   183  		},
   184  	}
   185  }
   186  
   187  // InitPeer implements Reactor by creating a state for the peer.
   188  func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
   189  	peerState := NewPeerState(peer).SetLogger(conR.Logger)
   190  	peer.Set(types.PeerStateKey, peerState)
   191  	return peer
   192  }
   193  
   194  // AddPeer implements Reactor by spawning multiple gossiping goroutines for the
   195  // peer.
   196  func (conR *Reactor) AddPeer(peer p2p.Peer) {
   197  	if !conR.IsRunning() {
   198  		return
   199  	}
   200  
   201  	peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
   202  	if !ok {
   203  		panic(fmt.Sprintf("peer %v has no state", peer))
   204  	}
   205  	// Begin routines for this peer.
   206  	go conR.gossipDataRoutine(peer, peerState)
   207  	go conR.gossipVotesRoutine(peer, peerState)
   208  	go conR.queryMaj23Routine(peer, peerState)
   209  
   210  	// Send our state to peer.
   211  	// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
   212  	if !conR.WaitSync() {
   213  		conR.sendNewRoundStepMessage(peer)
   214  	}
   215  }
   216  
   217  // RemovePeer is a noop.
   218  func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
   219  	if !conR.IsRunning() {
   220  		return
   221  	}
   222  	// TODO
   223  	// ps, ok := peer.Get(PeerStateKey).(*PeerState)
   224  	// if !ok {
   225  	// 	panic(fmt.Sprintf("Peer %v has no state", peer))
   226  	// }
   227  	// ps.Disconnect()
   228  }
   229  
    230  // ReceiveEnvelope implements Reactor.
    231  // NOTE: We process these messages even when we're fast_syncing.
    232  // Messages affect either a peer state or the consensus state.
    233  // Peer state updates can happen in parallel, but processing of
    234  // proposals, block parts, and votes is ordered by the receiveRoutine.
    235  // NOTE: blocks on the consensus state for proposals, block parts, and votes.
   236  func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
   237  	if !conR.IsRunning() {
   238  		conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
   239  		return
   240  	}
   241  	m := e.Message
   242  	if wm, ok := m.(p2p.Wrapper); ok {
   243  		m = wm.Wrap()
   244  	}
   245  	msg, err := MsgFromProto(m.(*tmcons.Message))
   246  	if err != nil {
   247  		conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
   248  		conR.Switch.StopPeerForError(e.Src, err)
   249  		return
   250  	}
   251  
   252  	if err = msg.ValidateBasic(); err != nil {
   253  		conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
   254  		conR.Switch.StopPeerForError(e.Src, err)
   255  		return
   256  	}
   257  
   258  	conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)
   259  
   260  	// Get peer states
   261  	ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
   262  	if !ok {
   263  		panic(fmt.Sprintf("Peer %v has no state", e.Src))
   264  	}
   265  
   266  	switch e.ChannelID {
   267  	case StateChannel:
   268  		switch msg := msg.(type) {
   269  		case *NewRoundStepMessage:
   270  			conR.conS.mtx.Lock()
   271  			initialHeight := conR.conS.state.InitialHeight
   272  			conR.conS.mtx.Unlock()
   273  			if err = msg.ValidateHeight(initialHeight); err != nil {
   274  				conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
   275  				conR.Switch.StopPeerForError(e.Src, err)
   276  				return
   277  			}
   278  			ps.ApplyNewRoundStepMessage(msg)
   279  		case *NewValidBlockMessage:
   280  			ps.ApplyNewValidBlockMessage(msg)
   281  		case *HasVoteMessage:
   282  			ps.ApplyHasVoteMessage(msg)
   283  		case *VoteSetMaj23Message:
   284  			cs := conR.conS
   285  			cs.mtx.Lock()
   286  			height, votes := cs.Height, cs.Votes
   287  			cs.mtx.Unlock()
   288  			if height != msg.Height {
   289  				return
   290  			}
   291  			// Peer claims to have a maj23 for some BlockID at H,R,S,
   292  			err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
   293  			if err != nil {
   294  				conR.Switch.StopPeerForError(e.Src, err)
   295  				return
   296  			}
   297  			// Respond with a VoteSetBitsMessage showing which votes we have.
   298  			// (and consequently shows which we don't have)
   299  			var ourVotes *bits.BitArray
   300  			switch msg.Type {
   301  			case tmproto.PrevoteType:
   302  				ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
   303  			case tmproto.PrecommitType:
   304  				ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
   305  			default:
   306  				panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
   307  			}
   308  			eMsg := &tmcons.VoteSetBits{
   309  				Height:  msg.Height,
   310  				Round:   msg.Round,
   311  				Type:    msg.Type,
   312  				BlockID: msg.BlockID.ToProto(),
   313  			}
   314  			if votes := ourVotes.ToProto(); votes != nil {
   315  				eMsg.Votes = *votes
   316  			}
   317  			p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
   318  				ChannelID: VoteSetBitsChannel,
   319  				Message:   eMsg,
   320  			}, conR.Logger)
   321  		default:
   322  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   323  		}
   324  
   325  	case DataChannel:
   326  		if conR.WaitSync() {
   327  			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
   328  			return
   329  		}
   330  		switch msg := msg.(type) {
   331  		case *ProposalMessage:
   332  			ps.SetHasProposal(msg.Proposal)
   333  			conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
   334  		case *ProposalPOLMessage:
   335  			ps.ApplyProposalPOLMessage(msg)
   336  		case *BlockPartMessage:
   337  			ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
   338  			conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
   339  			conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
   340  		default:
   341  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   342  		}
   343  
   344  	case VoteChannel:
   345  		if conR.WaitSync() {
   346  			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
   347  			return
   348  		}
   349  		switch msg := msg.(type) {
   350  		case *VoteMessage:
   351  			cs := conR.conS
   352  			cs.mtx.RLock()
   353  			height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size()
   354  			cs.mtx.RUnlock()
   355  			ps.EnsureVoteBitArrays(height, valSize)
   356  			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
   357  			ps.SetHasVote(msg.Vote)
   358  
   359  			cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
   360  
   361  		default:
   362  			// don't punish (leave room for soft upgrades)
   363  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   364  		}
   365  
   366  	case VoteSetBitsChannel:
   367  		if conR.WaitSync() {
   368  			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
   369  			return
   370  		}
   371  		switch msg := msg.(type) {
   372  		case *VoteSetBitsMessage:
   373  			cs := conR.conS
   374  			cs.mtx.Lock()
   375  			height, votes := cs.Height, cs.Votes
   376  			cs.mtx.Unlock()
   377  
   378  			if height == msg.Height {
   379  				var ourVotes *bits.BitArray
   380  				switch msg.Type {
   381  				case tmproto.PrevoteType:
   382  					ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
   383  				case tmproto.PrecommitType:
   384  					ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
   385  				default:
   386  					panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
   387  				}
   388  				ps.ApplyVoteSetBitsMessage(msg, ourVotes)
   389  			} else {
   390  				ps.ApplyVoteSetBitsMessage(msg, nil)
   391  			}
   392  		default:
   393  			// don't punish (leave room for soft upgrades)
   394  			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
   395  		}
   396  
   397  	default:
   398  		conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
   399  	}
   400  }
   401  
   402  func (conR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
   403  	msg := &tmcons.Message{}
   404  	err := proto.Unmarshal(msgBytes, msg)
   405  	if err != nil {
   406  		panic(err)
   407  	}
   408  	uw, err := msg.Unwrap()
   409  	if err != nil {
   410  		panic(err)
   411  	}
   412  	conR.ReceiveEnvelope(p2p.Envelope{
   413  		ChannelID: chID,
   414  		Src:       peer,
   415  		Message:   uw,
   416  	})
   417  }
   418  
   419  // SetEventBus sets event bus.
   420  func (conR *Reactor) SetEventBus(b *types.EventBus) {
   421  	conR.eventBus = b
   422  	conR.conS.SetEventBus(b)
   423  }
   424  
   425  // WaitSync returns whether the consensus reactor is waiting for state/fast sync.
   426  func (conR *Reactor) WaitSync() bool {
   427  	conR.mtx.RLock()
   428  	defer conR.mtx.RUnlock()
   429  	return conR.waitSync
   430  }
   431  
   432  //--------------------------------------
   433  
    434  // subscribeToBroadcastEvents subscribes to new round steps and votes
    435  // using the internal pubsub defined on the consensus state, so they can be
    436  // broadcast to peers upon receipt.
   437  func (conR *Reactor) subscribeToBroadcastEvents() {
   438  	const subscriber = "consensus-reactor"
   439  	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
   440  		func(data tmevents.EventData) {
   441  			conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
   442  		}); err != nil {
   443  		conR.Logger.Error("Error adding listener for events", "err", err)
   444  	}
   445  
   446  	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
   447  		func(data tmevents.EventData) {
   448  			conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
   449  		}); err != nil {
   450  		conR.Logger.Error("Error adding listener for events", "err", err)
   451  	}
   452  
   453  	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
   454  		func(data tmevents.EventData) {
   455  			conR.broadcastHasVoteMessage(data.(*types.Vote))
   456  		}); err != nil {
   457  		conR.Logger.Error("Error adding listener for events", "err", err)
   458  	}
   459  
   460  }
   461  
   462  func (conR *Reactor) unsubscribeFromBroadcastEvents() {
   463  	const subscriber = "consensus-reactor"
   464  	conR.conS.evsw.RemoveListener(subscriber)
   465  }
   466  
   467  func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
   468  	nrsMsg := makeRoundStepMessage(rs)
   469  	conR.Switch.BroadcastEnvelope(p2p.Envelope{
   470  		ChannelID: StateChannel,
   471  		Message:   nrsMsg,
   472  	})
   473  }
   474  
   475  func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
   476  	psh := rs.ProposalBlockParts.Header()
   477  	csMsg := &tmcons.NewValidBlock{
   478  		Height:             rs.Height,
   479  		Round:              rs.Round,
   480  		BlockPartSetHeader: psh.ToProto(),
   481  		BlockParts:         rs.ProposalBlockParts.BitArray().ToProto(),
   482  		IsCommit:           rs.Step == cstypes.RoundStepCommit,
   483  	}
   484  	conR.Switch.BroadcastEnvelope(p2p.Envelope{
   485  		ChannelID: StateChannel,
   486  		Message:   csMsg,
   487  	})
   488  }
   489  
   490  // Broadcasts HasVoteMessage to peers that care.
   491  func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
   492  	msg := &tmcons.HasVote{
   493  		Height: vote.Height,
   494  		Round:  vote.Round,
   495  		Type:   vote.Type,
   496  		Index:  vote.ValidatorIndex,
   497  	}
   498  	conR.Switch.BroadcastEnvelope(p2p.Envelope{
   499  		ChannelID: StateChannel,
   500  		Message:   msg,
   501  	})
   502  	/*
   503  		// TODO: Make this broadcast more selective.
   504  		for _, peer := range conR.Switch.Peers().List() {
   505  			ps, ok := peer.Get(PeerStateKey).(*PeerState)
   506  			if !ok {
   507  				panic(fmt.Sprintf("Peer %v has no state", peer))
   508  			}
   509  			prs := ps.GetRoundState()
   510  			if prs.Height == vote.Height {
   511  				// TODO: Also filter on round?
   512  				e := p2p.Envelope{
   513  					ChannelID: StateChannel, struct{ ConsensusMessage }{msg},
   514  					Message: p,
   515  				}
   516  				p2p.TrySendEnvelopeShim(peer, e) //nolint: staticcheck
   517  			} else {
   518  				// Height doesn't match
   519  				// TODO: check a field, maybe CatchupCommitRound?
   520  				// TODO: But that requires changing the struct field comment.
   521  			}
   522  		}
   523  	*/
   524  }
   525  
   526  func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) {
   527  	nrsMsg = &tmcons.NewRoundStep{
   528  		Height:                rs.Height,
   529  		Round:                 rs.Round,
   530  		Step:                  uint32(rs.Step),
   531  		SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
   532  		LastCommitRound:       rs.LastCommit.GetRound(),
   533  	}
   534  	return
   535  }
   536  
   537  func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
   538  	rs := conR.getRoundState()
   539  	nrsMsg := makeRoundStepMessage(rs)
   540  	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   541  		ChannelID: StateChannel,
   542  		Message:   nrsMsg,
   543  	}, conR.Logger)
   544  }
   545  
   546  func (conR *Reactor) updateRoundStateRoutine() {
   547  	t := time.NewTicker(100 * time.Microsecond)
   548  	defer t.Stop()
   549  	for range t.C {
   550  		if !conR.IsRunning() {
   551  			return
   552  		}
   553  		rs := conR.conS.GetRoundState()
   554  		conR.mtx.Lock()
   555  		conR.rs = rs
   556  		conR.mtx.Unlock()
   557  	}
   558  }
   559  
   560  func (conR *Reactor) getRoundState() *cstypes.RoundState {
   561  	conR.mtx.RLock()
   562  	defer conR.mtx.RUnlock()
   563  	return conR.rs
   564  }
   565  
   566  func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
   567  	logger := conR.Logger.With("peer", peer)
   568  
   569  OUTER_LOOP:
   570  	for {
   571  		// Manage disconnects from self or peer.
   572  		if !peer.IsRunning() || !conR.IsRunning() {
   573  			return
   574  		}
   575  		rs := conR.getRoundState()
   576  		prs := ps.GetRoundState()
   577  
   578  		// Send proposal Block parts?
   579  		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
   580  			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
   581  				part := rs.ProposalBlockParts.GetPart(index)
   582  				parts, err := part.ToProto()
   583  				if err != nil {
   584  					panic(err)
   585  				}
   586  				logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
   587  				if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   588  					ChannelID: DataChannel,
   589  					Message: &tmcons.BlockPart{
   590  						Height: rs.Height, // This tells peer that this part applies to us.
   591  						Round:  rs.Round,  // This tells peer that this part applies to us.
   592  						Part:   *parts,
   593  					},
   594  				}, logger) {
   595  					ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
   596  				}
   597  				continue OUTER_LOOP
   598  			}
   599  		}
   600  
   601  		// If the peer is on a previous height that we have, help catch up.
   602  		blockStoreBase := conR.conS.blockStore.Base()
   603  		if blockStoreBase > 0 && 0 < prs.Height && prs.Height < rs.Height && prs.Height >= blockStoreBase {
   604  			heightLogger := logger.With("height", prs.Height)
   605  
    606  			// if we never received the commit message from the peer, the block parts won't be initialized
   607  			if prs.ProposalBlockParts == nil {
   608  				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
   609  				if blockMeta == nil {
   610  					heightLogger.Error("Failed to load block meta",
   611  						"blockstoreBase", blockStoreBase, "blockstoreHeight", conR.conS.blockStore.Height())
   612  					time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   613  				} else {
   614  					ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader)
   615  				}
    616  				// continue the loop since prs is a copy and not affected by this initialization
   617  				continue OUTER_LOOP
   618  			}
   619  			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
   620  			continue OUTER_LOOP
   621  		}
   622  
   623  		// If height and round don't match, sleep.
   624  		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
   625  			// logger.Info("Peer Height|Round mismatch, sleeping",
   626  			// "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
   627  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   628  			continue OUTER_LOOP
   629  		}
   630  
   631  		// By here, height and round match.
   632  		// Proposal block parts were already matched and sent if any were wanted.
   633  		// (These can match on hash so the round doesn't matter)
   634  		// Now consider sending other things, like the Proposal itself.
   635  
   636  		// Send Proposal && ProposalPOL BitArray?
   637  		if rs.Proposal != nil && !prs.Proposal {
   638  			// Proposal: share the proposal metadata with peer.
   639  			{
   640  				logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
   641  				if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   642  					ChannelID: DataChannel,
   643  					Message:   &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()},
   644  				}, logger) {
    645  					// NOTE[ZM]: A peer might have received a different proposal msg, so this Proposal msg will be rejected!
   646  					ps.SetHasProposal(rs.Proposal)
   647  				}
   648  			}
    649  			// ProposalPOL: lets the peer know which POL votes we have so far.
   650  			// Peer must receive ProposalMessage first.
   651  			// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
   652  			// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
   653  			if 0 <= rs.Proposal.POLRound {
   654  				logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
   655  				p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   656  					ChannelID: DataChannel,
   657  					Message: &tmcons.ProposalPOL{
   658  						Height:           rs.Height,
   659  						ProposalPolRound: rs.Proposal.POLRound,
   660  						ProposalPol:      *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
   661  					},
   662  				}, logger)
   663  			}
   664  			continue OUTER_LOOP
   665  		}
   666  
   667  		// Nothing to do. Sleep.
   668  		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   669  		continue OUTER_LOOP
   670  	}
   671  }
   672  
   673  func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
   674  	prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) {
   675  
   676  	if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
   677  		// Ensure that the peer's PartSetHeader is correct
   678  		blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
   679  		if blockMeta == nil {
   680  			logger.Error("Failed to load block meta", "ourHeight", rs.Height,
   681  				"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
   682  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   683  			return
   684  		} else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) {
   685  			logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping",
   686  				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
   687  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   688  			return
   689  		}
   690  		// Load the part
   691  		part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
   692  		if part == nil {
   693  			logger.Error("Could not load part", "index", index,
   694  				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
   695  			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   696  			return
   697  		}
   698  		// Send the part
   699  		logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
   700  		pp, err := part.ToProto()
   701  		if err != nil {
   702  			logger.Error("Could not convert part to proto", "index", index, "error", err)
   703  			return
   704  		}
   705  		if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   706  			ChannelID: DataChannel,
   707  			Message: &tmcons.BlockPart{
   708  				Height: prs.Height, // Not our height, so it doesn't matter.
   709  				Round:  prs.Round,  // Not our height, so it doesn't matter.
   710  				Part:   *pp,
   711  			},
   712  		}, logger) {
   713  			ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
   714  		} else {
   715  			logger.Debug("Sending block part for catchup failed")
   716  		}
   717  		return
   718  	}
   719  	//  logger.Info("No parts to send in catch-up, sleeping")
   720  	time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   721  }
   722  
   723  func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
   724  	logger := conR.Logger.With("peer", peer)
   725  
   726  	// Simple hack to throttle logs upon sleep.
   727  	var sleeping = 0
   728  
   729  OUTER_LOOP:
   730  	for {
   731  		// Manage disconnects from self or peer.
   732  		if !peer.IsRunning() || !conR.IsRunning() {
   733  			return
   734  		}
   735  		rs := conR.getRoundState()
   736  		prs := ps.GetRoundState()
   737  
   738  		switch sleeping {
   739  		case 1: // First sleep
   740  			sleeping = 2
   741  		case 2: // No more sleep
   742  			sleeping = 0
   743  		}
   744  
   745  		// logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
   746  		// "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)
   747  
   748  		// If height matches, then send LastCommit, Prevotes, Precommits.
   749  		if rs.Height == prs.Height {
   750  			heightLogger := logger.With("height", prs.Height)
   751  			if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
   752  				continue OUTER_LOOP
   753  			}
   754  		}
   755  
   756  		// Special catchup logic.
   757  		// If peer is lagging by height 1, send LastCommit.
   758  		if prs.Height != 0 && rs.Height == prs.Height+1 {
   759  			if ps.PickSendVote(rs.LastCommit) {
   760  				logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
   761  				continue OUTER_LOOP
   762  			}
   763  		}
   764  
   765  		// Catchup logic
   766  		// If peer is lagging by more than 1, send Commit.
   767  		blockStoreBase := conR.conS.blockStore.Base()
   768  		if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase {
   769  			// Load the block commit for prs.Height,
   770  			// which contains precommit signatures for prs.Height.
   771  			if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
   772  				if ps.PickSendVote(commit) {
   773  					logger.Debug("Picked Catchup commit to send", "height", prs.Height)
   774  					continue OUTER_LOOP
   775  				}
   776  			}
   777  		}
   778  
   779  		if sleeping == 0 {
   780  			// We sent nothing. Sleep...
   781  			sleeping = 1
   782  			logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height,
   783  				"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
   784  				"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
   785  		} else if sleeping == 2 {
   786  			// Continued sleep...
   787  			sleeping = 1
   788  		}
   789  
   790  		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
   791  		continue OUTER_LOOP
   792  	}
   793  }
   794  
   795  func (conR *Reactor) gossipVotesForHeight(
   796  	logger log.Logger,
   797  	rs *cstypes.RoundState,
   798  	prs *cstypes.PeerRoundState,
   799  	ps *PeerState,
   800  ) bool {
   801  
   802  	// If there are lastCommits to send...
   803  	if prs.Step == cstypes.RoundStepNewHeight {
   804  		if ps.PickSendVote(rs.LastCommit) {
   805  			logger.Debug("Picked rs.LastCommit to send")
   806  			return true
   807  		}
   808  	}
   809  	// If there are POL prevotes to send...
   810  	if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 {
   811  		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
   812  			if ps.PickSendVote(polPrevotes) {
   813  				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
   814  					"round", prs.ProposalPOLRound)
   815  				return true
   816  			}
   817  		}
   818  	}
   819  	// If there are prevotes to send...
   820  	if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round {
   821  		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
   822  			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
   823  			return true
   824  		}
   825  	}
   826  	// If there are precommits to send...
   827  	if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
   828  		if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
   829  			logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
   830  			return true
   831  		}
   832  	}
   833  	// If there are prevotes to send...Needed because of validBlock mechanism
   834  	if prs.Round != -1 && prs.Round <= rs.Round {
   835  		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
   836  			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
   837  			return true
   838  		}
   839  	}
   840  	// If there are POLPrevotes to send...
   841  	if prs.ProposalPOLRound != -1 {
   842  		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
   843  			if ps.PickSendVote(polPrevotes) {
   844  				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
   845  					"round", prs.ProposalPOLRound)
   846  				return true
   847  			}
   848  		}
   849  	}
   850  
   851  	return false
   852  }
   853  
    854  // NOTE: `queryMaj23Routine` has a simple, crude design since it only comes
    855  // into play for liveness when a signature DDoS attack is happening.
   856  func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
   857  
   858  OUTER_LOOP:
   859  	for {
   860  		// Manage disconnects from self or peer.
   861  		if !peer.IsRunning() || !conR.IsRunning() {
   862  			return
   863  		}
   864  
   865  		// Maybe send Height/Round/Prevotes
   866  		{
   867  			rs := conR.getRoundState()
   868  			prs := ps.GetRoundState()
   869  			if rs.Height == prs.Height {
   870  				if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
   871  
   872  					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   873  						ChannelID: StateChannel,
   874  						Message: &tmcons.VoteSetMaj23{
   875  							Height:  prs.Height,
   876  							Round:   prs.Round,
   877  							Type:    tmproto.PrevoteType,
   878  							BlockID: maj23.ToProto(),
   879  						},
   880  					}, ps.logger)
   881  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   882  				}
   883  			}
   884  		}
   885  
   886  		// Maybe send Height/Round/Precommits
   887  		{
   888  			rs := conR.getRoundState()
   889  			prs := ps.GetRoundState()
   890  			if rs.Height == prs.Height {
   891  				if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
   892  					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   893  						ChannelID: StateChannel,
   894  						Message: &tmcons.VoteSetMaj23{
   895  							Height:  prs.Height,
   896  							Round:   prs.Round,
   897  							Type:    tmproto.PrecommitType,
   898  							BlockID: maj23.ToProto(),
   899  						},
   900  					}, ps.logger)
   901  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   902  				}
   903  			}
   904  		}
   905  
   906  		// Maybe send Height/Round/ProposalPOL
   907  		{
   908  			rs := conR.getRoundState()
   909  			prs := ps.GetRoundState()
   910  			if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
   911  				if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
   912  
   913  					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   914  						ChannelID: StateChannel,
   915  						Message: &tmcons.VoteSetMaj23{
   916  							Height:  prs.Height,
   917  							Round:   prs.ProposalPOLRound,
   918  							Type:    tmproto.PrevoteType,
   919  							BlockID: maj23.ToProto(),
   920  						},
   921  					}, ps.logger)
   922  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   923  				}
   924  			}
   925  		}
   926  
    927  		// Little point sending LastCommitRound/LastCommit;
    928  		// these are fleeting and non-blocking.
   929  
   930  		// Maybe send Height/CatchupCommitRound/CatchupCommit.
   931  		{
   932  			prs := ps.GetRoundState()
   933  			if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
   934  				prs.Height >= conR.conS.blockStore.Base() {
   935  				if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
   936  					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   937  						ChannelID: StateChannel,
   938  						Message: &tmcons.VoteSetMaj23{
   939  							Height:  prs.Height,
   940  							Round:   commit.Round,
   941  							Type:    tmproto.PrecommitType,
   942  							BlockID: commit.BlockID.ToProto(),
   943  						},
   944  					}, ps.logger)
   945  					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   946  				}
   947  			}
   948  		}
   949  
   950  		time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
   951  
   952  		continue OUTER_LOOP
   953  	}
   954  }
   955  
   956  func (conR *Reactor) peerStatsRoutine() {
   957  	for {
   958  		if !conR.IsRunning() {
   959  			conR.Logger.Info("Stopping peerStatsRoutine")
   960  			return
   961  		}
   962  
   963  		select {
   964  		case msg := <-conR.conS.statsMsgQueue:
   965  			// Get peer
   966  			peer := conR.Switch.Peers().Get(msg.PeerID)
   967  			if peer == nil {
   968  				conR.Logger.Debug("Attempt to update stats for non-existent peer",
   969  					"peer", msg.PeerID)
   970  				continue
   971  			}
   972  			// Get peer state
   973  			ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
   974  			if !ok {
   975  				panic(fmt.Sprintf("Peer %v has no state", peer))
   976  			}
   977  			switch msg.Msg.(type) {
   978  			case *VoteMessage:
   979  				if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
   980  					conR.Switch.MarkPeerAsGood(peer)
   981  				}
   982  			case *BlockPartMessage:
   983  				if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
   984  					conR.Switch.MarkPeerAsGood(peer)
   985  				}
   986  			}
   987  		case <-conR.conS.Quit():
   988  			return
   989  
   990  		case <-conR.Quit():
   991  			return
   992  		}
   993  	}
   994  }
   995  
   996  // String returns a string representation of the Reactor.
   997  // NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
   998  // TODO: improve!
   999  func (conR *Reactor) String() string {
  1000  	// better not to access shared variables
  1001  	return "ConsensusReactor" // conR.StringIndented("")
  1002  }
  1003  
  1004  // StringIndented returns an indented string representation of the Reactor
  1005  func (conR *Reactor) StringIndented(indent string) string {
  1006  	s := "ConsensusReactor{\n"
  1007  	s += indent + "  " + conR.conS.StringIndented(indent+"  ") + "\n"
  1008  	for _, peer := range conR.Switch.Peers().List() {
  1009  		ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
  1010  		if !ok {
  1011  			panic(fmt.Sprintf("Peer %v has no state", peer))
  1012  		}
  1013  		s += indent + "  " + ps.StringIndented(indent+"  ") + "\n"
  1014  	}
  1015  	s += indent + "}"
  1016  	return s
  1017  }
  1018  
  1019  // ReactorMetrics sets the metrics
  1020  func ReactorMetrics(metrics *Metrics) ReactorOption {
  1021  	return func(conR *Reactor) { conR.Metrics = metrics }
  1022  }
  1023  
  1024  //-----------------------------------------------------------------------------
  1025  
  1026  var (
  1027  	ErrPeerStateHeightRegression = errors.New("error peer state height regression")
  1028  	ErrPeerStateInvalidStartTime = errors.New("error peer state invalid startTime")
  1029  )
  1030  
  1031  // PeerState contains the known state of a peer, including its connection and
  1032  // threadsafe access to its PeerRoundState.
  1033  // NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
  1034  // Be mindful of what you Expose.
  1035  type PeerState struct {
  1036  	peer   p2p.Peer
  1037  	logger log.Logger
  1038  
  1039  	mtx   sync.Mutex             // NOTE: Modify below using setters, never directly.
  1040  	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
  1041  	Stats *peerStateStats        `json:"stats"`       // Exposed.
  1042  }
  1043  
  1044  // peerStateStats holds internal statistics for a peer.
  1045  type peerStateStats struct {
  1046  	Votes      int `json:"votes"`
  1047  	BlockParts int `json:"block_parts"`
  1048  }
  1049  
  1050  func (pss peerStateStats) String() string {
  1051  	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
  1052  		pss.Votes, pss.BlockParts)
  1053  }
  1054  
  1055  // NewPeerState returns a new PeerState for the given Peer
  1056  func NewPeerState(peer p2p.Peer) *PeerState {
  1057  	return &PeerState{
  1058  		peer:   peer,
  1059  		logger: log.NewNopLogger(),
  1060  		PRS: cstypes.PeerRoundState{
  1061  			Round:              -1,
  1062  			ProposalPOLRound:   -1,
  1063  			LastCommitRound:    -1,
  1064  			CatchupCommitRound: -1,
  1065  		},
  1066  		Stats: &peerStateStats{},
  1067  	}
  1068  }
  1069  
   1070  // SetLogger sets a logger on the peer state. It returns the peer state
   1071  // itself.
  1072  func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
  1073  	ps.logger = logger
  1074  	return ps
  1075  }
  1076  
   1077  // GetRoundState returns a shallow copy of the PeerRoundState.
  1078  // There's no point in mutating it since it won't change PeerState.
  1079  func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
  1080  	ps.mtx.Lock()
  1081  	defer ps.mtx.Unlock()
  1082  
  1083  	prs := ps.PRS // copy
  1084  	return &prs
  1085  }
  1086  
   1087  // ToJSON returns the JSON representation of the PeerState.
  1088  func (ps *PeerState) ToJSON() ([]byte, error) {
  1089  	ps.mtx.Lock()
  1090  	defer ps.mtx.Unlock()
  1091  
  1092  	return tmjson.Marshal(ps)
  1093  }
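
// An illustrative sketch (assumption: "peer" is a connected p2p.Peer that had a
// PeerState attached by InitPeer) showing how the exposed state can be dumped,
// similar in spirit to what rpc/core/consensus.go does:
//
//	if ps, ok := peer.Get(types.PeerStateKey).(*PeerState); ok {
//		if data, err := ps.ToJSON(); err == nil {
//			fmt.Printf("peer state: %s\n", data)
//		}
//	}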
  1094  
   1095  // GetHeight returns an atomic snapshot of the PeerRoundState's height.
   1096  // It is used by the mempool to ensure peers are caught up before broadcasting new txs.
  1097  func (ps *PeerState) GetHeight() int64 {
  1098  	ps.mtx.Lock()
  1099  	defer ps.mtx.Unlock()
  1100  	return ps.PRS.Height
  1101  }
  1102  
  1103  // SetHasProposal sets the given proposal as known for the peer.
  1104  func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
  1105  	ps.mtx.Lock()
  1106  	defer ps.mtx.Unlock()
  1107  
  1108  	if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
  1109  		return
  1110  	}
  1111  
  1112  	if ps.PRS.Proposal {
  1113  		return
  1114  	}
  1115  
  1116  	ps.PRS.Proposal = true
  1117  
  1118  	// ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage
  1119  	if ps.PRS.ProposalBlockParts != nil {
  1120  		return
  1121  	}
  1122  
  1123  	ps.PRS.ProposalBlockPartSetHeader = proposal.BlockID.PartSetHeader
  1124  	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(proposal.BlockID.PartSetHeader.Total))
  1125  	ps.PRS.ProposalPOLRound = proposal.POLRound
  1126  	ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
  1127  }
  1128  
  1129  // InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
  1130  func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) {
  1131  	ps.mtx.Lock()
  1132  	defer ps.mtx.Unlock()
  1133  
  1134  	if ps.PRS.ProposalBlockParts != nil {
  1135  		return
  1136  	}
  1137  
  1138  	ps.PRS.ProposalBlockPartSetHeader = partSetHeader
  1139  	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(partSetHeader.Total))
  1140  }
  1141  
  1142  // SetHasProposalBlockPart sets the given block part index as known for the peer.
  1143  func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index int) {
  1144  	ps.mtx.Lock()
  1145  	defer ps.mtx.Unlock()
  1146  
  1147  	if ps.PRS.Height != height || ps.PRS.Round != round {
  1148  		return
  1149  	}
  1150  
  1151  	ps.PRS.ProposalBlockParts.SetIndex(index, true)
  1152  }
  1153  
  1154  // PickSendVote picks a vote and sends it to the peer.
   1155  // Returns true if a vote was sent.
  1156  func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
  1157  	if vote, ok := ps.PickVoteToSend(votes); ok {
  1158  		// Remove the logging `PeerState`
  1159  		// See: https://github.com/Finschia/ostracon/issues/457
  1160  		// See: https://github.com/tendermint/tendermint/discussions/9353
  1161  		//ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
  1162  		ps.logger.Debug("Sending vote message", "vote", vote)
  1163  		if p2p.SendEnvelopeShim(ps.peer, p2p.Envelope{ //nolint: staticcheck
  1164  			ChannelID: VoteChannel,
  1165  			Message: &tmcons.Vote{
  1166  				Vote: vote.ToProto(),
  1167  			},
  1168  		}, ps.logger) {
  1169  			ps.SetHasVote(vote)
  1170  			return true
  1171  		}
  1172  		return false
  1173  	}
  1174  	return false
  1175  }
  1176  
  1177  // PickVoteToSend picks a vote to send to the peer.
  1178  // Returns true if a vote was picked.
  1179  // NOTE: `votes` must be the correct Size() for the Height().
  1180  func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
  1181  	ps.mtx.Lock()
  1182  	defer ps.mtx.Unlock()
  1183  
  1184  	if votes.Size() == 0 {
  1185  		return nil, false
  1186  	}
  1187  
  1188  	height, round, votesType, size :=
  1189  		votes.GetHeight(), votes.GetRound(), tmproto.SignedMsgType(votes.Type()), votes.Size()
  1190  
  1191  	// Lazily set data using 'votes'.
  1192  	if votes.IsCommit() {
  1193  		ps.ensureCatchupCommitRound(height, round, size)
  1194  	}
  1195  	ps.ensureVoteBitArrays(height, size)
  1196  
  1197  	psVotes := ps.getVoteBitArray(height, round, votesType)
  1198  	if psVotes == nil {
  1199  		return nil, false // Not something worth sending
  1200  	}
  1201  	if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
  1202  		return votes.GetByIndex(int32(index)), true
  1203  	}
  1204  	return nil, false
  1205  }
  1206  
  1207  func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray {
  1208  	if !types.IsVoteTypeValid(votesType) {
  1209  		return nil
  1210  	}
  1211  
  1212  	if ps.PRS.Height == height {
  1213  		if ps.PRS.Round == round {
  1214  			switch votesType {
  1215  			case tmproto.PrevoteType:
  1216  				return ps.PRS.Prevotes
  1217  			case tmproto.PrecommitType:
  1218  				return ps.PRS.Precommits
  1219  			}
  1220  		}
  1221  		if ps.PRS.CatchupCommitRound == round {
  1222  			switch votesType {
  1223  			case tmproto.PrevoteType:
  1224  				return nil
  1225  			case tmproto.PrecommitType:
  1226  				return ps.PRS.CatchupCommit
  1227  			}
  1228  		}
  1229  		if ps.PRS.ProposalPOLRound == round {
  1230  			switch votesType {
  1231  			case tmproto.PrevoteType:
  1232  				return ps.PRS.ProposalPOL
  1233  			case tmproto.PrecommitType:
  1234  				return nil
  1235  			}
  1236  		}
  1237  		return nil
  1238  	}
  1239  	if ps.PRS.Height == height+1 {
  1240  		if ps.PRS.LastCommitRound == round {
  1241  			switch votesType {
  1242  			case tmproto.PrevoteType:
  1243  				return nil
  1244  			case tmproto.PrecommitType:
  1245  				return ps.PRS.LastCommit
  1246  			}
  1247  		}
  1248  		return nil
  1249  	}
  1250  	return nil
  1251  }
  1252  
  1253  // 'round': A round for which we have a +2/3 commit.
  1254  func (ps *PeerState) ensureCatchupCommitRound(height int64, round int32, numValidators int) {
  1255  	if ps.PRS.Height != height {
  1256  		return
  1257  	}
  1258  	/*
  1259  		NOTE: This is wrong, 'round' could change.
  1260  		e.g. if orig round is not the same as block LastCommit round.
  1261  		if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
  1262  			panic(fmt.Sprintf(
  1263  				"Conflicting CatchupCommitRound. Height: %v,
  1264  				Orig: %v,
  1265  				New: %v",
  1266  				height,
  1267  				ps.CatchupCommitRound,
  1268  				round))
  1269  		}
  1270  	*/
  1271  	if ps.PRS.CatchupCommitRound == round {
  1272  		return // Nothing to do!
  1273  	}
  1274  	ps.PRS.CatchupCommitRound = round
  1275  	if round == ps.PRS.Round {
  1276  		ps.PRS.CatchupCommit = ps.PRS.Precommits
  1277  	} else {
  1278  		ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
  1279  	}
  1280  }
  1281  
  1282  // EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
  1283  // what votes this peer has received.
  1284  // NOTE: It's important to make sure that numValidators actually matches
  1285  // what the node sees as the number of validators for height.
  1286  func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
  1287  	ps.mtx.Lock()
  1288  	defer ps.mtx.Unlock()
  1289  	ps.ensureVoteBitArrays(height, numValidators)
  1290  }
  1291  
  1292  func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
  1293  	if ps.PRS.Height == height {
  1294  		if ps.PRS.Prevotes == nil {
  1295  			ps.PRS.Prevotes = bits.NewBitArray(numValidators)
  1296  		}
  1297  		if ps.PRS.Precommits == nil {
  1298  			ps.PRS.Precommits = bits.NewBitArray(numValidators)
  1299  		}
  1300  		if ps.PRS.CatchupCommit == nil {
  1301  			ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
  1302  		}
  1303  		if ps.PRS.ProposalPOL == nil {
  1304  			ps.PRS.ProposalPOL = bits.NewBitArray(numValidators)
  1305  		}
  1306  	} else if ps.PRS.Height == height+1 {
  1307  		if ps.PRS.LastCommit == nil {
  1308  			ps.PRS.LastCommit = bits.NewBitArray(numValidators)
  1309  		}
  1310  	}
  1311  }
  1312  
   1313  // RecordVote increments internal vote-related statistics for this peer.
  1314  // It returns the total number of added votes.
  1315  func (ps *PeerState) RecordVote() int {
  1316  	ps.mtx.Lock()
  1317  	defer ps.mtx.Unlock()
  1318  
  1319  	ps.Stats.Votes++
  1320  
  1321  	return ps.Stats.Votes
  1322  }
  1323  
   1324  // VotesSent returns the number of blocks for which the peer has been sending us
  1325  // votes.
  1326  func (ps *PeerState) VotesSent() int {
  1327  	ps.mtx.Lock()
  1328  	defer ps.mtx.Unlock()
  1329  
  1330  	return ps.Stats.Votes
  1331  }
  1332  
   1333  // RecordBlockPart increments internal block-part-related statistics for this peer.
  1334  // It returns the total number of added block parts.
  1335  func (ps *PeerState) RecordBlockPart() int {
  1336  	ps.mtx.Lock()
  1337  	defer ps.mtx.Unlock()
  1338  
  1339  	ps.Stats.BlockParts++
  1340  	return ps.Stats.BlockParts
  1341  }
  1342  
  1343  // BlockPartsSent returns the number of useful block parts the peer has sent us.
  1344  func (ps *PeerState) BlockPartsSent() int {
  1345  	ps.mtx.Lock()
  1346  	defer ps.mtx.Unlock()
  1347  
  1348  	return ps.Stats.BlockParts
  1349  }
  1350  
  1351  // SetHasVote sets the given vote as known by the peer
  1352  func (ps *PeerState) SetHasVote(vote *types.Vote) {
  1353  	ps.mtx.Lock()
  1354  	defer ps.mtx.Unlock()
  1355  
  1356  	ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
  1357  }
  1358  
  1359  func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) {
  1360  	ps.logger.Debug("setHasVote",
  1361  		"peerH/R",
  1362  		log.NewLazySprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
  1363  		"H/R",
  1364  		log.NewLazySprintf("%d/%d", height, round),
  1365  		"type", voteType, "index", index)
  1366  
  1367  	// NOTE: some may be nil BitArrays -> no side effects.
  1368  	psVotes := ps.getVoteBitArray(height, round, voteType)
  1369  	if psVotes != nil {
  1370  		psVotes.SetIndex(int(index), true)
  1371  	}
  1372  }
  1373  
  1374  // ApplyNewRoundStepMessage updates the peer state for the new round.
  1375  func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
  1376  	ps.mtx.Lock()
  1377  	defer ps.mtx.Unlock()
  1378  
  1379  	// Ignore duplicates or decreases
  1380  	if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 {
  1381  		return
  1382  	}
  1383  
  1384  	// Just remember these values.
  1385  	psHeight := ps.PRS.Height
  1386  	psRound := ps.PRS.Round
  1387  	psCatchupCommitRound := ps.PRS.CatchupCommitRound
  1388  	psCatchupCommit := ps.PRS.CatchupCommit
  1389  
  1390  	startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
  1391  	ps.PRS.Height = msg.Height
  1392  	ps.PRS.Round = msg.Round
  1393  	ps.PRS.Step = msg.Step
  1394  	ps.PRS.StartTime = startTime
  1395  	if psHeight != msg.Height || psRound != msg.Round {
  1396  		ps.PRS.Proposal = false
  1397  		ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{}
  1398  		ps.PRS.ProposalBlockParts = nil
  1399  		ps.PRS.ProposalPOLRound = -1
  1400  		ps.PRS.ProposalPOL = nil
  1401  		// We'll update the BitArray capacity later.
  1402  		ps.PRS.Prevotes = nil
  1403  		ps.PRS.Precommits = nil
  1404  	}
  1405  	if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
  1406  		// Peer caught up to CatchupCommitRound.
  1407  		// Preserve psCatchupCommit!
   1408  		// NOTE: We prefer to use prs.Precommits if
   1409  		// prs.Round matches prs.CatchupCommitRound.
  1410  		ps.PRS.Precommits = psCatchupCommit
  1411  	}
  1412  	if psHeight != msg.Height {
  1413  		// Shift Precommits to LastCommit.
  1414  		if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
  1415  			ps.PRS.LastCommitRound = msg.LastCommitRound
  1416  			ps.PRS.LastCommit = ps.PRS.Precommits
  1417  		} else {
  1418  			ps.PRS.LastCommitRound = msg.LastCommitRound
  1419  			ps.PRS.LastCommit = nil
  1420  		}
  1421  		// We'll update the BitArray capacity later.
  1422  		ps.PRS.CatchupCommitRound = -1
  1423  		ps.PRS.CatchupCommit = nil
  1424  	}
  1425  }
  1426  
  1427  // ApplyNewValidBlockMessage updates the peer state for the new valid block.
  1428  func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) {
  1429  	ps.mtx.Lock()
  1430  	defer ps.mtx.Unlock()
  1431  
  1432  	if ps.PRS.Height != msg.Height {
  1433  		return
  1434  	}
  1435  
  1436  	if ps.PRS.Round != msg.Round && !msg.IsCommit {
  1437  		return
  1438  	}
  1439  
  1440  	ps.PRS.ProposalBlockPartSetHeader = msg.BlockPartSetHeader
  1441  	ps.PRS.ProposalBlockParts = msg.BlockParts
  1442  }
  1443  
  1444  // ApplyProposalPOLMessage updates the peer state for the new proposal POL.
  1445  func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
  1446  	ps.mtx.Lock()
  1447  	defer ps.mtx.Unlock()
  1448  
  1449  	if ps.PRS.Height != msg.Height {
  1450  		return
  1451  	}
  1452  	if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound {
  1453  		return
  1454  	}
  1455  
  1456  	// TODO: Merge onto existing ps.PRS.ProposalPOL?
  1457  	// We might have sent some prevotes in the meantime.
  1458  	ps.PRS.ProposalPOL = msg.ProposalPOL
  1459  }
  1460  
  1461  // ApplyHasVoteMessage updates the peer state for the new vote.
  1462  func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
  1463  	ps.mtx.Lock()
  1464  	defer ps.mtx.Unlock()
  1465  
  1466  	if ps.PRS.Height != msg.Height {
  1467  		return
  1468  	}
  1469  
  1470  	ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
  1471  }
  1472  
  1473  // ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes
  1474  // it claims to have for the corresponding BlockID.
  1475  // `ourVotes` is a BitArray of votes we have for msg.BlockID
  1476  // NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
  1477  // we conservatively overwrite ps's votes w/ msg.Votes.
  1478  func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *bits.BitArray) {
  1479  	ps.mtx.Lock()
  1480  	defer ps.mtx.Unlock()
  1481  
  1482  	votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
  1483  	if votes != nil {
  1484  		if ourVotes == nil {
  1485  			votes.Update(msg.Votes)
  1486  		} else {
  1487  			otherVotes := votes.Sub(ourVotes)
  1488  			hasVotes := otherVotes.Or(msg.Votes)
  1489  			votes.Update(hasVotes)
  1490  		}
  1491  	}
  1492  }
  1493  
  1494  // String returns a string representation of the PeerState
  1495  func (ps *PeerState) String() string {
  1496  	return ps.StringIndented("")
  1497  }
  1498  
  1499  // StringIndented returns a string representation of the PeerState
  1500  func (ps *PeerState) StringIndented(indent string) string {
  1501  	ps.mtx.Lock()
  1502  	defer ps.mtx.Unlock()
  1503  	return fmt.Sprintf(`PeerState{
  1504  %s  Key        %v
  1505  %s  RoundState %v
  1506  %s  Stats      %v
  1507  %s}`,
  1508  		indent, ps.peer.ID(),
  1509  		indent, ps.PRS.StringIndented(indent+"  "),
  1510  		indent, ps.Stats,
  1511  		indent)
  1512  }
  1513  
  1514  //-----------------------------------------------------------------------------
  1515  // Messages
  1516  
  1517  // Message is a message that can be sent and received on the Reactor
  1518  type Message interface {
  1519  	ValidateBasic() error
  1520  }
  1521  
  1522  func init() {
  1523  	tmjson.RegisterType(&NewRoundStepMessage{}, "ostracon/NewRoundStepMessage")
  1524  	tmjson.RegisterType(&NewValidBlockMessage{}, "ostracon/NewValidBlockMessage")
  1525  	tmjson.RegisterType(&ProposalMessage{}, "ostracon/Proposal")
  1526  	tmjson.RegisterType(&ProposalPOLMessage{}, "ostracon/ProposalPOL")
  1527  	tmjson.RegisterType(&BlockPartMessage{}, "ostracon/BlockPart")
  1528  	tmjson.RegisterType(&VoteMessage{}, "ostracon/Vote")
  1529  	tmjson.RegisterType(&HasVoteMessage{}, "ostracon/HasVote")
  1530  	tmjson.RegisterType(&VoteSetMaj23Message{}, "ostracon/VoteSetMaj23")
  1531  	tmjson.RegisterType(&VoteSetBitsMessage{}, "ostracon/VoteSetBits")
  1532  }
  1533  
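// Illustrative sketch (not part of the original file) of why the registrations above are
// needed: when a reactor message is stored behind the Message interface (e.g. in a WAL
// entry), tmjson uses the registered "ostracon/..." name to tag the concrete type and to
// restore it on decode. The envelope type and function below are hypothetical.
type exampleEnvelope struct {
	Msg Message `json:"msg"`
}

func exampleMessageJSONRoundTrip() (Message, error) {
	in := exampleEnvelope{Msg: &HasVoteMessage{Height: 10, Round: 0, Type: tmproto.PrevoteType, Index: 3}}
	bz, err := tmjson.Marshal(in)
	if err != nil {
		return nil, err
	}
	var out exampleEnvelope
	if err := tmjson.Unmarshal(bz, &out); err != nil {
		return nil, err
	}
	// out.Msg is a *HasVoteMessage again; without RegisterType the interface field
	// could not be mapped back to a concrete type.
	return out.Msg, nil
}
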
  1534  //-------------------------------------
  1535  
  1536  // NewRoundStepMessage is sent for every step taken in the ConsensusState,
  1537  // i.e. for every height/round/step transition.
  1538  type NewRoundStepMessage struct {
  1539  	Height                int64
  1540  	Round                 int32
  1541  	Step                  cstypes.RoundStepType
  1542  	SecondsSinceStartTime int64
  1543  	LastCommitRound       int32
  1544  }
  1545  
  1546  // ValidateBasic performs basic validation.
  1547  func (m *NewRoundStepMessage) ValidateBasic() error {
  1548  	if m.Height < 0 {
  1549  		return errors.New("negative Height")
  1550  	}
  1551  	if m.Round < 0 {
  1552  		return errors.New("negative Round")
  1553  	}
  1554  	if !m.Step.IsValid() {
  1555  		return errors.New("invalid Step")
  1556  	}
  1557  
  1558  	// NOTE: SecondsSinceStartTime may be negative
  1559  
  1560  	// LastCommitRound will be -1 for the initial height, but we don't know the initial
  1561  	// height here since it can be specified in genesis. The reactor has to validate this
  1562  	// via ValidateHeight().
  1563  	if m.LastCommitRound < -1 {
  1564  		return errors.New("invalid LastCommitRound (cannot be < -1)")
  1565  	}
  1566  
  1567  	return nil
  1568  }
  1569  
  1570  // ValidateHeight validates the height given the chain's initial height.
  1571  func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error {
  1572  	if m.Height < initialHeight {
  1573  		return fmt.Errorf("invalid Height %v (lower than initial height %v)",
  1574  			m.Height, initialHeight)
  1575  	}
  1576  	if m.Height == initialHeight && m.LastCommitRound != -1 {
  1577  		return fmt.Errorf("invalid LastCommitRound %v (must be -1 for initial height %v)",
  1578  			m.LastCommitRound, initialHeight)
  1579  	}
  1580  	if m.Height > initialHeight && m.LastCommitRound < 0 {
  1581  		return fmt.Errorf("LastCommitRound can only be negative for initial height %v",
  1582  			initialHeight)
  1583  	}
  1584  	return nil
  1585  }
  1586  
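// Illustrative sketch (not part of the original file) of how the two checks above combine:
// ValidateBasic is stateless, while ValidateHeight additionally needs the chain's initial
// height from genesis. The function name and the literal values are hypothetical.
func exampleValidateNewRoundStep(initialHeight int64) error {
	msg := &NewRoundStepMessage{
		Height:          initialHeight, // first block of the chain
		Round:           0,
		Step:            cstypes.RoundStepNewHeight,
		LastCommitRound: -1, // there is no previous commit at the initial height
	}
	if err := msg.ValidateBasic(); err != nil {
		return err
	}
	// Any LastCommitRound other than -1 would be rejected here, because Height == initialHeight.
	return msg.ValidateHeight(initialHeight)
}
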
  1587  // String returns a string representation.
  1588  func (m *NewRoundStepMessage) String() string {
  1589  	return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]",
  1590  		m.Height, m.Round, m.Step, m.LastCommitRound)
  1591  }
  1592  
  1593  //-------------------------------------
  1594  
  1595  // NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
  1596  // i.e., there is a Proposal for block B and 2/3+ prevotes for B in round r.
  1597  // If the block has also been committed, the IsCommit flag is set to true.
  1598  type NewValidBlockMessage struct {
  1599  	Height             int64
  1600  	Round              int32
  1601  	BlockPartSetHeader types.PartSetHeader
  1602  	BlockParts         *bits.BitArray
  1603  	IsCommit           bool
  1604  }
  1605  
  1606  // ValidateBasic performs basic validation.
  1607  func (m *NewValidBlockMessage) ValidateBasic() error {
  1608  	if m.Height < 0 {
  1609  		return errors.New("negative Height")
  1610  	}
  1611  	if m.Round < 0 {
  1612  		return errors.New("negative Round")
  1613  	}
  1614  	if err := m.BlockPartSetHeader.ValidateBasic(); err != nil {
  1615  		return fmt.Errorf("wrong BlockPartSetHeader: %v", err)
  1616  	}
  1617  	if m.BlockParts.Size() == 0 {
  1618  		return errors.New("empty blockParts")
  1619  	}
  1620  	if m.BlockParts.Size() != int(m.BlockPartSetHeader.Total) {
  1621  		return fmt.Errorf("blockParts bit array size %d not equal to BlockPartSetHeader.Total %d",
  1622  			m.BlockParts.Size(),
  1623  			m.BlockPartSetHeader.Total)
  1624  	}
  1625  	if m.BlockParts.Size() > int(types.MaxBlockPartsCount) {
  1626  		return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount)
  1627  	}
  1628  	return nil
  1629  }
  1630  
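// Illustrative sketch (not part of the original file) of the size invariant checked above:
// the BlockParts bit array carries one bit per block part, so its size must equal
// BlockPartSetHeader.Total. The helper name and the placeholder hash are hypothetical.
func exampleNewValidBlockMessage(height int64, round int32) *NewValidBlockMessage {
	header := types.PartSetHeader{
		Total: 4,
		Hash:  make([]byte, 32), // placeholder; a real header carries the part set's hash
	}
	return &NewValidBlockMessage{
		Height:             height,
		Round:              round,
		BlockPartSetHeader: header,
		BlockParts:         bits.NewBitArray(int(header.Total)), // sized to Total, or ValidateBasic fails
		IsCommit:           false,
	}
}
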
  1631  // String returns a string representation.
  1632  func (m *NewValidBlockMessage) String() string {
  1633  	return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]",
  1634  		m.Height, m.Round, m.BlockPartSetHeader, m.BlockParts, m.IsCommit)
  1635  }
  1636  
  1637  //-------------------------------------
  1638  
  1639  // ProposalMessage is sent when a new block is proposed.
  1640  type ProposalMessage struct {
  1641  	Proposal *types.Proposal
  1642  }
  1643  
  1644  // ValidateBasic performs basic validation.
  1645  func (m *ProposalMessage) ValidateBasic() error {
  1646  	return m.Proposal.ValidateBasic()
  1647  }
  1648  
  1649  // String returns a string representation.
  1650  func (m *ProposalMessage) String() string {
  1651  	return fmt.Sprintf("[Proposal %v]", m.Proposal)
  1652  }
  1653  
  1654  //-------------------------------------
  1655  
  1656  // ProposalPOLMessage is sent when a previous proposal is re-proposed.
  1657  type ProposalPOLMessage struct {
  1658  	Height           int64
  1659  	ProposalPOLRound int32
  1660  	ProposalPOL      *bits.BitArray
  1661  }
  1662  
  1663  // ValidateBasic performs basic validation.
  1664  func (m *ProposalPOLMessage) ValidateBasic() error {
  1665  	if m.Height < 0 {
  1666  		return errors.New("negative Height")
  1667  	}
  1668  	if m.ProposalPOLRound < 0 {
  1669  		return errors.New("negative ProposalPOLRound")
  1670  	}
  1671  	if m.ProposalPOL.Size() == 0 {
  1672  		return errors.New("empty ProposalPOL bit array")
  1673  	}
  1674  	if m.ProposalPOL.Size() > types.MaxVotesCount {
  1675  		return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount)
  1676  	}
  1677  	return nil
  1678  }
  1679  
  1680  // String returns a string representation.
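// Illustrative sketch (not part of the original file): ProposalPOL carries one bit per
// validator, set for the validators whose prevotes for the proposal's POL round have been
// seen. The helper name and its parameters are hypothetical.
func exampleProposalPOL(height int64, polRound int32, numValidators int, prevoted []int) *ProposalPOLMessage {
	pol := bits.NewBitArray(numValidators)
	for _, valIndex := range prevoted {
		pol.SetIndex(valIndex, true)
	}
	return &ProposalPOLMessage{
		Height:           height,
		ProposalPOLRound: polRound,
		ProposalPOL:      pol,
	}
}
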
  1681  func (m *ProposalPOLMessage) String() string {
  1682  	return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL)
  1683  }
  1684  
  1685  //-------------------------------------
  1686  
  1687  // BlockPartMessage is sent when gossiping a piece of the proposed block.
  1688  type BlockPartMessage struct {
  1689  	Height int64
  1690  	Round  int32
  1691  	Part   *types.Part
  1692  }
  1693  
  1694  // ValidateBasic performs basic validation.
  1695  func (m *BlockPartMessage) ValidateBasic() error {
  1696  	if m.Height < 0 {
  1697  		return errors.New("negative Height")
  1698  	}
  1699  	if m.Round < 0 {
  1700  		return errors.New("negative Round")
  1701  	}
  1702  	if err := m.Part.ValidateBasic(); err != nil {
  1703  		return fmt.Errorf("wrong Part: %v", err)
  1704  	}
  1705  	return nil
  1706  }
  1707  
  1708  // String returns a string representation.
  1709  func (m *BlockPartMessage) String() string {
  1710  	return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part)
  1711  }
  1712  
  1713  //-------------------------------------
  1714  
  1715  // VoteMessage is sent when voting for a proposal (or lack thereof).
  1716  type VoteMessage struct {
  1717  	Vote *types.Vote
  1718  }
  1719  
  1720  // ValidateBasic performs basic validation.
  1721  func (m *VoteMessage) ValidateBasic() error {
  1722  	return m.Vote.ValidateBasic()
  1723  }
  1724  
  1725  // String returns a string representation.
  1726  func (m *VoteMessage) String() string {
  1727  	return fmt.Sprintf("[Vote %v]", m.Vote)
  1728  }
  1729  
  1730  //-------------------------------------
  1731  
  1732  // HasVoteMessage is sent to indicate that a particular vote has been received.
  1733  type HasVoteMessage struct {
  1734  	Height int64
  1735  	Round  int32
  1736  	Type   tmproto.SignedMsgType
  1737  	Index  int32
  1738  }
  1739  
  1740  // ValidateBasic performs basic validation.
  1741  func (m *HasVoteMessage) ValidateBasic() error {
  1742  	if m.Height < 0 {
  1743  		return errors.New("negative Height")
  1744  	}
  1745  	if m.Round < 0 {
  1746  		return errors.New("negative Round")
  1747  	}
  1748  	if !types.IsVoteTypeValid(m.Type) {
  1749  		return errors.New("invalid Type")
  1750  	}
  1751  	if m.Index < 0 {
  1752  		return errors.New("negative Index")
  1753  	}
  1754  	return nil
  1755  }
  1756  
  1757  // String returns a string representation.
  1758  func (m *HasVoteMessage) String() string {
  1759  	return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type)
  1760  }
  1761  
  1762  //-------------------------------------
  1763  
  1764  // VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes.
  1765  type VoteSetMaj23Message struct {
  1766  	Height  int64
  1767  	Round   int32
  1768  	Type    tmproto.SignedMsgType
  1769  	BlockID types.BlockID
  1770  }
  1771  
  1772  // ValidateBasic performs basic validation.
  1773  func (m *VoteSetMaj23Message) ValidateBasic() error {
  1774  	if m.Height < 0 {
  1775  		return errors.New("negative Height")
  1776  	}
  1777  	if m.Round < 0 {
  1778  		return errors.New("negative Round")
  1779  	}
  1780  	if !types.IsVoteTypeValid(m.Type) {
  1781  		return errors.New("invalid Type")
  1782  	}
  1783  	if err := m.BlockID.ValidateBasic(); err != nil {
  1784  		return fmt.Errorf("wrong BlockID: %v", err)
  1785  	}
  1786  	return nil
  1787  }
  1788  
  1789  // String returns a string representation.
  1790  func (m *VoteSetMaj23Message) String() string {
  1791  	return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
  1792  }
  1793  
  1794  //-------------------------------------
  1795  
  1796  // VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID.
  1797  type VoteSetBitsMessage struct {
  1798  	Height  int64
  1799  	Round   int32
  1800  	Type    tmproto.SignedMsgType
  1801  	BlockID types.BlockID
  1802  	Votes   *bits.BitArray
  1803  }
  1804  
  1805  // ValidateBasic performs basic validation.
  1806  func (m *VoteSetBitsMessage) ValidateBasic() error {
  1807  	if m.Height < 0 {
  1808  		return errors.New("negative Height")
  1809  	}
  1810  	if !types.IsVoteTypeValid(m.Type) {
  1811  		return errors.New("invalid Type")
  1812  	}
  1813  	if err := m.BlockID.ValidateBasic(); err != nil {
  1814  		return fmt.Errorf("wrong BlockID: %v", err)
  1815  	}
  1816  	// NOTE: Votes.Size() can be zero if the node does not have any votes.
  1817  	if m.Votes.Size() > types.MaxVotesCount {
  1818  		return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount)
  1819  	}
  1820  	return nil
  1821  }
  1822  
  1823  // String returns a string representation.
  1824  func (m *VoteSetBitsMessage) String() string {
  1825  	return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
  1826  }
  1827  
  1828  //-------------------------------------