github.com/vipernet-xyz/tm@v0.34.24/test/maverick/consensus/reactor.go

package consensus

import (
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/gogo/protobuf/proto"

	tmcon "github.com/vipernet-xyz/tm/consensus"
	cstypes "github.com/vipernet-xyz/tm/consensus/types"
	"github.com/vipernet-xyz/tm/libs/bits"
	tmevents "github.com/vipernet-xyz/tm/libs/events"
	tmjson "github.com/vipernet-xyz/tm/libs/json"
	"github.com/vipernet-xyz/tm/libs/log"
	tmsync "github.com/vipernet-xyz/tm/libs/sync"
	"github.com/vipernet-xyz/tm/p2p"
	tmcons "github.com/vipernet-xyz/tm/proto/tendermint/consensus"
	tmproto "github.com/vipernet-xyz/tm/proto/tendermint/types"
	sm "github.com/vipernet-xyz/tm/state"
	"github.com/vipernet-xyz/tm/types"
	tmtime "github.com/vipernet-xyz/tm/types/time"
)

const (
	StateChannel       = byte(0x20)
	DataChannel        = byte(0x21)
	VoteChannel        = byte(0x22)
	VoteSetBitsChannel = byte(0x23)

	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.

	blocksToContributeToBecomeGoodPeer = 10000
	votesToContributeToBecomeGoodPeer  = 10000
)
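
// Editor's note: 1048576 bytes is 1 MiB. Every message on these channels,
// including one carrying a full block part (types.BlockPartSizeBytes plus
// proto framing), must fit under maxMsgSize. A minimal sanity check could
// look like the sketch below (illustrative only, not part of this package):
//
//	func init() {
//		if types.BlockPartSizeBytes >= maxMsgSize {
//			panic("maxMsgSize must exceed the block part size")
//		}
//	}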

//-----------------------------------------------------------------------------

// Reactor defines a reactor for the consensus service.
type Reactor struct {
	p2p.BaseReactor // BaseService + p2p.Switch

	conS *State

	mtx      tmsync.RWMutex
	waitSync bool
	eventBus *types.EventBus

	Metrics *tmcon.Metrics
}

type ReactorOption func(*Reactor)

// NewReactor returns a new Reactor with the given
// consensusState.
func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor {
	conR := &Reactor{
		conS:     consensusState,
		waitSync: waitSync,
		Metrics:  tmcon.NopMetrics(),
	}
	conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)

	for _, option := range options {
		option(conR)
	}

	return conR
}
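
// Editor's sketch: a typical construction path, assuming a *State cs, a
// logger, and a *p2p.Switch sw already exist (names here are illustrative,
// not part of this package):
//
//	conR := NewReactor(cs, true /* waitSync */, ReactorMetrics(metrics))
//	conR.SetLogger(logger.With("module", "consensus"))
//	sw.AddReactor("CONSENSUS", conR)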

// OnStart implements BaseService by subscribing to events, which will later
// be broadcast to other peers, and by starting the consensus state if we're
// not in fast sync.
func (conR *Reactor) OnStart() error {
	conR.Logger.Info("Reactor", "waitSync", conR.WaitSync())

	// start routine that computes peer statistics for evaluating peer quality
	go conR.peerStatsRoutine()

	conR.subscribeToBroadcastEvents()

	if !conR.WaitSync() {
		conR.conS.SetSwitch(conR.Switch)
		err := conR.conS.Start()
		if err != nil {
			return err
		}
	}

	return nil
}

// OnStop implements BaseService by unsubscribing from events and stopping
// state.
func (conR *Reactor) OnStop() {
	conR.unsubscribeFromBroadcastEvents()
	if err := conR.conS.Stop(); err != nil {
		conR.Logger.Error("Error stopping consensus state", "err", err)
	}
	if !conR.WaitSync() {
		conR.conS.Wait()
	}
}

// SwitchToConsensus switches from fast_sync mode to consensus mode.
// It resets the state, turns off fast_sync, and starts the consensus
// state-machine.
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
	conR.Logger.Info("SwitchToConsensus")

	// We have no votes, so reconstruct LastCommit from SeenCommit.
	if state.LastBlockHeight > 0 {
		conR.conS.reconstructLastCommit(state)
	}

	// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
	// NewRoundStepMessage.
	conR.conS.updateToState(state)

	conR.mtx.Lock()
	conR.waitSync = false
	conR.mtx.Unlock()
	conR.Metrics.FastSyncing.Set(0)
	conR.Metrics.StateSyncing.Set(0)

	if skipWAL {
		conR.conS.doWALCatchup = false
	}
	conR.conS.SetSwitch(conR.Switch)
	err := conR.conS.Start()
	if err != nil {
		panic(fmt.Sprintf(`Failed to start consensus state: %v

conS:
%+v

conR:
%+v`, err, conR.conS, conR))
	}
}
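
// Editor's note: SwitchToConsensus is typically invoked by the fast-sync
// (blockchain) reactor once it has caught up to the network head, roughly
// (illustrative, assuming the switch holds this reactor under "CONSENSUS"):
//
//	if conR, ok := sw.Reactor("CONSENSUS").(*Reactor); ok {
//		conR.SwitchToConsensus(state, true /* skipWAL */)
//	}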

// GetChannels implements Reactor
func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	// TODO optimize
	return []*p2p.ChannelDescriptor{
		{
			ID:                  StateChannel,
			Priority:            6,
			SendQueueCapacity:   100,
			RecvMessageCapacity: maxMsgSize,
			MessageType:         &tmcons.Message{},
		},
		{
			ID: DataChannel, // maybe split between gossiping current block and catchup stuff
			// once we gossip the whole block there's nothing left to send until next height or round
			Priority:            10,
			SendQueueCapacity:   100,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: maxMsgSize,
			MessageType:         &tmcons.Message{},
		},
		{
			ID:                  VoteChannel,
			Priority:            7,
			SendQueueCapacity:   100,
			RecvBufferCapacity:  100 * 100,
			RecvMessageCapacity: maxMsgSize,
			MessageType:         &tmcons.Message{},
		},
		{
			ID:                  VoteSetBitsChannel,
			Priority:            1,
			SendQueueCapacity:   2,
			RecvBufferCapacity:  1024,
			RecvMessageCapacity: maxMsgSize,
			MessageType:         &tmcons.Message{},
		},
	}
}

// InitPeer implements Reactor by creating a state for the peer.
func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	peerState := NewPeerState(peer).SetLogger(conR.Logger)
	peer.Set(types.PeerStateKey, peerState)
	return peer
}

// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
// peer.
func (conR *Reactor) AddPeer(peer p2p.Peer) {
	if !conR.IsRunning() {
		return
	}

	peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("peer %v has no state", peer))
	}
	// Begin routines for this peer.
	go conR.gossipDataRoutine(peer, peerState)
	go conR.gossipVotesRoutine(peer, peerState)
	go conR.queryMaj23Routine(peer, peerState)

	// Send our state to peer.
	// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
	if !conR.WaitSync() {
		conR.sendNewRoundStepMessage(peer)
	}
}

// RemovePeer is a noop.
func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	if !conR.IsRunning() {
		return
	}
	// TODO
	// ps, ok := peer.Get(PeerStateKey).(*PeerState)
	// if !ok {
	// 	panic(fmt.Sprintf("Peer %v has no state", peer))
	// }
	// ps.Disconnect()
}

// ReceiveEnvelope implements Reactor.
// NOTE: We process these messages even when we're fast_syncing.
// Messages affect either a peer state or the consensus state.
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes is ordered by the receiveRoutine.
// NOTE: blocks on consensus state for proposals, block parts, and votes.
func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
	if !conR.IsRunning() {
		conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
		return
	}
	m := e.Message
	if wm, ok := m.(p2p.Wrapper); ok {
		m = wm.Wrap()
	}
	msg, err := tmcon.MsgFromProto(m.(*tmcons.Message))
	if err != nil {
		conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
		conR.Switch.StopPeerForError(e.Src, err)
		return
	}

	if err := msg.ValidateBasic(); err != nil {
		conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
		conR.Switch.StopPeerForError(e.Src, err)
		return
	}

	conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)

	// Get peer states
	ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("Peer %v has no state", e.Src))
	}

	switch e.ChannelID {
	case StateChannel:
		switch msg := msg.(type) {
		case *tmcon.NewRoundStepMessage:
			conR.conS.mtx.Lock()
			initialHeight := conR.conS.state.InitialHeight
			conR.conS.mtx.Unlock()
			if err = msg.ValidateHeight(initialHeight); err != nil {
				conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
				conR.Switch.StopPeerForError(e.Src, err)
				return
			}
			ps.ApplyNewRoundStepMessage(msg)
		case *tmcon.NewValidBlockMessage:
			ps.ApplyNewValidBlockMessage(msg)
		case *tmcon.HasVoteMessage:
			ps.ApplyHasVoteMessage(msg)
		case *tmcon.VoteSetMaj23Message:
			cs := conR.conS
			cs.mtx.Lock()
			height, votes := cs.Height, cs.Votes
			cs.mtx.Unlock()
			if height != msg.Height {
				return
			}
			// Peer claims to have a maj23 for some BlockID at H,R,S.
			err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
			if err != nil {
				conR.Switch.StopPeerForError(e.Src, err)
				return
			}
			// Respond with a VoteSetBitsMessage showing which votes we have
			// (and consequently which we don't have).
			var ourVotes *bits.BitArray
			switch msg.Type {
			case tmproto.PrevoteType:
				ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
			case tmproto.PrecommitType:
				ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
			default:
				panic("Bad VoteSetMaj23Message field Type. Forgot to add a check in ValidateBasic?")
			}
			m := &tmcons.VoteSetBits{
				Height:  msg.Height,
				Round:   msg.Round,
				Type:    msg.Type,
				BlockID: msg.BlockID.ToProto(),
			}
			v := ourVotes.ToProto()
			if v != nil {
				m.Votes = *v
			}

			p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
				ChannelID: VoteSetBitsChannel,
				Message:   m,
			}, conR.Logger)
		default:
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case DataChannel:
		if conR.WaitSync() {
			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *tmcon.ProposalMessage:
			ps.SetHasProposal(msg.Proposal)
			conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
		case *tmcon.ProposalPOLMessage:
			ps.ApplyProposalPOLMessage(msg)
		case *tmcon.BlockPartMessage:
			ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
			conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
			conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
		default:
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case VoteChannel:
		if conR.WaitSync() {
			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *tmcon.VoteMessage:
			cs := conR.conS
			cs.mtx.RLock()
			height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size()
			cs.mtx.RUnlock()
			ps.EnsureVoteBitArrays(height, valSize)
			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
			ps.SetHasVote(msg.Vote)

			cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}

		default:
			// don't punish (leave room for soft upgrades)
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case VoteSetBitsChannel:
		if conR.WaitSync() {
			conR.Logger.Info("Ignoring message received during sync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *tmcon.VoteSetBitsMessage:
			cs := conR.conS
			cs.mtx.Lock()
			height, votes := cs.Height, cs.Votes
			cs.mtx.Unlock()

			if height == msg.Height {
				var ourVotes *bits.BitArray
				switch msg.Type {
				case tmproto.PrevoteType:
					ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
				case tmproto.PrecommitType:
					ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
				default:
					panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
				}
				ps.ApplyVoteSetBitsMessage(msg, ourVotes)
			} else {
				ps.ApplyVoteSetBitsMessage(msg, nil)
			}
		default:
			// don't punish (leave room for soft upgrades)
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	default:
		conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
	}
}

func (conR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	msg := &tmcons.Message{}
	err := proto.Unmarshal(msgBytes, msg)
	if err != nil {
		panic(err)
	}
	um, err := msg.Unwrap()
	if err != nil {
		panic(err)
	}
	conR.ReceiveEnvelope(p2p.Envelope{
		ChannelID: chID,
		Src:       peer,
		Message:   um,
	})
}
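
// Editor's note: Receive is the legacy byte-oriented entry point kept for
// p2p-layer compatibility. It decodes the wire bytes into the tmcons.Message
// wrapper and forwards the unwrapped payload to ReceiveEnvelope. A caller-side
// equivalent would be, roughly (illustrative only):
//
//	bz, _ := proto.Marshal(wrapped) // wrapped is a *tmcons.Message
//	conR.Receive(StateChannel, peer, bz)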

// SetEventBus sets event bus.
func (conR *Reactor) SetEventBus(b *types.EventBus) {
	conR.eventBus = b
	conR.conS.SetEventBus(b)
}

// WaitSync returns whether the consensus reactor is waiting for state/fast sync.
func (conR *Reactor) WaitSync() bool {
	conR.mtx.RLock()
	defer conR.mtx.RUnlock()
	return conR.waitSync
}

//--------------------------------------

// subscribeToBroadcastEvents subscribes to new-round-step, valid-block, and
// vote events using the internal pubsub defined on the consensus state, so
// that they can be broadcast to peers as they occur.
func (conR *Reactor) subscribeToBroadcastEvents() {
	const subscriber = "consensus-reactor"
	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
		func(data tmevents.EventData) {
			conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
		}); err != nil {
		conR.Logger.Error("Error adding listener for events", "err", err)
	}

	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
		func(data tmevents.EventData) {
			conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
		}); err != nil {
		conR.Logger.Error("Error adding listener for events", "err", err)
	}

	if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
		func(data tmevents.EventData) {
			conR.broadcastHasVoteMessage(data.(*types.Vote))
		}); err != nil {
		conR.Logger.Error("Error adding listener for events", "err", err)
	}
}

func (conR *Reactor) unsubscribeFromBroadcastEvents() {
	const subscriber = "consensus-reactor"
	conR.conS.evsw.RemoveListener(subscriber)
}

func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
	conR.Switch.BroadcastEnvelope(p2p.Envelope{
		ChannelID: StateChannel,
		Message: &tmcons.NewRoundStep{
			Height:                rs.Height,
			Round:                 rs.Round,
			Step:                  uint32(rs.Step),
			SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
			LastCommitRound:       rs.LastCommit.GetRound(),
		},
	})
}

func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
	psh := rs.ProposalBlockParts.Header()
	conR.Switch.BroadcastEnvelope(p2p.Envelope{
		ChannelID: StateChannel,
		Message: &tmcons.NewValidBlock{
			Height:             rs.Height,
			Round:              rs.Round,
			BlockPartSetHeader: psh.ToProto(),
			BlockParts:         rs.ProposalBlockParts.BitArray().ToProto(),
			IsCommit:           rs.Step == cstypes.RoundStepCommit,
		},
	})
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
	conR.Switch.BroadcastEnvelope(p2p.Envelope{
		ChannelID: StateChannel,
		Message: &tmcons.HasVote{
			Height: vote.Height,
			Round:  vote.Round,
			Type:   vote.Type,
			Index:  vote.ValidatorIndex,
		},
	})
	/*
		// TODO: Make this broadcast more selective.
		for _, peer := range conR.Switch.Peers().List() {
			ps, ok := peer.Get(PeerStateKey).(*PeerState)
			if !ok {
				panic(fmt.Sprintf("Peer %v has no state", peer))
			}
			prs := ps.GetRoundState()
			if prs.Height == vote.Height {
				// TODO: Also filter on round?
				peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
			} else {
				// Height doesn't match
				// TODO: check a field, maybe CatchupCommitRound?
				// TODO: But that requires changing the struct field comment.
			}
		}
	*/
}

func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
	rs := conR.conS.GetRoundState()
	p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
		ChannelID: StateChannel,
		Message: &tmcons.NewRoundStep{
			Height:                rs.Height,
			Round:                 rs.Round,
			Step:                  uint32(rs.Step),
			SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
			LastCommitRound:       rs.LastCommit.GetRound(),
		},
	}, conR.Logger)
}

func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Info("Stopping gossipDataRoutine for peer")
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		// Send proposal Block parts?
		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
				part := rs.ProposalBlockParts.GetPart(index)
				logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
				p, err := part.ToProto()
				if err != nil {
					panic(err)
				}
				if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
					ChannelID: DataChannel,
					Message: &tmcons.BlockPart{
						Height: rs.Height, // This tells peer that this part applies to us.
						Round:  rs.Round,  // This tells peer that this part applies to us.
						Part:   *p,
					},
				}, logger) {
					ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
				}
				continue OUTER_LOOP
			}
		}

		// If the peer is on a previous height that we have, help catch up.
		if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) {
			heightLogger := logger.With("height", prs.Height)

			// If we never received the commit message from the peer, the block parts
			// won't be initialized.
			if prs.ProposalBlockParts == nil {
				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
				if blockMeta == nil {
					heightLogger.Error("Failed to load block meta",
						"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
					time.Sleep(conR.conS.config.PeerGossipSleepDuration)
				} else {
					ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader)
				}
				// Continue the loop since prs is a copy and is not affected by this initialization.
				continue OUTER_LOOP
			}
			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
			continue OUTER_LOOP
		}

		// If height and round don't match, sleep.
		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			continue OUTER_LOOP
		}

		// By here, height and round match.
		// Proposal block parts were already matched and sent if any were wanted.
		// (These can match on hash so the round doesn't matter.)
		// Now consider sending other things, like the Proposal itself.

		// Send Proposal && ProposalPOL BitArray?
		if rs.Proposal != nil && !prs.Proposal {
			// Proposal: share the proposal metadata with peer.
			{
				msg := &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()}
				logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
				if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
					ChannelID: DataChannel,
					Message:   msg,
				}, logger) {
					// NOTE[ZM]: A peer might have received a different proposal msg, in which
					// case this Proposal msg will be rejected!
					ps.SetHasProposal(rs.Proposal)
				}
			}
			// ProposalPOL: lets peer know which POL votes we have so far.
			// Peer must receive ProposalMessage first.
			// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
			// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
			if 0 <= rs.Proposal.POLRound {
				msg := &tmcons.ProposalPOL{
					Height:           rs.Height,
					ProposalPolRound: rs.Proposal.POLRound,
					ProposalPol:      *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
				}
				logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
				p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
					ChannelID: DataChannel,
					Message:   msg,
				}, logger)
			}
			continue OUTER_LOOP
		}

		// Nothing to do. Sleep.
		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
		continue OUTER_LOOP
	}
}

func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
	prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) {

	if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
		// Ensure that the peer's PartSetHeader is correct
		blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
		if blockMeta == nil {
			logger.Error("Failed to load block meta", "ourHeight", rs.Height,
				"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		} else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) {
			logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping",
				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		}
		// Load the part
		part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
		if part == nil {
			logger.Error("Could not load part", "index", index,
				"blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader)
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		}
		// Send the part
		pp, err := part.ToProto()
		if err != nil {
			logger.Error("Could not convert part to proto", "index", index, "error", err)
			return
		}
		logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
		if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
			ChannelID: DataChannel,
			Message: &tmcons.BlockPart{
				Height: prs.Height, // Not our height, so it doesn't matter.
				Round:  prs.Round,  // Not our height, so it doesn't matter.
				Part:   *pp,
			},
		}, logger) {
			ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
		} else {
			logger.Debug("Sending block part for catchup failed")
		}
		return
	}
	time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}

func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

	// Simple hack to throttle logs upon sleep.
	var sleeping = 0

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Info("Stopping gossipVotesRoutine for peer")
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		switch sleeping {
		case 1: // First sleep
			sleeping = 2
		case 2: // No more sleep
			sleeping = 0
		}

		// If height matches, then send LastCommit, Prevotes, Precommits.
		if rs.Height == prs.Height {
			heightLogger := logger.With("height", prs.Height)
			if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
				continue OUTER_LOOP
			}
		}

		// Special catchup logic.
		// If peer is lagging by height 1, send LastCommit.
		if prs.Height != 0 && rs.Height == prs.Height+1 {
			if ps.PickSendVote(rs.LastCommit) {
				logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
				continue OUTER_LOOP
			}
		}

		// Catchup logic
		// If peer is lagging by more than 1, send Commit.
		if prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= conR.conS.blockStore.Base() {
			// Load the block commit for prs.Height,
			// which contains precommit signatures for prs.Height.
			if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
				if ps.PickSendVote(commit) {
					logger.Debug("Picked Catchup commit to send", "height", prs.Height)
					continue OUTER_LOOP
				}
			}
		}

		if sleeping == 0 {
			// We sent nothing. Sleep...
			sleeping = 1
			logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height,
				"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
				"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
		} else if sleeping == 2 {
			// Continued sleep...
			sleeping = 1
		}

		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
		continue OUTER_LOOP
	}
}

func (conR *Reactor) gossipVotesForHeight(
	logger log.Logger,
	rs *cstypes.RoundState,
	prs *cstypes.PeerRoundState,
	ps *PeerState,
) bool {

	// If there are lastCommits to send...
	if prs.Step == cstypes.RoundStepNewHeight {
		if ps.PickSendVote(rs.LastCommit) {
			logger.Debug("Picked rs.LastCommit to send")
			return true
		}
	}
	// If there are POL prevotes to send...
	if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 {
		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
			if ps.PickSendVote(polPrevotes) {
				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
					"round", prs.ProposalPOLRound)
				return true
			}
		}
	}
	// If there are prevotes to send...
	if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are precommits to send...
	if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
			logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are prevotes to send... (needed because of the validBlock mechanism)
	if prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are POLPrevotes to send...
	if prs.ProposalPOLRound != -1 {
		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
			if ps.PickSendVote(polPrevotes) {
				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
					"round", prs.ProposalPOLRound)
				return true
			}
		}
	}

	return false
}
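
// Editor's note on ordering: the branches above run from the cheapest way to
// help the peer advance downwards. For example, a peer still in
// RoundStepNewHeight is first offered our LastCommit so it can finish the
// previous height; the trailing prevote and POL-prevote branches repeat
// earlier checks without the Step guards because, under the validBlock
// mechanism, a peer can still need round-R prevotes after it has moved past
// RoundStepPrevoteWait.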

// NOTE: `queryMaj23Routine` has a simple, crude design, since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Info("Stopping queryMaj23Routine for peer")
			return
		}

		// Maybe send Height/Round/Prevotes
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height {
				if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
						ChannelID: StateChannel,
						Message: &tmcons.VoteSetMaj23{
							Height:  prs.Height,
							Round:   prs.Round,
							Type:    tmproto.PrevoteType,
							BlockID: maj23.ToProto(),
						}}, logger)

					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// Maybe send Height/Round/Precommits
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height {
				if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
						ChannelID: StateChannel,
						Message: &tmcons.VoteSetMaj23{
							Height:  prs.Height,
							Round:   prs.Round,
							Type:    tmproto.PrecommitType,
							BlockID: maj23.ToProto(),
						}}, logger)
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// Maybe send Height/Round/ProposalPOL
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
				if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
						ChannelID: StateChannel,
						Message: &tmcons.VoteSetMaj23{
							Height:  prs.Height,
							Round:   prs.ProposalPOLRound,
							Type:    tmproto.PrevoteType,
							BlockID: maj23.ToProto(),
						}}, logger)
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// There is little point sending LastCommitRound/LastCommit;
		// these are fleeting and non-blocking.

		// Maybe send Height/CatchupCommitRound/CatchupCommit.
		{
			prs := ps.GetRoundState()
			if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
				prs.Height >= conR.conS.blockStore.Base() {
				if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
					p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
						ChannelID: StateChannel,
						Message: &tmcons.VoteSetMaj23{
							Height:  prs.Height,
							Round:   commit.Round,
							Type:    tmproto.PrecommitType,
							BlockID: commit.BlockID.ToProto(),
						}}, logger)
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)

		continue OUTER_LOOP
	}
}

func (conR *Reactor) peerStatsRoutine() {
	for {
		if !conR.IsRunning() {
			conR.Logger.Info("Stopping peerStatsRoutine")
			return
		}

		select {
		case msg := <-conR.conS.statsMsgQueue:
			// Get peer
			peer := conR.Switch.Peers().Get(msg.PeerID)
			if peer == nil {
				conR.Logger.Debug("Attempt to update stats for non-existent peer",
					"peer", msg.PeerID)
				continue
			}
			// Get peer state
			ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
			if !ok {
				panic(fmt.Sprintf("Peer %v has no state", peer))
			}
			switch msg.Msg.(type) {
			case *tmcon.VoteMessage:
				if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
					conR.Switch.MarkPeerAsGood(peer)
				}
			case *tmcon.BlockPartMessage:
				if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
					conR.Switch.MarkPeerAsGood(peer)
				}
			}
		case <-conR.conS.Quit():
			return

		case <-conR.Quit():
			return
		}
	}
}

// String returns a string representation of the Reactor.
// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
// TODO: improve!
func (conR *Reactor) String() string {
	// better not to access shared variables
	return "ConsensusReactor" // conR.StringIndented("")
}

// StringIndented returns an indented string representation of the Reactor
func (conR *Reactor) StringIndented(indent string) string {
	s := "ConsensusReactor{\n"
	s += indent + "  " + conR.conS.StringIndented(indent+"  ") + "\n"
	for _, peer := range conR.Switch.Peers().List() {
		ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
		if !ok {
			panic(fmt.Sprintf("Peer %v has no state", peer))
		}
		s += indent + "  " + ps.StringIndented(indent+"  ") + "\n"
	}
	s += indent + "}"
	return s
}

// ReactorMetrics sets the metrics
func ReactorMetrics(metrics *tmcon.Metrics) ReactorOption {
	return func(conR *Reactor) { conR.Metrics = metrics }
}

//-----------------------------------------------------------------------------

var (
	ErrPeerStateHeightRegression = errors.New("error peer state height regression")
	ErrPeerStateInvalidStartTime = errors.New("error peer state invalid startTime")
)

// PeerState contains the known state of a peer, including its connection and
// threadsafe access to its PeerRoundState.
// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
// Be mindful of what you Expose.
type PeerState struct {
	peer   p2p.Peer
	logger log.Logger

	mtx   sync.Mutex             // NOTE: Modify below using setters, never directly.
	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
	Stats *peerStateStats        `json:"stats"`       // Exposed.
}

// peerStateStats holds internal statistics for a peer.
type peerStateStats struct {
	Votes      int `json:"votes"`
	BlockParts int `json:"block_parts"`
}

func (pss peerStateStats) String() string {
	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
		pss.Votes, pss.BlockParts)
}

// NewPeerState returns a new PeerState for the given Peer
func NewPeerState(peer p2p.Peer) *PeerState {
	return &PeerState{
		peer:   peer,
		logger: log.NewNopLogger(),
		PRS: cstypes.PeerRoundState{
			Round:              -1,
			ProposalPOLRound:   -1,
			LastCommitRound:    -1,
			CatchupCommitRound: -1,
		},
		Stats: &peerStateStats{},
	}
}
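
// Editor's sketch: PeerState is normally created in InitPeer and attached to
// the peer's key-value store, from which the gossip routines retrieve it:
//
//	ps := NewPeerState(peer).SetLogger(logger)
//	peer.Set(types.PeerStateKey, ps)
//	// ... later, from any routine:
//	ps = peer.Get(types.PeerStateKey).(*PeerState)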

// SetLogger sets a logger on the peer state and returns the peer state
// itself.
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
	ps.logger = logger
	return ps
}

// GetRoundState returns a shallow copy of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	prs := ps.PRS // copy
	return &prs
}

// ToJSON returns a JSON representation of the PeerState.
func (ps *PeerState) ToJSON() ([]byte, error) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return tmjson.Marshal(ps)
}

// GetHeight returns an atomic snapshot of the PeerRoundState's height.
// It is used by the mempool to ensure peers are caught up before broadcasting
// new txs.
func (ps *PeerState) GetHeight() int64 {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return ps.PRS.Height
}

// SetHasProposal sets the given proposal as known for the peer.
func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
		return
	}

	if ps.PRS.Proposal {
		return
	}

	ps.PRS.Proposal = true

	// ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage
	if ps.PRS.ProposalBlockParts != nil {
		return
	}

	ps.PRS.ProposalBlockPartSetHeader = proposal.BlockID.PartSetHeader
	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(proposal.BlockID.PartSetHeader.Total))
	ps.PRS.ProposalPOLRound = proposal.POLRound
	ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
}

// InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.ProposalBlockParts != nil {
		return
	}

	ps.PRS.ProposalBlockPartSetHeader = partSetHeader
	ps.PRS.ProposalBlockParts = bits.NewBitArray(int(partSetHeader.Total))
}

// SetHasProposalBlockPart sets the given block part index as known for the peer.
func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != height || ps.PRS.Round != round {
		return
	}

	ps.PRS.ProposalBlockParts.SetIndex(index, true)
}

// PickSendVote picks a vote and sends it to the peer.
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
	if vote, ok := ps.PickVoteToSend(votes); ok {
		ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
		if p2p.TrySendEnvelopeShim(ps.peer, p2p.Envelope{ //nolint: staticcheck
			ChannelID: VoteChannel,
			Message:   &tmcons.Vote{Vote: vote.ToProto()},
		}, ps.logger) {
			ps.SetHasVote(vote)
			return true
		}
		return false
	}
	return false
}

// PickVoteToSend picks a vote to send to the peer.
// Returns true if a vote was picked.
// NOTE: `votes` must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if votes.Size() == 0 {
		return nil, false
	}

	height, round, votesType, size :=
		votes.GetHeight(), votes.GetRound(), tmproto.SignedMsgType(votes.Type()), votes.Size()

	// Lazily set data using 'votes'.
	if votes.IsCommit() {
		ps.ensureCatchupCommitRound(height, round, size)
	}
	ps.ensureVoteBitArrays(height, size)

	psVotes := ps.getVoteBitArray(height, round, votesType)
	if psVotes == nil {
		return nil, false // Not something worth sending
	}
	if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
		return votes.GetByIndex(int32(index)), true
	}
	return nil, false
}
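
// Editor's note: votes.BitArray().Sub(psVotes) yields the votes we hold that
// the peer, as far as we know, does not. A small worked example (illustrative):
//
//	ours:           1 1 0 1  (we hold votes from validators 0, 1, 3)
//	peer (psVotes): 1 0 0 1  (peer already has votes from validators 0, 3)
//	ours.Sub(peer): 0 1 0 0  (only validator 1's vote is worth sending)
//
// PickRandom then chooses a random set index from that difference.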

func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray {
	if !types.IsVoteTypeValid(votesType) {
		return nil
	}

	if ps.PRS.Height == height {
		if ps.PRS.Round == round {
			switch votesType {
			case tmproto.PrevoteType:
				return ps.PRS.Prevotes
			case tmproto.PrecommitType:
				return ps.PRS.Precommits
			}
		}
		if ps.PRS.CatchupCommitRound == round {
			switch votesType {
			case tmproto.PrevoteType:
				return nil
			case tmproto.PrecommitType:
				return ps.PRS.CatchupCommit
			}
		}
		if ps.PRS.ProposalPOLRound == round {
			switch votesType {
			case tmproto.PrevoteType:
				return ps.PRS.ProposalPOL
			case tmproto.PrecommitType:
				return nil
			}
		}
		return nil
	}
	if ps.PRS.Height == height+1 {
		if ps.PRS.LastCommitRound == round {
			switch votesType {
			case tmproto.PrevoteType:
				return nil
			case tmproto.PrecommitType:
				return ps.PRS.LastCommit
			}
		}
		return nil
	}
	return nil
}

// 'round': A round for which we have a +2/3 commit.
func (ps *PeerState) ensureCatchupCommitRound(height int64, round int32, numValidators int) {
	if ps.PRS.Height != height {
		return
	}
	/*
		NOTE: This is wrong, 'round' could change.
		e.g. if orig round is not the same as block LastCommit round.
		if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
			panic(fmt.Sprintf(
				"Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v",
				height,
				ps.CatchupCommitRound,
				round))
		}
	*/
	if ps.PRS.CatchupCommitRound == round {
		return // Nothing to do!
	}
	ps.PRS.CatchupCommitRound = round
	if round == ps.PRS.Round {
		ps.PRS.CatchupCommit = ps.PRS.Precommits
	} else {
		ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
	}
}

// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
// what votes this peer has received.
// NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height.
func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.ensureVoteBitArrays(height, numValidators)
}

func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
	if ps.PRS.Height == height {
		if ps.PRS.Prevotes == nil {
			ps.PRS.Prevotes = bits.NewBitArray(numValidators)
		}
		if ps.PRS.Precommits == nil {
			ps.PRS.Precommits = bits.NewBitArray(numValidators)
		}
		if ps.PRS.CatchupCommit == nil {
			ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
		}
		if ps.PRS.ProposalPOL == nil {
			ps.PRS.ProposalPOL = bits.NewBitArray(numValidators)
		}
	} else if ps.PRS.Height == height+1 {
		if ps.PRS.LastCommit == nil {
			ps.PRS.LastCommit = bits.NewBitArray(numValidators)
		}
	}
}

// RecordVote increments internal vote-related statistics for this peer.
// It returns the total number of added votes.
func (ps *PeerState) RecordVote() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.Stats.Votes++

	return ps.Stats.Votes
}

// VotesSent returns the total number of useful votes the peer has sent us.
func (ps *PeerState) VotesSent() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return ps.Stats.Votes
}

// RecordBlockPart increments internal block-part-related statistics for this peer.
// It returns the total number of added block parts.
func (ps *PeerState) RecordBlockPart() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.Stats.BlockParts++
	return ps.Stats.BlockParts
}

// BlockPartsSent returns the number of useful block parts the peer has sent us.
func (ps *PeerState) BlockPartsSent() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return ps.Stats.BlockParts
}

// SetHasVote sets the given vote as known by the peer.
func (ps *PeerState) SetHasVote(vote *types.Vote) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}

func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) {
	logger := ps.logger.With(
		"peerH/R",
		fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
		"H/R",
		fmt.Sprintf("%d/%d", height, round))
	logger.Debug("setHasVote", "type", voteType, "index", index)

	// NOTE: some may be nil BitArrays -> no side effects.
	psVotes := ps.getVoteBitArray(height, round, voteType)
	if psVotes != nil {
		psVotes.SetIndex(int(index), true)
	}
}

// ApplyNewRoundStepMessage updates the peer state for the new round.
func (ps *PeerState) ApplyNewRoundStepMessage(msg *tmcon.NewRoundStepMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	// Ignore duplicates or decreases
	if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 {
		return
	}

	// Just remember these values.
	psHeight := ps.PRS.Height
	psRound := ps.PRS.Round
	psCatchupCommitRound := ps.PRS.CatchupCommitRound
	psCatchupCommit := ps.PRS.CatchupCommit

	startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
	ps.PRS.Height = msg.Height
	ps.PRS.Round = msg.Round
	ps.PRS.Step = msg.Step
	ps.PRS.StartTime = startTime
	if psHeight != msg.Height || psRound != msg.Round {
		ps.PRS.Proposal = false
		ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{}
		ps.PRS.ProposalBlockParts = nil
		ps.PRS.ProposalPOLRound = -1
		ps.PRS.ProposalPOL = nil
		// We'll update the BitArray capacity later.
		ps.PRS.Prevotes = nil
		ps.PRS.Precommits = nil
	}
	if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
		// Peer caught up to CatchupCommitRound.
		// Preserve psCatchupCommit!
		// NOTE: We prefer to use prs.Precommits if
		// prs.Round matches prs.CatchupCommitRound.
		ps.PRS.Precommits = psCatchupCommit
	}
	if psHeight != msg.Height {
		// Shift Precommits to LastCommit.
		if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
			ps.PRS.LastCommitRound = msg.LastCommitRound
			ps.PRS.LastCommit = ps.PRS.Precommits
		} else {
			ps.PRS.LastCommitRound = msg.LastCommitRound
			ps.PRS.LastCommit = nil
		}
		// We'll update the BitArray capacity later.
		ps.PRS.CatchupCommitRound = -1
		ps.PRS.CatchupCommit = nil
	}
}
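
// Editor's note: CompareHRS (defined elsewhere in this package) imposes a
// total order on (height, round, step) tuples, so the guard at the top of
// ApplyNewRoundStepMessage drops anything that does not strictly advance the
// peer's state. Illustrative (values assumed, not from this file):
//
//	CompareHRS(10, 1, s, 10, 0, s) > 0  // higher round: apply
//	CompareHRS(10, 0, s, 10, 1, s) < 0  // regression: ignore
//	CompareHRS(10, 0, s, 10, 0, s) == 0 // duplicate: ignore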

// ApplyNewValidBlockMessage updates the peer state for the new valid block.
func (ps *PeerState) ApplyNewValidBlockMessage(msg *tmcon.NewValidBlockMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}

	if ps.PRS.Round != msg.Round && !msg.IsCommit {
		return
	}

	ps.PRS.ProposalBlockPartSetHeader = msg.BlockPartSetHeader
	ps.PRS.ProposalBlockParts = msg.BlockParts
}

// ApplyProposalPOLMessage updates the peer state for the new proposal POL.
func (ps *PeerState) ApplyProposalPOLMessage(msg *tmcon.ProposalPOLMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}
	if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound {
		return
	}

	// TODO: Merge onto existing ps.PRS.ProposalPOL?
	// We might have sent some prevotes in the meantime.
	ps.PRS.ProposalPOL = msg.ProposalPOL
}

// ApplyHasVoteMessage updates the peer state for the new vote.
func (ps *PeerState) ApplyHasVoteMessage(msg *tmcon.HasVoteMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}

	ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}

// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes
// it claims to have for the corresponding BlockID.
// `ourVotes` is a BitArray of votes we have for msg.BlockID
// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
// we conservatively overwrite ps's votes w/ msg.Votes.
func (ps *PeerState) ApplyVoteSetBitsMessage(msg *tmcon.VoteSetBitsMessage, ourVotes *bits.BitArray) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
	if votes != nil {
		if ourVotes == nil {
			votes.Update(msg.Votes)
		} else {
			otherVotes := votes.Sub(ourVotes)
			hasVotes := otherVotes.Or(msg.Votes)
			votes.Update(hasVotes)
		}
	}
}
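
// Editor's note on the bit algebra above: the result written back is
// (votes \ ourVotes) | msg.Votes. Within the subset we can verify (ourVotes,
// our own votes for msg.BlockID) the peer's reply is taken as authoritative;
// outside that subset, previously known bits are preserved. Worked example
// (illustrative):
//
//	votes:      1 1 0 0  (what we believed the peer had)
//	ourVotes:   1 0 0 0  (our votes for msg.BlockID)
//	msg.Votes:  0 1 1 0  (what the peer claims for msg.BlockID)
//
//	votes.Sub(ourVotes)  -> 0 1 0 0
//	.Or(msg.Votes)       -> 0 1 1 0
//	votes.Update(...)    -> 0 1 1 0  (bit 0 now reflects the peer's claim)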

// String returns a string representation of the PeerState
func (ps *PeerState) String() string {
	return ps.StringIndented("")
}

// StringIndented returns a string representation of the PeerState
func (ps *PeerState) StringIndented(indent string) string {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return fmt.Sprintf(`PeerState{
%s  Key        %v
%s  RoundState %v
%s  Stats      %v
%s}`,
		indent, ps.peer.ID(),
		indent, ps.PRS.StringIndented(indent+"  "),
		indent, ps.Stats,
		indent)
}

//-----------------------------------------------------------------------------

// func init() {
// 	tmjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage")
// 	tmjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage")
// 	tmjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal")
// 	tmjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL")
// 	tmjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart")
// 	tmjson.RegisterType(&VoteMessage{}, "tendermint/Vote")
// 	tmjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote")
// 	tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23")
// 	tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
// }