github.com/pokt-network/tendermint@v0.32.11-0.20230426215212-59310158d3e9/consensus/reactor.go

package consensus

import (
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/pkg/errors"

	amino "github.com/tendermint/go-amino"

	cstypes "github.com/tendermint/tendermint/consensus/types"
	"github.com/tendermint/tendermint/libs/bits"
	tmevents "github.com/tendermint/tendermint/libs/events"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

const (
	StateChannel       = byte(0x20)
	DataChannel        = byte(0x21)
	VoteChannel        = byte(0x22)
	VoteSetBitsChannel = byte(0x23)

	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.

	blocksToContributeToBecomeGoodPeer = 4
	votesToContributeToBecomeGoodPeer  = 4
)

//-----------------------------------------------------------------------------

// Reactor defines a reactor for the consensus service.
type Reactor struct {
	p2p.BaseReactor // BaseService + p2p.Switch

	conS *State

	mtx      sync.RWMutex
	fastSync bool
	eventBus *types.EventBus

	metrics *Metrics
}

type ReactorOption func(*Reactor)

// NewReactor returns a new Reactor with the given consensusState.
func NewReactor(consensusState *State, fastSync bool, options ...ReactorOption) *Reactor {
	conR := &Reactor{
		conS:     consensusState,
		fastSync: fastSync,
		metrics:  NopMetrics(),
	}
	conR.updateFastSyncingMetric()
	conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)

	for _, option := range options {
		option(conR)
	}

	return conR
}

// OnStart implements BaseService by subscribing to events (which will later be
// broadcast to other peers) and starting the consensus state if we're not in fast sync.
func (conR *Reactor) OnStart() error {
	conR.Logger.Info("Reactor ", "fastSync", conR.FastSync())

	// start routine that computes peer statistics for evaluating peer quality
	go conR.peerStatsRoutine()

	conR.subscribeToBroadcastEvents()

	if !conR.FastSync() {
		err := conR.conS.Start()
		if err != nil {
			return err
		}
	}

	return nil
}

// OnStop implements BaseService by unsubscribing from events and stopping
// state.
func (conR *Reactor) OnStop() {
	conR.unsubscribeFromBroadcastEvents()
	conR.conS.Stop()
	if !conR.FastSync() {
		conR.conS.Wait()
	}
}

// SwitchToConsensus switches from fast_sync mode to consensus mode.
// It resets the state, turns off fast_sync, and starts the consensus state-machine.
func (conR *Reactor) SwitchToConsensus(state sm.State, blocksSynced uint64) {
	conR.Logger.Info("SwitchToConsensus")
	conR.conS.reconstructLastCommit(state)
	// NOTE: The line below causes broadcastNewRoundStepRoutine() to
	// broadcast a NewRoundStepMessage.
	conR.conS.updateToState(state)

	conR.mtx.Lock()
	conR.fastSync = false
	conR.mtx.Unlock()
	conR.metrics.FastSyncing.Set(0)

	if blocksSynced > 0 {
		// don't bother with the WAL if we fast synced
		conR.conS.doWALCatchup = false
	}
	err := conR.conS.Start()
	if err != nil {
		panic(fmt.Sprintf(`Failed to start consensus state: %v

conS:
%+v

conR:
%+v`, err, conR.conS, conR))
	}
}

// GetChannels implements Reactor
func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	// TODO optimize
	return []*p2p.ChannelDescriptor{
		{
			ID:                  StateChannel,
			Priority:            5,
			SendQueueCapacity:   100,
			RecvMessageCapacity: maxMsgSize,
		},
		{
			ID: DataChannel, // maybe split between gossiping current block and catchup stuff
			// once we gossip the whole block there's nothing left to send until next height or round
			Priority:            10,
			SendQueueCapacity:   100,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: maxMsgSize,
		},
		{
			ID:                  VoteChannel,
			Priority:            5,
			SendQueueCapacity:   100,
			RecvBufferCapacity:  100 * 100,
			RecvMessageCapacity: maxMsgSize,
		},
		{
			ID:                  VoteSetBitsChannel,
			Priority:            1,
			SendQueueCapacity:   2,
			RecvBufferCapacity:  1024,
			RecvMessageCapacity: maxMsgSize,
		},
	}
}

// InitPeer implements Reactor by creating a state for the peer.
func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	peerState := NewPeerState(peer).SetLogger(conR.Logger)
	peer.Set(types.PeerStateKey, peerState)
	return peer
}

// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
// peer.
func (conR *Reactor) AddPeer(peer p2p.Peer) {
	if !conR.IsRunning() {
		return
	}

	peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("peer %v has no state", peer))
	}
	// Begin routines for this peer.
	go conR.gossipDataRoutine(peer, peerState)
	go conR.gossipVotesRoutine(peer, peerState)
	go conR.queryMaj23Routine(peer, peerState)

	// Send our state to the peer.
	// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
	if !conR.FastSync() {
		conR.sendNewRoundStepMessage(peer)
	}
}

// RemovePeer is a noop.
func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	if !conR.IsRunning() {
		return
	}
	// TODO
	// ps, ok := peer.Get(PeerStateKey).(*PeerState)
	// if !ok {
	// 	panic(fmt.Sprintf("Peer %v has no state", peer))
	// }
	// ps.Disconnect()
}

// Receive implements Reactor.
// NOTE: We process these messages even when we're fast_syncing.
// Messages affect either a peer state or the consensus state.
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes is ordered by the receiveRoutine.
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	if !conR.IsRunning() {
		conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
		return
	}

	msg, err := decodeMsg(msgBytes)
	if err != nil {
		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		conR.Switch.StopPeerForError(src, err)
		return
	}

	if err = msg.ValidateBasic(); err != nil {
		conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		conR.Switch.StopPeerForError(src, err)
		return
	}

	conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	// Get peer state
	ps, ok := src.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("Peer %v has no state", src))
	}

	switch chID {
	case StateChannel:
		switch msg := msg.(type) {
		case *NewRoundStepMessage:
			ps.ApplyNewRoundStepMessage(msg)
		case *NewValidBlockMessage:
			ps.ApplyNewValidBlockMessage(msg)
		case *HasVoteMessage:
			ps.ApplyHasVoteMessage(msg)
		case *VoteSetMaj23Message:
			cs := conR.conS
			cs.mtx.Lock()
			height, votes := cs.Height, cs.Votes
			cs.mtx.Unlock()
			if height != msg.Height {
				return
			}
			// Peer claims to have a maj23 for some BlockID at H,R,S.
			err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
			if err != nil {
				conR.Switch.StopPeerForError(src, err)
				return
			}
			// Respond with a VoteSetBitsMessage showing which votes we have
			// (and consequently which we don't have).
			var ourVotes *bits.BitArray
			switch msg.Type {
			case types.PrevoteType:
				ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
			case types.PrecommitType:
				ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
			default:
				panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
			}
			src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{
				Height:  msg.Height,
				Round:   msg.Round,
				Type:    msg.Type,
				BlockID: msg.BlockID,
				Votes:   ourVotes,
			}))
		default:
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case DataChannel:
		if conR.FastSync() {
			conR.Logger.Debug("Ignoring message received during fastSync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *ProposalMessage:
			ps.SetHasProposal(msg.Proposal)
			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
		case *ProposalPOLMessage:
			ps.ApplyProposalPOLMessage(msg)
		case *BlockPartMessage:
			ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
			conR.metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
		default:
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case VoteChannel:
		if conR.FastSync() {
			conR.Logger.Debug("Ignoring message received during fastSync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *VoteMessage:
			cs := conR.conS
			cs.mtx.RLock()
			height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size()
			cs.mtx.RUnlock()
			ps.EnsureVoteBitArrays(height, valSize)
			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
			ps.SetHasVote(msg.Vote)

			cs.peerMsgQueue <- msgInfo{msg, src.ID()}

		default:
			// don't punish (leave room for soft upgrades)
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case VoteSetBitsChannel:
		if conR.FastSync() {
			conR.Logger.Debug("Ignoring message received during fastSync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *VoteSetBitsMessage:
			cs := conR.conS
			cs.mtx.Lock()
			height, votes := cs.Height, cs.Votes
			cs.mtx.Unlock()

			if height == msg.Height {
				var ourVotes *bits.BitArray
				switch msg.Type {
				case types.PrevoteType:
					ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
				case types.PrecommitType:
					ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
				default:
					panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
				}
				ps.ApplyVoteSetBitsMessage(msg, ourVotes)
			} else {
				ps.ApplyVoteSetBitsMessage(msg, nil)
			}
		default:
			// don't punish (leave room for soft upgrades)
			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		}

	default:
		conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
	}
}

// SetEventBus sets the event bus.
func (conR *Reactor) SetEventBus(b *types.EventBus) {
	conR.eventBus = b
	conR.conS.SetEventBus(b)
}

// FastSync returns whether the consensus reactor is in fast-sync mode.
func (conR *Reactor) FastSync() bool {
	conR.mtx.RLock()
	defer conR.mtx.RUnlock()
	return conR.fastSync
}

//--------------------------------------

// subscribeToBroadcastEvents subscribes to new round steps, valid blocks, and
// votes on the consensus state's internal pubsub so they can be broadcast
// to peers as they occur.
func (conR *Reactor) subscribeToBroadcastEvents() {
	const subscriber = "consensus-reactor"
	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
		func(data tmevents.EventData) {
			conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
		})

	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
		func(data tmevents.EventData) {
			conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
		})

	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
		func(data tmevents.EventData) {
			conR.broadcastHasVoteMessage(data.(*types.Vote))
		})
}

func (conR *Reactor) unsubscribeFromBroadcastEvents() {
	const subscriber = "consensus-reactor"
	conR.conS.evsw.RemoveListener(subscriber)
}

func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
	nrsMsg := makeRoundStepMessage(rs)
	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}

func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
	csMsg := &NewValidBlockMessage{
		Height:           rs.Height,
		Round:            rs.Round,
		BlockPartsHeader: rs.ProposalBlockParts.Header(),
		BlockParts:       rs.ProposalBlockParts.BitArray(),
		IsCommit:         rs.Step == cstypes.RoundStepCommit,
	}
	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
	msg := &HasVoteMessage{
		Height: vote.Height,
		Round:  vote.Round,
		Type:   vote.Type,
		Index:  vote.ValidatorIndex,
	}
	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
	/*
		// TODO: Make this broadcast more selective.
		for _, peer := range conR.Switch.Peers().List() {
			ps, ok := peer.Get(PeerStateKey).(*PeerState)
			if !ok {
				panic(fmt.Sprintf("Peer %v has no state", peer))
			}
			prs := ps.GetRoundState()
			if prs.Height == vote.Height {
				// TODO: Also filter on round?
				peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
			} else {
				// Height doesn't match
				// TODO: check a field, maybe CatchupCommitRound?
				// TODO: But that requires changing the struct field comment.
			}
		}
	*/
}

func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
	nrsMsg = &NewRoundStepMessage{
		Height:                rs.Height,
		Round:                 rs.Round,
		Step:                  rs.Step,
		SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
		LastCommitRound:       rs.LastCommit.GetRound(),
	}
	return
}

func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
	rs := conR.conS.GetRoundState()
	nrsMsg := makeRoundStepMessage(rs)
	peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}

func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Debug("Stopping gossipDataRoutine for peer")
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		// Send proposal block parts?
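		// If the peer is tracking the same proposal part set as ours, pick a
		// random part it is still missing (per its bit array) and send it over
		// the DataChannel, marking the part as known only if Send succeeds.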
		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartsHeader) {
			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
				part := rs.ProposalBlockParts.GetPart(index)
				msg := &BlockPartMessage{
					Height: rs.Height, // This tells peer that this part applies to us.
					Round:  rs.Round,  // This tells peer that this part applies to us.
					Part:   part,
				}
				logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
				if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
					ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
				}
				continue OUTER_LOOP
			}
		}

		// If the peer is on a previous height that we have, help catch up.
		if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) {
			heightLogger := logger.With("height", prs.Height)

			// If we never received the commit message from the peer, the block parts won't be initialized.
			if prs.ProposalBlockParts == nil {
				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
				if blockMeta == nil {
					heightLogger.Error("Failed to load block meta",
						"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
					time.Sleep(conR.conS.config.PeerGossipSleepDuration)
				} else {
					ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
				}
				// Continue the loop since prs is a copy and not affected by this initialization.
				continue OUTER_LOOP
			}
			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
			continue OUTER_LOOP
		}

		// If height and round don't match, sleep.
		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
			//logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			continue OUTER_LOOP
		}

		// By here, height and round match.
		// Proposal block parts were already matched and sent if any were wanted.
		// (These can match on hash so the round doesn't matter.)
		// Now consider sending other things, like the Proposal itself.

		// Send Proposal && ProposalPOL BitArray?
		if rs.Proposal != nil && !prs.Proposal {
			// Proposal: share the proposal metadata with the peer.
			{
				msg := &ProposalMessage{Proposal: rs.Proposal}
				logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
				if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
					// NOTE[ZM]: A peer might have received a different proposal msg, so this Proposal msg will be rejected!
					ps.SetHasProposal(rs.Proposal)
				}
			}
			// ProposalPOL: lets the peer know which POL votes we have so far.
			// The peer must receive ProposalMessage first.
			// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
			// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
			if 0 <= rs.Proposal.POLRound {
				msg := &ProposalPOLMessage{
					Height:           rs.Height,
					ProposalPOLRound: rs.Proposal.POLRound,
					ProposalPOL:      rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
				}
				logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
				peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
			}
			continue OUTER_LOOP
		}

		// Nothing to do. Sleep.
		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
		continue OUTER_LOOP
	}
}

func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
	prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) {

	if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
		// Ensure that the peer's PartSetHeader is correct
		blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
		if blockMeta == nil {
			logger.Error("Failed to load block meta", "ourHeight", rs.Height,
				"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
			logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
				"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		}
		// Load the part
		part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
		if part == nil {
			logger.Error("Could not load part", "index", index,
				"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
			return
		}
		// Send the part
		msg := &BlockPartMessage{
			Height: prs.Height, // Not our height, so it doesn't matter.
			Round:  prs.Round,  // Not our height, so it doesn't matter.
			Part:   part,
		}
		logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
		if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
			ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
		} else {
			logger.Debug("Sending block part for catchup failed")
		}
		return
	}
	//logger.Info("No parts to send in catch-up, sleeping")
	time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}

func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

	// Simple hack to throttle logs upon sleep.
	var sleeping = 0

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Debug("Stopping gossipVotesRoutine for peer")
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		switch sleeping {
		case 1: // First sleep
			sleeping = 2
		case 2: // No more sleep
			sleeping = 0
		}

		//logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
		//	"prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)

		// If height matches, then send LastCommit, Prevotes, Precommits.
		if rs.Height == prs.Height {
			heightLogger := logger.With("height", prs.Height)
			if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
				continue OUTER_LOOP
			}
		}

		// Special catchup logic.
		// If the peer is lagging by one height, send LastCommit.
		if prs.Height != 0 && rs.Height == prs.Height+1 {
			if ps.PickSendVote(rs.LastCommit) {
				logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
				continue OUTER_LOOP
			}
		}

		// Catchup logic.
		// If the peer is lagging by more than one height, send Commit.
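		// The stored commit for prs.Height carries the precommits that
		// finalized that block, so pick one the peer is missing and send it.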
		if prs.Height != 0 && rs.Height >= prs.Height+2 {
			// Load the block commit for prs.Height,
			// which contains precommit signatures for prs.Height.
			commit := conR.conS.blockStore.LoadBlockCommit(prs.Height)
			if ps.PickSendVote(commit) {
				logger.Debug("Picked Catchup commit to send", "height", prs.Height)
				continue OUTER_LOOP
			}
		}

		if sleeping == 0 {
			// We sent nothing. Sleep...
			sleeping = 1
			logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height,
				"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
				"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
		} else if sleeping == 2 {
			// Continued sleep...
			sleeping = 1
		}

		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
		continue OUTER_LOOP
	}
}

func (conR *Reactor) gossipVotesForHeight(
	logger log.Logger,
	rs *cstypes.RoundState,
	prs *cstypes.PeerRoundState,
	ps *PeerState,
) bool {

	// If there are lastCommits to send...
	if prs.Step == cstypes.RoundStepNewHeight {
		if ps.PickSendVote(rs.LastCommit) {
			logger.Debug("Picked rs.LastCommit to send")
			return true
		}
	}
	// If there are POL prevotes to send...
	if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 {
		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
			if ps.PickSendVote(polPrevotes) {
				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
					"round", prs.ProposalPOLRound)
				return true
			}
		}
	}
	// If there are prevotes to send...
	if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are precommits to send...
	if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
			logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are prevotes to send... Needed because of the validBlock mechanism.
	if prs.Round != -1 && prs.Round <= rs.Round {
		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
			return true
		}
	}
	// If there are POLPrevotes to send...
	if prs.ProposalPOLRound != -1 {
		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
			if ps.PickSendVote(polPrevotes) {
				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
					"round", prs.ProposalPOLRound)
				return true
			}
		}
	}

	return false
}

// NOTE: `queryMaj23Routine` has a simple, crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
	logger := conR.Logger.With("peer", peer)

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			logger.Debug("Stopping queryMaj23Routine for peer")
			return
		}

		// Maybe send Height/Round/Prevotes
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height {
				if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
					peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   prs.Round,
						Type:    types.PrevoteType,
						BlockID: maj23,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// Maybe send Height/Round/Precommits
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height {
				if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
					peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   prs.Round,
						Type:    types.PrecommitType,
						BlockID: maj23,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// Maybe send Height/Round/ProposalPOL
		{
			rs := conR.conS.GetRoundState()
			prs := ps.GetRoundState()
			if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
				if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
					peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   prs.ProposalPOLRound,
						Type:    types.PrevoteType,
						BlockID: maj23,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		// There's little point sending LastCommitRound/LastCommit;
		// these are fleeting and non-blocking.

		// Maybe send Height/CatchupCommitRound/CatchupCommit.
		{
			prs := ps.GetRoundState()
			if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
				prs.Height >= conR.conS.blockStore.Base() {
				if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
					peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
						Height:  prs.Height,
						Round:   commit.Round(),
						Type:    types.PrecommitType,
						BlockID: commit.BlockID,
					}))
					time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
				}
			}
		}

		time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)

		continue OUTER_LOOP
	}
}

func (conR *Reactor) peerStatsRoutine() {
	for {
		if !conR.IsRunning() {
			conR.Logger.Debug("Stopping peerStatsRoutine")
			return
		}

		select {
		case msg := <-conR.conS.statsMsgQueue:
			// Get peer
			peer := conR.Switch.Peers().Get(msg.PeerID)
			if peer == nil {
				conR.Logger.Debug("Attempt to update stats for non-existent peer",
					"peer", msg.PeerID)
				continue
			}
			// Get peer state
			ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
			if !ok {
				panic(fmt.Sprintf("Peer %v has no state", peer))
			}
			switch msg.Msg.(type) {
			case *VoteMessage:
				if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
					conR.Switch.MarkPeerAsGood(peer)
				}
			case *BlockPartMessage:
				if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
					conR.Switch.MarkPeerAsGood(peer)
				}
			}
		case <-conR.conS.Quit():
			return

		case <-conR.Quit():
			return
		}
	}
}

// String returns a string representation of the Reactor.
// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
// TODO: improve!
func (conR *Reactor) String() string {
	// better not to access shared variables
	return "ConsensusReactor" // conR.StringIndented("")
}

// StringIndented returns an indented string representation of the Reactor.
func (conR *Reactor) StringIndented(indent string) string {
	s := "ConsensusReactor{\n"
	s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
	for _, peer := range conR.Switch.Peers().List() {
		ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
		if !ok {
			panic(fmt.Sprintf("Peer %v has no state", peer))
		}
		s += indent + " " + ps.StringIndented(indent+" ") + "\n"
	}
	s += indent + "}"
	return s
}

func (conR *Reactor) updateFastSyncingMetric() {
	var fastSyncing float64
	if conR.fastSync {
		fastSyncing = 1
	} else {
		fastSyncing = 0
	}
	conR.metrics.FastSyncing.Set(fastSyncing)
}

// ReactorMetrics sets the metrics.
func ReactorMetrics(metrics *Metrics) ReactorOption {
	return func(conR *Reactor) { conR.metrics = metrics }
}

//-----------------------------------------------------------------------------

var (
	ErrPeerStateHeightRegression = errors.New("error peer state height regression")
	ErrPeerStateInvalidStartTime = errors.New("error peer state invalid startTime")
)

// PeerState contains the known state of a peer, including its connection and
// threadsafe access to its PeerRoundState.
// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
// Be mindful of what you Expose.
type PeerState struct {
	peer   p2p.Peer
	logger log.Logger

	mtx   sync.Mutex             // NOTE: Modify below using setters, never directly.
	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
	Stats *peerStateStats        `json:"stats"`       // Exposed.
}

// peerStateStats holds internal statistics for a peer.
type peerStateStats struct {
	Votes      int `json:"votes"`
	BlockParts int `json:"block_parts"`
}

func (pss peerStateStats) String() string {
	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
		pss.Votes, pss.BlockParts)
}

// NewPeerState returns a new PeerState for the given Peer.
func NewPeerState(peer p2p.Peer) *PeerState {
	return &PeerState{
		peer:   peer,
		logger: log.NewNopLogger(),
		PRS: cstypes.PeerRoundState{
			Round:              -1,
			ProposalPOLRound:   -1,
			LastCommitRound:    -1,
			CatchupCommitRound: -1,
		},
		Stats: &peerStateStats{},
	}
}

// SetLogger sets a logger on the peer state and returns the peer state itself.
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
	ps.logger = logger
	return ps
}

// GetRoundState returns a shallow copy of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	prs := ps.PRS // copy
	return &prs
}

// ToJSON returns a JSON representation of the PeerState, marshalled using go-amino.
func (ps *PeerState) ToJSON() ([]byte, error) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return cdc.MarshalJSON(ps)
}

// GetHeight returns an atomic snapshot of the PeerRoundState's height.
// It is used by the mempool to ensure peers are caught up before broadcasting new txs.
func (ps *PeerState) GetHeight() int64 {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return ps.PRS.Height
}

// SetHasProposal sets the given proposal as known for the peer.
func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
		return
	}

	if ps.PRS.Proposal {
		return
	}

	ps.PRS.Proposal = true

	// ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage
	if ps.PRS.ProposalBlockParts != nil {
		return
	}

	ps.PRS.ProposalBlockPartsHeader = proposal.BlockID.PartsHeader
	ps.PRS.ProposalBlockParts = bits.NewBitArray(proposal.BlockID.PartsHeader.Total)
	ps.PRS.ProposalPOLRound = proposal.POLRound
	ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
}

// InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.ProposalBlockParts != nil {
		return
	}

	ps.PRS.ProposalBlockPartsHeader = partsHeader
	ps.PRS.ProposalBlockParts = bits.NewBitArray(partsHeader.Total)
}

// SetHasProposalBlockPart sets the given block part index as known for the peer.
func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != height || ps.PRS.Round != round {
		return
	}

	ps.PRS.ProposalBlockParts.SetIndex(index, true)
}

// PickSendVote picks a vote and sends it to the peer.
// Returns true if a vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
	if vote, ok := ps.PickVoteToSend(votes); ok {
		msg := &VoteMessage{vote}
		ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
		if ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg)) {
			ps.SetHasVote(vote)
			return true
		}
		return false
	}
	return false
}

// PickVoteToSend picks a vote to send to the peer.
// Returns true if a vote was picked.
// NOTE: `votes` must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if votes.Size() == 0 {
		return nil, false
	}

	height, round, votesType, size := votes.GetHeight(), votes.GetRound(), types.SignedMsgType(votes.Type()), votes.Size()

	// Lazily set data using 'votes'.
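	// If 'votes' is a commit, track its round as the catchup-commit round and
	// size the peer's bit arrays to the validator set, then pick a random vote
	// the peer does not yet appear to have.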
	if votes.IsCommit() {
		ps.ensureCatchupCommitRound(height, round, size)
	}
	ps.ensureVoteBitArrays(height, size)

	psVotes := ps.getVoteBitArray(height, round, votesType)
	if psVotes == nil {
		return nil, false // Not something worth sending
	}
	if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
		return votes.GetByIndex(index), true
	}
	return nil, false
}

func (ps *PeerState) getVoteBitArray(height int64, round int, votesType types.SignedMsgType) *bits.BitArray {
	if !types.IsVoteTypeValid(votesType) {
		return nil
	}

	if ps.PRS.Height == height {
		if ps.PRS.Round == round {
			switch votesType {
			case types.PrevoteType:
				return ps.PRS.Prevotes
			case types.PrecommitType:
				return ps.PRS.Precommits
			}
		}
		if ps.PRS.CatchupCommitRound == round {
			switch votesType {
			case types.PrevoteType:
				return nil
			case types.PrecommitType:
				return ps.PRS.CatchupCommit
			}
		}
		if ps.PRS.ProposalPOLRound == round {
			switch votesType {
			case types.PrevoteType:
				return ps.PRS.ProposalPOL
			case types.PrecommitType:
				return nil
			}
		}
		return nil
	}
	if ps.PRS.Height == height+1 {
		if ps.PRS.LastCommitRound == round {
			switch votesType {
			case types.PrevoteType:
				return nil
			case types.PrecommitType:
				return ps.PRS.LastCommit
			}
		}
		return nil
	}
	return nil
}

// 'round': A round for which we have a +2/3 commit.
func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) {
	if ps.PRS.Height != height {
		return
	}
	/*
		NOTE: This is wrong, 'round' could change.
		e.g. if orig round is not the same as block LastCommit round.
		if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
			panic(fmt.Sprintf(
				"Conflicting CatchupCommitRound. Height: %v,
				Orig: %v,
				New: %v",
				height,
				ps.CatchupCommitRound,
				round))
		}
	*/
	if ps.PRS.CatchupCommitRound == round {
		return // Nothing to do!
	}
	ps.PRS.CatchupCommitRound = round
	if round == ps.PRS.Round {
		ps.PRS.CatchupCommit = ps.PRS.Precommits
	} else {
		ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
	}
}

// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
// what votes this peer has received.
// NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height.
func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.ensureVoteBitArrays(height, numValidators)
}

func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
	if ps.PRS.Height == height {
		if ps.PRS.Prevotes == nil {
			ps.PRS.Prevotes = bits.NewBitArray(numValidators)
		}
		if ps.PRS.Precommits == nil {
			ps.PRS.Precommits = bits.NewBitArray(numValidators)
		}
		if ps.PRS.CatchupCommit == nil {
			ps.PRS.CatchupCommit = bits.NewBitArray(numValidators)
		}
		if ps.PRS.ProposalPOL == nil {
			ps.PRS.ProposalPOL = bits.NewBitArray(numValidators)
		}
	} else if ps.PRS.Height == height+1 {
		if ps.PRS.LastCommit == nil {
			ps.PRS.LastCommit = bits.NewBitArray(numValidators)
		}
	}
}

// RecordVote increments internal vote-related statistics for this peer.
// It returns the total number of added votes.
func (ps *PeerState) RecordVote() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.Stats.Votes++

	return ps.Stats.Votes
}

// VotesSent returns the number of blocks for which the peer has sent us
// votes.
func (ps *PeerState) VotesSent() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return ps.Stats.Votes
}

// RecordBlockPart increments internal block-part-related statistics for this peer.
// It returns the total number of added block parts.
func (ps *PeerState) RecordBlockPart() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.Stats.BlockParts++
	return ps.Stats.BlockParts
}

// BlockPartsSent returns the number of useful block parts the peer has sent us.
func (ps *PeerState) BlockPartsSent() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return ps.Stats.BlockParts
}

// SetHasVote sets the given vote as known by the peer.
func (ps *PeerState) SetHasVote(vote *types.Vote) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}

func (ps *PeerState) setHasVote(height int64, round int, voteType types.SignedMsgType, index int) {
	logger := ps.logger.With(
		"peerH/R",
		fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
		"H/R",
		fmt.Sprintf("%d/%d", height, round))
	logger.Debug("setHasVote", "type", voteType, "index", index)

	// NOTE: some may be nil BitArrays -> no side effects.
	psVotes := ps.getVoteBitArray(height, round, voteType)
	if psVotes != nil {
		psVotes.SetIndex(index, true)
	}
}

// ApplyNewRoundStepMessage updates the peer state for the new round.
func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	// Ignore duplicates or decreases
	if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 {
		return
	}

	// Just remember these values.
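	// (Snapshot the previous height, round, and catchup-commit info so
	// height/round transitions can be detected below.)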
	psHeight := ps.PRS.Height
	psRound := ps.PRS.Round
	psCatchupCommitRound := ps.PRS.CatchupCommitRound
	psCatchupCommit := ps.PRS.CatchupCommit

	startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
	ps.PRS.Height = msg.Height
	ps.PRS.Round = msg.Round
	ps.PRS.Step = msg.Step
	ps.PRS.StartTime = startTime
	if psHeight != msg.Height || psRound != msg.Round {
		ps.PRS.Proposal = false
		ps.PRS.ProposalBlockPartsHeader = types.PartSetHeader{}
		ps.PRS.ProposalBlockParts = nil
		ps.PRS.ProposalPOLRound = -1
		ps.PRS.ProposalPOL = nil
		// We'll update the BitArray capacity later.
		ps.PRS.Prevotes = nil
		ps.PRS.Precommits = nil
	}
	if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
		// Peer caught up to CatchupCommitRound.
		// Preserve psCatchupCommit!
		// NOTE: We prefer to use prs.Precommits if
		// prs.Round matches prs.CatchupCommitRound.
		ps.PRS.Precommits = psCatchupCommit
	}
	if psHeight != msg.Height {
		// Shift Precommits to LastCommit.
		if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
			ps.PRS.LastCommitRound = msg.LastCommitRound
			ps.PRS.LastCommit = ps.PRS.Precommits
		} else {
			ps.PRS.LastCommitRound = msg.LastCommitRound
			ps.PRS.LastCommit = nil
		}
		// We'll update the BitArray capacity later.
		ps.PRS.CatchupCommitRound = -1
		ps.PRS.CatchupCommit = nil
	}
}

// ApplyNewValidBlockMessage updates the peer state for the new valid block.
func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}

	if ps.PRS.Round != msg.Round && !msg.IsCommit {
		return
	}

	ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader
	ps.PRS.ProposalBlockParts = msg.BlockParts
}

// ApplyProposalPOLMessage updates the peer state for the new proposal POL.
func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}
	if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound {
		return
	}

	// TODO: Merge onto existing ps.PRS.ProposalPOL?
	// We might have sent some prevotes in the meantime.
	ps.PRS.ProposalPOL = msg.ProposalPOL
}

// ApplyHasVoteMessage updates the peer state for the new vote.
func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return
	}

	ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}

// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes
// it claims to have for the corresponding BlockID.
// `ourVotes` is a BitArray of votes we have for msg.BlockID.
// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
// we conservatively overwrite ps's votes w/ msg.Votes.
func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *bits.BitArray) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
	if votes != nil {
		if ourVotes == nil {
			votes.Update(msg.Votes)
		} else {
			otherVotes := votes.Sub(ourVotes)
			hasVotes := otherVotes.Or(msg.Votes)
			votes.Update(hasVotes)
		}
	}
}

// String returns a string representation of the PeerState.
func (ps *PeerState) String() string {
	return ps.StringIndented("")
}

// StringIndented returns an indented string representation of the PeerState.
func (ps *PeerState) StringIndented(indent string) string {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return fmt.Sprintf(`PeerState{
%s Key %v
%s RoundState %v
%s Stats %v
%s}`,
		indent, ps.peer.ID(),
		indent, ps.PRS.StringIndented(indent+" "),
		indent, ps.Stats,
		indent)
}

//-----------------------------------------------------------------------------
// Messages

// Message is a message that can be sent and received on the Reactor.
type Message interface {
	ValidateBasic() error
}

func RegisterMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*Message)(nil), nil)
	cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
	cdc.RegisterConcrete(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage", nil)
	cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
	cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil)
	cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil)
	cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil)
	cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil)
	cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil)
	cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil)
}

func decodeMsg(bz []byte) (msg Message, err error) {
	if len(bz) > maxMsgSize {
		return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}

//-------------------------------------

// NewRoundStepMessage is sent for every step taken in the ConsensusState,
// i.e., for every height/round/step transition.
type NewRoundStepMessage struct {
	Height                int64
	Round                 int
	Step                  cstypes.RoundStepType
	SecondsSinceStartTime int
	LastCommitRound       int
}

// ValidateBasic performs basic validation.
func (m *NewRoundStepMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.Round < 0 {
		return errors.New("negative Round")
	}
	if !m.Step.IsValid() {
		return errors.New("invalid Step")
	}

	// NOTE: SecondsSinceStartTime may be negative

	if (m.Height == 1 && m.LastCommitRound != -1) ||
		(m.Height > 1 && m.LastCommitRound < -1) { // TODO: #2737 LastCommitRound should always be >= 0 for heights > 1
		return errors.New("invalid LastCommitRound (for 1st block: -1, for others: >= 0)")
	}
	return nil
}

// String returns a string representation.
func (m *NewRoundStepMessage) String() string {
	return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]",
		m.Height, m.Round, m.Step, m.LastCommitRound)
}

//-------------------------------------

// NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
// i.e., there is a Proposal for block B and +2/3 prevotes for block B in round r.
// If the block is also committed, then the IsCommit flag is set to true.
type NewValidBlockMessage struct {
	Height           int64
	Round            int
	BlockPartsHeader types.PartSetHeader
	BlockParts       *bits.BitArray
	IsCommit         bool
}

// ValidateBasic performs basic validation.
func (m *NewValidBlockMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.Round < 0 {
		return errors.New("negative Round")
	}
	if err := m.BlockPartsHeader.ValidateBasic(); err != nil {
		return fmt.Errorf("wrong BlockPartsHeader: %v", err)
	}
	if m.BlockParts.Size() == 0 {
		return errors.New("empty blockParts")
	}
	if m.BlockParts.Size() != m.BlockPartsHeader.Total {
		return fmt.Errorf("blockParts bit array size %d not equal to BlockPartsHeader.Total %d",
			m.BlockParts.Size(),
			m.BlockPartsHeader.Total)
	}
	if m.BlockParts.Size() > types.MaxBlockPartsCount {
		return errors.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount)
	}
	return nil
}

// String returns a string representation.
func (m *NewValidBlockMessage) String() string {
	return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]",
		m.Height, m.Round, m.BlockPartsHeader, m.BlockParts, m.IsCommit)
}

//-------------------------------------

// ProposalMessage is sent when a new block is proposed.
type ProposalMessage struct {
	Proposal *types.Proposal
}

// ValidateBasic performs basic validation.
func (m *ProposalMessage) ValidateBasic() error {
	return m.Proposal.ValidateBasic()
}

// String returns a string representation.
func (m *ProposalMessage) String() string {
	return fmt.Sprintf("[Proposal %v]", m.Proposal)
}

//-------------------------------------

// ProposalPOLMessage is sent when a previous proposal is re-proposed.
type ProposalPOLMessage struct {
	Height           int64
	ProposalPOLRound int
	ProposalPOL      *bits.BitArray
}

// ValidateBasic performs basic validation.
func (m *ProposalPOLMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.ProposalPOLRound < 0 {
		return errors.New("negative ProposalPOLRound")
	}
	if m.ProposalPOL.Size() == 0 {
		return errors.New("empty ProposalPOL bit array")
	}
	if m.ProposalPOL.Size() > types.MaxVotesCount {
		return errors.Errorf("ProposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount)
	}
	return nil
}

// String returns a string representation.
func (m *ProposalPOLMessage) String() string {
	return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL)
}

//-------------------------------------

// BlockPartMessage is sent when gossiping a piece of the proposed block.
type BlockPartMessage struct {
	Height int64
	Round  int
	Part   *types.Part
}

// ValidateBasic performs basic validation.
func (m *BlockPartMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.Round < 0 {
		return errors.New("negative Round")
	}
	if err := m.Part.ValidateBasic(); err != nil {
		return fmt.Errorf("wrong Part: %v", err)
	}
	return nil
}

// String returns a string representation.
func (m *BlockPartMessage) String() string {
	return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part)
}

//-------------------------------------

// VoteMessage is sent when voting for a proposal (or lack thereof).
type VoteMessage struct {
	Vote *types.Vote
}

// ValidateBasic performs basic validation.
func (m *VoteMessage) ValidateBasic() error {
	return m.Vote.ValidateBasic()
}

// String returns a string representation.
func (m *VoteMessage) String() string {
	return fmt.Sprintf("[Vote %v]", m.Vote)
}

//-------------------------------------

// HasVoteMessage is sent to indicate that a particular vote has been received.
type HasVoteMessage struct {
	Height int64
	Round  int
	Type   types.SignedMsgType
	Index  int
}

// ValidateBasic performs basic validation.
func (m *HasVoteMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.Round < 0 {
		return errors.New("negative Round")
	}
	if !types.IsVoteTypeValid(m.Type) {
		return errors.New("invalid Type")
	}
	if m.Index < 0 {
		return errors.New("negative Index")
	}
	return nil
}

// String returns a string representation.
func (m *HasVoteMessage) String() string {
	return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type)
}

//-------------------------------------

// VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes.
type VoteSetMaj23Message struct {
	Height  int64
	Round   int
	Type    types.SignedMsgType
	BlockID types.BlockID
}

// ValidateBasic performs basic validation.
func (m *VoteSetMaj23Message) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.Round < 0 {
		return errors.New("negative Round")
	}
	if !types.IsVoteTypeValid(m.Type) {
		return errors.New("invalid Type")
	}
	if err := m.BlockID.ValidateBasic(); err != nil {
		return fmt.Errorf("wrong BlockID: %v", err)
	}
	return nil
}

// String returns a string representation.
func (m *VoteSetMaj23Message) String() string {
	return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
}

//-------------------------------------

// VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID.
type VoteSetBitsMessage struct {
	Height  int64
	Round   int
	Type    types.SignedMsgType
	BlockID types.BlockID
	Votes   *bits.BitArray
}

// ValidateBasic performs basic validation.
func (m *VoteSetBitsMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.Round < 0 {
		return errors.New("negative Round")
	}
	if !types.IsVoteTypeValid(m.Type) {
		return errors.New("invalid Type")
	}
	if err := m.BlockID.ValidateBasic(); err != nil {
		return fmt.Errorf("wrong BlockID: %v", err)
	}
	// NOTE: Votes.Size() can be zero if the node does not have any votes.
	if m.Votes.Size() > types.MaxVotesCount {
		return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount)
	}
	return nil
}

// String returns a string representation.
func (m *VoteSetBitsMessage) String() string {
	return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
}

//-------------------------------------