github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/consensus/hotstuff/votecollector/combined_vote_processor_v3.go (about) 1 package votecollector 2 3 import ( 4 "errors" 5 "fmt" 6 7 "github.com/onflow/crypto" 8 "github.com/rs/zerolog" 9 "go.uber.org/atomic" 10 11 "github.com/onflow/flow-go/consensus/hotstuff" 12 "github.com/onflow/flow-go/consensus/hotstuff/model" 13 "github.com/onflow/flow-go/consensus/hotstuff/signature" 14 "github.com/onflow/flow-go/consensus/hotstuff/verification" 15 "github.com/onflow/flow-go/model/encoding" 16 "github.com/onflow/flow-go/model/flow" 17 msig "github.com/onflow/flow-go/module/signature" 18 ) 19 20 /* **************** Base-Factory for CombinedVoteProcessors ***************** */ 21 22 // combinedVoteProcessorFactoryBaseV3 is a `votecollector.baseFactory` for creating 23 // CombinedVoteProcessors, holding all needed dependencies. 24 // combinedVoteProcessorFactoryBaseV3 is intended to be used for the main consensus. 25 // CAUTION: 26 // this base factory only creates the VerifyingVoteProcessor for the given block. 27 // It does _not_ check the proposer's vote for its own block, i.e. it does _not_ 28 // implement `hotstuff.VoteProcessorFactory`. This base factory should be wrapped 29 // by `votecollector.VoteProcessorFactory` which adds the logic to verify 30 // the proposer's vote (decorator pattern). 31 // nolint:unused 32 type combinedVoteProcessorFactoryBaseV3 struct { 33 committee hotstuff.DynamicCommittee 34 onQCCreated hotstuff.OnQCCreated 35 packer hotstuff.Packer 36 } 37 38 // Create creates CombinedVoteProcessorV3 for processing votes for the given block. 39 // Caller must treat all errors as exceptions 40 // nolint:unused 41 func (f *combinedVoteProcessorFactoryBaseV3) Create(log zerolog.Logger, block *model.Block) (hotstuff.VerifyingVoteProcessor, error) { 42 allParticipants, err := f.committee.IdentitiesByBlock(block.BlockID) 43 if err != nil { 44 return nil, fmt.Errorf("error retrieving consensus participants at block %v: %w", block.BlockID, err) 45 } 46 47 // message that has to be verified against aggregated signature 48 msg := verification.MakeVoteMessage(block.View, block.BlockID) 49 50 // prepare the staking public keys of participants 51 stakingKeys := make([]crypto.PublicKey, 0, len(allParticipants)) 52 for _, participant := range allParticipants { 53 stakingKeys = append(stakingKeys, participant.StakingPubKey) 54 } 55 56 stakingSigAggtor, err := signature.NewWeightedSignatureAggregator(allParticipants, stakingKeys, msg, msig.ConsensusVoteTag) 57 if err != nil { 58 return nil, fmt.Errorf("could not create aggregator for staking signatures: %w", err) 59 } 60 61 dkg, err := f.committee.DKG(block.View) 62 if err != nil { 63 return nil, fmt.Errorf("could not get DKG info at block %v: %w", block.BlockID, err) 64 } 65 66 // prepare the random beacon public keys of participants 67 beaconKeys := make([]crypto.PublicKey, 0, len(allParticipants)) 68 for _, participant := range allParticipants { 69 pk, err := dkg.KeyShare(participant.NodeID) 70 if err != nil { 71 return nil, fmt.Errorf("could not get random beacon key share for %x: %w", participant.NodeID, err) 72 } 73 beaconKeys = append(beaconKeys, pk) 74 } 75 76 rbSigAggtor, err := signature.NewWeightedSignatureAggregator(allParticipants, beaconKeys, msg, msig.RandomBeaconTag) 77 if err != nil { 78 return nil, fmt.Errorf("could not create aggregator for thershold signatures: %w", err) 79 } 80 81 threshold := msig.RandomBeaconThreshold(int(dkg.Size())) 82 randomBeaconInspector, err := signature.NewRandomBeaconInspector(dkg.GroupKey(), beaconKeys, threshold, msg) 83 if err != nil { 84 return nil, fmt.Errorf("could not create random beacon inspector: %w", err) 85 } 86 87 rbRector := signature.NewRandomBeaconReconstructor(dkg, randomBeaconInspector) 88 minRequiredWeight, err := f.committee.QuorumThresholdForView(block.View) 89 if err != nil { 90 return nil, fmt.Errorf("could not get weight threshold for view %d: %w", block.View, err) 91 } 92 93 return &CombinedVoteProcessorV3{ 94 log: log.With().Hex("block_id", block.BlockID[:]).Logger(), 95 block: block, 96 stakingSigAggtor: stakingSigAggtor, 97 rbSigAggtor: rbSigAggtor, 98 rbRector: rbRector, 99 onQCCreated: f.onQCCreated, 100 packer: f.packer, 101 minRequiredWeight: minRequiredWeight, 102 done: *atomic.NewBool(false), 103 }, nil 104 } 105 106 /* ****************** CombinedVoteProcessorV3 Implementation ****************** */ 107 108 // CombinedVoteProcessorV3 implements the hotstuff.VerifyingVoteProcessor interface. 109 // It processes votes from the main consensus committee, where participants vote in 110 // favour of a block by proving either their staking key signature or their random 111 // beacon signature. In the former case, the participant only contributes to HotStuff 112 // progress; while in the latter case, the voter also contributes to running the 113 // random beacon. Concurrency safe. 114 type CombinedVoteProcessorV3 struct { 115 log zerolog.Logger 116 block *model.Block 117 stakingSigAggtor hotstuff.WeightedSignatureAggregator 118 rbSigAggtor hotstuff.WeightedSignatureAggregator 119 rbRector hotstuff.RandomBeaconReconstructor 120 onQCCreated hotstuff.OnQCCreated 121 packer hotstuff.Packer 122 minRequiredWeight uint64 123 done atomic.Bool 124 } 125 126 var _ hotstuff.VerifyingVoteProcessor = (*CombinedVoteProcessorV3)(nil) 127 128 // Block returns block that is part of proposal that we are processing votes for. 129 func (p *CombinedVoteProcessorV3) Block() *model.Block { 130 return p.block 131 } 132 133 // Status returns status of this vote processor, it's always verifying. 134 func (p *CombinedVoteProcessorV3) Status() hotstuff.VoteCollectorStatus { 135 return hotstuff.VoteCollectorStatusVerifying 136 } 137 138 // Process performs processing of single vote in concurrent safe way. This function is implemented to be 139 // called by multiple goroutines at the same time. Supports processing of both staking and random beacon signatures. 140 // Design of this function is event driven: as soon as we collect enough signatures to create a QC we will immediately do so 141 // and submit it via callback for further processing. 142 // Expected error returns during normal operations: 143 // * VoteForIncompatibleBlockError - submitted vote for incompatible block 144 // * VoteForIncompatibleViewError - submitted vote for incompatible view 145 // * model.InvalidVoteError - submitted vote with invalid signature 146 // * model.DuplicatedSignerError - vote from a signer whose vote was previously already processed 147 // All other errors should be treated as exceptions. 148 // 149 // CAUTION: implementation is NOT (yet) BFT 150 // Explanation: for correctness, we require that no voter can be counted repeatedly. However, 151 // CombinedVoteProcessorV3 relies on the `VoteCollector`'s `votesCache` filter out all votes but the first for 152 // every signerID. However, we have the edge case, where we still feed the proposers vote twice into the 153 // `VerifyingVoteProcessor` (once as part of a cached vote, once as an individual vote). This can be exploited 154 // by a byzantine proposer to be erroneously counted twice, which would lead to a safety fault. 155 // 156 // TODO (suggestion): I think it would be worth-while to include a second `votesCache` into the `CombinedVoteProcessorV3`. 157 // Thereby, `CombinedVoteProcessorV3` inherently guarantees correctness of the QCs it produces without relying on 158 // external conditions (making the code more modular, less interdependent and thereby easier to maintain). The 159 // runtime overhead is marginal: For `votesCache` to add 500 votes (concurrently with 20 threads) takes about 160 // 0.25ms. This runtime overhead is neglectable and a good tradeoff for the gain in maintainability and code clarity. 161 func (p *CombinedVoteProcessorV3) Process(vote *model.Vote) error { 162 err := EnsureVoteForBlock(vote, p.block) 163 if err != nil { 164 return fmt.Errorf("received incompatible vote %v: %w", vote.ID(), err) 165 } 166 167 // Vote Processing state machine 168 if p.done.Load() { 169 return nil 170 } 171 sigType, sig, err := msig.DecodeSingleSig(vote.SigData) 172 if err != nil { 173 if errors.Is(err, msig.ErrInvalidSignatureFormat) { 174 return model.NewInvalidVoteErrorf(vote, "could not decode signature: %w", err) 175 } 176 return fmt.Errorf("unexpected error decoding vote %v: %w", vote.ID(), err) 177 } 178 179 switch sigType { 180 181 case encoding.SigTypeStaking: 182 err := p.stakingSigAggtor.Verify(vote.SignerID, sig) 183 if err != nil { 184 if model.IsInvalidSignerError(err) { 185 return model.NewInvalidVoteErrorf(vote, "vote %x for view %d is not signed by an authorized consensus participant: %w", 186 vote.ID(), vote.View, err) 187 } 188 if errors.Is(err, model.ErrInvalidSignature) { 189 return model.NewInvalidVoteErrorf(vote, "vote %x for view %d has an invalid staking signature: %w", 190 vote.ID(), vote.View, err) 191 } 192 return fmt.Errorf("internal error checking signature validity for vote %v: %w", vote.ID(), err) 193 } 194 if p.done.Load() { 195 return nil 196 } 197 _, err = p.stakingSigAggtor.TrustedAdd(vote.SignerID, sig) 198 if err != nil { 199 // we don't expect any errors here during normal operation, as we previously checked 200 // for duplicated votes from the same signer and verified the signer+signature 201 return fmt.Errorf("adding the signature to staking aggregator failed for vote %v: %w", vote.ID(), err) 202 } 203 204 case encoding.SigTypeRandomBeacon: 205 err := p.rbSigAggtor.Verify(vote.SignerID, sig) 206 if err != nil { 207 if model.IsInvalidSignerError(err) { 208 return model.NewInvalidVoteErrorf(vote, "vote %x for view %d is not from an authorized random beacon participant: %w", 209 vote.ID(), vote.View, err) 210 } 211 if errors.Is(err, model.ErrInvalidSignature) { 212 return model.NewInvalidVoteErrorf(vote, "vote %x for view %d has an invalid random beacon signature: %w", 213 vote.ID(), vote.View, err) 214 } 215 return fmt.Errorf("internal error checking signature validity for vote %v: %w", vote.ID(), err) 216 } 217 218 if p.done.Load() { 219 return nil 220 } 221 // Add signatures to `rbSigAggtor` and `rbRector`: we don't expect any errors during normal operation, 222 // as we previously checked for duplicated votes from the same signer and verified the signer+signature 223 _, err = p.rbSigAggtor.TrustedAdd(vote.SignerID, sig) 224 if err != nil { 225 return fmt.Errorf("unexpected exception adding signature from vote %v to random beacon aggregator: %w", vote.ID(), err) 226 } 227 _, err = p.rbRector.TrustedAdd(vote.SignerID, sig) 228 if err != nil { 229 return fmt.Errorf("unexpected exception adding signature from vote %v to random beacon reconstructor: %w", vote.ID(), err) 230 } 231 232 default: 233 return model.NewInvalidVoteErrorf(vote, "invalid signature type %d: %w", sigType, model.NewInvalidFormatErrorf("")) 234 } 235 236 // checking of conditions for building QC are satisfied 237 if p.stakingSigAggtor.TotalWeight()+p.rbSigAggtor.TotalWeight() < p.minRequiredWeight { 238 return nil 239 } 240 if !p.rbRector.EnoughShares() { 241 return nil 242 } 243 244 // At this point, we have enough signatures to build a QC. Another routine 245 // might just be at this point. To avoid duplicate work, only one routine can pass: 246 if !p.done.CompareAndSwap(false, true) { 247 return nil 248 } 249 250 // Our algorithm for checking votes and adding them to the aggregators should 251 // guarantee that we are _always_ able to successfully construct a QC when we 252 // reach this point. A failure implies that the VoteProcessor's internal state is corrupted. 253 qc, err := p.buildQC() 254 if err != nil { 255 return fmt.Errorf("internal error constructing QC from votes: %w", err) 256 } 257 258 p.log.Info(). 259 Uint64("view", qc.View). 260 Hex("signers", qc.SignerIndices). 261 Msg("new QC has been created") 262 263 p.onQCCreated(qc) 264 265 return nil 266 } 267 268 // buildQC performs aggregation and reconstruction of signatures when we have collected enough 269 // signatures for building a QC. This function is run only once by a single worker. 270 // Any error should be treated as exception. 271 func (p *CombinedVoteProcessorV3) buildQC() (*flow.QuorumCertificate, error) { 272 // STEP 1: aggregate staking signatures (if there are any) 273 // * It is possible that all replicas signed with their random beacon keys. 274 // Per Convention, we represent an empty set of staking signers as 275 // `stakingSigners` and `aggregatedStakingSig` both being zero-length 276 // (here, we use `nil`). 277 // * If it has _not collected any_ signatures, `stakingSigAggtor.Aggregate()` 278 // errors with a `model.InsufficientSignaturesError`. We shortcut this case, 279 // and only call `Aggregate`, if the `stakingSigAggtor` has collected signatures 280 // with non-zero weight (i.e. at least one signature was collected). 281 var stakingSigners []flow.Identifier // nil (zero value) represents empty set of staking signers 282 var aggregatedStakingSig []byte // nil (zero value) for empty set of staking signers 283 if p.stakingSigAggtor.TotalWeight() > 0 { 284 var err error 285 stakingSigners, aggregatedStakingSig, err = p.stakingSigAggtor.Aggregate() 286 if err != nil { 287 return nil, fmt.Errorf("unexpected error aggregating staking signatures: %w", err) 288 } 289 } 290 291 // STEP 2: reconstruct random beacon group sig and aggregate random beacon sig shares 292 // Note: A valid random beacon group sig is required for QC validity. Our logic guarantees 293 // that we always collect the minimally required number (non-zero) of signature shares. 294 beaconSigners, aggregatedRandomBeaconSig, err := p.rbSigAggtor.Aggregate() 295 if err != nil { 296 return nil, fmt.Errorf("could not aggregate random beacon signatures: %w", err) 297 } 298 reconstructedBeaconSig, err := p.rbRector.Reconstruct() 299 if err != nil { 300 return nil, fmt.Errorf("could not reconstruct random beacon group signature: %w", err) 301 } 302 303 // STEP 3: generate BlockSignatureData and serialize it 304 blockSigData := &hotstuff.BlockSignatureData{ 305 StakingSigners: stakingSigners, 306 RandomBeaconSigners: beaconSigners, 307 AggregatedStakingSig: aggregatedStakingSig, 308 AggregatedRandomBeaconSig: aggregatedRandomBeaconSig, 309 ReconstructedRandomBeaconSig: reconstructedBeaconSig, 310 } 311 signerIndices, sigData, err := p.packer.Pack(p.block.View, blockSigData) 312 if err != nil { 313 return nil, fmt.Errorf("could not pack the block sig data: %w", err) 314 } 315 316 return &flow.QuorumCertificate{ 317 View: p.block.View, 318 BlockID: p.block.BlockID, 319 SignerIndices: signerIndices, 320 SigData: sigData, 321 }, nil 322 }