github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/cmd/bootstrap/run/qc.go (about)

     1  package run
     2  
     3  import (
     4  	"fmt"
     5  
     6  	"github.com/onflow/crypto"
     7  	"github.com/rs/zerolog"
     8  
     9  	"github.com/onflow/flow-go/consensus/hotstuff"
    10  	"github.com/onflow/flow-go/consensus/hotstuff/committees"
    11  	"github.com/onflow/flow-go/consensus/hotstuff/model"
    12  	hotstuffSig "github.com/onflow/flow-go/consensus/hotstuff/signature"
    13  	"github.com/onflow/flow-go/consensus/hotstuff/validator"
    14  	"github.com/onflow/flow-go/consensus/hotstuff/verification"
    15  	"github.com/onflow/flow-go/consensus/hotstuff/votecollector"
    16  	"github.com/onflow/flow-go/model/bootstrap"
    17  	"github.com/onflow/flow-go/model/dkg"
    18  	"github.com/onflow/flow-go/model/flow"
    19  	"github.com/onflow/flow-go/module/local"
    20  )
    21  
// Participant is an internal consensus node paired with the private random
// beacon key share it obtained from the DKG. The embedded NodeInfo carries the
// node's identity and staking information.
type Participant struct {
	bootstrap.NodeInfo
	RandomBeaconPrivKey crypto.PrivateKey
}
    26  
// ParticipantData bundles everything needed to produce and verify a root QC:
// the internal participants holding private random beacon key shares, the
// public DKG participant lookup for all consensus nodes, and the DKG group
// public key.
type ParticipantData struct {
	Participants []Participant                           // internal nodes that will sign; assumed to be in DKG index order (see PublicBeaconKeys)
	Lookup       map[flow.Identifier]flow.DKGParticipant // per-node public DKG info (key share + index) for all consensus nodes
	GroupKey     crypto.PublicKey                        // DKG group public key
}
    32  
    33  // PublicBeaconKeys returns the nodes' individual public random-beacon keys (excluding
    34  // the group public key). The keys are returned in the same order as the nodes appear
    35  // in the Participants list, which must be the DKG index order.
    36  func (pd *ParticipantData) PublicBeaconKeys() []crypto.PublicKey {
    37  	keys := make([]crypto.PublicKey, len(pd.Participants))
    38  	for i, participant := range pd.Participants {
    39  		keys[i] = participant.RandomBeaconPrivKey.PublicKey()
    40  	}
    41  	return keys
    42  }
    43  
    44  func (pd *ParticipantData) Identities() flow.IdentityList {
    45  	nodes := make([]bootstrap.NodeInfo, 0, len(pd.Participants))
    46  	for _, participant := range pd.Participants {
    47  		nodes = append(nodes, participant.NodeInfo)
    48  	}
    49  	return bootstrap.ToIdentityList(nodes)
    50  }
    51  
// GenerateRootQC generates QC for root block, caller needs to provide votes for root QC and
// participantData to build the QC.
// NOTE: at the moment, we require private keys for one node because we are re-using the full business logic,
// which assumes that only consensus participants construct QCs, and those participants also produce votes.
//
// TODO: modularize QC construction code (and code to verify QC) to be instantiated without needing private keys.
// It returns (qc, nil, nil) if a QC can be constructed with enough votes, and there are no invalid votes
// It returns (qc, invalidVotes, nil) if there are some invalid votes, but a QC can still be constructed
// It returns (nil, invalidVotes, err) if no QC can be constructed due to not enough votes or any exception
func GenerateRootQC(block *flow.Block, votes []*model.Vote, participantData *ParticipantData, identities flow.IdentityList) (
	*flow.QuorumCertificate, // the constructed QC
	[]error, // return invalid votes error
	error, // exception or could not construct qc
) {
	// create consensus committee's state
	committee, err := committees.NewStaticCommittee(identities, flow.Identifier{}, participantData.Lookup, participantData.GroupKey)
	if err != nil {
		return nil, nil, err
	}

	// STEP 1: create VoteProcessor
	// The processor invokes the callback below once enough valid votes have been
	// collected; createdQC staying nil after processing signals failure.
	var createdQC *flow.QuorumCertificate
	hotBlock := model.GenesisBlockFromFlow(block.Header)
	processor, err := votecollector.NewBootstrapCombinedVoteProcessor(zerolog.Logger{}, committee, hotBlock, func(qc *flow.QuorumCertificate) {
		createdQC = qc
	})
	if err != nil {
		return nil, nil, fmt.Errorf("could not CombinedVoteProcessor processor: %w", err)
	}

	invalidVotes := make([]error, 0, len(votes))
	// STEP 2: feed the votes into the vote processor to create QC
	for _, vote := range votes {
		err := processor.Process(vote)

		// in case there are invalid votes, we continue to process more votes,
		// so that finalizing the block won't be interrupted by any invalid vote.
		// If not enough votes are collected, finalize will fail and exit anyway, because
		// no QC will be built.
		if err != nil {
			if model.IsInvalidVoteError(err) {
				invalidVotes = append(invalidVotes, err)
				continue
			}
			// any non-InvalidVote error is an exception -> abort immediately
			return nil, invalidVotes, fmt.Errorf("fail to process vote %v for block %v from signer %v: %w",
				vote.ID(),
				vote.BlockID,
				vote.SignerID,
				err)
		}
	}

	// the QC-created callback never fired -> not enough valid votes were supplied
	if createdQC == nil {
		return nil, invalidVotes, fmt.Errorf("QC is not created, total number of votes %v, expect to have 2/3 votes of %v participants",
			len(votes), len(identities))
	}

	// STEP 3: validate constructed QC
	val, err := createValidator(committee)
	if err != nil {
		return nil, invalidVotes, err
	}
	err = val.ValidateQC(createdQC)

	return createdQC, invalidVotes, err
}
   118  
   119  // GenerateRootBlockVotes generates votes for root block based on participantData
   120  func GenerateRootBlockVotes(block *flow.Block, participantData *ParticipantData) ([]*model.Vote, error) {
   121  	hotBlock := model.GenesisBlockFromFlow(block.Header)
   122  	n := len(participantData.Participants)
   123  	fmt.Println("Number of staked consensus nodes: ", n)
   124  
   125  	votes := make([]*model.Vote, 0, n)
   126  	for _, p := range participantData.Participants {
   127  		fmt.Println("generating votes from consensus participants: ", p.NodeID, p.Address, p.StakingPubKey().String())
   128  
   129  		// create the participant's local identity
   130  		keys, err := p.PrivateKeys()
   131  		if err != nil {
   132  			return nil, fmt.Errorf("could not get private keys for participant: %w", err)
   133  		}
   134  		me, err := local.New(p.Identity().IdentitySkeleton, keys.StakingKey)
   135  		if err != nil {
   136  			return nil, err
   137  		}
   138  
   139  		// create signer and use it to generate vote
   140  		beaconStore := hotstuffSig.NewStaticRandomBeaconSignerStore(p.RandomBeaconPrivKey)
   141  		vote, err := verification.NewCombinedSigner(me, beaconStore).CreateVote(hotBlock)
   142  		if err != nil {
   143  			return nil, err
   144  		}
   145  		votes = append(votes, vote)
   146  	}
   147  	return votes, nil
   148  }
   149  
   150  // createValidator creates validator that can validate votes and QC
   151  func createValidator(committee hotstuff.DynamicCommittee) (hotstuff.Validator, error) {
   152  	packer := hotstuffSig.NewConsensusSigDataPacker(committee)
   153  	verifier := verification.NewCombinedVerifier(committee, packer)
   154  
   155  	hotstuffValidator := validator.New(committee, verifier)
   156  	return hotstuffValidator, nil
   157  }
   158  
   159  // GenerateQCParticipantData generates QC participant data used to create the
   160  // random beacon and staking signatures on the QC.
   161  //
   162  // allNodes must be in the same order that was used when running the DKG.
   163  func GenerateQCParticipantData(allNodes, internalNodes []bootstrap.NodeInfo, dkgData dkg.DKGData) (*ParticipantData, error) {
   164  
   165  	// stakingNodes can include external validators, so it can be longer than internalNodes
   166  	if len(allNodes) < len(internalNodes) {
   167  		return nil, fmt.Errorf("need at least as many staking public keys as private keys (pub=%d, priv=%d)", len(allNodes), len(internalNodes))
   168  	}
   169  
   170  	// length of DKG participants needs to match stakingNodes, since we run DKG for external and internal validators
   171  	if len(allNodes) != len(dkgData.PrivKeyShares) {
   172  		return nil, fmt.Errorf("need exactly the same number of staking public keys as DKG private participants")
   173  	}
   174  
   175  	qcData := &ParticipantData{}
   176  
   177  	participantLookup := make(map[flow.Identifier]flow.DKGParticipant)
   178  
   179  	// the index here is important - we assume allNodes is in the same order as the DKG
   180  	for i := 0; i < len(allNodes); i++ {
   181  		// assign a node to a DGKdata entry, using the canonical ordering
   182  		node := allNodes[i]
   183  		participantLookup[node.NodeID] = flow.DKGParticipant{
   184  			KeyShare: dkgData.PubKeyShares[i],
   185  			Index:    uint(i),
   186  		}
   187  	}
   188  
   189  	// the QC will be signed by everyone in internalNodes
   190  	for _, node := range internalNodes {
   191  
   192  		if node.NodeID == flow.ZeroID {
   193  			return nil, fmt.Errorf("node id cannot be zero")
   194  		}
   195  
   196  		if node.Weight == 0 {
   197  			return nil, fmt.Errorf("node (id=%s) cannot have 0 weight", node.NodeID)
   198  		}
   199  
   200  		dkgParticipant, ok := participantLookup[node.NodeID]
   201  		if !ok {
   202  			return nil, fmt.Errorf("nonexistannt node id (%x) in participant lookup", node.NodeID)
   203  		}
   204  		dkgIndex := dkgParticipant.Index
   205  
   206  		qcData.Participants = append(qcData.Participants, Participant{
   207  			NodeInfo:            node,
   208  			RandomBeaconPrivKey: dkgData.PrivKeyShares[dkgIndex],
   209  		})
   210  	}
   211  
   212  	qcData.Lookup = participantLookup
   213  	qcData.GroupKey = dkgData.PubGroupKey
   214  
   215  	return qcData, nil
   216  }