github.com/koko1123/flow-go-1@v0.29.6/cmd/bootstrap/dkg/dkg.go (about)

     1  package dkg
     2  
     3  import (
     4  	"fmt"
     5  	"sync"
     6  	"time"
     7  
     8  	"github.com/rs/zerolog/log"
     9  
    10  	"github.com/onflow/flow-go/crypto"
    11  	model "github.com/koko1123/flow-go-1/model/dkg"
    12  	"github.com/koko1123/flow-go-1/module/signature"
    13  )
    14  
// RunDKG simulates a distributed DKG protocol by running the protocol locally
// and generating the DKG output info
func RunDKG(n int, seeds [][]byte) (model.DKGData, error) {

	// one seed is required per participant
	if n != len(seeds) {
		return model.DKGData{}, fmt.Errorf("n needs to match the number of seeds (%v != %v)", n, len(seeds))
	}

	// separate the case with one node: no message exchange is needed,
	// the keys are generated directly from the single seed
	if n == 1 {
		sk, pk, pkGroup, err := thresholdSignKeyGenOneNode(seeds[0])
		if err != nil {
			return model.DKGData{}, fmt.Errorf("run dkg failed: %w", err)
		}

		dkgData := model.DKGData{
			PrivKeyShares: sk,
			PubGroupKey:   pkGroup,
			PubKeyShares:  pk,
		}

		return dkgData, nil
	}

	processors := make([]localDKGProcessor, 0, n)

	// create the message channels for node communication
	// (buffered so a sender never blocks on a busy receiver)
	chans := make([]chan *message, n)
	for i := 0; i < n; i++ {
		chans[i] = make(chan *message, 5*n)
	}

	// create processors for all nodes; each processor sees all channels so
	// it can send to any peer, and reads only from chans[current]
	for i := 0; i < n; i++ {
		processors = append(processors, localDKGProcessor{
			current: i,
			chans:   chans,
		})
	}

	// create DKG instances for all nodes
	for i := 0; i < n; i++ {
		var err error
		processors[i].dkg, err = crypto.NewJointFeldman(n,
			signature.RandomBeaconThreshold(n), i, &processors[i])
		if err != nil {
			return model.DKGData{}, err
		}
	}

	var wg sync.WaitGroup
	phase := 0

	// start DKG in all nodes
	// start listening on the channels
	wg.Add(n)
	for i := 0; i < n; i++ {
		// start dkg could also run in parallel
		// but they are run sequentially to avoid having non-deterministic
		// output (the PRG used is common)
		err := processors[i].dkg.Start(seeds[i])
		if err != nil {
			return model.DKGData{}, err
		}
		go dkgRunChan(&processors[i], &wg, phase)
	}
	phase++

	// sync the two timeouts and start the next phase
	// (each dkgRunChan goroutine calls wg.Done() when its phase times out,
	// so wg.Wait() acts as a barrier between protocol phases)
	for ; phase <= 2; phase++ {
		wg.Wait()
		wg.Add(n)
		for i := 0; i < n; i++ {
			go dkgRunChan(&processors[i], &wg, phase)
		}
	}

	// synchronize the main thread to end all DKGs
	wg.Wait()

	// collect the private key share of each node; the group public key and
	// the public key shares are identical across nodes, so node 0's are used
	skShares := make([]crypto.PrivateKey, 0, n)

	for _, processor := range processors {
		skShares = append(skShares, processor.privkey)
	}

	dkgData := model.DKGData{
		PrivKeyShares: skShares,
		PubGroupKey:   processors[0].pubgroupkey,
		PubKeyShares:  processors[0].pubkeys,
	}

	return dkgData, nil
}
   109  
// localDKGProcessor implements DKGProcessor interface
type localDKGProcessor struct {
	current     int               // index of this node among the participants
	dkg         crypto.DKGState   // underlying DKG instance driven by this processor
	chans       []chan *message   // one inbound channel per node; this node reads chans[current]
	privkey     crypto.PrivateKey // this node's private key share, set when the DKG ends
	pubgroupkey crypto.PublicKey  // group public key, set when the DKG ends
	pubkeys     []crypto.PublicKey // public key shares of all nodes, set when the DKG ends
}
   119  
// message channel types: a message is either broadcast to all nodes or
// sent privately to a single destination
const (
	broadcast int = iota
	private
)
   124  
// message is a DKG protocol message exchanged between the simulated nodes.
type message struct {
	orig    int    // index of the sending node
	channel int    // broadcast or private
	data    []byte // opaque DKG payload
}
   130  
   131  // PrivateSend sends a message from one node to another
   132  func (proc *localDKGProcessor) PrivateSend(dest int, data []byte) {
   133  	newMsg := &message{proc.current, private, data}
   134  	proc.chans[dest] <- newMsg
   135  }
   136  
   137  // Broadcast a message from one node to all nodes
   138  func (proc *localDKGProcessor) Broadcast(data []byte) {
   139  	newMsg := &message{proc.current, broadcast, data}
   140  	for i := 0; i < len(proc.chans); i++ {
   141  		if i != proc.current {
   142  			proc.chans[i] <- newMsg
   143  		}
   144  	}
   145  }
   146  
// Disqualify a node
// It is a no-op in this local simulation: all nodes are honest, so
// disqualification is never acted upon.
func (proc *localDKGProcessor) Disqualify(node int, log string) {
}
   150  
// FlagMisbehavior flags a node for misbehaviour
// It is a no-op in this local simulation: all nodes are honest, so
// misbehaviour is never flagged.
func (proc *localDKGProcessor) FlagMisbehavior(node int, log string) {
}
   154  
   155  // dkgRunChan simulates processing incoming messages by a node
   156  // it assumes proc.dkg is already running
   157  func dkgRunChan(proc *localDKGProcessor, sync *sync.WaitGroup, phase int) {
   158  	for {
   159  		select {
   160  		case newMsg := <-proc.chans[proc.current]:
   161  			var err error
   162  			if newMsg.channel == private {
   163  				err = proc.dkg.HandlePrivateMsg(newMsg.orig, newMsg.data)
   164  			} else {
   165  				err = proc.dkg.HandleBroadcastMsg(newMsg.orig, newMsg.data)
   166  			}
   167  			if err != nil {
   168  				log.Fatal().Err(err).Msg("failed to receive DKG mst")
   169  			}
   170  		// if timeout, stop and finalize
   171  		case <-time.After(1 * time.Second):
   172  			switch phase {
   173  			case 0:
   174  				err := proc.dkg.NextTimeout()
   175  				if err != nil {
   176  					log.Fatal().Err(err).Msg("failed to wait for next timeout")
   177  				}
   178  			case 1:
   179  				err := proc.dkg.NextTimeout()
   180  				if err != nil {
   181  					log.Fatal().Err(err).Msg("failed to wait for next timeout")
   182  				}
   183  			case 2:
   184  				privkey, pubgroupkey, pubkeys, err := proc.dkg.End()
   185  				if err != nil {
   186  					log.Fatal().Err(err).Msg("end dkg error should be nit")
   187  				}
   188  				if privkey == nil {
   189  					log.Fatal().Msg("privkey was nil")
   190  				}
   191  
   192  				proc.privkey = privkey
   193  				proc.pubgroupkey = pubgroupkey
   194  				proc.pubkeys = pubkeys
   195  			}
   196  			sync.Done()
   197  			return
   198  		}
   199  	}
   200  }
   201  
   202  // RunFastKG is an alternative to RunDKG that runs much faster by using a centralized threshold signature key generation.
   203  func RunFastKG(n int, seed []byte) (model.DKGData, error) {
   204  
   205  	if n == 1 {
   206  		sk, pk, pkGroup, err := thresholdSignKeyGenOneNode(seed)
   207  		if err != nil {
   208  			return model.DKGData{}, fmt.Errorf("fast KeyGen failed: %w", err)
   209  		}
   210  
   211  		dkgData := model.DKGData{
   212  			PrivKeyShares: sk,
   213  			PubGroupKey:   pkGroup,
   214  			PubKeyShares:  pk,
   215  		}
   216  		return dkgData, nil
   217  	}
   218  
   219  	skShares, pkShares, pkGroup, err := crypto.BLSThresholdKeyGen(int(n),
   220  		signature.RandomBeaconThreshold(int(n)), seed)
   221  	if err != nil {
   222  		return model.DKGData{}, fmt.Errorf("fast KeyGen failed: %w", err)
   223  	}
   224  
   225  	dkgData := model.DKGData{
   226  		PrivKeyShares: skShares,
   227  		PubGroupKey:   pkGroup,
   228  		PubKeyShares:  pkShares,
   229  	}
   230  
   231  	return dkgData, nil
   232  }
   233  
   234  // simulates DKG with one single node
   235  func thresholdSignKeyGenOneNode(seed []byte) ([]crypto.PrivateKey, []crypto.PublicKey, crypto.PublicKey, error) {
   236  	sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed)
   237  	if err != nil {
   238  		return nil, nil, nil, fmt.Errorf("KeyGen with one node failed: %w", err)
   239  	}
   240  	pk := sk.PublicKey()
   241  	return []crypto.PrivateKey{sk},
   242  		[]crypto.PublicKey{pk},
   243  		pk,
   244  		nil
   245  }