github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/cmd/consensus/main.go (about)

     1  package main
     2  
     3  import (
     4  	"encoding/json"
     5  	"errors"
     6  	"fmt"
     7  	"os"
     8  	"path/filepath"
     9  	"time"
    10  
    11  	"github.com/spf13/pflag"
    12  
    13  	client "github.com/onflow/flow-go-sdk/access/grpc"
    14  	"github.com/onflow/flow-go-sdk/crypto"
    15  	"github.com/onflow/flow-go/cmd"
    16  	"github.com/onflow/flow-go/cmd/util/cmd/common"
    17  	"github.com/onflow/flow-go/consensus"
    18  	"github.com/onflow/flow-go/consensus/hotstuff"
    19  	"github.com/onflow/flow-go/consensus/hotstuff/blockproducer"
    20  	"github.com/onflow/flow-go/consensus/hotstuff/committees"
    21  	"github.com/onflow/flow-go/consensus/hotstuff/cruisectl"
    22  	"github.com/onflow/flow-go/consensus/hotstuff/notifications"
    23  	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
    24  	"github.com/onflow/flow-go/consensus/hotstuff/pacemaker/timeout"
    25  	"github.com/onflow/flow-go/consensus/hotstuff/persister"
    26  	hotsignature "github.com/onflow/flow-go/consensus/hotstuff/signature"
    27  	"github.com/onflow/flow-go/consensus/hotstuff/timeoutcollector"
    28  	"github.com/onflow/flow-go/consensus/hotstuff/verification"
    29  	"github.com/onflow/flow-go/consensus/hotstuff/votecollector"
    30  	recovery "github.com/onflow/flow-go/consensus/recovery/protocol"
    31  	"github.com/onflow/flow-go/engine/common/requester"
    32  	synceng "github.com/onflow/flow-go/engine/common/synchronization"
    33  	"github.com/onflow/flow-go/engine/consensus/approvals/tracker"
    34  	"github.com/onflow/flow-go/engine/consensus/compliance"
    35  	dkgeng "github.com/onflow/flow-go/engine/consensus/dkg"
    36  	"github.com/onflow/flow-go/engine/consensus/ingestion"
    37  	"github.com/onflow/flow-go/engine/consensus/matching"
    38  	"github.com/onflow/flow-go/engine/consensus/message_hub"
    39  	"github.com/onflow/flow-go/engine/consensus/sealing"
    40  	"github.com/onflow/flow-go/fvm/systemcontracts"
    41  	"github.com/onflow/flow-go/model/bootstrap"
    42  	"github.com/onflow/flow-go/model/encodable"
    43  	"github.com/onflow/flow-go/model/flow"
    44  	"github.com/onflow/flow-go/model/flow/filter"
    45  	"github.com/onflow/flow-go/module"
    46  	"github.com/onflow/flow-go/module/buffer"
    47  	builder "github.com/onflow/flow-go/module/builder/consensus"
    48  	"github.com/onflow/flow-go/module/chainsync"
    49  	chmodule "github.com/onflow/flow-go/module/chunks"
    50  	dkgmodule "github.com/onflow/flow-go/module/dkg"
    51  	"github.com/onflow/flow-go/module/epochs"
    52  	finalizer "github.com/onflow/flow-go/module/finalizer/consensus"
    53  	"github.com/onflow/flow-go/module/mempool"
    54  	consensusMempools "github.com/onflow/flow-go/module/mempool/consensus"
    55  	"github.com/onflow/flow-go/module/mempool/stdmap"
    56  	"github.com/onflow/flow-go/module/metrics"
    57  	msig "github.com/onflow/flow-go/module/signature"
    58  	"github.com/onflow/flow-go/module/updatable_configs"
    59  	"github.com/onflow/flow-go/module/util"
    60  	"github.com/onflow/flow-go/module/validation"
    61  	"github.com/onflow/flow-go/network/channels"
    62  	"github.com/onflow/flow-go/state/protocol"
    63  	badgerState "github.com/onflow/flow-go/state/protocol/badger"
    64  	"github.com/onflow/flow-go/state/protocol/blocktimer"
    65  	"github.com/onflow/flow-go/state/protocol/events/gadgets"
    66  	protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state"
    67  	"github.com/onflow/flow-go/storage"
    68  	bstorage "github.com/onflow/flow-go/storage/badger"
    69  	"github.com/onflow/flow-go/utils/io"
    70  )
    71  
    72  func main() {
    73  
    74  	var (
    75  		guaranteeLimit                        uint
    76  		resultLimit                           uint
    77  		approvalLimit                         uint
    78  		sealLimit                             uint
    79  		pendingReceiptsLimit                  uint
    80  		minInterval                           time.Duration
    81  		maxInterval                           time.Duration
    82  		maxSealPerBlock                       uint
    83  		maxGuaranteePerBlock                  uint
    84  		hotstuffMinTimeout                    time.Duration
    85  		hotstuffTimeoutAdjustmentFactor       float64
    86  		hotstuffHappyPathMaxRoundFailures     uint64
    87  		chunkAlpha                            uint
    88  		requiredApprovalsForSealVerification  uint
    89  		requiredApprovalsForSealConstruction  uint
    90  		emergencySealing                      bool
    91  		dkgMessagingEngineConfig              = dkgeng.DefaultMessagingEngineConfig()
    92  		cruiseCtlConfig                       = cruisectl.DefaultConfig()
    93  		cruiseCtlFallbackProposalDurationFlag time.Duration
    94  		cruiseCtlMinViewDurationFlag          time.Duration
    95  		cruiseCtlMaxViewDurationFlag          time.Duration
    96  		cruiseCtlEnabledFlag                  bool
    97  		startupTimeString                     string
    98  		startupTime                           time.Time
    99  
   100  		// DKG contract client
   101  		machineAccountInfo *bootstrap.NodeMachineAccountInfo
   102  		flowClientConfigs  []*common.FlowClientConfig
   103  		insecureAccessAPI  bool
   104  		accessNodeIDS      []string
   105  
   106  		err                   error
   107  		mutableState          protocol.ParticipantState
   108  		beaconPrivateKey      *encodable.RandomBeaconPrivKey
   109  		guarantees            mempool.Guarantees
   110  		receipts              mempool.ExecutionTree
   111  		seals                 mempool.IncorporatedResultSeals
   112  		pendingReceipts       mempool.PendingReceipts
   113  		receiptRequester      *requester.Engine
   114  		syncCore              *chainsync.Core
   115  		comp                  *compliance.Engine
   116  		hot                   module.HotStuff
   117  		conMetrics            module.ConsensusMetrics
   118  		machineAccountMetrics module.MachineAccountMetrics
   119  		mainMetrics           module.HotstuffMetrics
   120  		receiptValidator      module.ReceiptValidator
   121  		chunkAssigner         *chmodule.ChunkAssigner
   122  		followerDistributor   *pubsub.FollowerDistributor
   123  		dkgBrokerTunnel       *dkgmodule.BrokerTunnel
   124  		blockTimer            protocol.BlockTimer
   125  		proposalDurProvider   hotstuff.ProposalDurationProvider
   126  		committee             *committees.Consensus
   127  		epochLookup           *epochs.EpochLookup
   128  		hotstuffModules       *consensus.HotstuffModules
   129  		dkgState              *bstorage.DKGState
   130  		safeBeaconKeys        *bstorage.SafeBeaconPrivateKeys
   131  		getSealingConfigs     module.SealingConfigsGetter
   132  	)
   133  	var deprecatedFlagBlockRateDelay time.Duration
   134  
   135  	nodeBuilder := cmd.FlowNode(flow.RoleConsensus.String())
   136  	nodeBuilder.ExtraFlags(func(flags *pflag.FlagSet) {
   137  		flags.UintVar(&guaranteeLimit, "guarantee-limit", 1000, "maximum number of guarantees in the memory pool")
   138  		flags.UintVar(&resultLimit, "result-limit", 10000, "maximum number of execution results in the memory pool")
   139  		flags.UintVar(&approvalLimit, "approval-limit", 1000, "maximum number of result approvals in the memory pool")
   140  		// the default value is able to buffer as many seals as would be generated over ~12 hours. In case it
   141  		// ever gets full, the node will simply crash instead of employing complex ejection logic.
   142  		flags.UintVar(&sealLimit, "seal-limit", 44200, "maximum number of block seals in the memory pool")
   143  		flags.UintVar(&pendingReceiptsLimit, "pending-receipts-limit", 10000, "maximum number of pending receipts in the mempool")
   144  		flags.DurationVar(&minInterval, "min-interval", time.Millisecond, "the minimum amount of time between two blocks")
   145  		flags.DurationVar(&maxInterval, "max-interval", 90*time.Second, "the maximum amount of time between two blocks")
   146  		flags.UintVar(&maxSealPerBlock, "max-seal-per-block", 100, "the maximum number of seals to be included in a block")
   147  		flags.UintVar(&maxGuaranteePerBlock, "max-guarantee-per-block", 100, "the maximum number of collection guarantees to be included in a block")
   148  		flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 1045*time.Millisecond, "the lower timeout bound for the hotstuff pacemaker, this is also used as initial timeout")
   149  		flags.Float64Var(&hotstuffTimeoutAdjustmentFactor, "hotstuff-timeout-adjustment-factor", timeout.DefaultConfig.TimeoutAdjustmentFactor, "adjustment of timeout duration in case of time out event")
   150  		flags.Uint64Var(&hotstuffHappyPathMaxRoundFailures, "hotstuff-happy-path-max-round-failures", timeout.DefaultConfig.HappyPathMaxRoundFailures, "number of failed rounds before first timeout increase")
   151  		flags.DurationVar(&cruiseCtlFallbackProposalDurationFlag, "cruise-ctl-fallback-proposal-duration", cruiseCtlConfig.FallbackProposalDelay.Load(), "the proposal duration value to use when the controller is disabled, or in epoch fallback mode. In those modes, this value has the same as the old `--block-rate-delay`")
   152  		flags.DurationVar(&cruiseCtlMinViewDurationFlag, "cruise-ctl-min-view-duration", cruiseCtlConfig.MinViewDuration.Load(), "the lower bound of authority for the controller, when active. This is the smallest amount of time a view is allowed to take.")
   153  		flags.DurationVar(&cruiseCtlMaxViewDurationFlag, "cruise-ctl-max-view-duration", cruiseCtlConfig.MaxViewDuration.Load(), "the upper bound of authority for the controller when active. This is the largest amount of time a view is allowed to take.")
   154  		flags.BoolVar(&cruiseCtlEnabledFlag, "cruise-ctl-enabled", cruiseCtlConfig.Enabled.Load(), "whether the block time controller is enabled; when disabled, the FallbackProposalDelay is used")
   155  		flags.UintVar(&chunkAlpha, "chunk-alpha", flow.DefaultChunkAssignmentAlpha, "number of verifiers that should be assigned to each chunk")
   156  		flags.UintVar(&requiredApprovalsForSealVerification, "required-verification-seal-approvals", flow.DefaultRequiredApprovalsForSealValidation, "minimum number of approvals that are required to verify a seal")
   157  		flags.UintVar(&requiredApprovalsForSealConstruction, "required-construction-seal-approvals", flow.DefaultRequiredApprovalsForSealConstruction, "minimum number of approvals that are required to construct a seal")
   158  		flags.BoolVar(&emergencySealing, "emergency-sealing-active", flow.DefaultEmergencySealingActive, "(de)activation of emergency sealing")
   159  		flags.BoolVar(&insecureAccessAPI, "insecure-access-api", false, "required if insecure GRPC connection should be used")
   160  		flags.StringSliceVar(&accessNodeIDS, "access-node-ids", []string{}, fmt.Sprintf("array of access node IDs sorted in priority order where the first ID in this array will get the first connection attempt and each subsequent ID after serves as a fallback. Minimum length %d. Use '*' for all IDs in protocol state.", common.DefaultAccessNodeIDSMinimum))
   161  		flags.DurationVar(&dkgMessagingEngineConfig.RetryBaseWait, "dkg-messaging-engine-retry-base-wait", dkgMessagingEngineConfig.RetryBaseWait, "the inter-attempt wait time for the first attempt (base of exponential retry)")
   162  		flags.Uint64Var(&dkgMessagingEngineConfig.RetryMax, "dkg-messaging-engine-retry-max", dkgMessagingEngineConfig.RetryMax, "the maximum number of retry attempts for an outbound DKG message")
   163  		flags.Uint64Var(&dkgMessagingEngineConfig.RetryJitterPercent, "dkg-messaging-engine-retry-jitter-percent", dkgMessagingEngineConfig.RetryJitterPercent, "the percentage of jitter to apply to each inter-attempt wait time")
   164  		flags.StringVar(&startupTimeString, "hotstuff-startup-time", cmd.NotSet, "specifies date and time (in ISO 8601 format) after which the consensus participant may enter the first view (e.g 1996-04-24T15:04:05-07:00)")
   165  		flags.DurationVar(&deprecatedFlagBlockRateDelay, "block-rate-delay", 0, "[deprecated in v0.30; Jun 2023] Use `cruise-ctl-*` flags instead, this flag has no effect and will eventually be removed")
   166  	}).ValidateFlags(func() error {
   167  		nodeBuilder.Logger.Info().Str("startup_time_str", startupTimeString).Msg("got startup_time_str")
   168  		if startupTimeString != cmd.NotSet {
   169  			t, err := time.Parse(time.RFC3339, startupTimeString)
   170  			if err != nil {
   171  				return fmt.Errorf("invalid start-time value: %w", err)
   172  			}
   173  			startupTime = t
   174  			nodeBuilder.Logger.Info().Time("startup_time", startupTime).Msg("got startup_time")
   175  		}
   176  		// convert local flag variables to atomic config variables, for dynamically updatable fields
   177  		if cruiseCtlEnabledFlag != cruiseCtlConfig.Enabled.Load() {
   178  			cruiseCtlConfig.Enabled.Store(cruiseCtlEnabledFlag)
   179  		}
   180  		if cruiseCtlFallbackProposalDurationFlag != cruiseCtlConfig.FallbackProposalDelay.Load() {
   181  			cruiseCtlConfig.FallbackProposalDelay.Store(cruiseCtlFallbackProposalDurationFlag)
   182  		}
   183  		if cruiseCtlMinViewDurationFlag != cruiseCtlConfig.MinViewDuration.Load() {
   184  			cruiseCtlConfig.MinViewDuration.Store(cruiseCtlMinViewDurationFlag)
   185  		}
   186  		if cruiseCtlMaxViewDurationFlag != cruiseCtlConfig.MaxViewDuration.Load() {
   187  			cruiseCtlConfig.MaxViewDuration.Store(cruiseCtlMaxViewDurationFlag)
   188  		}
   189  		// log a warning about deprecated flags
   190  		if deprecatedFlagBlockRateDelay > 0 {
   191  			nodeBuilder.Logger.Warn().Msg("A deprecated flag was specified (--block-rate-delay). This flag is deprecated as of v0.30 (Jun 2023), has no effect, and will eventually be removed.")
   192  		}
   193  		return nil
   194  	})
   195  
   196  	if err = nodeBuilder.Initialize(); err != nil {
   197  		nodeBuilder.Logger.Fatal().Err(err).Send()
   198  	}
   199  
   200  	nodeBuilder.
   201  		PreInit(cmd.DynamicStartPreInit).
   202  		ValidateRootSnapshot(badgerState.ValidRootSnapshotContainsEntityExpiryRange).
   203  		Module("machine account config", func(node *cmd.NodeConfig) error {
   204  			machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID)
   205  			return err
   206  		}).
   207  		Module("consensus node metrics", func(node *cmd.NodeConfig) error {
   208  			conMetrics = metrics.NewConsensusCollector(node.Tracer, node.MetricsRegisterer)
   209  			return nil
   210  		}).
   211  		Module("machine account metrics", func(node *cmd.NodeConfig) error {
   212  			machineAccountMetrics = metrics.NewMachineAccountCollector(node.MetricsRegisterer, machineAccountInfo.FlowAddress())
   213  			return nil
   214  		}).
   215  		Module("dkg state", func(node *cmd.NodeConfig) error {
   216  			dkgState, err = bstorage.NewDKGState(node.Metrics.Cache, node.SecretsDB)
   217  			return err
   218  		}).
   219  		Module("beacon keys", func(node *cmd.NodeConfig) error {
   220  			safeBeaconKeys = bstorage.NewSafeBeaconPrivateKeys(dkgState)
   221  			return nil
   222  		}).
   223  		Module("updatable sealing config", func(node *cmd.NodeConfig) error {
   224  			setter, err := updatable_configs.NewSealingConfigs(
   225  				requiredApprovalsForSealConstruction,
   226  				requiredApprovalsForSealVerification,
   227  				chunkAlpha,
   228  				emergencySealing,
   229  			)
   230  			if err != nil {
   231  				return err
   232  			}
   233  
   234  			// update the getter with the setter, so other modules can only get, but not set
   235  			getSealingConfigs = setter
   236  
    237  			// The admin tool is the only instance that has access to the setter interface; therefore, it is
    238  			// the only module that can change this config.
   239  			err = node.ConfigManager.RegisterUintConfig("consensus-required-approvals-for-sealing",
   240  				setter.RequireApprovalsForSealConstructionDynamicValue,
   241  				setter.SetRequiredApprovalsForSealingConstruction)
   242  			return err
   243  		}).
   244  		Module("mutable follower state", func(node *cmd.NodeConfig) error {
   245  			// For now, we only support state implementations from package badger.
   246  			// If we ever support different implementations, the following can be replaced by a type-aware factory
   247  			state, ok := node.State.(*badgerState.State)
   248  			if !ok {
   249  				return fmt.Errorf("only implementations of type badger.State are currently supported but read-only state has type %T", node.State)
   250  			}
   251  
   252  			chunkAssigner, err = chmodule.NewChunkAssigner(chunkAlpha, node.State)
   253  			if err != nil {
   254  				return fmt.Errorf("could not instantiate assignment algorithm for chunk verification: %w", err)
   255  			}
   256  
   257  			receiptValidator = validation.NewReceiptValidator(
   258  				node.State,
   259  				node.Storage.Headers,
   260  				node.Storage.Index,
   261  				node.Storage.Results,
   262  				node.Storage.Seals)
   263  
   264  			sealValidator := validation.NewSealValidator(
   265  				node.State,
   266  				node.Storage.Headers,
   267  				node.Storage.Index,
   268  				node.Storage.Results,
   269  				node.Storage.Seals,
   270  				chunkAssigner,
   271  				getSealingConfigs,
   272  				conMetrics)
   273  
   274  			blockTimer, err = blocktimer.NewBlockTimer(minInterval, maxInterval)
   275  			if err != nil {
   276  				return err
   277  			}
   278  
   279  			mutableState, err = badgerState.NewFullConsensusState(
   280  				node.Logger,
   281  				node.Tracer,
   282  				node.ProtocolEvents,
   283  				state,
   284  				node.Storage.Index,
   285  				node.Storage.Payloads,
   286  				blockTimer,
   287  				receiptValidator,
   288  				sealValidator,
   289  			)
   290  			return err
   291  		}).
   292  		Module("random beacon key", func(node *cmd.NodeConfig) error {
   293  			// If this node was a participant in a spork, their beacon key for the
   294  			// first epoch was generated during the bootstrapping process and is
   295  			// specified in a private bootstrapping file. We load their key and
   296  			// store it in the db for the initial post-spork epoch for use going
   297  			// forward.
   298  			//
   299  			// If this node was not a participant in a spork, they joined at an
   300  			// epoch boundary, so they have no beacon key file (they will generate
   301  			// their first beacon private key through the DKG in the EpochSetup phase
   302  			// prior to their first epoch as network participant).
   303  
   304  			rootSnapshot := node.State.AtBlockID(node.FinalizedRootBlock.ID())
   305  			isSporkRoot, err := protocol.IsSporkRootSnapshot(rootSnapshot)
   306  			if err != nil {
   307  				return fmt.Errorf("could not check whether root snapshot is spork root: %w", err)
   308  			}
   309  			if !isSporkRoot {
   310  				node.Logger.Info().Msg("node starting from mid-spork snapshot, will not read spork random beacon key file")
   311  				return nil
   312  			}
   313  
   314  			// If the node has a beacon key file, then save it to the secrets database
   315  			// as the beacon key for the epoch of the root snapshot.
   316  			beaconPrivateKey, err = loadBeaconPrivateKey(node.BaseConfig.BootstrapDir, node.NodeID)
   317  			if errors.Is(err, os.ErrNotExist) {
   318  				return fmt.Errorf("node is starting from spork root snapshot, but does not have spork random beacon key file: %w", err)
   319  			}
   320  			if err != nil {
   321  				return fmt.Errorf("could not load beacon key file: %w", err)
   322  			}
   323  
   324  			rootEpoch := node.State.AtBlockID(node.FinalizedRootBlock.ID()).Epochs().Current()
   325  			epochCounter, err := rootEpoch.Counter()
   326  			if err != nil {
   327  				return fmt.Errorf("could not get root epoch counter: %w", err)
   328  			}
   329  
   330  			// confirm the beacon key file matches the canonical public keys
   331  			rootDKG, err := rootEpoch.DKG()
   332  			if err != nil {
   333  				return fmt.Errorf("could not get dkg for root epoch: %w", err)
   334  			}
   335  			myBeaconPublicKeyShare, err := rootDKG.KeyShare(node.NodeID)
   336  			if err != nil {
   337  				return fmt.Errorf("could not get my beacon public key share for root epoch: %w", err)
   338  			}
   339  
   340  			if !myBeaconPublicKeyShare.Equals(beaconPrivateKey.PrivateKey.PublicKey()) {
   341  				return fmt.Errorf("configured beacon key is inconsistent with this node's canonical public beacon key (%s!=%s)",
   342  					beaconPrivateKey.PrivateKey.PublicKey(),
   343  					myBeaconPublicKeyShare)
   344  			}
   345  
   346  			// store my beacon key for the first epoch post-spork
   347  			err = dkgState.InsertMyBeaconPrivateKey(epochCounter, beaconPrivateKey.PrivateKey)
   348  			if err != nil && !errors.Is(err, storage.ErrAlreadyExists) {
   349  				return err
   350  			}
   351  			// mark the root DKG as successful, so it is considered safe to use the key
   352  			err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateSuccess)
   353  			if err != nil && !errors.Is(err, storage.ErrAlreadyExists) {
   354  				return err
   355  			}
   356  
   357  			return nil
   358  		}).
   359  		Module("collection guarantees mempool", func(node *cmd.NodeConfig) error {
   360  			guarantees, err = stdmap.NewGuarantees(guaranteeLimit)
   361  			return err
   362  		}).
   363  		Module("execution receipts mempool", func(node *cmd.NodeConfig) error {
   364  			receipts = consensusMempools.NewExecutionTree()
   365  			// registers size method of backend for metrics
   366  			err = node.Metrics.Mempool.Register(metrics.ResourceReceipt, receipts.Size)
   367  			if err != nil {
   368  				return fmt.Errorf("could not register backend metric: %w", err)
   369  			}
   370  			return nil
   371  		}).
   372  		Module("block seals mempool", func(node *cmd.NodeConfig) error {
   373  			// use a custom ejector, so we don't eject seals that would break
   374  			// the chain of seals
   375  			rawMempool := stdmap.NewIncorporatedResultSeals(sealLimit)
   376  			multipleReceiptsFilterMempool := consensusMempools.NewIncorporatedResultSeals(rawMempool, node.Storage.Receipts)
   377  			seals, err = consensusMempools.NewExecStateForkSuppressor(
   378  				multipleReceiptsFilterMempool,
   379  				consensusMempools.LogForkAndCrash(node.Logger),
   380  				node.DB,
   381  				node.Logger,
   382  			)
   383  			if err != nil {
   384  				return fmt.Errorf("failed to wrap seals mempool into ExecStateForkSuppressor: %w", err)
   385  			}
   386  			err = node.Metrics.Mempool.Register(metrics.ResourcePendingIncorporatedSeal, seals.Size)
   387  			return nil
   388  		}).
   389  		Module("pending receipts mempool", func(node *cmd.NodeConfig) error {
   390  			pendingReceipts = stdmap.NewPendingReceipts(node.Storage.Headers, pendingReceiptsLimit)
   391  			return nil
   392  		}).
   393  		Module("hotstuff main metrics", func(node *cmd.NodeConfig) error {
   394  			mainMetrics = metrics.NewHotstuffCollector(node.RootChainID)
   395  			return nil
   396  		}).
   397  		Module("sync core", func(node *cmd.NodeConfig) error {
   398  			syncCore, err = chainsync.New(node.Logger, node.SyncCoreConfig, metrics.NewChainSyncCollector(node.RootChainID), node.RootChainID)
   399  			return err
   400  		}).
   401  		Module("follower distributor", func(node *cmd.NodeConfig) error {
   402  			followerDistributor = pubsub.NewFollowerDistributor()
   403  			return nil
   404  		}).
   405  		Module("sdk client connection options", func(node *cmd.NodeConfig) error {
   406  			anIDS, err := common.ValidateAccessNodeIDSFlag(accessNodeIDS, node.RootChainID, node.State.Sealed())
   407  			if err != nil {
   408  				return fmt.Errorf("failed to validate flag --access-node-ids %w", err)
   409  			}
   410  
   411  			flowClientConfigs, err = common.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed())
   412  			if err != nil {
   413  				return fmt.Errorf("failed to prepare flow client connection configs for each access node id %w", err)
   414  			}
   415  
   416  			return nil
   417  		}).
   418  		Component("machine account config validator", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
   419  			// @TODO use fallback logic for flowClient similar to DKG/QC contract clients
   420  			flowClient, err := common.FlowClient(flowClientConfigs[0])
   421  			if err != nil {
   422  				return nil, fmt.Errorf("failed to get flow client connection option for access node (0): %s %w", flowClientConfigs[0].AccessAddress, err)
   423  			}
   424  
   425  			// disable balance checks for transient networks, which do not have transaction fees
   426  			var opts []epochs.MachineAccountValidatorConfigOption
   427  			if node.RootChainID.Transient() {
   428  				opts = append(opts, epochs.WithoutBalanceChecks)
   429  			}
   430  			validator, err := epochs.NewMachineAccountConfigValidator(
   431  				node.Logger,
   432  				flowClient,
   433  				flow.RoleConsensus,
   434  				*machineAccountInfo,
   435  				machineAccountMetrics,
   436  				opts...,
   437  			)
   438  			return validator, err
   439  		}).
   440  		Component("sealing engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
   441  
   442  			sealingTracker := tracker.NewSealingTracker(node.Logger, node.Storage.Headers, node.Storage.Receipts, seals)
   443  
   444  			e, err := sealing.NewEngine(
   445  				node.Logger,
   446  				node.Tracer,
   447  				conMetrics,
   448  				node.Metrics.Engine,
   449  				node.Metrics.Mempool,
   450  				sealingTracker,
   451  				node.EngineRegistry,
   452  				node.Me,
   453  				node.Storage.Headers,
   454  				node.Storage.Payloads,
   455  				node.Storage.Results,
   456  				node.Storage.Index,
   457  				node.State,
   458  				node.Storage.Seals,
   459  				chunkAssigner,
   460  				seals,
   461  				getSealingConfigs,
   462  			)
   463  
   464  			// subscribe for finalization events from hotstuff
   465  			followerDistributor.AddOnBlockFinalizedConsumer(e.OnFinalizedBlock)
   466  			followerDistributor.AddOnBlockIncorporatedConsumer(e.OnBlockIncorporated)
   467  
   468  			return e, err
   469  		}).
   470  		Component("matching engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
   471  			receiptRequester, err = requester.New(
   472  				node.Logger,
   473  				node.Metrics.Engine,
   474  				node.EngineRegistry,
   475  				node.Me,
   476  				node.State,
   477  				channels.RequestReceiptsByBlockID,
   478  				filter.HasRole[flow.Identity](flow.RoleExecution),
   479  				func() flow.Entity { return &flow.ExecutionReceipt{} },
   480  				requester.WithRetryInitial(2*time.Second),
   481  				requester.WithRetryMaximum(30*time.Second),
   482  			)
   483  			if err != nil {
   484  				return nil, err
   485  			}
   486  
   487  			core := matching.NewCore(
   488  				node.Logger,
   489  				node.Tracer,
   490  				conMetrics,
   491  				node.Metrics.Mempool,
   492  				node.State,
   493  				node.Storage.Headers,
   494  				node.Storage.Receipts,
   495  				receipts,
   496  				pendingReceipts,
   497  				seals,
   498  				receiptValidator,
   499  				receiptRequester,
   500  				matching.DefaultConfig(),
   501  			)
   502  
   503  			e, err := matching.NewEngine(
   504  				node.Logger,
   505  				node.EngineRegistry,
   506  				node.Me,
   507  				node.Metrics.Engine,
   508  				node.Metrics.Mempool,
   509  				node.State,
   510  				node.Storage.Receipts,
   511  				node.Storage.Index,
   512  				core,
   513  			)
   514  			if err != nil {
   515  				return nil, err
   516  			}
   517  
   518  			// subscribe engine to inputs from other node-internal components
   519  			receiptRequester.WithHandle(e.HandleReceipt)
   520  			followerDistributor.AddOnBlockFinalizedConsumer(e.OnFinalizedBlock)
   521  			followerDistributor.AddOnBlockIncorporatedConsumer(e.OnBlockIncorporated)
   522  
   523  			return e, err
   524  		}).
   525  		Component("ingestion engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
   526  			core := ingestion.NewCore(
   527  				node.Logger,
   528  				node.Tracer,
   529  				node.Metrics.Mempool,
   530  				node.State,
   531  				node.Storage.Headers,
   532  				guarantees,
   533  			)
   534  
   535  			ing, err := ingestion.New(
   536  				node.Logger,
   537  				node.Metrics.Engine,
   538  				node.EngineRegistry,
   539  				node.Me,
   540  				core,
   541  			)
   542  
   543  			return ing, err
   544  		}).
   545  		Component("hotstuff committee", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
   546  			committee, err = committees.NewConsensusCommittee(node.State, node.Me.NodeID())
   547  			node.ProtocolEvents.AddConsumer(committee)
   548  			return committee, err
   549  		}).
   550  		Component("epoch lookup", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
   551  			epochLookup, err = epochs.NewEpochLookup(node.State)
   552  			node.ProtocolEvents.AddConsumer(epochLookup)
   553  			return epochLookup, err
   554  		}).
   555  		Component("hotstuff modules", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
   556  			// initialize the block finalizer
   557  			finalize := finalizer.NewFinalizer(
   558  				node.DB,
   559  				node.Storage.Headers,
   560  				mutableState,
   561  				node.Tracer,
   562  				finalizer.WithCleanup(finalizer.CleanupMempools(
   563  					node.Metrics.Mempool,
   564  					conMetrics,
   565  					node.Storage.Payloads,
   566  					guarantees,
   567  					seals,
   568  				)),
   569  			)
   570  
			// wrap Main consensus committee with metrics
			wrappedCommittee := committees.NewMetricsWrapper(committee, mainMetrics) // wrapper for measuring time spent determining consensus committee relations

			// key store resolving the node's random beacon private key per epoch
			beaconKeyStore := hotsignature.NewEpochAwareRandomBeaconKeyStore(epochLookup, safeBeaconKeys)

			// initialize the combined signer for hotstuff
			var signer hotstuff.Signer
			signer = verification.NewCombinedSigner(
				node.Me,
				beaconKeyStore,
			)
			signer = verification.NewMetricsWrapper(signer, mainMetrics) // wrapper for measuring time spent with crypto-related operations

			// create consensus logger
			logger := createLogger(node.Logger, node.RootChainID)

			// telemetry and slashing-violation consumers observe hotstuff events
			telemetryConsumer := notifications.NewTelemetryConsumer(logger)
			slashingViolationConsumer := notifications.NewSlashingViolationsConsumer(nodeBuilder.Logger)
			followerDistributor.AddProposalViolationConsumer(slashingViolationConsumer)

			// initialize a logging notifier for hotstuff
			notifier := createNotifier(
				logger,
				mainMetrics,
			)

			// consumers must be registered before the modules below start emitting events
			notifier.AddParticipantConsumer(telemetryConsumer)
			notifier.AddCommunicatorConsumer(telemetryConsumer)
			notifier.AddFinalizationConsumer(telemetryConsumer)
			notifier.AddFollowerConsumer(followerDistributor)

			// initialize the persister
			persist := persister.New(node.DB, node.RootChainID)

			finalizedBlock, err := node.State.Final().Head()
			if err != nil {
				return nil, err
			}

			// Forks tracks the in-memory block tree above the latest finalized block
			forks, err := consensus.NewForks(
				finalizedBlock,
				node.Storage.Headers,
				finalize,
				notifier,
				node.FinalizedRootBlock.Header,
				node.RootQC,
			)
			if err != nil {
				return nil, err
			}

			// create producer and connect it to consumers
			voteAggregationDistributor := pubsub.NewVoteAggregationDistributor()
			voteAggregationDistributor.AddVoteCollectorConsumer(telemetryConsumer)
			voteAggregationDistributor.AddVoteAggregationViolationConsumer(slashingViolationConsumer)

			validator := consensus.NewValidator(mainMetrics, wrappedCommittee)
			voteProcessorFactory := votecollector.NewCombinedVoteProcessorFactory(wrappedCommittee, voteAggregationDistributor.OnQcConstructedFromVotes)
			// votes for views at or below the finalized view can no longer affect finalization
			lowestViewForVoteProcessing := finalizedBlock.View + 1
			voteAggregator, err := consensus.NewVoteAggregator(
				logger,
				mainMetrics,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				lowestViewForVoteProcessing,
				voteAggregationDistributor,
				voteProcessorFactory,
				followerDistributor)
			if err != nil {
				return nil, fmt.Errorf("could not initialize vote aggregator: %w", err)
			}

			// create producer and connect it to consumers
			timeoutAggregationDistributor := pubsub.NewTimeoutAggregationDistributor()
			timeoutAggregationDistributor.AddTimeoutCollectorConsumer(telemetryConsumer)
			timeoutAggregationDistributor.AddTimeoutAggregationViolationConsumer(slashingViolationConsumer)

			timeoutProcessorFactory := timeoutcollector.NewTimeoutProcessorFactory(
				logger,
				timeoutAggregationDistributor,
				committee,
				validator,
				msig.ConsensusTimeoutTag,
			)
			timeoutAggregator, err := consensus.NewTimeoutAggregator(
				logger,
				mainMetrics,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				notifier,
				timeoutProcessorFactory,
				timeoutAggregationDistributor,
				lowestViewForVoteProcessing,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize timeout aggregator: %w", err)
			}

			// bundle all hotstuff dependencies; consumed by later components
			// (cruise control, consensus participant, compliance, message hub)
			hotstuffModules = &consensus.HotstuffModules{
				Notifier:                    notifier,
				Committee:                   wrappedCommittee,
				Signer:                      signer,
				Persist:                     persist,
				VoteCollectorDistributor:    voteAggregationDistributor.VoteCollectorDistributor,
				TimeoutCollectorDistributor: timeoutAggregationDistributor.TimeoutCollectorDistributor,
				Forks:                       forks,
				Validator:                   validator,
				VoteAggregator:              voteAggregator,
				TimeoutAggregator:           timeoutAggregator,
			}

			// only the aggregators are startable; the remaining modules are passive
			return util.MergeReadyDone(voteAggregator, timeoutAggregator), nil
   683  		}).
		Component("block rate cruise control", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// resume the controller from the latest persisted HotStuff view
			livenessData, err := hotstuffModules.Persist.GetLivenessData()
			if err != nil {
				return nil, err
			}
			ctl, err := cruisectl.NewBlockTimeController(node.Logger, metrics.NewCruiseCtlMetrics(), cruiseCtlConfig, node.State, livenessData.CurrentView)
			if err != nil {
				return nil, err
			}
			// the controller supplies dynamic proposal durations to the consensus participant
			proposalDurProvider = ctl
			hotstuffModules.Notifier.AddOnBlockIncorporatedConsumer(ctl.OnBlockIncorporated)
			node.ProtocolEvents.AddConsumer(ctl)

			// set up admin commands for dynamically updating configs
			err = node.ConfigManager.RegisterBoolConfig("cruise-ctl-enabled", cruiseCtlConfig.GetEnabled, cruiseCtlConfig.SetEnabled)
			if err != nil {
				return nil, err
			}
			err = node.ConfigManager.RegisterDurationConfig("cruise-ctl-fallback-proposal-duration", cruiseCtlConfig.GetFallbackProposalDuration, cruiseCtlConfig.SetFallbackProposalDuration)
			if err != nil {
				return nil, err
			}
			err = node.ConfigManager.RegisterDurationConfig("cruise-ctl-min-view-duration", cruiseCtlConfig.GetMinViewDuration, cruiseCtlConfig.SetMinViewDuration)
			if err != nil {
				return nil, err
			}
			err = node.ConfigManager.RegisterDurationConfig("cruise-ctl-max-view-duration", cruiseCtlConfig.GetMaxViewDuration, cruiseCtlConfig.SetMaxViewDuration)
			if err != nil {
				return nil, err
			}

			return ctl, nil
		}).
		Component("consensus participant", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// protocol state mutator used by the block builder to evolve the
			// protocol state with each candidate block
			mutableProtocolState := protocol_state.NewMutableProtocolState(
				node.Storage.EpochProtocolState,
				node.Storage.ProtocolKVStore,
				node.State.Params(),
				node.Storage.Headers,
				node.Storage.Results,
				node.Storage.Setups,
				node.Storage.EpochCommits,
			)
			// initialize the block builder
			var build module.Builder
			build, err = builder.NewBuilder(
				node.Metrics.Mempool,
				node.DB,
				mutableState,
				node.Storage.Headers,
				node.Storage.Seals,
				node.Storage.Index,
				node.Storage.Blocks,
				node.Storage.Results,
				node.Storage.Receipts,
				mutableProtocolState,
				guarantees,
				seals,
				receipts,
				node.Tracer,
				builder.WithBlockTimer(blockTimer),
				builder.WithMaxSealCount(maxSealPerBlock),
				builder.WithMaxGuaranteeCount(maxGuaranteePerBlock),
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialized block builder: %w", err)
			}
			build = blockproducer.NewMetricsWrapper(build, mainMetrics) // wrapper for measuring time spent building block payload component

			// pacemaker timing options; proposalDurProvider is the cruise-control
			// component constructed earlier in the builder chain
			opts := []consensus.Option{
				consensus.WithMinTimeout(hotstuffMinTimeout),
				consensus.WithTimeoutAdjustmentFactor(hotstuffTimeoutAdjustmentFactor),
				consensus.WithHappyPathMaxRoundFailures(hotstuffHappyPathMaxRoundFailures),
				consensus.WithProposalDurationProvider(proposalDurProvider),
			}

			if !startupTime.IsZero() {
				opts = append(opts, consensus.WithStartupTime(startupTime))
			}
			// recover the latest finalized block and all pending (unfinalized) descendants
			finalizedBlock, pending, err := recovery.FindLatest(node.State, node.Storage.Headers)
			if err != nil {
				return nil, err
			}

			// initialize hotstuff consensus algorithm
			hot, err = consensus.NewParticipant(
				createLogger(node.Logger, node.RootChainID),
				mainMetrics,
				node.Metrics.Mempool,
				build,
				finalizedBlock,
				pending,
				hotstuffModules,
				opts...,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize hotstuff engine: %w", err)
			}
			return hot, nil
		}).
		Component("consensus compliance engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// initialize the pending blocks cache
			proposals := buffer.NewPendingBlocks()

			logger := createLogger(node.Logger, node.RootChainID)
			// the compliance core validates incoming block proposals and feeds
			// them to hotstuff and the vote/timeout aggregators
			complianceCore, err := compliance.NewCore(
				logger,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				mainMetrics,
				node.Metrics.Compliance,
				followerDistributor,
				node.Tracer,
				node.Storage.Headers,
				node.Storage.Payloads,
				mutableState,
				proposals,
				syncCore,
				hotstuffModules.Validator,
				hot,
				hotstuffModules.VoteAggregator,
				hotstuffModules.TimeoutAggregator,
				node.ComplianceConfig,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize compliance core: %w", err)
			}

			// initialize the compliance engine
			comp, err = compliance.NewEngine(
				logger,
				node.Me,
				complianceCore,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize compliance engine: %w", err)
			}
			// the engine prunes its caches on finalization events
			followerDistributor.AddOnBlockFinalizedConsumer(comp.OnFinalizedBlock)

			return comp, nil
		}).
		Component("consensus message hub", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// the message hub broadcasts own proposals, votes and timeouts to
			// the consensus committee over the network
			messageHub, err := message_hub.NewMessageHub(
				createLogger(node.Logger, node.RootChainID),
				node.Metrics.Engine,
				node.EngineRegistry,
				node.Me,
				comp,
				hot,
				hotstuffModules.VoteAggregator,
				hotstuffModules.TimeoutAggregator,
				node.State,
				node.Storage.Payloads,
			)
			if err != nil {
				return nil, fmt.Errorf("could not create consensus message hub: %w", err)
			}
			// subscribe the hub to hotstuff events so it knows what to broadcast
			hotstuffModules.Notifier.AddConsumer(messageHub)
			return messageHub, nil
		}).
		Component("sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// spam detection guards the sync protocol against abusive peers
			spamConfig, err := synceng.NewSpamDetectionConfig()
			if err != nil {
				return nil, fmt.Errorf("could not initialize spam detection config: %w", err)
			}

			sync, err := synceng.New(
				node.Logger,
				node.Metrics.Engine,
				node.EngineRegistry,
				node.Me,
				node.State,
				node.Storage.Blocks,
				comp,
				syncCore,
				node.SyncEngineIdentifierProvider,
				spamConfig,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize synchronization engine: %w", err)
			}
			// the sync engine tracks finalization progress to know what to request
			followerDistributor.AddFinalizationConsumer(sync)

			return sync, nil
		}).
		Component("receipt requester engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// created with sealing engine; registered here only so the node
			// builder manages its ready/done lifecycle
			return receiptRequester, nil
		}).
		Component("DKG messaging engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {

			// brokerTunnel is used to forward messages between the DKG
			// messaging engine and the DKG broker/controller
			// (also consumed by the DKG reactor engine component below)
			dkgBrokerTunnel = dkgmodule.NewBrokerTunnel()

			// messagingEngine is a network engine that is used by nodes to
			// exchange private DKG messages
			messagingEngine, err := dkgeng.NewMessagingEngine(
				node.Logger,
				node.EngineRegistry,
				node.Me,
				dkgBrokerTunnel,
				node.Metrics.Mempool,
				dkgMessagingEngineConfig,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize DKG messaging engine: %w", err)
			}

			return messagingEngine, nil
		}).
		Component("DKG reactor engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// the viewsObserver is used by the reactor engine to subscribe to
			// new views being finalized
			viewsObserver := gadgets.NewViews()
			node.ProtocolEvents.AddConsumer(viewsObserver)

			// construct DKG contract client
			// one client per configured access node, ordered by retry priority
			dkgContractClients, err := createDKGContractClients(node, machineAccountInfo, flowClientConfigs)
			if err != nil {
				return nil, fmt.Errorf("could not create dkg contract client %w", err)
			}

			// the reactor engine reacts to new views being finalized and drives the
			// DKG protocol
			reactorEngine := dkgeng.NewReactorEngine(
				node.Logger,
				node.Me,
				node.State,
				dkgState,
				dkgmodule.NewControllerFactory(
					node.Logger,
					node.Me,
					dkgContractClients,
					dkgBrokerTunnel,
				),
				viewsObserver,
			)

			// reactorEngine consumes the EpochSetupPhaseStarted event
			node.ProtocolEvents.AddConsumer(reactorEngine)

			return reactorEngine, nil
		})
   928  
	// build the fully wired node and run it until it receives a shutdown signal
	node, err := nodeBuilder.Build()
	if err != nil {
		nodeBuilder.Logger.Fatal().Err(err).Send()
	}
	node.Run()
   934  }
   935  
   936  func loadBeaconPrivateKey(dir string, myID flow.Identifier) (*encodable.RandomBeaconPrivKey, error) {
   937  	path := fmt.Sprintf(bootstrap.PathRandomBeaconPriv, myID)
   938  	data, err := io.ReadFile(filepath.Join(dir, path))
   939  	if err != nil {
   940  		return nil, err
   941  	}
   942  
   943  	var priv encodable.RandomBeaconPrivKey
   944  	err = json.Unmarshal(data, &priv)
   945  	if err != nil {
   946  		return nil, err
   947  	}
   948  	return &priv, nil
   949  }
   950  
   951  // createDKGContractClient creates an dkgContractClient
   952  func createDKGContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClient *client.Client, anID flow.Identifier) (module.DKGContractClient, error) {
   953  	var dkgClient module.DKGContractClient
   954  
   955  	contracts := systemcontracts.SystemContractsForChain(node.RootChainID)
   956  	dkgContractAddress := contracts.DKG.Address.Hex()
   957  
   958  	// construct signer from private key
   959  	sk, err := crypto.DecodePrivateKey(machineAccountInfo.SigningAlgorithm, machineAccountInfo.EncodedPrivateKey)
   960  	if err != nil {
   961  		return nil, fmt.Errorf("could not decode private key from hex: %w", err)
   962  	}
   963  
   964  	txSigner, err := crypto.NewInMemorySigner(sk, machineAccountInfo.HashAlgorithm)
   965  	if err != nil {
   966  		return nil, fmt.Errorf("could not create in-memory signer: %w", err)
   967  	}
   968  
   969  	// create actual dkg contract client, all flags and machine account info file found
   970  	dkgClient = dkgmodule.NewClient(
   971  		node.Logger,
   972  		flowClient,
   973  		anID,
   974  		txSigner,
   975  		dkgContractAddress,
   976  		machineAccountInfo.Address,
   977  		machineAccountInfo.KeyIndex,
   978  	)
   979  
   980  	return dkgClient, nil
   981  }
   982  
   983  // createDKGContractClients creates an array dkgContractClient that is sorted by retry fallback priority
   984  func createDKGContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*common.FlowClientConfig) ([]module.DKGContractClient, error) {
   985  	dkgClients := make([]module.DKGContractClient, 0)
   986  
   987  	for _, opt := range flowClientOpts {
   988  		flowClient, err := common.FlowClient(opt)
   989  		if err != nil {
   990  			return nil, fmt.Errorf("failed to create flow client for dkg contract client with options: %s %w", flowClientOpts, err)
   991  		}
   992  
   993  		node.Logger.Info().Msgf("created dkg contract client with opts: %s", opt.String())
   994  		dkgClient, err := createDKGContractClient(node, machineAccountInfo, flowClient, opt.AccessNodeID)
   995  		if err != nil {
   996  			return nil, fmt.Errorf("failed to create dkg contract client with flow client options: %s %w", flowClientOpts, err)
   997  		}
   998  
   999  		dkgClients = append(dkgClients, dkgClient)
  1000  	}
  1001  
  1002  	return dkgClients, nil
  1003  }