github.com/koko1123/flow-go-1@v0.29.6/cmd/consensus/main.go

// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/spf13/pflag"

	client "github.com/onflow/flow-go-sdk/access/grpc"
	"github.com/onflow/flow-go-sdk/crypto"
	"github.com/koko1123/flow-go-1/cmd"
	"github.com/koko1123/flow-go-1/cmd/util/cmd/common"
	"github.com/koko1123/flow-go-1/consensus"
	"github.com/koko1123/flow-go-1/consensus/hotstuff"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/blockproducer"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/committees"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/notifications/pubsub"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/pacemaker/timeout"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/persister"
	hotsignature "github.com/koko1123/flow-go-1/consensus/hotstuff/signature"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/verification"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/votecollector"
	recovery "github.com/koko1123/flow-go-1/consensus/recovery/protocol"
	"github.com/koko1123/flow-go-1/engine/common/requester"
	synceng "github.com/koko1123/flow-go-1/engine/common/synchronization"
	"github.com/koko1123/flow-go-1/engine/consensus/approvals/tracker"
	"github.com/koko1123/flow-go-1/engine/consensus/compliance"
	dkgeng "github.com/koko1123/flow-go-1/engine/consensus/dkg"
	"github.com/koko1123/flow-go-1/engine/consensus/ingestion"
	"github.com/koko1123/flow-go-1/engine/consensus/matching"
	"github.com/koko1123/flow-go-1/engine/consensus/provider"
	"github.com/koko1123/flow-go-1/engine/consensus/sealing"
	"github.com/koko1123/flow-go-1/fvm/systemcontracts"
	"github.com/koko1123/flow-go-1/model/bootstrap"
	"github.com/koko1123/flow-go-1/model/encodable"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/model/flow/filter"
	"github.com/koko1123/flow-go-1/module"
	"github.com/koko1123/flow-go-1/module/buffer"
	builder "github.com/koko1123/flow-go-1/module/builder/consensus"
	"github.com/koko1123/flow-go-1/module/chainsync"
	chmodule "github.com/koko1123/flow-go-1/module/chunks"
	modulecompliance "github.com/koko1123/flow-go-1/module/compliance"
	dkgmodule "github.com/koko1123/flow-go-1/module/dkg"
	"github.com/koko1123/flow-go-1/module/epochs"
	finalizer "github.com/koko1123/flow-go-1/module/finalizer/consensus"
	"github.com/koko1123/flow-go-1/module/mempool"
	consensusMempools "github.com/koko1123/flow-go-1/module/mempool/consensus"
	"github.com/koko1123/flow-go-1/module/mempool/stdmap"
	"github.com/koko1123/flow-go-1/module/metrics"
	"github.com/koko1123/flow-go-1/module/updatable_configs"
	"github.com/koko1123/flow-go-1/module/validation"
	"github.com/koko1123/flow-go-1/network/channels"
	"github.com/koko1123/flow-go-1/state/protocol"
	badgerState "github.com/koko1123/flow-go-1/state/protocol/badger"
	"github.com/koko1123/flow-go-1/state/protocol/blocktimer"
	"github.com/koko1123/flow-go-1/state/protocol/events/gadgets"
	"github.com/koko1123/flow-go-1/storage"
	bstorage "github.com/koko1123/flow-go-1/storage/badger"
	"github.com/koko1123/flow-go-1/utils/io"
)

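// main assembles and runs a Flow consensus node. Following the cmd.FlowNode
// builder pattern visible below, it first registers Modules (shared state such
// as mempools, storage accessors and metrics, initialized in order) and then
// Components (long-running engines managed via module.ReadyDoneAware). Together
// the components implement the consensus role: ingesting collection guarantees,
// matching and sealing execution results, running HotStuff block production,
// and participating in the DKG for the random beacon.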
func main() {

	var (
		guaranteeLimit                         uint
		resultLimit                            uint
		approvalLimit                          uint
		sealLimit                              uint
		pendingReceiptsLimit                   uint
		minInterval                            time.Duration
		maxInterval                            time.Duration
		maxSealPerBlock                        uint
		maxGuaranteePerBlock                   uint
		hotstuffTimeout                        time.Duration
		hotstuffMinTimeout                     time.Duration
		hotstuffTimeoutIncreaseFactor          float64
		hotstuffTimeoutDecreaseFactor          float64
		hotstuffTimeoutVoteAggregationFraction float64
		blockRateDelay                         time.Duration
		chunkAlpha                             uint
		requiredApprovalsForSealVerification   uint
		requiredApprovalsForSealConstruction   uint
		emergencySealing                       bool
		dkgControllerConfig                    dkgmodule.ControllerConfig
		startupTimeString                      string
		startupTime                            time.Time

		// DKG contract client
		machineAccountInfo *bootstrap.NodeMachineAccountInfo
		flowClientConfigs  []*common.FlowClientConfig
		insecureAccessAPI  bool
		accessNodeIDS      []string

		err                     error
		mutableState            protocol.MutableState
		beaconPrivateKey        *encodable.RandomBeaconPrivKey
		guarantees              mempool.Guarantees
		receipts                mempool.ExecutionTree
		seals                   mempool.IncorporatedResultSeals
		pendingReceipts         mempool.PendingReceipts
		prov                    *provider.Engine
		receiptRequester        *requester.Engine
		syncCore                *chainsync.Core
		comp                    *compliance.Engine
		conMetrics              module.ConsensusMetrics
		mainMetrics             module.HotstuffMetrics
		receiptValidator        module.ReceiptValidator
		chunkAssigner           *chmodule.ChunkAssigner
		finalizationDistributor *pubsub.FinalizationDistributor
		dkgBrokerTunnel         *dkgmodule.BrokerTunnel
		blockTimer              protocol.BlockTimer
		finalizedHeader         *synceng.FinalizedHeaderCache
		hotstuffModules         *consensus.HotstuffModules
		dkgState                *bstorage.DKGState
		safeBeaconKeys          *bstorage.SafeBeaconPrivateKeys
		getSealingConfigs       module.SealingConfigsGetter
	)

	nodeBuilder := cmd.FlowNode(flow.RoleConsensus.String())
	nodeBuilder.ExtraFlags(func(flags *pflag.FlagSet) {
		flags.UintVar(&guaranteeLimit, "guarantee-limit", 1000, "maximum number of guarantees in the memory pool")
		flags.UintVar(&resultLimit, "result-limit", 10000, "maximum number of execution results in the memory pool")
		flags.UintVar(&approvalLimit, "approval-limit", 1000, "maximum number of result approvals in the memory pool")
		// the default value is able to buffer as many seals as would be generated over ~12 hours. In case it
		// ever gets full, the node will simply crash instead of employing complex ejection logic.
		flags.UintVar(&sealLimit, "seal-limit", 44200, "maximum number of block seals in the memory pool")
		flags.UintVar(&pendingReceiptsLimit, "pending-receipts-limit", 10000, "maximum number of pending receipts in the mempool")
		flags.DurationVar(&minInterval, "min-interval", time.Millisecond, "the minimum amount of time between two blocks")
		flags.DurationVar(&maxInterval, "max-interval", 90*time.Second, "the maximum amount of time between two blocks")
		flags.UintVar(&maxSealPerBlock, "max-seal-per-block", 100, "the maximum number of seals to be included in a block")
		flags.UintVar(&maxGuaranteePerBlock, "max-guarantee-per-block", 100, "the maximum number of collection guarantees to be included in a block")
		flags.DurationVar(&hotstuffTimeout, "hotstuff-timeout", 60*time.Second, "the initial timeout for the hotstuff pacemaker")
		flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 2500*time.Millisecond, "the lower timeout bound for the hotstuff pacemaker")
		flags.Float64Var(&hotstuffTimeoutIncreaseFactor, "hotstuff-timeout-increase-factor", timeout.DefaultConfig.TimeoutIncrease, "multiplicative increase of timeout value in case of time out event")
		flags.Float64Var(&hotstuffTimeoutDecreaseFactor, "hotstuff-timeout-decrease-factor", timeout.DefaultConfig.TimeoutDecrease, "multiplicative decrease of timeout value in case of progress")
		flags.Float64Var(&hotstuffTimeoutVoteAggregationFraction, "hotstuff-timeout-vote-aggregation-fraction", 0.6, "additional fraction of replica timeout that the primary will wait for votes")
		flags.DurationVar(&blockRateDelay, "block-rate-delay", 500*time.Millisecond, "the delay to broadcast block proposal in order to control block production rate")
		flags.UintVar(&chunkAlpha, "chunk-alpha", flow.DefaultChunkAssignmentAlpha, "number of verifiers that should be assigned to each chunk")
		flags.UintVar(&requiredApprovalsForSealVerification, "required-verification-seal-approvals", flow.DefaultRequiredApprovalsForSealValidation, "minimum number of approvals that are required to verify a seal")
		flags.UintVar(&requiredApprovalsForSealConstruction, "required-construction-seal-approvals", flow.DefaultRequiredApprovalsForSealConstruction, "minimum number of approvals that are required to construct a seal")
		flags.BoolVar(&emergencySealing, "emergency-sealing-active", flow.DefaultEmergencySealingActive, "(de)activation of emergency sealing")
		flags.BoolVar(&insecureAccessAPI, "insecure-access-api", false, "required if insecure GRPC connection should be used")
		flags.StringSliceVar(&accessNodeIDS, "access-node-ids", []string{}, fmt.Sprintf("array of access node IDs sorted in priority order where the first ID in this array will get the first connection attempt and each subsequent ID after serves as a fallback. Minimum length %d. Use '*' for all IDs in protocol state.", common.DefaultAccessNodeIDSMinimum))
		flags.DurationVar(&dkgControllerConfig.BaseStartDelay, "dkg-controller-base-start-delay", dkgmodule.DefaultBaseStartDelay, "used to define the range for jitter prior to DKG start (e.g. 500µs) - the base value is scaled quadratically with the # of DKG participants")
		flags.DurationVar(&dkgControllerConfig.BaseHandleFirstBroadcastDelay, "dkg-controller-base-handle-first-broadcast-delay", dkgmodule.DefaultBaseHandleFirstBroadcastDelay, "used to define the range for jitter prior to DKG handling the first broadcast messages (e.g. 50ms) - the base value is scaled quadratically with the # of DKG participants")
		flags.DurationVar(&dkgControllerConfig.HandleSubsequentBroadcastDelay, "dkg-controller-handle-subsequent-broadcast-delay", dkgmodule.DefaultHandleSubsequentBroadcastDelay, "used to define the constant delay introduced prior to DKG handling subsequent broadcast messages (e.g. 2s)")
		flags.StringVar(&startupTimeString, "hotstuff-startup-time", cmd.NotSet, "specifies date and time (in ISO 8601 format) after which the consensus participant may enter the first view (e.g. 1996-04-24T15:04:05-07:00)")
	}).ValidateFlags(func() error {
		nodeBuilder.Logger.Info().Str("startup_time_str", startupTimeString).Msg("got startup_time_str")
		if startupTimeString != cmd.NotSet {
			t, err := time.Parse(time.RFC3339, startupTimeString)
			if err != nil {
				return fmt.Errorf("invalid hotstuff-startup-time value: %w", err)
			}
			startupTime = t
			nodeBuilder.Logger.Info().Time("startup_time", startupTime).Msg("got startup_time")
		}
		return nil
	})

	if err = nodeBuilder.Initialize(); err != nil {
		nodeBuilder.Logger.Fatal().Err(err).Send()
	}

	nodeBuilder.
		PreInit(cmd.DynamicStartPreInit).
		Module("consensus node metrics", func(node *cmd.NodeConfig) error {
			conMetrics = metrics.NewConsensusCollector(node.Tracer, node.MetricsRegisterer)
			return nil
		}).
		Module("dkg state", func(node *cmd.NodeConfig) error {
			dkgState, err = bstorage.NewDKGState(node.Metrics.Cache, node.SecretsDB)
			return err
		}).
		Module("beacon keys", func(node *cmd.NodeConfig) error {
			safeBeaconKeys = bstorage.NewSafeBeaconPrivateKeys(dkgState)
			return nil
		}).
		Module("updatable sealing config", func(node *cmd.NodeConfig) error {
			setter, err := updatable_configs.NewSealingConfigs(
				requiredApprovalsForSealConstruction,
				requiredApprovalsForSealVerification,
				chunkAlpha,
				emergencySealing,
			)
			if err != nil {
				return err
			}

			// expose only the getter to other modules, so they can read, but not change, the config
			getSealingConfigs = setter

			// the admin tool is the only instance that has access to the setter interface and is
			// therefore the only module that can change this config
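			// For illustration only (assuming the node runs flow-go's admin server on
			// its default port; the command name and payload shape follow the admin
			// tool's set-config convention and are not part of this file): an operator
			// could adjust the value at runtime with
			//
			//	curl localhost:9002/admin/run_command -H 'Content-Type: application/json' \
			//	  -d '{"commandName": "set-config", "data": {"consensus-required-approvals-for-sealing": 1}}'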
			err = node.ConfigManager.RegisterUintConfig("consensus-required-approvals-for-sealing",
				setter.RequireApprovalsForSealConstructionDynamicValue,
				setter.SetRequiredApprovalsForSealingConstruction)
			return err
		}).
		Module("mutable follower state", func(node *cmd.NodeConfig) error {
			// For now, we only support state implementations from package badger.
			// If we ever support different implementations, the following can be replaced by a type-aware factory
			state, ok := node.State.(*badgerState.State)
			if !ok {
				return fmt.Errorf("only implementations of type badger.State are currently supported but read-only state has type %T", node.State)
			}

			chunkAssigner, err = chmodule.NewChunkAssigner(chunkAlpha, node.State)
			if err != nil {
				return fmt.Errorf("could not instantiate assignment algorithm for chunk verification: %w", err)
			}

			receiptValidator = validation.NewReceiptValidator(
				node.State,
				node.Storage.Headers,
				node.Storage.Index,
				node.Storage.Results,
				node.Storage.Seals)

			sealValidator := validation.NewSealValidator(
				node.State,
				node.Storage.Headers,
				node.Storage.Index,
				node.Storage.Results,
				node.Storage.Seals,
				chunkAssigner,
				getSealingConfigs,
				conMetrics)

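			// The block timer constrains block timestamps relative to the parent block:
			// roughly, a child's timestamp must fall within (parent time + minInterval,
			// parent time + maxInterval]. It is used below both when building blocks
			// and, via the protocol state, when validating them (a sketch of the
			// semantics; see the blocktimer package for the authoritative rules).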
			blockTimer, err = blocktimer.NewBlockTimer(minInterval, maxInterval)
			if err != nil {
				return err
			}

			mutableState, err = badgerState.NewFullConsensusState(
				state,
				node.Storage.Index,
				node.Storage.Payloads,
				node.Tracer,
				node.ProtocolEvents,
				blockTimer,
				receiptValidator,
				sealValidator)
			return err
		}).
		Module("random beacon key", func(node *cmd.NodeConfig) error {
			// If this node was a participant in a spork, its beacon key for the
			// first epoch was generated during the bootstrapping process and is
			// specified in a private bootstrapping file. We load the key and
			// store it in the db for the initial post-spork epoch for use going
			// forward.
			//
			// If this node was not a participant in a spork, it joined at an
			// epoch boundary, so it has no beacon key file (it will generate
			// its first beacon private key through the DKG in the EpochSetup phase
			// prior to its first epoch as a network participant).

			rootSnapshot := node.State.AtBlockID(node.RootBlock.ID())
			isSporkRoot, err := protocol.IsSporkRootSnapshot(rootSnapshot)
			if err != nil {
				return fmt.Errorf("could not check whether root snapshot is spork root: %w", err)
			}
			if !isSporkRoot {
				node.Logger.Info().Msg("node starting from mid-spork snapshot, will not read spork random beacon key file")
				return nil
			}

			// If the node has a beacon key file, then save it to the secrets database
			// as the beacon key for the epoch of the root snapshot.
			beaconPrivateKey, err = loadBeaconPrivateKey(node.BaseConfig.BootstrapDir, node.NodeID)
			if errors.Is(err, os.ErrNotExist) {
				return fmt.Errorf("node is starting from spork root snapshot, but does not have spork random beacon key file: %w", err)
			}
			if err != nil {
				return fmt.Errorf("could not load beacon key file: %w", err)
			}

			rootEpoch := rootSnapshot.Epochs().Current()
			epochCounter, err := rootEpoch.Counter()
			if err != nil {
				return fmt.Errorf("could not get root epoch counter: %w", err)
			}

			// confirm the beacon key file matches the canonical public keys
			rootDKG, err := rootEpoch.DKG()
			if err != nil {
				return fmt.Errorf("could not get dkg for root epoch: %w", err)
			}
			myBeaconPublicKeyShare, err := rootDKG.KeyShare(node.NodeID)
			if err != nil {
				return fmt.Errorf("could not get my beacon public key share for root epoch: %w", err)
			}

			if !myBeaconPublicKeyShare.Equals(beaconPrivateKey.PrivateKey.PublicKey()) {
				return fmt.Errorf("configured beacon key is inconsistent with this node's canonical public beacon key (%s!=%s)",
					beaconPrivateKey.PrivateKey.PublicKey(),
					myBeaconPublicKeyShare)
			}

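			// Both writes below tolerate storage.ErrAlreadyExists, so this module is
			// idempotent: a node that restarts after the first successful run simply
			// skips over the already-persisted values.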
			// store my beacon key for the first epoch post-spork
			err = dkgState.InsertMyBeaconPrivateKey(epochCounter, beaconPrivateKey.PrivateKey)
			if err != nil && !errors.Is(err, storage.ErrAlreadyExists) {
				return err
			}
			// mark the root DKG as successful, so it is considered safe to use the key
			err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateSuccess)
			if err != nil && !errors.Is(err, storage.ErrAlreadyExists) {
				return err
			}

			return nil
		}).
		Module("collection guarantees mempool", func(node *cmd.NodeConfig) error {
			guarantees, err = stdmap.NewGuarantees(guaranteeLimit)
			return err
		}).
		Module("execution receipts mempool", func(node *cmd.NodeConfig) error {
			receipts = consensusMempools.NewExecutionTree()
			// registers size method of backend for metrics
			err = node.Metrics.Mempool.Register(metrics.ResourceReceipt, receipts.Size)
			if err != nil {
				return fmt.Errorf("could not register backend metric: %w", err)
			}
			return nil
		}).
		Module("block seals mempool", func(node *cmd.NodeConfig) error {
			// use a custom ejector, so we don't eject seals that would break
			// the chain of seals
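			// As the constructor names suggest, the mempool is built from three layers:
			// a bounded in-memory map of incorporated result seals, a wrapper that
			// filters seals based on the stored receipts, and an outer
			// ExecStateForkSuppressor that watches for seals attesting to conflicting
			// execution states and, per LogForkAndCrash, logs the fork and crashes.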
			rawMempool := stdmap.NewIncorporatedResultSeals(sealLimit)
			multipleReceiptsFilterMempool := consensusMempools.NewIncorporatedResultSeals(rawMempool, node.Storage.Receipts)
			seals, err = consensusMempools.NewExecStateForkSuppressor(
				multipleReceiptsFilterMempool,
				consensusMempools.LogForkAndCrash(node.Logger),
				node.DB,
				node.Logger,
			)
			if err != nil {
				return fmt.Errorf("failed to wrap seals mempool into ExecStateForkSuppressor: %w", err)
			}
			err = node.Metrics.Mempool.Register(metrics.ResourcePendingIncorporatedSeal, seals.Size)
			return err
		}).
		Module("pending receipts mempool", func(node *cmd.NodeConfig) error {
			pendingReceipts = stdmap.NewPendingReceipts(node.Storage.Headers, pendingReceiptsLimit)
			return nil
		}).
		Module("hotstuff main metrics", func(node *cmd.NodeConfig) error {
			mainMetrics = metrics.NewHotstuffCollector(node.RootChainID)
			return nil
		}).
		Module("sync core", func(node *cmd.NodeConfig) error {
			syncCore, err = chainsync.New(node.Logger, node.SyncCoreConfig, metrics.NewChainSyncCollector())
			return err
		}).
		Module("finalization distributor", func(node *cmd.NodeConfig) error {
			finalizationDistributor = pubsub.NewFinalizationDistributor()
			return nil
		}).
		Module("machine account config", func(node *cmd.NodeConfig) error {
			machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID)
			return err
		}).
		Module("sdk client connection options", func(node *cmd.NodeConfig) error {
			anIDS, err := common.ValidateAccessNodeIDSFlag(accessNodeIDS, node.RootChainID, node.State.Sealed())
			if err != nil {
				return fmt.Errorf("failed to validate flag --access-node-ids: %w", err)
			}

			flowClientConfigs, err = common.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed())
			if err != nil {
				return fmt.Errorf("failed to prepare flow client connection configs for each access node id: %w", err)
			}

			return nil
		}).
		Component("machine account config validator", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// TODO: use fallback logic for flowClient similar to DKG/QC contract clients
			flowClient, err := common.FlowClient(flowClientConfigs[0])
			if err != nil {
				return nil, fmt.Errorf("failed to get flow client connection option for access node (0): %s: %w", flowClientConfigs[0].AccessAddress, err)
			}

			// disable balance checks for transient networks, which do not have transaction fees
			var opts []epochs.MachineAccountValidatorConfigOption
			if node.RootChainID.Transient() {
				opts = append(opts, epochs.WithoutBalanceChecks)
			}
			validator, err := epochs.NewMachineAccountConfigValidator(
				node.Logger,
				flowClient,
				flow.RoleConsensus,
				*machineAccountInfo,
				opts...,
			)
			return validator, err
		}).
		Component("sealing engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {

			sealingTracker := tracker.NewSealingTracker(node.Logger, node.Storage.Headers, node.Storage.Receipts, seals)

			e, err := sealing.NewEngine(
				node.Logger,
				node.Tracer,
				conMetrics,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				sealingTracker,
				node.Network,
				node.Me,
				node.Storage.Headers,
				node.Storage.Payloads,
				node.Storage.Results,
				node.Storage.Index,
				node.State,
				node.Storage.Seals,
				chunkAssigner,
				seals,
				getSealingConfigs,
			)

			// subscribe for finalization events from hotstuff
			finalizationDistributor.AddOnBlockFinalizedConsumer(e.OnFinalizedBlock)
			finalizationDistributor.AddOnBlockIncorporatedConsumer(e.OnBlockIncorporated)

			return e, err
		}).
		Component("matching engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			receiptRequester, err = requester.New(
				node.Logger,
				node.Metrics.Engine,
				node.Network,
				node.Me,
				node.State,
				channels.RequestReceiptsByBlockID,
				filter.HasRole(flow.RoleExecution),
				func() flow.Entity { return &flow.ExecutionReceipt{} },
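				// retry intervals for re-requesting missing receipts are bounded
				// between 2s and 30s (the requester backs off between these limits)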
				requester.WithRetryInitial(2*time.Second),
				requester.WithRetryMaximum(30*time.Second),
			)
			if err != nil {
				return nil, err
			}

			core := matching.NewCore(
				node.Logger,
				node.Tracer,
				conMetrics,
				node.Metrics.Mempool,
				node.State,
				node.Storage.Headers,
				node.Storage.Receipts,
				receipts,
				pendingReceipts,
				seals,
				receiptValidator,
				receiptRequester,
				matching.DefaultConfig(),
			)

			e, err := matching.NewEngine(
				node.Logger,
				node.Network,
				node.Me,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				node.State,
				node.Storage.Receipts,
				node.Storage.Index,
				core,
			)
			if err != nil {
				return nil, err
			}

			// subscribe engine to inputs from other node-internal components
			receiptRequester.WithHandle(e.HandleReceipt)
			finalizationDistributor.AddOnBlockFinalizedConsumer(e.OnFinalizedBlock)
			finalizationDistributor.AddOnBlockIncorporatedConsumer(e.OnBlockIncorporated)

			return e, err
		}).
		Component("provider engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			prov, err = provider.New(
				node.Logger,
				node.Metrics.Engine,
				node.Tracer,
				node.Network,
				node.State,
				node.Me,
			)
			return prov, err
		}).
		Component("ingestion engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			core := ingestion.NewCore(
				node.Logger,
				node.Tracer,
				node.Metrics.Mempool,
				node.State,
				node.Storage.Headers,
				guarantees,
			)

			ing, err := ingestion.New(
				node.Logger,
				node.Metrics.Engine,
				node.Network,
				node.Me,
				core,
			)

			return ing, err
		}).
		Component("hotstuff modules", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// initialize the block finalizer
			finalize := finalizer.NewFinalizer(
				node.DB,
				node.Storage.Headers,
				mutableState,
				node.Tracer,
				finalizer.WithCleanup(finalizer.CleanupMempools(
					node.Metrics.Mempool,
					conMetrics,
					node.Storage.Payloads,
					guarantees,
					seals,
				)),
			)

			// initialize the main consensus committee's state
			var committee hotstuff.Committee
			committee, err = committees.NewConsensusCommittee(node.State, node.Me.NodeID())
			if err != nil {
				return nil, fmt.Errorf("could not create Committee state for main consensus: %w", err)
			}
			committee = committees.NewMetricsWrapper(committee, mainMetrics) // wrapper for measuring time spent determining consensus committee relations

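			// As the constructor names suggest, the epoch lookup maps HotStuff views
			// to epochs; the key store then serves, for any given view, this node's
			// random beacon key share for that view's epoch, sourced from
			// safeBeaconKeys (which only exposes keys of successfully completed DKGs).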
			epochLookup := epochs.NewEpochLookup(node.State)
			beaconKeyStore := hotsignature.NewEpochAwareRandomBeaconKeyStore(epochLookup, safeBeaconKeys)

			// initialize the combined signer for hotstuff
			var signer hotstuff.Signer
			signer = verification.NewCombinedSigner(
				node.Me,
				beaconKeyStore,
			)
			signer = verification.NewMetricsWrapper(signer, mainMetrics) // wrapper for measuring time spent with crypto-related operations

			// initialize a logging notifier for hotstuff
			notifier := createNotifier(
				node.Logger,
				mainMetrics,
				node.Tracer,
				node.RootChainID,
			)

			notifier.AddConsumer(finalizationDistributor)

			// initialize the persister
			persist := persister.New(node.DB, node.RootChainID)

			finalizedBlock, err := node.State.Final().Head()
			if err != nil {
				return nil, err
			}

			forks, err := consensus.NewForks(
				finalizedBlock,
				node.Storage.Headers,
				finalize,
				notifier,
				node.RootBlock.Header,
				node.RootQC,
			)
			if err != nil {
				return nil, err
			}

			qcDistributor := pubsub.NewQCCreatedDistributor()
			validator := consensus.NewValidator(mainMetrics, committee, forks)
			voteProcessorFactory := votecollector.NewCombinedVoteProcessorFactory(committee, qcDistributor.OnQcConstructedFromVotes)
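			// votes for views at or below the latest finalized view can no longer
			// change the outcome of consensus, so the aggregator only processes
			// votes strictly above it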
			lowestViewForVoteProcessing := finalizedBlock.View + 1
			aggregator, err := consensus.NewVoteAggregator(node.Logger,
				lowestViewForVoteProcessing,
				notifier,
				voteProcessorFactory,
				finalizationDistributor)
			if err != nil {
				return nil, fmt.Errorf("could not initialize vote aggregator: %w", err)
			}

			hotstuffModules = &consensus.HotstuffModules{
				Notifier:                notifier,
				Committee:               committee,
				Signer:                  signer,
				Persist:                 persist,
				QCCreatedDistributor:    qcDistributor,
				FinalizationDistributor: finalizationDistributor,
				Forks:                   forks,
				Validator:               validator,
				Aggregator:              aggregator,
			}

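			// the other hotstuff modules are passive objects without their own
			// lifecycle; the vote aggregator is the long-running one, so it is
			// returned as this component's ReadyDoneAware handle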
			return aggregator, nil
		}).
		Component("consensus compliance engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// initialize the block builder
			var build module.Builder
			build, err = builder.NewBuilder(
				node.Metrics.Mempool,
				node.DB,
				mutableState,
				node.Storage.Headers,
				node.Storage.Seals,
				node.Storage.Index,
				node.Storage.Blocks,
				node.Storage.Results,
				node.Storage.Receipts,
				guarantees,
				seals,
				receipts,
				node.Tracer,
				builder.WithBlockTimer(blockTimer),
				builder.WithMaxSealCount(maxSealPerBlock),
				builder.WithMaxGuaranteeCount(maxGuaranteePerBlock),
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize block builder: %w", err)
			}

			build = blockproducer.NewMetricsWrapper(build, mainMetrics) // wrapper for measuring time spent building block payload component

			opts := []consensus.Option{
				consensus.WithInitialTimeout(hotstuffTimeout),
				consensus.WithMinTimeout(hotstuffMinTimeout),
				consensus.WithVoteAggregationTimeoutFraction(hotstuffTimeoutVoteAggregationFraction),
				consensus.WithTimeoutIncreaseFactor(hotstuffTimeoutIncreaseFactor),
				consensus.WithTimeoutDecreaseFactor(hotstuffTimeoutDecreaseFactor),
				consensus.WithBlockRateDelay(blockRateDelay),
				consensus.WithConfigRegistrar(node.ConfigManager),
			}

			if !startupTime.IsZero() {
				opts = append(opts, consensus.WithStartupTime(startupTime))
			}

			finalizedBlock, pending, err := recovery.FindLatest(node.State, node.Storage.Headers)
			if err != nil {
				return nil, err
			}

			// initialize the entity database accessors
			cleaner := bstorage.NewCleaner(node.Logger, node.DB, node.Metrics.CleanCollector, flow.DefaultValueLogGCFrequency)

			// initialize the pending blocks cache
			proposals := buffer.NewPendingBlocks()

			complianceCore, err := compliance.NewCore(node.Logger,
				node.Metrics.Engine,
				node.Tracer,
				node.Metrics.Mempool,
				node.Metrics.Compliance,
				cleaner,
				node.Storage.Headers,
				node.Storage.Payloads,
				mutableState,
				proposals,
				syncCore,
				hotstuffModules.Aggregator,
				modulecompliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold),
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize compliance core: %w", err)
			}

			// initialize the compliance engine
			comp, err = compliance.NewEngine(
				node.Logger,
				node.Network,
				node.Me,
				prov,
				complianceCore,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize compliance engine: %w", err)
			}

			// initialize hotstuff consensus algorithm
			hot, err := consensus.NewParticipant(
				node.Logger,
				mainMetrics,
				build,
				comp,
				finalizedBlock,
				pending,
				hotstuffModules,
				opts...,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize hotstuff engine: %w", err)
			}

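			// wire the two together: the compliance engine forwards validated
			// proposals to the hotstuff participant and subscribes to block
			// finalization events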
			comp = comp.WithConsensus(hot)
			finalizationDistributor.AddOnBlockFinalizedConsumer(comp.OnFinalizedBlock)
			return comp, nil
		}).
		Component("finalized snapshot", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			finalizedHeader, err = synceng.NewFinalizedHeaderCache(node.Logger, node.State, finalizationDistributor)
			if err != nil {
				return nil, fmt.Errorf("could not create finalized snapshot cache: %w", err)
			}

			return finalizedHeader, nil
		}).
		Component("sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			sync, err := synceng.New(
				node.Logger,
				node.Metrics.Engine,
				node.Network,
				node.Me,
				node.Storage.Blocks,
				comp,
				syncCore,
				finalizedHeader,
				node.SyncEngineIdentifierProvider,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize synchronization engine: %w", err)
			}

			return sync, nil
		}).
		Component("receipt requester engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// created with the matching engine above; returned here so the node builder manages its lifecycle
			return receiptRequester, nil
		}).
		Component("DKG messaging engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {

			// brokerTunnel is used to forward messages between the DKG
			// messaging engine and the DKG broker/controller
			dkgBrokerTunnel = dkgmodule.NewBrokerTunnel()

			// messagingEngine is a network engine that is used by nodes to
			// exchange private DKG messages
			messagingEngine, err := dkgeng.NewMessagingEngine(
				node.Logger,
				node.Network,
				node.Me,
				dkgBrokerTunnel,
			)
			if err != nil {
				return nil, fmt.Errorf("could not initialize DKG messaging engine: %w", err)
			}

			return messagingEngine, nil
		}).
		Component("DKG reactor engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// the viewsObserver is used by the reactor engine to subscribe to
			// new views being finalized
			viewsObserver := gadgets.NewViews()
			node.ProtocolEvents.AddConsumer(viewsObserver)

			// construct DKG contract clients
			dkgContractClients, err := createDKGContractClients(node, machineAccountInfo, flowClientConfigs)
			if err != nil {
				return nil, fmt.Errorf("could not create dkg contract clients: %w", err)
			}

			// the reactor engine reacts to new views being finalized and drives the
			// DKG protocol
			reactorEngine := dkgeng.NewReactorEngine(
				node.Logger,
				node.Me,
				node.State,
				dkgState,
				dkgmodule.NewControllerFactory(
					node.Logger,
					node.Me,
					dkgContractClients,
					dkgBrokerTunnel,
					dkgControllerConfig,
				),
				viewsObserver,
			)

			// reactorEngine consumes the EpochSetupPhaseStarted event
			node.ProtocolEvents.AddConsumer(reactorEngine)

			return reactorEngine, nil
		})

	node, err := nodeBuilder.Build()
	if err != nil {
		nodeBuilder.Logger.Fatal().Err(err).Send()
	}
	node.Run()
}

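// loadBeaconPrivateKey reads this node's random beacon private key from the
// private bootstrap directory. The relative path comes from
// bootstrap.PathRandomBeaconPriv, parameterized by the node ID (in this
// codebase it resolves to a per-node random-beacon.priv.json file under the
// private root information directory).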
func loadBeaconPrivateKey(dir string, myID flow.Identifier) (*encodable.RandomBeaconPrivKey, error) {
	path := fmt.Sprintf(bootstrap.PathRandomBeaconPriv, myID)
	data, err := io.ReadFile(filepath.Join(dir, path))
	if err != nil {
		return nil, err
	}

	var priv encodable.RandomBeaconPrivKey
	err = json.Unmarshal(data, &priv)
	if err != nil {
		return nil, err
	}
	return &priv, nil
}

// createDKGContractClient creates a DKG contract client that submits transactions via the given access node
func createDKGContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClient *client.Client, anID flow.Identifier) (module.DKGContractClient, error) {
	var dkgClient module.DKGContractClient

	contracts, err := systemcontracts.SystemContractsForChain(node.RootChainID)
	if err != nil {
		return nil, err
	}
	dkgContractAddress := contracts.DKG.Address.Hex()

	// construct signer from private key
	sk, err := crypto.DecodePrivateKey(machineAccountInfo.SigningAlgorithm, machineAccountInfo.EncodedPrivateKey)
	if err != nil {
		return nil, fmt.Errorf("could not decode private key from hex: %w", err)
	}

	txSigner, err := crypto.NewInMemorySigner(sk, machineAccountInfo.HashAlgorithm)
	if err != nil {
		return nil, fmt.Errorf("could not create in-memory signer: %w", err)
	}

	// create the actual DKG contract client; all flags and the machine account info file were found
	dkgClient = dkgmodule.NewClient(
		node.Logger,
		flowClient,
		anID,
		txSigner,
		dkgContractAddress,
		machineAccountInfo.Address,
		machineAccountInfo.KeyIndex,
	)

	return dkgClient, nil
}

// createDKGContractClients creates a list of DKG contract clients, sorted by retry fallback priority
func createDKGContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*common.FlowClientConfig) ([]module.DKGContractClient, error) {
	dkgClients := make([]module.DKGContractClient, 0)

	for _, opt := range flowClientOpts {
		flowClient, err := common.FlowClient(opt)
		if err != nil {
			return nil, fmt.Errorf("failed to create flow client for dkg contract client with options %s: %w", opt, err)
		}

		node.Logger.Info().Msgf("created dkg contract client with opts: %s", opt.String())
		dkgClient, err := createDKGContractClient(node, machineAccountInfo, flowClient, opt.AccessNodeID)
		if err != nil {
			return nil, fmt.Errorf("failed to create dkg contract client with flow client options %s: %w", opt, err)
		}

		dkgClients = append(dkgClients, dkgClient)
	}

	return dkgClients, nil
}
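
// Example invocation (illustrative only; the base flags below come from the
// shared cmd.FlowNode builder and the values are placeholders):
//
//	consensus \
//	  --nodeid=<hex-encoded-node-id> \
//	  --bootstrapdir=/bootstrap \
//	  --datadir=/data/protocol \
//	  --access-node-ids=* \
//	  --block-rate-delay=500ms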