github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/cmd/collection/main.go (about)

     1  package main
     2  
     3  import (
     4  	"fmt"
     5  	"time"
     6  
     7  	"github.com/spf13/pflag"
     8  	"golang.org/x/time/rate"
     9  
    10  	client "github.com/onflow/flow-go-sdk/access/grpc"
    11  	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
    12  	"github.com/onflow/flow-go/admin/commands"
    13  	collectionCommands "github.com/onflow/flow-go/admin/commands/collection"
    14  	storageCommands "github.com/onflow/flow-go/admin/commands/storage"
    15  	"github.com/onflow/flow-go/cmd"
    16  	"github.com/onflow/flow-go/cmd/util/cmd/common"
    17  	"github.com/onflow/flow-go/consensus"
    18  	"github.com/onflow/flow-go/consensus/hotstuff"
    19  	"github.com/onflow/flow-go/consensus/hotstuff/committees"
    20  	"github.com/onflow/flow-go/consensus/hotstuff/notifications"
    21  	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
    22  	"github.com/onflow/flow-go/consensus/hotstuff/pacemaker/timeout"
    23  	hotsignature "github.com/onflow/flow-go/consensus/hotstuff/signature"
    24  	"github.com/onflow/flow-go/consensus/hotstuff/validator"
    25  	"github.com/onflow/flow-go/consensus/hotstuff/verification"
    26  	recovery "github.com/onflow/flow-go/consensus/recovery/protocol"
    27  	"github.com/onflow/flow-go/engine/collection/epochmgr"
    28  	"github.com/onflow/flow-go/engine/collection/epochmgr/factories"
    29  	"github.com/onflow/flow-go/engine/collection/events"
    30  	"github.com/onflow/flow-go/engine/collection/ingest"
    31  	"github.com/onflow/flow-go/engine/collection/pusher"
    32  	"github.com/onflow/flow-go/engine/collection/rpc"
    33  	followereng "github.com/onflow/flow-go/engine/common/follower"
    34  	"github.com/onflow/flow-go/engine/common/provider"
    35  	consync "github.com/onflow/flow-go/engine/common/synchronization"
    36  	"github.com/onflow/flow-go/fvm/systemcontracts"
    37  	"github.com/onflow/flow-go/model/bootstrap"
    38  	"github.com/onflow/flow-go/model/flow"
    39  	"github.com/onflow/flow-go/model/flow/filter"
    40  	"github.com/onflow/flow-go/module"
    41  	builder "github.com/onflow/flow-go/module/builder/collection"
    42  	"github.com/onflow/flow-go/module/chainsync"
    43  	modulecompliance "github.com/onflow/flow-go/module/compliance"
    44  	"github.com/onflow/flow-go/module/epochs"
    45  	confinalizer "github.com/onflow/flow-go/module/finalizer/consensus"
    46  	"github.com/onflow/flow-go/module/mempool"
    47  	epochpool "github.com/onflow/flow-go/module/mempool/epochs"
    48  	"github.com/onflow/flow-go/module/mempool/herocache"
    49  	"github.com/onflow/flow-go/module/mempool/queue"
    50  	"github.com/onflow/flow-go/module/metrics"
    51  	"github.com/onflow/flow-go/network/channels"
    52  	"github.com/onflow/flow-go/state/protocol"
    53  	badgerState "github.com/onflow/flow-go/state/protocol/badger"
    54  	"github.com/onflow/flow-go/state/protocol/blocktimer"
    55  	"github.com/onflow/flow-go/state/protocol/events/gadgets"
    56  	"github.com/onflow/flow-go/storage/badger"
    57  	"github.com/onflow/flow-go/utils/grpcutils"
    58  )
    59  
// main is the entry point for the Flow collection node. It declares all
// flag-bound configuration, registers the node's CLI flags, wires up the
// node's modules and components via the FlowNodeBuilder, and finally runs
// the built node.
//
// NOTE(review): component registration order matters — later closures read
// package-main variables populated by earlier ones (e.g. the ingress RPC
// server reads `ing` set by the "ingestion engine" component, and the
// "machine account metrics" module reads `machineAccountInfo` set by the
// "machine account config" module). Do not reorder without checking these
// data dependencies.
func main() {

	var (
		txLimit                           uint
		maxCollectionSize                 uint
		maxCollectionByteSize             uint64
		maxCollectionTotalGas             uint64
		maxCollectionRequestCacheSize     uint32 // collection provider engine
		collectionProviderWorkers         uint   // collection provider engine
		builderExpiryBuffer               uint
		builderPayerRateLimitDryRun       bool
		builderPayerRateLimit             float64
		builderUnlimitedPayers            []string
		hotstuffMinTimeout                time.Duration
		hotstuffTimeoutAdjustmentFactor   float64
		hotstuffHappyPathMaxRoundFailures uint64
		hotstuffProposalDuration          time.Duration
		startupTimeString                 string
		startupTime                       time.Time

		mainConsensusCommittee  *committees.Consensus
		followerState           protocol.FollowerState
		ingestConf              = ingest.DefaultConfig()
		rpcConf                 rpc.Config
		clusterComplianceConfig modulecompliance.Config

		pools               *epochpool.TransactionPools // epoch-scoped transaction pools
		followerDistributor *pubsub.FollowerDistributor
		addressRateLimiter  *ingest.AddressRateLimiter

		push                  *pusher.Engine
		ing                   *ingest.Engine
		mainChainSyncCore     *chainsync.Core
		followerCore          *hotstuff.FollowerLoop // follower hotstuff logic
		followerEng           *followereng.ComplianceEngine
		colMetrics            module.CollectionMetrics
		machineAccountMetrics module.MachineAccountMetrics
		err                   error

		// epoch qc contract client
		machineAccountInfo *bootstrap.NodeMachineAccountInfo
		flowClientConfigs  []*common.FlowClientConfig
		insecureAccessAPI  bool
		accessNodeIDS      []string
		apiRatelimits      map[string]int
		apiBurstlimits     map[string]int
		txRatelimits       float64
		txBurstlimits      int
		txRatelimitPayers  string
	)
	// retained only so that specifying the flag triggers a deprecation warning
	// in ValidateFlags below; the value itself has no effect
	var deprecatedFlagBlockRateDelay time.Duration

	nodeBuilder := cmd.FlowNode(flow.RoleCollection.String())
	nodeBuilder.ExtraFlags(func(flags *pflag.FlagSet) {
		flags.UintVar(&txLimit, "tx-limit", 50_000,
			"maximum number of transactions in the memory pool")
		flags.StringVarP(&rpcConf.ListenAddr, "ingress-addr", "i", "localhost:9000",
			"the address the ingress server listens on")
		flags.UintVar(&rpcConf.MaxMsgSize, "rpc-max-message-size", grpcutils.DefaultMaxMsgSize,
			"the maximum message size in bytes for messages sent or received over grpc")
		flags.BoolVar(&rpcConf.RpcMetricsEnabled, "rpc-metrics-enabled", false,
			"whether to enable the rpc metrics")
		flags.Uint64Var(&ingestConf.MaxGasLimit, "ingest-max-gas-limit", flow.DefaultMaxTransactionGasLimit,
			"maximum per-transaction computation limit (gas limit)")
		flags.Uint64Var(&ingestConf.MaxTransactionByteSize, "ingest-max-tx-byte-size", flow.DefaultMaxTransactionByteSize,
			"maximum per-transaction byte size")
		flags.Uint64Var(&ingestConf.MaxCollectionByteSize, "ingest-max-col-byte-size", flow.DefaultMaxCollectionByteSize,
			"maximum per-collection byte size")
		flags.BoolVar(&ingestConf.CheckScriptsParse, "ingest-check-scripts-parse", true,
			"whether we check that inbound transactions are parse-able")
		flags.UintVar(&ingestConf.ExpiryBuffer, "ingest-expiry-buffer", 30,
			"expiry buffer for inbound transactions")
		flags.UintVar(&ingestConf.PropagationRedundancy, "ingest-tx-propagation-redundancy", 10,
			"how many additional cluster members we propagate transactions to")
		flags.UintVar(&builderExpiryBuffer, "builder-expiry-buffer", builder.DefaultExpiryBuffer,
			"expiry buffer for transactions in proposed collections")
		flags.BoolVar(&builderPayerRateLimitDryRun, "builder-rate-limit-dry-run", false,
			"determines whether rate limit configuration should be enforced (false), or only logged (true)")
		flags.Float64Var(&builderPayerRateLimit, "builder-rate-limit", builder.DefaultMaxPayerTransactionRate, // no rate limiting
			"rate limit for each payer (transactions/collection)")
		flags.StringSliceVar(&builderUnlimitedPayers, "builder-unlimited-payers", []string{}, // no unlimited payers
			"set of payer addresses which are omitted from rate limiting")
		flags.UintVar(&maxCollectionSize, "builder-max-collection-size", flow.DefaultMaxCollectionSize,
			"maximum number of transactions in proposed collections")
		flags.Uint64Var(&maxCollectionByteSize, "builder-max-collection-byte-size", flow.DefaultMaxCollectionByteSize,
			"maximum byte size of the proposed collection")
		flags.Uint64Var(&maxCollectionTotalGas, "builder-max-collection-total-gas", flow.DefaultMaxCollectionTotalGas,
			"maximum total amount of maxgas of transactions in proposed collections")
		// Collection Nodes use a lower min timeout than Consensus Nodes (1.5s vs 2.5s) because:
		//  - they tend to have higher happy-path view rate, allowing a shorter timeout
		//  - since they have smaller committees, 1-2 offline replicas has a larger negative impact, which is mitigating with a smaller timeout
		flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 1500*time.Millisecond,
			"the lower timeout bound for the hotstuff pacemaker, this is also used as initial timeout")
		flags.Float64Var(&hotstuffTimeoutAdjustmentFactor, "hotstuff-timeout-adjustment-factor", timeout.DefaultConfig.TimeoutAdjustmentFactor,
			"adjustment of timeout duration in case of time out event")
		flags.Uint64Var(&hotstuffHappyPathMaxRoundFailures, "hotstuff-happy-path-max-round-failures", timeout.DefaultConfig.HappyPathMaxRoundFailures,
			"number of failed rounds before first timeout increase")
		flags.Uint64Var(&clusterComplianceConfig.SkipNewProposalsThreshold,
			"cluster-compliance-skip-proposals-threshold", modulecompliance.DefaultConfig().SkipNewProposalsThreshold, "threshold at which new proposals are discarded rather than cached, if their height is this much above local finalized height (cluster compliance engine)")
		flags.StringVar(&startupTimeString, "hotstuff-startup-time", cmd.NotSet, "specifies date and time (in ISO 8601 format) after which the consensus participant may enter the first view (e.g (e.g 1996-04-24T15:04:05-07:00))")
		flags.DurationVar(&hotstuffProposalDuration, "hotstuff-proposal-duration", time.Millisecond*250, "the target time between entering a view and broadcasting the proposal for that view (different and smaller than view time)")
		flags.Uint32Var(&maxCollectionRequestCacheSize, "max-collection-provider-cache-size", provider.DefaultEntityRequestCacheSize, "maximum number of collection requests to cache for collection provider")
		flags.UintVar(&collectionProviderWorkers, "collection-provider-workers", provider.DefaultRequestProviderWorkers, "number of workers to use for collection provider")
		// epoch qc contract flags
		flags.BoolVar(&insecureAccessAPI, "insecure-access-api", false, "required if insecure GRPC connection should be used")
		flags.StringSliceVar(&accessNodeIDS, "access-node-ids", []string{}, fmt.Sprintf("array of access node IDs sorted in priority order where the first ID in this array will get the first connection attempt and each subsequent ID after serves as a fallback. Minimum length %d. Use '*' for all IDs in protocol state.", common.DefaultAccessNodeIDSMinimum))
		flags.StringToIntVar(&apiRatelimits, "api-rate-limits", map[string]int{}, "per second rate limits for GRPC API methods e.g. Ping=300,SendTransaction=500 etc. note limits apply globally to all clients.")
		flags.StringToIntVar(&apiBurstlimits, "api-burst-limits", map[string]int{}, "burst limits for gRPC API methods e.g. Ping=100,SendTransaction=100 etc. note limits apply globally to all clients.")

		// rate limiting for accounts, default is 2 transactions every 2.5 seconds
		// Note: The rate limit configured for each node may differ from the effective network-wide rate limit
		// for a given payer. In particular, the number of clusters and the message propagation factor will
		// influence how the individual rate limit translates to a network-wide rate limit.
		// For example, suppose we have 5 collection clusters and configure each Collection Node with a rate
		// limit of 1 message per second. Then, the effective network-wide rate limit for a payer address would
		// be *at least* 5 messages per second.
		flags.Float64Var(&txRatelimits, "ingest-tx-rate-limits", 2.5, "per second rate limits for processing transactions for limited account")
		flags.IntVar(&txBurstlimits, "ingest-tx-burst-limits", 2, "burst limits for processing transactions for limited account")
		flags.StringVar(&txRatelimitPayers, "ingest-tx-rate-limit-payers", "", "comma separated list of accounts to apply rate limiting to")

		// deprecated flags
		flags.DurationVar(&deprecatedFlagBlockRateDelay, "block-rate-delay", 0, "the delay to broadcast block proposal in order to control block production rate")
	}).ValidateFlags(func() error {
		// parse the optional hotstuff startup time; cmd.NotSet means "start immediately"
		// NOTE(review): the error text says "start-time" but the flag is
		// --hotstuff-startup-time — consider aligning the message with the flag name.
		if startupTimeString != cmd.NotSet {
			t, err := time.Parse(time.RFC3339, startupTimeString)
			if err != nil {
				return fmt.Errorf("invalid start-time value: %w", err)
			}
			startupTime = t
		}
		if deprecatedFlagBlockRateDelay > 0 {
			nodeBuilder.Logger.Warn().Msg("A deprecated flag was specified (--block-rate-delay). This flag is deprecated as of v0.30 (Jun 2023), has no effect, and will eventually be removed.")
		}
		return nil
	})

	if err = nodeBuilder.Initialize(); err != nil {
		nodeBuilder.Logger.Fatal().Err(err).Send()
	}

	nodeBuilder.
		PreInit(cmd.DynamicStartPreInit).
		Module("transaction rate limiter", func(node *cmd.NodeConfig) error {
			// To be managed by admin tool, and used by ingestion engine
			addressRateLimiter = ingest.NewAddressRateLimiter(rate.Limit(txRatelimits), txBurstlimits)
			// read the rate limit addresses from flag and add to the rate limiter
			addrs, err := ingest.ParseAddresses(txRatelimitPayers)
			if err != nil {
				return fmt.Errorf("could not parse rate limit addresses: %w", err)
			}
			ingest.AddAddresses(addressRateLimiter, addrs)

			return nil
		}).
		// admin command to inspect/adjust the per-payer rate limiter at runtime
		AdminCommand("ingest-tx-rate-limit", func(node *cmd.NodeConfig) commands.AdminCommand {
			return collectionCommands.NewTxRateLimitCommand(addressRateLimiter)
		}).
		// admin command to read a range of cluster blocks directly from storage
		AdminCommand("read-range-cluster-blocks", func(conf *cmd.NodeConfig) commands.AdminCommand {
			clusterPayloads := badger.NewClusterPayloads(&metrics.NoopCollector{}, conf.DB)
			headers, ok := conf.Storage.Headers.(*badger.Headers)
			if !ok {
				panic("fail to initialize admin tool, conf.Storage.Headers can not be casted as badger headers")
			}
			return storageCommands.NewReadRangeClusterBlocksCommand(conf.DB, headers, clusterPayloads)
		}).
		Module("follower distributor", func(node *cmd.NodeConfig) error {
			followerDistributor = pubsub.NewFollowerDistributor()
			followerDistributor.AddProposalViolationConsumer(notifications.NewSlashingViolationsConsumer(node.Logger))
			return nil
		}).
		Module("mutable follower state", func(node *cmd.NodeConfig) error {
			// For now, we only support state implementations from package badger.
			// If we ever support different implementations, the following can be replaced by a type-aware factory
			state, ok := node.State.(*badgerState.State)
			if !ok {
				return fmt.Errorf("only implementations of type badger.State are currently supported but read-only state has type %T", node.State)
			}
			followerState, err = badgerState.NewFollowerState(
				node.Logger,
				node.Tracer,
				node.ProtocolEvents,
				state,
				node.Storage.Index,
				node.Storage.Payloads,
				blocktimer.DefaultBlockTimer,
			)
			return err
		}).
		Module("transactions mempool", func(node *cmd.NodeConfig) error {
			// factory for per-epoch transaction pools; each epoch gets its own
			// herocache instance (and optionally its own metrics collector)
			create := func(epoch uint64) mempool.Transactions {
				var heroCacheMetricsCollector module.HeroCacheMetrics = metrics.NewNoopCollector()
				if node.BaseConfig.HeroCacheMetricsEnable {
					heroCacheMetricsCollector = metrics.CollectionNodeTransactionsCacheMetrics(node.MetricsRegisterer, epoch)
				}
				return herocache.NewTransactions(
					uint32(txLimit),
					node.Logger,
					heroCacheMetricsCollector)
			}

			pools = epochpool.NewTransactionPools(create)
			err := node.Metrics.Mempool.Register(metrics.ResourceTransaction, pools.CombinedSize)
			return err
		}).
		Module("machine account config", func(node *cmd.NodeConfig) error {
			machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID)
			return err
		}).
		Module("collection node metrics", func(node *cmd.NodeConfig) error {
			colMetrics = metrics.NewCollectionCollector(node.Tracer)
			return nil
		}).
		// depends on machineAccountInfo loaded by "machine account config" above
		Module("machine account metrics", func(node *cmd.NodeConfig) error {
			machineAccountMetrics = metrics.NewMachineAccountCollector(node.MetricsRegisterer, machineAccountInfo.FlowAddress())
			return nil
		}).
		Module("main chain sync core", func(node *cmd.NodeConfig) error {
			log := node.Logger.With().Str("sync_chain_id", node.RootChainID.String()).Logger()
			mainChainSyncCore, err = chainsync.New(log, node.SyncCoreConfig, metrics.NewChainSyncCollector(node.RootChainID), node.RootChainID)
			return err
		}).
		Module("sdk client connection options", func(node *cmd.NodeConfig) error {
			anIDS, err := common.ValidateAccessNodeIDSFlag(accessNodeIDS, node.RootChainID, node.State.Sealed())
			if err != nil {
				return fmt.Errorf("failed to validate flag --access-node-ids %w", err)
			}

			flowClientConfigs, err = common.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed())
			if err != nil {
				return fmt.Errorf("failed to prepare flow client connection configs for each access node id %w", err)
			}

			return nil
		}).
		Component("machine account config validator", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// @TODO use fallback logic for flowClient similar to DKG/QC contract clients
			flowClient, err := common.FlowClient(flowClientConfigs[0])
			if err != nil {
				return nil, fmt.Errorf("failed to get flow client connection option for access node (0): %s %w", flowClientConfigs[0].AccessAddress, err)
			}

			// disable balance checks for transient networks, which do not have transaction fees
			var opts []epochs.MachineAccountValidatorConfigOption
			if node.RootChainID.Transient() {
				opts = append(opts, epochs.WithoutBalanceChecks)
			}
			validator, err := epochs.NewMachineAccountConfigValidator(
				node.Logger,
				flowClient,
				flow.RoleCollection,
				*machineAccountInfo,
				machineAccountMetrics,
				opts...,
			)

			return validator, err
		}).
		Component("consensus committee", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// initialize consensus committee's membership state
			// This committee state is for the HotStuff follower, which follows the MAIN CONSENSUS Committee
			// Note: node.Me.NodeID() is not part of the consensus committee
			mainConsensusCommittee, err = committees.NewConsensusCommittee(node.State, node.Me.NodeID())
			node.ProtocolEvents.AddConsumer(mainConsensusCommittee)
			return mainConsensusCommittee, err
		}).
		Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// create a finalizer for updating the protocol
			// state when the follower detects newly finalized blocks
			finalizer := confinalizer.NewFinalizer(node.DB, node.Storage.Headers, followerState, node.Tracer)
			finalized, pending, err := recovery.FindLatest(node.State, node.Storage.Headers)
			if err != nil {
				return nil, fmt.Errorf("could not find latest finalized block and pending blocks to recover consensus follower: %w", err)
			}
			// creates a consensus follower with noop consumer as the notifier
			followerCore, err = consensus.NewFollower(
				node.Logger,
				node.Metrics.Mempool,
				node.Storage.Headers,
				finalizer,
				followerDistributor,
				node.FinalizedRootBlock.Header,
				node.RootQC,
				finalized,
				pending,
			)
			if err != nil {
				return nil, fmt.Errorf("could not create follower core logic: %w", err)
			}
			return followerCore, nil
		}).
		Component("follower engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			packer := hotsignature.NewConsensusSigDataPacker(mainConsensusCommittee)
			// initialize the verifier for the protocol consensus
			verifier := verification.NewCombinedVerifier(mainConsensusCommittee, packer)

			validator := validator.New(mainConsensusCommittee, verifier)

			var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector()
			if node.HeroCacheMetricsEnable {
				heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer)
			}

			core, err := followereng.NewComplianceCore(
				node.Logger,
				node.Metrics.Mempool,
				heroCacheCollector,
				followerDistributor,
				followerState,
				followerCore,
				validator,
				mainChainSyncCore,
				node.Tracer,
			)
			if err != nil {
				return nil, fmt.Errorf("could not create follower core: %w", err)
			}

			followerEng, err = followereng.NewComplianceLayer(
				node.Logger,
				node.EngineRegistry,
				node.Me,
				node.Metrics.Engine,
				node.Storage.Headers,
				node.LastFinalizedHeader,
				core,
				node.ComplianceConfig,
			)
			if err != nil {
				return nil, fmt.Errorf("could not create follower engine: %w", err)
			}
			followerDistributor.AddOnBlockFinalizedConsumer(followerEng.OnFinalizedBlock)

			return followerEng, nil
		}).
		Component("main chain sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			spamConfig, err := consync.NewSpamDetectionConfig()
			if err != nil {
				return nil, fmt.Errorf("could not initialize spam detection config: %w", err)
			}

			// create a block synchronization engine to handle follower getting out of sync
			sync, err := consync.New(
				node.Logger,
				node.Metrics.Engine,
				node.EngineRegistry,
				node.Me,
				node.State,
				node.Storage.Blocks,
				followerEng,
				mainChainSyncCore,
				node.SyncEngineIdentifierProvider,
				spamConfig,
			)
			if err != nil {
				return nil, fmt.Errorf("could not create synchronization engine: %w", err)
			}
			followerDistributor.AddFinalizationConsumer(sync)

			return sync, nil
		}).
		Component("ingestion engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// accepts inbound transactions and routes them into the epoch-scoped pools;
			// `ing` is also consumed by the RPC server component below
			ing, err = ingest.New(
				node.Logger,
				node.EngineRegistry,
				node.State,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				colMetrics,
				node.Me,
				node.RootChainID.Chain(),
				pools,
				ingestConf,
				addressRateLimiter,
			)
			return ing, err
		}).
		Component("transaction ingress rpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// gRPC front-end that forwards submitted transactions to the ingestion engine
			server := rpc.New(
				rpcConf,
				ing,
				node.Logger,
				node.RootChainID,
				apiRatelimits,
				apiBurstlimits,
			)
			return server, nil
		}).
		Component("collection provider engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// serves stored collections by ID to requesting Access/Execution nodes
			retrieve := func(collID flow.Identifier) (flow.Entity, error) {
				coll, err := node.Storage.Collections.ByID(collID)
				return coll, err
			}

			var collectionRequestMetrics module.HeroCacheMetrics = metrics.NewNoopCollector()
			if node.HeroCacheMetricsEnable {
				collectionRequestMetrics = metrics.CollectionRequestsQueueMetricFactory(node.MetricsRegisterer)
			}
			collectionRequestQueue := queue.NewHeroStore(maxCollectionRequestCacheSize, node.Logger, collectionRequestMetrics)

			return provider.New(
				node.Logger.With().Str("engine", "collection_provider").Logger(),
				node.Metrics.Engine,
				node.EngineRegistry,
				node.Me,
				node.State,
				collectionRequestQueue,
				collectionProviderWorkers,
				channels.ProvideCollections,
				filter.And(
					filter.IsValidCurrentEpochParticipantOrJoining,
					filter.HasRole[flow.Identity](flow.RoleAccess, flow.RoleExecution),
				),
				retrieve,
			)
		}).
		Component("pusher engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			// pushes finalized collection guarantees out to the network;
			// `push` is also consumed by the builder factory in the epoch manager below
			push, err = pusher.New(
				node.Logger,
				node.EngineRegistry,
				node.State,
				node.Metrics.Engine,
				colMetrics,
				node.Me,
				node.Storage.Collections,
				node.Storage.Transactions,
			)
			return push, err
		}).
		// Epoch manager encapsulates and manages epoch-dependent engines as we
		// transition between epochs
		Component("epoch manager", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
			clusterStateFactory, err := factories.NewClusterStateFactory(node.DB, node.Metrics.Cache, node.Tracer)
			if err != nil {
				return nil, err
			}

			// convert hex string flag values to addresses
			// NOTE(review): flow.HexToAddress appears to accept any hex string here —
			// presumably invalid payer addresses are silently converted; verify
			// whether flag validation is desired.
			unlimitedPayers := make([]flow.Address, 0, len(builderUnlimitedPayers))
			for _, payerStr := range builderUnlimitedPayers {
				payerAddr := flow.HexToAddress(payerStr)
				unlimitedPayers = append(unlimitedPayers, payerAddr)
			}

			builderFactory, err := factories.NewBuilderFactory(
				node.DB,
				node.State,
				node.Storage.Headers,
				node.Tracer,
				colMetrics,
				push,
				node.Logger,
				builder.WithMaxCollectionSize(maxCollectionSize),
				builder.WithMaxCollectionByteSize(maxCollectionByteSize),
				builder.WithMaxCollectionTotalGas(maxCollectionTotalGas),
				builder.WithExpiryBuffer(builderExpiryBuffer),
				builder.WithRateLimitDryRun(builderPayerRateLimitDryRun),
				builder.WithMaxPayerTransactionRate(builderPayerRateLimit),
				builder.WithUnlimitedPayers(unlimitedPayers...),
			)
			if err != nil {
				return nil, err
			}

			complianceEngineFactory, err := factories.NewComplianceEngineFactory(
				node.Logger,
				node.EngineRegistry,
				node.Me,
				colMetrics,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				node.State,
				node.Storage.Transactions,
				clusterComplianceConfig,
			)
			if err != nil {
				return nil, err
			}

			syncCoreFactory, err := factories.NewSyncCoreFactory(node.Logger, node.SyncCoreConfig)
			if err != nil {
				return nil, err
			}

			syncFactory, err := factories.NewSyncEngineFactory(
				node.Logger,
				node.Metrics.Engine,
				node.EngineRegistry,
				node.Me,
			)
			if err != nil {
				return nil, err
			}

			// one hotstuff metrics collector per cluster chain
			createMetrics := func(chainID flow.ChainID) module.HotstuffMetrics {
				return metrics.NewHotstuffCollector(chainID)
			}

			opts := []consensus.Option{
				consensus.WithStaticProposalDuration(hotstuffProposalDuration),
				consensus.WithMinTimeout(hotstuffMinTimeout),
				consensus.WithTimeoutAdjustmentFactor(hotstuffTimeoutAdjustmentFactor),
				consensus.WithHappyPathMaxRoundFailures(hotstuffHappyPathMaxRoundFailures),
			}

			// startupTime stays zero unless --hotstuff-startup-time was set and parsed
			if !startupTime.IsZero() {
				opts = append(opts, consensus.WithStartupTime(startupTime))
			}

			hotstuffFactory, err := factories.NewHotStuffFactory(
				node.Logger,
				node.Me,
				node.DB,
				node.State,
				node.Metrics.Engine,
				node.Metrics.Mempool,
				createMetrics,
				opts...,
			)
			if err != nil {
				return nil, err
			}

			signer := verification.NewStakingSigner(node.Me)

			// construct QC contract client
			qcContractClients, err := createQCContractClients(node, machineAccountInfo, flowClientConfigs)
			if err != nil {
				return nil, fmt.Errorf("could not create qc contract clients %w", err)
			}

			rootQCVoter := epochs.NewRootQCVoter(
				node.Logger,
				node.Me,
				signer,
				node.State,
				qcContractClients,
			)

			messageHubFactory := factories.NewMessageHubFactory(
				node.Logger,
				node.EngineRegistry,
				node.Me,
				node.Metrics.Engine,
				node.State,
			)

			factory := factories.NewEpochComponentsFactory(
				node.Me,
				pools,
				builderFactory,
				clusterStateFactory,
				hotstuffFactory,
				complianceEngineFactory,
				syncCoreFactory,
				syncFactory,
				messageHubFactory,
			)

			heightEvents := gadgets.NewHeights()
			node.ProtocolEvents.AddConsumer(heightEvents)

			clusterEvents := events.NewDistributor()

			manager, err := epochmgr.New(
				node.Logger,
				node.Me,
				node.State,
				pools,
				rootQCVoter,
				factory,
				heightEvents,
				clusterEvents,
			)
			if err != nil {
				return nil, fmt.Errorf("could not create epoch manager: %w", err)
			}

			// register the manager for protocol events
			node.ProtocolEvents.AddConsumer(manager)
			clusterEvents.AddConsumer(node.LibP2PNode)
			return manager, err
		})

	node, err := nodeBuilder.Build()
	if err != nil {
		nodeBuilder.Logger.Fatal().Err(err).Send()
	}
	// blocks until the node shuts down
	node.Run()
}
   649  
   650  // createQCContractClient creates QC contract client
   651  func createQCContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClient *client.Client, anID flow.Identifier) (module.QCContractClient, error) {
   652  
   653  	var qcContractClient module.QCContractClient
   654  
   655  	contracts := systemcontracts.SystemContractsForChain(node.RootChainID)
   656  	qcContractAddress := contracts.ClusterQC.Address.Hex()
   657  
   658  	// construct signer from private key
   659  	sk, err := sdkcrypto.DecodePrivateKey(machineAccountInfo.SigningAlgorithm, machineAccountInfo.EncodedPrivateKey)
   660  	if err != nil {
   661  		return nil, fmt.Errorf("could not decode private key from hex: %w", err)
   662  	}
   663  
   664  	txSigner, err := sdkcrypto.NewInMemorySigner(sk, machineAccountInfo.HashAlgorithm)
   665  	if err != nil {
   666  		return nil, fmt.Errorf("could not create in-memory signer: %w", err)
   667  	}
   668  
   669  	// create actual qc contract client, all flags and machine account info file found
   670  	qcContractClient = epochs.NewQCContractClient(
   671  		node.Logger,
   672  		flowClient,
   673  		anID,
   674  		node.Me.NodeID(),
   675  		machineAccountInfo.Address,
   676  		machineAccountInfo.KeyIndex,
   677  		qcContractAddress,
   678  		txSigner,
   679  	)
   680  
   681  	return qcContractClient, nil
   682  }
   683  
   684  // createQCContractClients creates priority ordered array of QCContractClient
   685  func createQCContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*common.FlowClientConfig) ([]module.QCContractClient, error) {
   686  	qcClients := make([]module.QCContractClient, 0)
   687  
   688  	for _, opt := range flowClientOpts {
   689  		flowClient, err := common.FlowClient(opt)
   690  		if err != nil {
   691  			return nil, fmt.Errorf("failed to create flow client for qc contract client with options: %s %w", flowClientOpts, err)
   692  		}
   693  
   694  		qcClient, err := createQCContractClient(node, machineAccountInfo, flowClient, opt.AccessNodeID)
   695  		if err != nil {
   696  			return nil, fmt.Errorf("failed to create qc contract client with flow client options: %s %w", flowClientOpts, err)
   697  		}
   698  
   699  		qcClients = append(qcClients, qcClient)
   700  	}
   701  	return qcClients, nil
   702  }