github.com/koko1123/flow-go-1@v0.29.6/cmd/execution_builder.go

     1  package cmd
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"os"
     8  	"path"
     9  	"path/filepath"
    10  	goruntime "runtime"
    11  	"strings"
    12  	"time"
    13  
    14  	awsconfig "github.com/aws/aws-sdk-go-v2/config"
    15  	"github.com/aws/aws-sdk-go-v2/service/s3"
    16  	"github.com/ipfs/go-cid"
    17  	badger "github.com/ipfs/go-ds-badger2"
    18  	"github.com/onflow/flow-core-contracts/lib/go/templates"
    19  	"github.com/onflow/go-bitswap"
    20  	"github.com/rs/zerolog"
    21  	"github.com/shirou/gopsutil/v3/cpu"
    22  	"github.com/shirou/gopsutil/v3/host"
    23  	"github.com/shirou/gopsutil/v3/mem"
    24  	"go.uber.org/atomic"
    25  
    26  	"github.com/koko1123/flow-go-1/admin/commands"
    27  	executionCommands "github.com/koko1123/flow-go-1/admin/commands/execution"
    28  	stateSyncCommands "github.com/koko1123/flow-go-1/admin/commands/state_synchronization"
    29  	storageCommands "github.com/koko1123/flow-go-1/admin/commands/storage"
    30  	uploaderCommands "github.com/koko1123/flow-go-1/admin/commands/uploader"
    31  	"github.com/koko1123/flow-go-1/consensus"
    32  	"github.com/koko1123/flow-go-1/consensus/hotstuff"
    33  	"github.com/koko1123/flow-go-1/consensus/hotstuff/committees"
    34  	"github.com/koko1123/flow-go-1/consensus/hotstuff/notifications/pubsub"
    35  	"github.com/koko1123/flow-go-1/consensus/hotstuff/signature"
    36  	"github.com/koko1123/flow-go-1/consensus/hotstuff/verification"
    37  	recovery "github.com/koko1123/flow-go-1/consensus/recovery/protocol"
    38  	followereng "github.com/koko1123/flow-go-1/engine/common/follower"
    39  	"github.com/koko1123/flow-go-1/engine/common/provider"
    40  	"github.com/koko1123/flow-go-1/engine/common/requester"
    41  	"github.com/koko1123/flow-go-1/engine/common/synchronization"
    42  	"github.com/koko1123/flow-go-1/engine/execution/checker"
    43  	"github.com/koko1123/flow-go-1/engine/execution/computation"
    44  	"github.com/koko1123/flow-go-1/engine/execution/computation/committer"
    45  	"github.com/koko1123/flow-go-1/engine/execution/ingestion"
    46  	"github.com/koko1123/flow-go-1/engine/execution/ingestion/uploader"
    47  	exeprovider "github.com/koko1123/flow-go-1/engine/execution/provider"
    48  	"github.com/koko1123/flow-go-1/engine/execution/rpc"
    49  	"github.com/koko1123/flow-go-1/engine/execution/state"
    50  	"github.com/koko1123/flow-go-1/engine/execution/state/bootstrap"
    51  	"github.com/koko1123/flow-go-1/engine/execution/state/delta"
    52  	"github.com/koko1123/flow-go-1/fvm"
    53  	"github.com/koko1123/flow-go-1/fvm/systemcontracts"
    54  	"github.com/koko1123/flow-go-1/ledger/common/pathfinder"
    55  	ledger "github.com/koko1123/flow-go-1/ledger/complete"
    56  	"github.com/koko1123/flow-go-1/ledger/complete/wal"
    57  	bootstrapFilenames "github.com/koko1123/flow-go-1/model/bootstrap"
    58  	"github.com/koko1123/flow-go-1/model/flow"
    59  	"github.com/koko1123/flow-go-1/model/flow/filter"
    60  	"github.com/koko1123/flow-go-1/module"
    61  	"github.com/koko1123/flow-go-1/module/blobs"
    62  	"github.com/koko1123/flow-go-1/module/buffer"
    63  	"github.com/koko1123/flow-go-1/module/chainsync"
    64  	"github.com/koko1123/flow-go-1/module/compliance"
    65  	"github.com/koko1123/flow-go-1/module/executiondatasync/execution_data"
    66  	exedataprovider "github.com/koko1123/flow-go-1/module/executiondatasync/provider"
    67  	"github.com/koko1123/flow-go-1/module/executiondatasync/pruner"
    68  	"github.com/koko1123/flow-go-1/module/executiondatasync/tracker"
    69  	finalizer "github.com/koko1123/flow-go-1/module/finalizer/consensus"
    70  	"github.com/koko1123/flow-go-1/module/mempool/queue"
    71  	"github.com/koko1123/flow-go-1/module/metrics"
    72  	"github.com/koko1123/flow-go-1/network"
    73  	"github.com/koko1123/flow-go-1/network/channels"
    74  	"github.com/koko1123/flow-go-1/network/p2p/blob"
    75  	"github.com/koko1123/flow-go-1/state/protocol"
    76  	badgerState "github.com/koko1123/flow-go-1/state/protocol/badger"
    77  	"github.com/koko1123/flow-go-1/state/protocol/blocktimer"
    78  	storageerr "github.com/koko1123/flow-go-1/storage"
    79  	storage "github.com/koko1123/flow-go-1/storage/badger"
    80  	"github.com/koko1123/flow-go-1/storage/badger/procedure"
    81  )
    82  
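        // Retry policy for the async block-data uploaders created in
        // LoadGCPBlockDataUploader and LoadS3BlockDataUploader: an upload is retried
        // up to blockDataUploaderMaxRetry times, with blockdataUploaderRetryTimeout
        // as the retry timeout (both are passed to uploader.NewAsyncUploader).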
    83  const (
    84  	blockDataUploaderMaxRetry     uint64 = 5
    85  	blockdataUploaderRetryTimeout        = 1 * time.Second
    86  )
    87  
    88  type ExecutionNodeBuilder struct {
    89  	*FlowNodeBuilder                  // the common configs for a node
    90  	exeConf          *ExecutionConfig // the configs and flags specific to the execution node
    91  }
    92  
    93  func NewExecutionNodeBuilder(nodeBuilder *FlowNodeBuilder) *ExecutionNodeBuilder {
    94  	return &ExecutionNodeBuilder{
    95  		FlowNodeBuilder: nodeBuilder,
    96  		exeConf:         &ExecutionConfig{},
    97  	}
    98  }
    99  
   100  func (builder *ExecutionNodeBuilder) LoadFlags() {
   101  	builder.FlowNodeBuilder.
   102  		ExtraFlags(builder.exeConf.SetupFlags).
   103  		ValidateFlags(builder.exeConf.ValidateFlags)
   104  }
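
        // Typical wiring, as a sketch (assumed; the actual execution node main()
        // lives outside this file and may differ):
        //
        //	nodeBuilder := FlowNode(flow.RoleExecution.String())
        //	exeBuilder := NewExecutionNodeBuilder(nodeBuilder)
        //	exeBuilder.LoadFlags()
        //	// ... scaffold initialization and flag parsing ...
        //	exeBuilder.LoadComponentsAndModules()
        //	node, err := nodeBuilder.Build()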
   105  
   106  // ExecutionNode contains the running modules and their loading code.
   107  type ExecutionNode struct {
   108  	builder *FlowNodeBuilder // This is needed for accessing the ShutdownFunc
   109  	exeConf *ExecutionConfig
   110  
   111  	collector               module.ExecutionMetrics
   112  	executionState          state.ExecutionState
   113  	followerState           protocol.MutableState
   114  	committee               hotstuff.Committee
   115  	ledgerStorage           *ledger.Ledger
   116  	events                  *storage.Events
   117  	serviceEvents           *storage.ServiceEvents
   118  	txResults               *storage.TransactionResults
   119  	results                 *storage.ExecutionResults
   120  	myReceipts              *storage.MyExecutionReceipts
   121  	providerEngine          *exeprovider.Engine
   122  	checkerEng              *checker.Engine
   123  	syncCore                *chainsync.Core
   124  	pendingBlocks           *buffer.PendingBlocks // used in follower engine
   125  	deltas                  *ingestion.Deltas
   126  	syncEngine              *synchronization.Engine
   127  	followerEng             *followereng.Engine // to sync blocks from consensus nodes
   128  	computationManager      *computation.Manager
   129  	collectionRequester     *requester.Engine
   130  	ingestionEng            *ingestion.Engine
   131  	finalizationDistributor *pubsub.FinalizationDistributor
   132  	finalizedHeader         *synchronization.FinalizedHeaderCache
   133  	checkAuthorizedAtBlock  func(blockID flow.Identifier) (bool, error)
   134  	diskWAL                 *wal.DiskWAL
   135  	blockDataUploader       *uploader.Manager
   136  	executionDataStore      execution_data.ExecutionDataStore
   137  	toTriggerCheckpoint     *atomic.Bool           // checkpoint trigger, controlled by the admin tool and listened to by the compactor
   138  	stopControl             *ingestion.StopControl // stops the node at a given block height
   139  	executionDataDatastore  *badger.Datastore
   140  	executionDataPruner     *pruner.Pruner
   141  	executionDataBlobstore  blobs.Blobstore
   142  	executionDataTracker    tracker.Storage
   143  	blobService             network.BlobService
   144  	blobserviceDependable   *module.ProxiedReadyDoneAware
   145  }
   146  
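        // LoadComponentsAndModules registers the execution node's admin commands,
        // modules, and components with the underlying FlowNodeBuilder. Modules are
        // bootstrapped first, in declaration order, before any components (see the
        // TODO below); each component loader returns a module.ReadyDoneAware so the
        // scaffold can manage its lifecycle.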
   147  func (builder *ExecutionNodeBuilder) LoadComponentsAndModules() {
   148  
   149  	exeNode := &ExecutionNode{
   150  		builder:             builder.FlowNodeBuilder,
   151  		exeConf:             builder.exeConf,
   152  		toTriggerCheckpoint: atomic.NewBool(false),
   153  	}
   154  
   155  	builder.FlowNodeBuilder.
   156  		AdminCommand("read-execution-data", func(config *NodeConfig) commands.AdminCommand {
   157  			return stateSyncCommands.NewReadExecutionDataCommand(exeNode.executionDataStore)
   158  		}).
   159  		AdminCommand("trigger-checkpoint", func(config *NodeConfig) commands.AdminCommand {
   160  			return executionCommands.NewTriggerCheckpointCommand(exeNode.toTriggerCheckpoint)
   161  		}).
   162  		AdminCommand("stop-at-height", func(config *NodeConfig) commands.AdminCommand {
   163  			return executionCommands.NewStopAtHeightCommand(exeNode.stopControl)
   164  		}).
   165  		AdminCommand("set-uploader-enabled", func(config *NodeConfig) commands.AdminCommand {
   166  			return uploaderCommands.NewToggleUploaderCommand(exeNode.blockDataUploader)
   167  		}).
   168  		AdminCommand("get-transactions", func(conf *NodeConfig) commands.AdminCommand {
   169  			return storageCommands.NewGetTransactionsCommand(conf.State, conf.Storage.Payloads, conf.Storage.Collections)
   170  		}).
   171  		Module("mutable follower state", exeNode.LoadMutableFollowerState).
   172  		Module("system specs", exeNode.LoadSystemSpecs).
   173  		Module("execution metrics", exeNode.LoadExecutionMetrics).
   174  		Module("sync core", exeNode.LoadSyncCore).
   175  		Module("execution receipts storage", exeNode.LoadExecutionReceiptsStorage).
   176  		Module("pending block cache", exeNode.LoadPendingBlockCache).
   177  		Module("state deltas mempool", exeNode.LoadDeltasMempool).
   178  		Module("authorization checking function", exeNode.LoadAuthorizationCheckingFunction).
   179  		Module("execution data datastore", exeNode.LoadExecutionDataDatastore).
   180  		Module("execution data getter", exeNode.LoadExecutionDataGetter).
   181  		Module("blobservice peer manager dependencies", exeNode.LoadBlobservicePeerManagerDependencies).
   182  		Module("bootstrap", exeNode.LoadBootstrapper).
   183  		Component("execution state ledger", exeNode.LoadExecutionStateLedger).
   184  
   185  		// TODO: Modules should be able to depend on components.
   186  		// Because all modules are always bootstrapped first, before components,
   187  		// it's not possible to have a module depend on a Component.
   188  		// This is the case for the StopControl, which needs to query the ExecutionState, which in turn needs the execution state ledger.
   189  		// I prefer to use a dummy component for now and keep the bootstrapping steps properly separated,
   190  		// so it will be easier to follow and refactor later.
   191  		Component("execution state", exeNode.LoadExecutionState).
   192  		Component("stop control", exeNode.LoadStopControl).
   193  		Component("execution state ledger WAL compactor", exeNode.LoadExecutionStateLedgerWALCompactor).
   194  		Component("execution data pruner", exeNode.LoadExecutionDataPruner).
   195  		Component("blob service", exeNode.LoadBlobService).
   196  		Component("block data upload manager", exeNode.LoadBlockUploaderManager).
   197  		Component("GCP block data uploader", exeNode.LoadGCPBlockDataUploader).
   198  		Component("S3 block data uploader", exeNode.LoadS3BlockDataUploader).
   199  		Component("provider engine", exeNode.LoadProviderEngine).
   200  		Component("checker engine", exeNode.LoadCheckerEngine).
   201  		Component("ingestion engine", exeNode.LoadIngestionEngine).
   202  		Component("follower engine", exeNode.LoadFollowerEngine).
   203  		Component("collection requester engine", exeNode.LoadCollectionRequesterEngine).
   204  		Component("receipt provider engine", exeNode.LoadReceiptProviderEngine).
   205  		Component("finalized snapshot", exeNode.LoadFinalizedSnapshot).
   206  		Component("synchronization engine", exeNode.LoadSynchronizationEngine).
   207  		Component("grpc server", exeNode.LoadGrpcServer)
   208  }
   209  
   210  func (exeNode *ExecutionNode) LoadMutableFollowerState(node *NodeConfig) error {
   211  	// For now, we only support state implementations from package badger.
   212  	// If we ever support different implementations, the following can be replaced by a type-aware factory
   213  	bState, ok := node.State.(*badgerState.State)
   214  	if !ok {
   215  		return fmt.Errorf("only implementations of type badger.State are currently supported but read-only state has type %T", node.State)
   216  	}
   217  	var err error
   218  	exeNode.followerState, err = badgerState.NewFollowerState(
   219  		bState,
   220  		node.Storage.Index,
   221  		node.Storage.Payloads,
   222  		node.Tracer,
   223  		node.ProtocolEvents,
   224  		blocktimer.DefaultBlockTimer,
   225  	)
   226  	return err
   227  }
   228  
   229  func (exeNode *ExecutionNode) LoadSystemSpecs(node *NodeConfig) error {
   230  	sysInfoLogger := node.Logger.With().Str("system", "specs").Logger()
   231  	err := logSysInfo(sysInfoLogger)
   232  	if err != nil {
   233  		sysInfoLogger.Error().Err(err).Msg("failed to log system specs")
   234  	}
   235  	return nil
   236  }
   237  
   238  func (exeNode *ExecutionNode) LoadExecutionMetrics(node *NodeConfig) error {
   239  	exeNode.collector = metrics.NewExecutionCollector(node.Tracer)
   240  
   241  	// report the highest executed block height as soon as possible
   242  	// this is guaranteed to exist because LoadBootstrapper has inserted
   243  	// the root block as an executed block
   244  	var height uint64
   245  	var blockID flow.Identifier
   246  	err := node.DB.View(procedure.GetHighestExecutedBlock(&height, &blockID))
   247  	if err != nil {
   248  		// database has not been bootstrapped yet
   249  		if errors.Is(err, storageerr.ErrNotFound) {
   250  			return nil
   251  		}
   252  		return fmt.Errorf("could not get highest executed block: %w", err)
   253  	}
   254  
   255  	exeNode.collector.ExecutionLastExecutedBlockHeight(height)
   256  	return nil
   257  }
   258  
   259  func (exeNode *ExecutionNode) LoadSyncCore(node *NodeConfig) error {
   260  	var err error
   261  	exeNode.syncCore, err = chainsync.New(node.Logger, node.SyncCoreConfig, metrics.NewChainSyncCollector())
   262  	return err
   263  }
   264  
   265  func (exeNode *ExecutionNode) LoadExecutionReceiptsStorage(
   266  	node *NodeConfig,
   267  ) error {
   268  	exeNode.results = storage.NewExecutionResults(node.Metrics.Cache, node.DB)
   269  	exeNode.myReceipts = storage.NewMyExecutionReceipts(node.Metrics.Cache, node.DB, node.Storage.Receipts.(*storage.ExecutionReceipts))
   270  	return nil
   271  }
   272  
   273  func (exeNode *ExecutionNode) LoadPendingBlockCache(node *NodeConfig) error {
   274  	exeNode.pendingBlocks = buffer.NewPendingBlocks() // for following main chain consensus
   275  	return nil
   276  }
   277  
   278  func (exeNode *ExecutionNode) LoadBlobService(
   279  	node *NodeConfig,
   280  ) (
   281  	module.ReadyDoneAware,
   282  	error,
   283  ) {
   284  	// build list of Access nodes that are allowed to request execution data from this node
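        	// The allow-list (exeConf.executionDataAllowedPeers) is a comma-separated
        	// list of hex-encoded node IDs; each entry must resolve to a current,
        	// non-ejected access node identity, otherwise loading fails.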
   285  	var allowedANs map[flow.Identifier]bool
   286  	if exeNode.exeConf.executionDataAllowedPeers != "" {
   287  		ids := strings.Split(exeNode.exeConf.executionDataAllowedPeers, ",")
   288  		allowedANs = make(map[flow.Identifier]bool, len(ids))
   289  		for _, idHex := range ids {
   290  			anID, err := flow.HexStringToIdentifier(idHex)
   291  			if err != nil {
   292  				return nil, fmt.Errorf("invalid node ID %s: %w", idHex, err)
   293  			}
   294  
   295  			id, ok := exeNode.builder.IdentityProvider.ByNodeID(anID)
   296  			if !ok {
   297  				return nil, fmt.Errorf("allowed node ID %s is not in identity list", idHex)
   298  			}
   299  
   300  			if id.Role != flow.RoleAccess {
   301  				return nil, fmt.Errorf("allowed node ID %s is not an access node", id.NodeID.String())
   302  			}
   303  
   304  			if id.Ejected {
   305  				return nil, fmt.Errorf("allowed node ID %s is ejected", id.NodeID.String())
   306  			}
   307  
   308  			allowedANs[anID] = true
   309  		}
   310  	}
   311  
   312  	opts := []network.BlobServiceOption{
   313  		blob.WithBitswapOptions(
   314  			// Only allow block requests from staked ENs and ANs on the allowedANs list (if set)
   315  			bitswap.WithPeerBlockRequestFilter(
   316  				blob.AuthorizedRequester(allowedANs, exeNode.builder.IdentityProvider, exeNode.builder.Logger),
   317  			),
   318  			bitswap.WithTracer(
   319  				blob.NewTracer(node.Logger.With().Str("blob_service", channels.ExecutionDataService.String()).Logger()),
   320  			),
   321  		),
   322  	}
   323  
   324  	if exeNode.exeConf.blobstoreRateLimit > 0 && exeNode.exeConf.blobstoreBurstLimit > 0 {
   325  		opts = append(opts, blob.WithRateLimit(float64(exeNode.exeConf.blobstoreRateLimit), exeNode.exeConf.blobstoreBurstLimit))
   326  	}
   327  
   328  	bs, err := node.Network.RegisterBlobService(channels.ExecutionDataService, exeNode.executionDataDatastore, opts...)
   329  	if err != nil {
   330  		return nil, fmt.Errorf("failed to register blob service: %w", err)
   331  	}
   332  	exeNode.blobService = bs
   333  
   334  	// add the blobservice to the ReadyDoneAware dependency passed to the peer manager;
   335  	// this configures the peer manager to wait for the blobservice to be ready before starting
   336  	exeNode.blobserviceDependable.Init(bs)
   337  
   338  	// blob service's lifecycle is managed by the network layer
   339  	return &module.NoopReadyDoneAware{}, nil
   340  }
   341  
   342  func (exeNode *ExecutionNode) LoadBlockUploaderManager(
   343  	node *NodeConfig,
   344  ) (
   345  	module.ReadyDoneAware,
   346  	error,
   347  ) {
   348  	// blockDataUploader isn't a component, but needs to be initialized after the tracer, which is
   349  	// a component.
   350  	exeNode.blockDataUploader = uploader.NewManager(exeNode.builder.Tracer)
   351  	return &module.NoopReadyDoneAware{}, nil
   352  }
   353  
   354  func (exeNode *ExecutionNode) LoadGCPBlockDataUploader(
   355  	node *NodeConfig,
   356  ) (
   357  	module.ReadyDoneAware,
   358  	error,
   359  ) {
   360  	// Since the RetryableAsyncUploaderWrapper relies on the execution data service,
   361  	// we should create it only after the execution data service is fully set up.
   362  	if !exeNode.exeConf.enableBlockDataUpload || exeNode.exeConf.gcpBucketName == "" {
   363  		// Since we don't have conditional component creation, we just use a Noop one.
   364  		// Its functions will be called once per startup/shutdown - a non-measurable performance penalty.
   365  		// blockDataUploader will stay nil, which disables calling the uploader at all.
   366  		return &module.NoopReadyDoneAware{}, nil
   367  	}
   368  
   369  	logger := node.Logger.With().Str("component_name", "gcp_block_data_uploader").Logger()
   370  	gcpBucketUploader, err := uploader.NewGCPBucketUploader(
   371  		context.Background(),
   372  		exeNode.exeConf.gcpBucketName,
   373  		logger,
   374  	)
   375  	if err != nil {
   376  		return nil, fmt.Errorf("cannot create GCP Bucket uploader: %w", err)
   377  	}
   378  
   379  	asyncUploader := uploader.NewAsyncUploader(
   380  		gcpBucketUploader,
   381  		blockdataUploaderRetryTimeout,
   382  		blockDataUploaderMaxRetry,
   383  		logger,
   384  		exeNode.collector,
   385  	)
   386  
   387  	// Setting up RetryableUploader for GCP uploader
   388  	retryableUploader := uploader.NewBadgerRetryableUploaderWrapper(
   389  		asyncUploader,
   390  		node.Storage.Blocks,
   391  		node.Storage.Commits,
   392  		node.Storage.Collections,
   393  		exeNode.events,
   394  		exeNode.results,
   395  		exeNode.txResults,
   396  		storage.NewComputationResultUploadStatus(node.DB),
   397  		execution_data.NewDownloader(exeNode.blobService),
   398  		exeNode.collector)
   399  	if retryableUploader == nil {
   400  		return nil, errors.New("failed to create ComputationResult upload status store")
   401  	}
   402  
   403  	exeNode.blockDataUploader.AddUploader(retryableUploader)
   404  
   405  	return retryableUploader, nil
   406  }
   407  
   408  func (exeNode *ExecutionNode) LoadS3BlockDataUploader(
   409  	node *NodeConfig,
   410  ) (
   411  	module.ReadyDoneAware,
   412  	error,
   413  ) {
   414  	if !exeNode.exeConf.enableBlockDataUpload || exeNode.exeConf.s3BucketName == "" {
   415  		// Since we don't have conditional component creation, we just use a Noop one.
   416  		// Its functions will be called once per startup/shutdown - a non-measurable performance penalty.
   417  		// blockDataUploader will stay nil, which disables calling the uploader at all.
   418  		return &module.NoopReadyDoneAware{}, nil
   419  	}
   420  	logger := node.Logger.With().Str("component_name", "s3_block_data_uploader").Logger()
   421  
   422  	ctx := context.Background()
   423  	config, err := awsconfig.LoadDefaultConfig(ctx)
   424  	if err != nil {
   425  		return nil, fmt.Errorf("failed to load AWS configuration: %w", err)
   426  	}
   427  
   428  	client := s3.NewFromConfig(config)
   429  	s3Uploader := uploader.NewS3Uploader(
   430  		ctx,
   431  		client,
   432  		exeNode.exeConf.s3BucketName,
   433  		logger,
   434  	)
   435  	asyncUploader := uploader.NewAsyncUploader(
   436  		s3Uploader,
   437  		blockdataUploaderRetryTimeout,
   438  		blockDataUploaderMaxRetry,
   439  		logger,
   440  		exeNode.collector,
   441  	)
   442  
   443  	// We are not enabling the RetryableUploader for the S3 uploader for now. When we need
   444  	// upload retry for multiple uploaders, we will need to use a different BadgerDB key prefix for each.
   445  	exeNode.blockDataUploader.AddUploader(asyncUploader)
   446  
   447  	return asyncUploader, nil
   448  }
   449  
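        // LoadProviderEngine builds the computation manager and the chunk data pack
        // provider engine, then cross-checks the epoch counter reported by the
        // FlowEpoch smart contract against the protocol state's counter at the
        // latest executed block; a mismatch is only logged, since the two can
        // legitimately diverge right after a spork.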
   450  func (exeNode *ExecutionNode) LoadProviderEngine(
   451  	node *NodeConfig,
   452  ) (
   453  	module.ReadyDoneAware,
   454  	error,
   455  ) {
   456  	if exeNode.blobService == nil {
   457  		return nil, errors.New("blob service is not initialized")
   458  	}
   459  
   460  	var providerMetrics module.ExecutionDataProviderMetrics = metrics.NewNoopCollector()
   461  	if node.MetricsEnabled {
   462  		providerMetrics = metrics.NewExecutionDataProviderCollector()
   463  	}
   464  
   465  	executionDataProvider := exedataprovider.NewProvider(
   466  		node.Logger,
   467  		providerMetrics,
   468  		execution_data.DefaultSerializer,
   469  		exeNode.blobService,
   470  		exeNode.executionDataTracker,
   471  	)
   472  
   473  	vmCtx := fvm.NewContext(node.FvmOptions...)
   474  
   475  	ledgerViewCommitter := committer.NewLedgerViewCommitter(exeNode.ledgerStorage, node.Tracer)
   476  	manager, err := computation.New(
   477  		node.Logger,
   478  		exeNode.collector,
   479  		node.Tracer,
   480  		node.Me,
   481  		node.State,
   482  		vmCtx,
   483  		ledgerViewCommitter,
   484  		executionDataProvider,
   485  		exeNode.exeConf.computationConfig,
   486  	)
   487  	if err != nil {
   488  		return nil, err
   489  	}
   490  	exeNode.computationManager = manager
   491  
   492  	var chunkDataPackRequestQueueMetrics module.HeroCacheMetrics = metrics.NewNoopCollector()
   493  	if node.HeroCacheMetricsEnable {
   494  		chunkDataPackRequestQueueMetrics = metrics.ChunkDataPackRequestQueueMetricsFactory(node.MetricsRegisterer)
   495  	}
   496  	chdpReqQueue := queue.NewHeroStore(exeNode.exeConf.chunkDataPackRequestsCacheSize, node.Logger, chunkDataPackRequestQueueMetrics)
   497  	exeNode.providerEngine, err = exeprovider.New(
   498  		node.Logger,
   499  		node.Tracer,
   500  		node.Network,
   501  		node.State,
   502  		exeNode.executionState,
   503  		exeNode.collector,
   504  		exeNode.checkAuthorizedAtBlock,
   505  		chdpReqQueue,
   506  		exeNode.exeConf.chunkDataPackRequestWorkers,
   507  		exeNode.exeConf.chunkDataPackQueryTimeout,
   508  		exeNode.exeConf.chunkDataPackDeliveryTimeout,
   509  	)
   510  	if err != nil {
   511  		return nil, err
   512  	}
   513  
   514  	// Get latest executed block and a view at that block
   515  	ctx := context.Background()
   516  	_, blockID, err := exeNode.executionState.GetHighestExecutedBlockID(ctx)
   517  	if err != nil {
   518  		return nil, fmt.Errorf("cannot get the latest executed block id: %w", err)
   519  	}
   520  	stateCommit, err := exeNode.executionState.StateCommitmentByBlockID(ctx, blockID)
   521  	if err != nil {
   522  		return nil, fmt.Errorf("cannot get the state commitment at latest executed block id %s: %w", blockID.String(), err)
   523  	}
   524  	blockView := exeNode.executionState.NewView(stateCommit)
   525  
   526  	// Get the epoch counter from the smart contract at the last executed block.
   527  	contractEpochCounter, err := getContractEpochCounter(exeNode.computationManager.VM(), vmCtx, blockView)
   528  	// Failing to fetch the epoch counter from the smart contract is a fatal error.
   529  	if err != nil {
   530  		return nil, fmt.Errorf("cannot get epoch counter from the smart contract at block %s: %w", blockID.String(), err)
   531  	}
   532  
   533  	// Get the epoch counter from the protocol state, at the same block.
   534  	protocolStateEpochCounter, err := node.State.
   535  		AtBlockID(blockID).
   536  		Epochs().
   537  		Current().
   538  		Counter()
   539  	// Failing to fetch the epoch counter from the protocol state is a fatal error.
   540  	if err != nil {
   541  		return nil, fmt.Errorf("cannot get epoch counter from the protocol state at block %s: %w", blockID.String(), err)
   542  	}
   543  
   544  	l := node.Logger.With().
   545  		Str("component", "provider engine").
   546  		Uint64("contractEpochCounter", contractEpochCounter).
   547  		Uint64("protocolStateEpochCounter", protocolStateEpochCounter).
   548  		Str("blockID", blockID.String()).
   549  		Logger()
   550  
   551  	if contractEpochCounter != protocolStateEpochCounter {
   552  		// Do not error: immediately following a spork, the two counters will mismatch
   553  		// until the resetEpoch transaction is submitted.
   554  		l.Warn().
   555  			Msg("Epoch counter from the FlowEpoch smart contract and from the protocol state mismatch!")
   556  	} else {
   557  		l.Info().
   558  			Msg("Epoch counter from the FlowEpoch smart contract and from the protocol state match.")
   559  	}
   560  
   561  	return exeNode.providerEngine, nil
   562  }
   563  
   564  func (exeNode *ExecutionNode) LoadDeltasMempool(node *NodeConfig) error {
   565  	var err error
   566  	exeNode.deltas, err = ingestion.NewDeltas(exeNode.exeConf.stateDeltasLimit)
   567  	return err
   568  }
   569  
   570  func (exeNode *ExecutionNode) LoadAuthorizationCheckingFunction(
   571  	node *NodeConfig,
   572  ) error {
   573  
   574  	exeNode.checkAuthorizedAtBlock = func(blockID flow.Identifier) (bool, error) {
   575  		return protocol.IsNodeAuthorizedAt(node.State.AtBlockID(blockID), node.Me.NodeID())
   576  	}
   577  	return nil
   578  }
   579  
   580  func (exeNode *ExecutionNode) LoadExecutionDataDatastore(
   581  	node *NodeConfig,
   582  ) error {
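        	// Layout under executionDataDir, as used in this file: "blobstore" holds
        	// the badger datastore created here, and "tracker" (created in
        	// LoadExecutionDataPruner) holds the execution data tracker storage.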
   583  	datastoreDir := filepath.Join(exeNode.exeConf.executionDataDir, "blobstore")
   584  	err := os.MkdirAll(datastoreDir, 0700)
   585  	if err != nil {
   586  		return err
   587  	}
   588  	dsOpts := &badger.DefaultOptions
   589  	ds, err := badger.NewDatastore(datastoreDir, dsOpts)
   590  	if err != nil {
   591  		return err
   592  	}
   593  	exeNode.executionDataDatastore = ds
   594  	exeNode.builder.ShutdownFunc(ds.Close)
   595  	return nil
   596  }
   597  
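        // LoadBlobservicePeerManagerDependencies creates a proxy ReadyDoneAware and
        // registers it with the peer manager's dependency list; LoadBlobService
        // later initializes the proxy with the real blob service, so the peer
        // manager waits for the blob service to be ready before starting.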
   598  func (exeNode *ExecutionNode) LoadBlobservicePeerManagerDependencies(node *NodeConfig) error {
   599  	exeNode.blobserviceDependable = module.NewProxiedReadyDoneAware()
   600  	exeNode.builder.PeerManagerDependencies.Add(exeNode.blobserviceDependable)
   601  	return nil
   602  }
   603  
   604  func (exeNode *ExecutionNode) LoadExecutionDataGetter(node *NodeConfig) error {
   605  	exeNode.executionDataBlobstore = blobs.NewBlobstore(exeNode.executionDataDatastore)
   606  	exeNode.executionDataStore = execution_data.NewExecutionDataStore(exeNode.executionDataBlobstore, execution_data.DefaultSerializer)
   607  	return nil
   608  }
   609  
   610  func (exeNode *ExecutionNode) LoadExecutionState(
   611  	node *NodeConfig,
   612  ) (
   613  	module.ReadyDoneAware,
   614  	error,
   615  ) {
   616  
   617  	chunkDataPacks := storage.NewChunkDataPacks(node.Metrics.Cache, node.DB, node.Storage.Collections, exeNode.exeConf.chunkDataPackCacheSize)
   618  
   619  	// Needed by the gRPC server; make sure to assign these to the ExecutionNode-scoped fields
   620  	exeNode.events = storage.NewEvents(node.Metrics.Cache, node.DB)
   621  	exeNode.serviceEvents = storage.NewServiceEvents(node.Metrics.Cache, node.DB)
   622  	exeNode.txResults = storage.NewTransactionResults(node.Metrics.Cache, node.DB, exeNode.exeConf.transactionResultsCacheSize)
   623  
   624  	exeNode.executionState = state.NewExecutionState(
   625  		exeNode.ledgerStorage,
   626  		node.Storage.Commits,
   627  		node.Storage.Blocks,
   628  		node.Storage.Headers,
   629  		node.Storage.Collections,
   630  		chunkDataPacks,
   631  		exeNode.results,
   632  		exeNode.myReceipts,
   633  		exeNode.events,
   634  		exeNode.serviceEvents,
   635  		exeNode.txResults,
   636  		node.DB,
   637  		node.Tracer,
   638  	)
   639  
   640  	return &module.NoopReadyDoneAware{}, nil
   641  }
   642  
   643  func (exeNode *ExecutionNode) LoadStopControl(
   644  	node *NodeConfig,
   645  ) (
   646  	module.ReadyDoneAware,
   647  	error,
   648  ) {
   649  	lastExecutedHeight, _, err := exeNode.executionState.GetHighestExecutedBlockID(context.TODO())
   650  	if err != nil {
   651  		return nil, fmt.Errorf("cannot get the latest executed block height for stop control: %w", err)
   652  	}
   653  
   654  	exeNode.stopControl = ingestion.NewStopControl(
   655  		exeNode.builder.Logger.With().Str("component", "stop_control").Logger(),
   656  		exeNode.exeConf.pauseExecution,
   657  		lastExecutedHeight)
   658  
   659  	return &module.NoopReadyDoneAware{}, nil
   660  }
   661  
   662  func (exeNode *ExecutionNode) LoadExecutionStateLedger(
   663  	node *NodeConfig,
   664  ) (
   665  	module.ReadyDoneAware,
   666  	error,
   667  ) {
   668  	// The DiskWAL is a dependent component because we need to ensure
   669  	// that all WAL updates are completed before closing the opened WAL segment.
   670  	var err error
   671  	exeNode.diskWAL, err = wal.NewDiskWAL(node.Logger.With().Str("subcomponent", "wal").Logger(),
   672  		node.MetricsRegisterer, exeNode.collector, exeNode.exeConf.triedir, int(exeNode.exeConf.mTrieCacheSize), pathfinder.PathByteSize, wal.SegmentSize)
   673  	if err != nil {
   674  		return nil, fmt.Errorf("failed to initialize wal: %w", err)
   675  	}
   676  
   677  	exeNode.ledgerStorage, err = ledger.NewLedger(exeNode.diskWAL, int(exeNode.exeConf.mTrieCacheSize), exeNode.collector, node.Logger.With().Str("subcomponent",
   678  		"ledger").Logger(), ledger.DefaultPathFinderVersion)
   679  	return exeNode.ledgerStorage, err
   680  }
   681  
   682  func (exeNode *ExecutionNode) LoadExecutionStateLedgerWALCompactor(
   683  	node *NodeConfig,
   684  ) (
   685  	module.ReadyDoneAware,
   686  	error,
   687  ) {
   688  	return ledger.NewCompactor(
   689  		exeNode.ledgerStorage,
   690  		exeNode.diskWAL,
   691  		node.Logger.With().Str("subcomponent", "checkpointer").Logger(),
   692  		uint(exeNode.exeConf.mTrieCacheSize),
   693  		exeNode.exeConf.checkpointDistance,
   694  		exeNode.exeConf.checkpointsToKeep,
   695  		exeNode.toTriggerCheckpoint, // the compactor listens for this signal from the admin tool to force-trigger checkpointing
   696  	)
   697  }
   698  
   699  func (exeNode *ExecutionNode) LoadExecutionDataPruner(
   700  	node *NodeConfig,
   701  ) (
   702  	module.ReadyDoneAware,
   703  	error,
   704  ) {
   705  	sealed, err := node.State.Sealed().Head()
   706  	if err != nil {
   707  		return nil, fmt.Errorf("cannot get the sealed block: %w", err)
   708  	}
   709  
   710  	trackerDir := filepath.Join(exeNode.exeConf.executionDataDir, "tracker")
   711  	exeNode.executionDataTracker, err = tracker.OpenStorage(
   712  		trackerDir,
   713  		sealed.Height,
   714  		node.Logger,
   715  		tracker.WithPruneCallback(func(c cid.Cid) error {
   716  			// TODO: use a proper context here
   717  			return exeNode.executionDataBlobstore.DeleteBlob(context.TODO(), c)
   718  		}),
   719  	)
   720  	if err != nil {
   721  		return nil, err
   722  	}
   723  
   724  	// by default, pruning is disabled
   725  	if exeNode.exeConf.executionDataPrunerHeightRangeTarget == 0 {
   726  		return &module.NoopReadyDoneAware{}, nil
   727  	}
   728  
   729  	var prunerMetrics module.ExecutionDataPrunerMetrics = metrics.NewNoopCollector()
   730  	if node.MetricsEnabled {
   731  		prunerMetrics = metrics.NewExecutionDataPrunerCollector()
   732  	}
   733  
   734  	exeNode.executionDataPruner, err = pruner.NewPruner(
   735  		node.Logger,
   736  		prunerMetrics,
   737  		exeNode.executionDataTracker,
   738  		pruner.WithPruneCallback(func(ctx context.Context) error {
   739  			return exeNode.executionDataDatastore.CollectGarbage(ctx)
   740  		}),
   741  		pruner.WithHeightRangeTarget(exeNode.exeConf.executionDataPrunerHeightRangeTarget),
   742  		pruner.WithThreshold(exeNode.exeConf.executionDataPrunerThreshold),
   743  	)
   744  	return exeNode.executionDataPruner, err
   745  }
   746  
   747  func (exeNode *ExecutionNode) LoadCheckerEngine(
   748  	node *NodeConfig,
   749  ) (
   750  	module.ReadyDoneAware,
   751  	error,
   752  ) {
   753  	exeNode.checkerEng = checker.New(
   754  		node.Logger,
   755  		node.State,
   756  		exeNode.executionState,
   757  		node.Storage.Seals,
   758  	)
   759  	return exeNode.checkerEng, nil
   760  }
   761  
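        // LoadIngestionEngine creates the collection requester and the ingestion
        // engine and wires them together: the requester fetches the collections
        // needed for execution and hands them to the ingestion engine's
        // OnCollection handler (see the mutual-dependency TODO below).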
   762  func (exeNode *ExecutionNode) LoadIngestionEngine(
   763  	node *NodeConfig,
   764  ) (
   765  	module.ReadyDoneAware,
   766  	error,
   767  ) {
   768  	var err error
   769  	exeNode.collectionRequester, err = requester.New(node.Logger, node.Metrics.Engine, node.Network, node.Me, node.State,
   770  		channels.RequestCollections,
   771  		filter.Any,
   772  		func() flow.Entity { return &flow.Collection{} },
   773  		// we are manually triggering batches in execution, but let's still send off a batch once a minute, as a safety net for the sake of retries
   774  		requester.WithBatchInterval(exeNode.exeConf.requestInterval),
   775  		// a collection's consistency can be verified by checking its hash, which comes from a trusted source (blocks from the consensus follower),
   776  		// hence we do not need to check the origin
   777  		requester.WithValidateStaking(false),
   778  	)
   779  
   780  	if err != nil {
   781  		return nil, fmt.Errorf("could not create requester engine: %w", err)
   782  	}
   783  
   784  	preferredExeFilter := filter.Any
   785  	preferredExeNodeID, err := flow.HexStringToIdentifier(exeNode.exeConf.preferredExeNodeIDStr)
   786  	if err == nil {
   787  		node.Logger.Info().Hex("preferred_exe_node_id", preferredExeNodeID[:]).Msg("starting with preferred exe sync node")
   788  		preferredExeFilter = filter.HasNodeID(preferredExeNodeID)
   789  	} else if exeNode.exeConf.preferredExeNodeIDStr != "" {
   790  		node.Logger.Debug().Str("preferred_exe_node_id_string", exeNode.exeConf.preferredExeNodeIDStr).Msg("could not parse exe node id, starting WITHOUT preferred exe sync node")
   791  	}
   792  
   793  	exeNode.ingestionEng, err = ingestion.New(
   794  		node.Logger,
   795  		node.Network,
   796  		node.Me,
   797  		exeNode.collectionRequester,
   798  		node.State,
   799  		node.Storage.Blocks,
   800  		node.Storage.Collections,
   801  		exeNode.events,
   802  		exeNode.serviceEvents,
   803  		exeNode.txResults,
   804  		exeNode.computationManager,
   805  		exeNode.providerEngine,
   806  		exeNode.executionState,
   807  		exeNode.collector,
   808  		node.Tracer,
   809  		exeNode.exeConf.extensiveLog,
   810  		preferredExeFilter,
   811  		exeNode.deltas,
   812  		exeNode.exeConf.syncThreshold,
   813  		exeNode.exeConf.syncFast,
   814  		exeNode.checkAuthorizedAtBlock,
   815  		exeNode.executionDataPruner,
   816  		exeNode.blockDataUploader,
   817  		exeNode.stopControl,
   818  	)
   819  
   820  	// TODO: we should solve these mutual dependencies better
   821  	// => https://github.com/dapperlabs/flow-go/issues/4360
   822  	exeNode.collectionRequester = exeNode.collectionRequester.WithHandle(exeNode.ingestionEng.OnCollection)
   823  
   824  	node.ProtocolEvents.AddConsumer(exeNode.ingestionEng)
   825  
   826  	return exeNode.ingestionEng, err
   827  }
   828  
   829  func (exeNode *ExecutionNode) LoadFollowerEngine(
   830  	node *NodeConfig,
   831  ) (
   832  	module.ReadyDoneAware,
   833  	error,
   834  ) {
   835  	// initialize cleaner for DB
   836  	cleaner := storage.NewCleaner(node.Logger, node.DB, node.Metrics.CleanCollector, flow.DefaultValueLogGCFrequency)
   837  
   838  	// create a finalizer that handles updating the protocol
   839  	// state when the follower detects newly finalized blocks
   840  	final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, exeNode.followerState, node.Tracer)
   841  
   842  	// initialize consensus committee's membership state
   843  	// This committee state is for the HotStuff follower, which follows the MAIN CONSENSUS committee.
   844  	// Note: node.Me.NodeID() is not part of the consensus committee
   845  	var err error
   846  	exeNode.committee, err = committees.NewConsensusCommittee(node.State, node.Me.NodeID())
   847  	if err != nil {
   848  		return nil, fmt.Errorf("could not create Committee state for main consensus: %w", err)
   849  	}
   850  
   851  	packer := signature.NewConsensusSigDataPacker(exeNode.committee)
   852  	// initialize the verifier for the protocol consensus
   853  	verifier := verification.NewCombinedVerifier(exeNode.committee, packer)
   854  
   855  	finalized, pending, err := recovery.FindLatest(node.State, node.Storage.Headers)
   856  	if err != nil {
   857  		return nil, fmt.Errorf("could not find latest finalized block and pending blocks to recover consensus follower: %w", err)
   858  	}
   859  
   860  	exeNode.finalizationDistributor = pubsub.NewFinalizationDistributor()
   861  	exeNode.finalizationDistributor.AddConsumer(exeNode.checkerEng)
   862  
   863  	// create a consensus follower with the finalization distributor as the notifier,
   864  	// so that its consumers get notified upon each new finalized block
   865  	followerCore, err := consensus.NewFollower(node.Logger, exeNode.committee, node.Storage.Headers, final, verifier, exeNode.finalizationDistributor, node.RootBlock.Header, node.RootQC, finalized, pending)
   866  	if err != nil {
   867  		return nil, fmt.Errorf("could not create follower core logic: %w", err)
   868  	}
   869  
   870  	exeNode.followerEng, err = followereng.New(
   871  		node.Logger,
   872  		node.Network,
   873  		node.Me,
   874  		node.Metrics.Engine,
   875  		node.Metrics.Mempool,
   876  		cleaner,
   877  		node.Storage.Headers,
   878  		node.Storage.Payloads,
   879  		exeNode.followerState,
   880  		exeNode.pendingBlocks,
   881  		followerCore,
   882  		exeNode.syncCore,
   883  		node.Tracer,
   884  		followereng.WithComplianceOptions(compliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)),
   885  	)
   886  	if err != nil {
   887  		return nil, fmt.Errorf("could not create follower engine: %w", err)
   888  	}
   889  
   890  	return exeNode.followerEng, nil
   891  }
   892  
   893  func (exeNode *ExecutionNode) LoadCollectionRequesterEngine(
   894  	node *NodeConfig,
   895  ) (
   896  	module.ReadyDoneAware,
   897  	error,
   898  ) {
   899  	// We initialize the requester engine inside the ingestion engine due to the mutual dependency. However, in
   900  	// order for it to properly start and shut down, we should still return it as its own engine here, so it can
   901  	// be handled by the scaffold.
   902  	return exeNode.collectionRequester, nil
   903  }
   904  
   905  func (exeNode *ExecutionNode) LoadReceiptProviderEngine(
   906  	node *NodeConfig,
   907  ) (
   908  	module.ReadyDoneAware,
   909  	error,
   910  ) {
   911  	retrieve := func(blockID flow.Identifier) (flow.Entity, error) {
   912  		return exeNode.myReceipts.MyReceipt(blockID)
   913  	}
   914  
   915  	var receiptRequestQueueMetric module.HeroCacheMetrics = metrics.NewNoopCollector()
   916  	if node.HeroCacheMetricsEnable {
   917  		receiptRequestQueueMetric = metrics.ReceiptRequestsQueueMetricFactory(node.MetricsRegisterer)
   918  	}
   919  	receiptRequestQueue := queue.NewHeroStore(exeNode.exeConf.receiptRequestsCacheSize, node.Logger, receiptRequestQueueMetric)
   920  
   921  	eng, err := provider.New(
   922  		node.Logger,
   923  		node.Metrics.Engine,
   924  		node.Network,
   925  		node.Me,
   926  		node.State,
   927  		receiptRequestQueue,
   928  		exeNode.exeConf.receiptRequestWorkers,
   929  		channels.ProvideReceiptsByBlockID,
   930  		filter.HasRole(flow.RoleConsensus),
   931  		retrieve,
   932  	)
   933  	return eng, err
   934  }
   935  
   936  func (exeNode *ExecutionNode) LoadFinalizedSnapshot(
   937  	node *NodeConfig,
   938  ) (
   939  	module.ReadyDoneAware,
   940  	error,
   941  ) {
   942  	var err error
   943  	exeNode.finalizedHeader, err = synchronization.NewFinalizedHeaderCache(node.Logger, node.State, exeNode.finalizationDistributor)
   944  	if err != nil {
   945  		return nil, fmt.Errorf("could not create finalized snapshot cache: %w", err)
   946  	}
   947  
   948  	return exeNode.finalizedHeader, nil
   949  }
   950  
   951  func (exeNode *ExecutionNode) LoadSynchronizationEngine(
   952  	node *NodeConfig,
   953  ) (
   954  	module.ReadyDoneAware,
   955  	error,
   956  ) {
   957  	// initialize the synchronization engine
   958  	var err error
   959  	exeNode.syncEngine, err = synchronization.New(
   960  		node.Logger,
   961  		node.Metrics.Engine,
   962  		node.Network,
   963  		node.Me,
   964  		node.Storage.Blocks,
   965  		exeNode.followerEng,
   966  		exeNode.syncCore,
   967  		exeNode.finalizedHeader,
   968  		node.SyncEngineIdentifierProvider,
   969  	)
   970  	if err != nil {
   971  		return nil, fmt.Errorf("could not initialize synchronization engine: %w", err)
   972  	}
   973  
   974  	return exeNode.syncEngine, nil
   975  }
   976  
   977  func (exeNode *ExecutionNode) LoadGrpcServer(
   978  	node *NodeConfig,
   979  ) (
   980  	module.ReadyDoneAware,
   981  	error,
   982  ) {
   983  	return rpc.New(
   984  		node.Logger,
   985  		exeNode.exeConf.rpcConf,
   986  		exeNode.ingestionEng,
   987  		node.Storage.Headers,
   988  		node.State,
   989  		exeNode.events,
   990  		exeNode.results,
   991  		exeNode.txResults,
   992  		node.Storage.Commits,
   993  		node.RootChainID,
   994  		signature.NewBlockSignerDecoder(exeNode.committee),
   995  		exeNode.exeConf.apiRatelimits,
   996  		exeNode.exeConf.apiBurstlimits,
   997  	), nil
   998  }
   999  
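        // LoadBootstrapper bootstraps the execution database on first start-up: it
        // copies the bootstrap checkpoint into the trie directory and seeds the
        // database with the root block's state commitment. On later start-ups it
        // only verifies that the stored root state commitment matches the one in
        // the bootstrap folder.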
  1000  func (exeNode *ExecutionNode) LoadBootstrapper(node *NodeConfig) error {
  1001  
  1002  	// check if the execution database already exists
  1003  	bootstrapper := bootstrap.NewBootstrapper(node.Logger)
  1004  
  1005  	commit, bootstrapped, err := bootstrapper.IsBootstrapped(node.DB)
  1006  	if err != nil {
  1007  		return fmt.Errorf("could not query database to know whether database has been bootstrapped: %w", err)
  1008  	}
  1009  
  1010  	// if the execution database does not exist, then we need to bootstrap the execution database.
  1011  	if !bootstrapped {
  1012  		// when bootstrapping, the bootstrap folder must have a checkpoint file;
  1013  		// we need to copy this file to the trie folder in order to restore the trie, and with it the execution state.
  1014  		err = copyBootstrapState(node.BootstrapDir, exeNode.exeConf.triedir)
  1015  		if err != nil {
  1016  			return fmt.Errorf("could not load bootstrap state from checkpoint file: %w", err)
  1017  		}
  1018  
  1019  		// TODO: check that the checkpoint file contains the root block's state commitment hash
  1020  
  1021  		err = bootstrapper.BootstrapExecutionDatabase(node.DB, node.RootSeal.FinalState, node.RootBlock.Header)
  1022  		if err != nil {
  1023  			return fmt.Errorf("could not bootstrap execution database: %w", err)
  1024  		}
  1025  	} else {
  1026  		// if the execution database has been bootstrapped, then the root state commitment
  1027  		// must equal the one in the bootstrap folder
  1028  		if commit != node.RootSeal.FinalState {
  1029  			return fmt.Errorf("mismatching root state commitment. database has state commitment: %x, "+
  1030  				"bootstrap has state commitment: %x",
  1031  				commit, node.RootSeal.FinalState)
  1032  		}
  1033  	}
  1034  
  1035  	return nil
  1036  }
  1037  
  1038  // getContractEpochCounter gets the epoch counter from the FlowEpoch smart contract, read from the view provided.
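        // The counter is read by executing a Cadence script (generated from the
        // flow-core-contracts templates) that returns the contract's current epoch
        // counter as a UInt64.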
  1039  func getContractEpochCounter(vm fvm.VM, vmCtx fvm.Context, view *delta.View) (uint64, error) {
  1040  	// Get the address of the FlowEpoch smart contract
  1041  	sc, err := systemcontracts.SystemContractsForChain(vmCtx.Chain.ChainID())
  1042  	if err != nil {
  1043  		return 0, fmt.Errorf("could not get system contracts: %w", err)
  1044  	}
  1045  	address := sc.Epoch.Address
  1046  
  1047  	// Generate the script to get the epoch counter from the FlowEpoch smart contract
  1048  	scriptCode := templates.GenerateGetCurrentEpochCounterScript(templates.Environment{
  1049  		EpochAddress: address.Hex(),
  1050  	})
  1051  	script := fvm.Script(scriptCode)
  1052  
  1053  	// execute the script
  1054  	err = vm.Run(vmCtx, script, view)
  1055  	if err != nil {
  1056  		return 0, fmt.Errorf("could not read epoch counter, internal error while executing script: %w", err)
  1057  	}
  1058  	if script.Err != nil {
  1059  		return 0, fmt.Errorf("could not read epoch counter, script error: %w", script.Err)
  1060  	}
  1061  	if script.Value == nil {
  1062  		return 0, fmt.Errorf("could not read epoch counter, script returned no value")
  1063  	}
  1064  
  1065  	epochCounter := script.Value.ToGoValue().(uint64)
  1066  	return epochCounter, nil
  1067  }
  1068  
  1069  // copyBootstrapState copies the checkpoint file from the bootstrap folder to the
  1070  // execution state folder. The checkpoint file is required to restore the trie, and
  1071  // has to be placed in the execution state folder.
  1072  // There are two ways to generate a checkpoint file:
  1073  //  1. From a clean state.
  1074  //     Refer to the code in the testcase: TestGenerateExecutionState
  1075  //  2. From a previous execution state
  1076  //     This is often used when sporking the network.
  1077  //     Use the execution-state-extract command line utility to generate a checkpoint
  1078  //     file from a previous checkpoint file.
  1079  func copyBootstrapState(dir, trie string) error {
  1080  	filename := ""
  1081  	firstCheckpointFilename := "00000000"
  1082  
  1083  	fileExists := func(fileName string) bool {
  1084  		_, err := os.Stat(filepath.Join(dir, bootstrapFilenames.DirnameExecutionState, fileName))
  1085  		return err == nil
  1086  	}
  1087  
  1088  	// if there is a root checkpoint file, then copy that file over
  1089  	if fileExists(bootstrapFilenames.FilenameWALRootCheckpoint) {
  1090  		filename = bootstrapFilenames.FilenameWALRootCheckpoint
  1091  	} else if fileExists(firstCheckpointFilename) {
  1092  		// else if there is a checkpoint file, then copy that file over
  1093  		filename = firstCheckpointFilename
  1094  	} else {
  1095  		filePath := filepath.Join(dir, bootstrapFilenames.DirnameExecutionState, firstCheckpointFilename)
  1096  
  1097  		// include absolute path of the missing file in the error message
  1098  		absPath, err := filepath.Abs(filePath)
  1099  		if err != nil {
  1100  			absPath = filePath
  1101  		}
  1102  
  1103  		return fmt.Errorf("execution state file not found: %v", absPath)
  1104  	}
  1105  
  1106  	// copy from the bootstrap folder to the execution state folder
  1107  	from, to := path.Join(dir, bootstrapFilenames.DirnameExecutionState), trie
  1108  	copiedFiles, err := wal.CopyCheckpointFile(filename, from, to)
  1109  	if err != nil {
  1110  		return fmt.Errorf("cannot copy checkpoint file %s from %s to %s: %w",
  1111  			filename, from, to, err)
  1112  	}
  1113  
  1114  	for _, newPath := range copiedFiles {
  1115  		fmt.Printf("copied root checkpoint file from directory: %v, to: %v\n", from, newPath)
  1116  	}
  1117  
  1118  	return nil
  1119  }
  1120  
  1121  func logSysInfo(logger zerolog.Logger) error {
  1122  
  1123  	vmem, err := mem.VirtualMemory()
  1124  	if err != nil {
  1125  		return fmt.Errorf("failed to get virtual memory: %w", err)
  1126  	}
  1127  
  1128  	info, err := cpu.Info()
  1129  	if err != nil {
  1130  		return fmt.Errorf("failed to get cpu info: %w", err)
  1131  	}
  1132  
  1133  	logicalCores, err := cpu.Counts(true)
  1134  	if err != nil {
  1135  		return fmt.Errorf("failed to get logical cores: %w", err)
  1136  	}
  1137  
  1138  	physicalCores, err := cpu.Counts(false)
  1139  	if err != nil {
  1140  		return fmt.Errorf("failed to get physical cores: %w", err)
  1141  	}
  1142  
  1143  	if len(info) == 0 {
  1144  		return fmt.Errorf("cpu info length is 0")
  1145  	}
  1146  
  1147  	logger.Info().Msgf("CPU: ModelName=%s, MHz=%.0f, Family=%s, Model=%s, Stepping=%d, Microcode=%s, PhysicalCores=%d, LogicalCores=%d",
  1148  		info[0].ModelName, info[0].Mhz, info[0].Family, info[0].Model, info[0].Stepping, info[0].Microcode, physicalCores, logicalCores)
  1149  
  1150  	logger.Info().Msgf("RAM: Total=%d, Free=%d", vmem.Total, vmem.Free)
  1151  
  1152  	hostInfo, err := host.Info()
  1153  	if err != nil {
  1154  		return fmt.Errorf("failed to get platform info: %w", err)
  1155  	}
  1156  	logger.Info().Msgf("OS: OS=%s, Platform=%s, PlatformVersion=%s, KernelVersion=%s, Uptime: %d",
  1157  		hostInfo.OS, hostInfo.Platform, hostInfo.PlatformVersion, hostInfo.KernelVersion, hostInfo.Uptime)
  1158  
  1159  	// goruntime.GOMAXPROCS(0) doesn't modify any settings.
  1160  	logger.Info().Msgf("GO: GoVersion=%s, GOMAXPROCS=%d, NumCPU=%d",
  1161  		goruntime.Version(), goruntime.GOMAXPROCS(0), goruntime.NumCPU())
  1162  
  1163  	return nil
  1164  }