github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/testutil/nodes.go (about)

     1  package testutil
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"math"
     7  	"path/filepath"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/coreos/go-semver/semver"
    12  	"github.com/ipfs/boxo/blockstore"
    13  	"github.com/ipfs/go-datastore"
    14  	dssync "github.com/ipfs/go-datastore/sync"
    15  	"github.com/rs/zerolog"
    16  	"github.com/stretchr/testify/mock"
    17  	"github.com/stretchr/testify/require"
    18  	"go.uber.org/atomic"
    19  	"golang.org/x/time/rate"
    20  
    21  	"github.com/onflow/flow-go/cmd/build"
    22  	"github.com/onflow/flow-go/consensus"
    23  	"github.com/onflow/flow-go/consensus/hotstuff"
    24  	"github.com/onflow/flow-go/consensus/hotstuff/committees"
    25  	mockhotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks"
    26  	"github.com/onflow/flow-go/consensus/hotstuff/model"
    27  	"github.com/onflow/flow-go/consensus/hotstuff/notifications"
    28  	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
    29  	"github.com/onflow/flow-go/engine"
    30  	"github.com/onflow/flow-go/engine/collection/epochmgr"
    31  	"github.com/onflow/flow-go/engine/collection/epochmgr/factories"
    32  	"github.com/onflow/flow-go/engine/collection/ingest"
    33  	collectioningest "github.com/onflow/flow-go/engine/collection/ingest"
    34  	mockcollection "github.com/onflow/flow-go/engine/collection/mock"
    35  	"github.com/onflow/flow-go/engine/collection/pusher"
    36  	"github.com/onflow/flow-go/engine/common/follower"
    37  	"github.com/onflow/flow-go/engine/common/provider"
    38  	"github.com/onflow/flow-go/engine/common/requester"
    39  	"github.com/onflow/flow-go/engine/common/synchronization"
    40  	"github.com/onflow/flow-go/engine/consensus/approvals/tracker"
    41  	consensusingest "github.com/onflow/flow-go/engine/consensus/ingestion"
    42  	"github.com/onflow/flow-go/engine/consensus/matching"
    43  	"github.com/onflow/flow-go/engine/consensus/sealing"
    44  	"github.com/onflow/flow-go/engine/execution/computation"
    45  	"github.com/onflow/flow-go/engine/execution/computation/committer"
    46  	"github.com/onflow/flow-go/engine/execution/computation/query"
    47  	"github.com/onflow/flow-go/engine/execution/ingestion"
    48  	exeFetcher "github.com/onflow/flow-go/engine/execution/ingestion/fetcher"
    49  	"github.com/onflow/flow-go/engine/execution/ingestion/loader"
    50  	"github.com/onflow/flow-go/engine/execution/ingestion/stop"
    51  	"github.com/onflow/flow-go/engine/execution/ingestion/uploader"
    52  	executionprovider "github.com/onflow/flow-go/engine/execution/provider"
    53  	executionState "github.com/onflow/flow-go/engine/execution/state"
    54  	bootstrapexec "github.com/onflow/flow-go/engine/execution/state/bootstrap"
    55  	esbootstrap "github.com/onflow/flow-go/engine/execution/state/bootstrap"
    56  	"github.com/onflow/flow-go/engine/execution/storehouse"
    57  	testmock "github.com/onflow/flow-go/engine/testutil/mock"
    58  	verificationassigner "github.com/onflow/flow-go/engine/verification/assigner"
    59  	"github.com/onflow/flow-go/engine/verification/assigner/blockconsumer"
    60  	"github.com/onflow/flow-go/engine/verification/fetcher"
    61  	"github.com/onflow/flow-go/engine/verification/fetcher/chunkconsumer"
    62  	vereq "github.com/onflow/flow-go/engine/verification/requester"
    63  	"github.com/onflow/flow-go/engine/verification/verifier"
    64  	"github.com/onflow/flow-go/fvm"
    65  	"github.com/onflow/flow-go/fvm/environment"
    66  	"github.com/onflow/flow-go/fvm/storage/derived"
    67  	"github.com/onflow/flow-go/ledger/common/pathfinder"
    68  	completeLedger "github.com/onflow/flow-go/ledger/complete"
    69  	"github.com/onflow/flow-go/ledger/complete/mtrie/trie"
    70  	"github.com/onflow/flow-go/ledger/complete/wal"
    71  	"github.com/onflow/flow-go/model/bootstrap"
    72  	"github.com/onflow/flow-go/model/flow"
    73  	"github.com/onflow/flow-go/model/flow/filter"
    74  	"github.com/onflow/flow-go/module"
    75  	"github.com/onflow/flow-go/module/chainsync"
    76  	"github.com/onflow/flow-go/module/chunks"
    77  	"github.com/onflow/flow-go/module/compliance"
    78  	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
    79  	exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider"
    80  	mocktracker "github.com/onflow/flow-go/module/executiondatasync/tracker/mock"
    81  	"github.com/onflow/flow-go/module/finalizedreader"
    82  	confinalizer "github.com/onflow/flow-go/module/finalizer/consensus"
    83  	"github.com/onflow/flow-go/module/id"
    84  	"github.com/onflow/flow-go/module/irrecoverable"
    85  	"github.com/onflow/flow-go/module/local"
    86  	"github.com/onflow/flow-go/module/mempool"
    87  	consensusMempools "github.com/onflow/flow-go/module/mempool/consensus"
    88  	"github.com/onflow/flow-go/module/mempool/epochs"
    89  	"github.com/onflow/flow-go/module/mempool/herocache"
    90  	"github.com/onflow/flow-go/module/mempool/queue"
    91  	"github.com/onflow/flow-go/module/mempool/stdmap"
    92  	"github.com/onflow/flow-go/module/metrics"
    93  	mockmodule "github.com/onflow/flow-go/module/mock"
    94  	"github.com/onflow/flow-go/module/signature"
    95  	requesterunit "github.com/onflow/flow-go/module/state_synchronization/requester/unittest"
    96  	"github.com/onflow/flow-go/module/trace"
    97  	"github.com/onflow/flow-go/module/validation"
    98  	"github.com/onflow/flow-go/network/channels"
    99  	"github.com/onflow/flow-go/network/p2p/cache"
   100  	"github.com/onflow/flow-go/network/stub"
   101  	"github.com/onflow/flow-go/state/protocol"
   102  	badgerstate "github.com/onflow/flow-go/state/protocol/badger"
   103  	"github.com/onflow/flow-go/state/protocol/blocktimer"
   104  	"github.com/onflow/flow-go/state/protocol/events"
   105  	"github.com/onflow/flow-go/state/protocol/events/gadgets"
   106  	"github.com/onflow/flow-go/state/protocol/util"
   107  	storage "github.com/onflow/flow-go/storage/badger"
   108  	storagepebble "github.com/onflow/flow-go/storage/pebble"
   109  	"github.com/onflow/flow-go/utils/unittest"
   110  )
   111  
   112  // GenericNodeFromParticipants is a test helper that creates and returns a generic node.
   113  // The generic node's state is generated from the given participants, resulting in a
   114  // root state snapshot.
   115  //
   116  // CAUTION: Please use GenericNode instead for most use-cases so that multiple nodes
   117  // may share the same root state snapshot.
   118  func GenericNodeFromParticipants(t testing.TB, hub *stub.Hub, identity bootstrap.NodeInfo, participants []*flow.Identity, chainID flow.ChainID,
   119  	options ...func(protocol.State)) testmock.GenericNode {
   120  	var i int
   121  	var participant *flow.Identity
   122  	for i, participant = range participants {
   123  		if identity.NodeID == participant.NodeID {
   124  			break
   125  		}
   126  	}
   127  
   128  	// creates logger, metrics collector and tracer.
   129  	log := unittest.Logger().With().Int("index", i).Hex("node_id", identity.NodeID[:]).Str("role", identity.Role.String()).Logger()
   130  	tracer, err := trace.NewTracer(log, "test", "test", trace.SensitivityCaptureAll)
   131  	require.NoError(t, err)
   132  	metrics := metrics.NewNoopCollector()
   133  
   134  	// creates state fixture and bootstrap it.
   135  	rootSnapshot := unittest.RootSnapshotFixtureWithChainID(participants, chainID)
   136  	stateFixture := CompleteStateFixture(t, log, metrics, tracer, rootSnapshot)
   137  
   138  	require.NoError(t, err)
   139  	for _, option := range options {
   140  		option(stateFixture.State)
   141  	}
   142  
   143  	return GenericNodeWithStateFixture(t, stateFixture, hub, identity, log, metrics, tracer, chainID)
   144  }
   145  
   146  // GenericNode returns a generic test node, containing components shared across
   147  // all node roles. The generic node is used as the core data structure to create
   148  // other types of flow nodes.
   149  func GenericNode(
   150  	t testing.TB,
   151  	hub *stub.Hub,
   152  	identity bootstrap.NodeInfo,
   153  	root protocol.Snapshot,
   154  ) testmock.GenericNode {
   155  
   156  	log := unittest.Logger().With().
   157  		Hex("node_id", identity.NodeID[:]).
   158  		Str("role", identity.Role.String()).
   159  		Logger()
   160  	metrics := metrics.NewNoopCollector()
   161  	tracer := trace.NewNoopTracer()
   162  	stateFixture := CompleteStateFixture(t, log, metrics, tracer, root)
   163  
   164  	head, err := root.Head()
   165  	require.NoError(t, err)
   166  	chainID := head.ChainID
   167  
   168  	return GenericNodeWithStateFixture(t, stateFixture, hub, identity, log, metrics, tracer, chainID)
   169  }
   170  
   171  // GenericNodeWithStateFixture is a test helper that creates a generic node with specified state fixture.
   172  func GenericNodeWithStateFixture(t testing.TB,
   173  	stateFixture *testmock.StateFixture,
   174  	hub *stub.Hub,
   175  	bootstrapInfo bootstrap.NodeInfo,
   176  	log zerolog.Logger,
   177  	metrics *metrics.NoopCollector,
   178  	tracer module.Tracer,
   179  	chainID flow.ChainID) testmock.GenericNode {
   180  
   181  	identity := bootstrapInfo.Identity()
   182  	privateKeys, err := bootstrapInfo.PrivateKeys()
   183  	require.NoError(t, err)
   184  	me, err := local.New(identity.IdentitySkeleton, privateKeys.StakingKey)
   185  	require.NoError(t, err)
   186  	net := stub.NewNetwork(t, identity.NodeID, hub)
   187  
   188  	parentCtx, cancel := context.WithCancel(context.Background())
   189  	ctx, errs := irrecoverable.WithSignaler(parentCtx)
   190  
   191  	return testmock.GenericNode{
   192  		Ctx:                ctx,
   193  		Cancel:             cancel,
   194  		Errs:               errs,
   195  		Log:                log,
   196  		Metrics:            metrics,
   197  		Tracer:             tracer,
   198  		PublicDB:           stateFixture.PublicDB,
   199  		SecretsDB:          stateFixture.SecretsDB,
   200  		Headers:            stateFixture.Storage.Headers,
   201  		Guarantees:         stateFixture.Storage.Guarantees,
   202  		Seals:              stateFixture.Storage.Seals,
   203  		Payloads:           stateFixture.Storage.Payloads,
   204  		Blocks:             stateFixture.Storage.Blocks,
   205  		QuorumCertificates: stateFixture.Storage.QuorumCertificates,
   206  		Results:            stateFixture.Storage.Results,
   207  		Setups:             stateFixture.Storage.Setups,
   208  		EpochCommits:       stateFixture.Storage.EpochCommits,
   209  		EpochProtocolState: stateFixture.Storage.EpochProtocolState,
   210  		ProtocolKVStore:    stateFixture.Storage.ProtocolKVStore,
   211  		State:              stateFixture.State,
   212  		Index:              stateFixture.Storage.Index,
   213  		Me:                 me,
   214  		Net:                net,
   215  		DBDir:              stateFixture.DBDir,
   216  		ChainID:            chainID,
   217  		ProtocolEvents:     stateFixture.ProtocolEvents,
   218  	}
   219  }
   220  
   221  // CompleteStateFixture is a test helper that creates, bootstraps, and returns a StateFixture for sake of unit testing.
   222  func CompleteStateFixture(
   223  	t testing.TB,
   224  	log zerolog.Logger,
   225  	metric *metrics.NoopCollector,
   226  	tracer module.Tracer,
   227  	rootSnapshot protocol.Snapshot,
   228  ) *testmock.StateFixture {
   229  
   230  	dataDir := unittest.TempDir(t)
   231  	publicDBDir := filepath.Join(dataDir, "protocol")
   232  	secretsDBDir := filepath.Join(dataDir, "secrets")
   233  	db := unittest.TypedBadgerDB(t, publicDBDir, storage.InitPublic)
   234  	s := storage.InitAll(metric, db)
   235  	secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storage.InitSecret)
   236  	consumer := events.NewDistributor()
   237  
   238  	state, err := badgerstate.Bootstrap(
   239  		metric,
   240  		db,
   241  		s.Headers,
   242  		s.Seals,
   243  		s.Results,
   244  		s.Blocks,
   245  		s.QuorumCertificates,
   246  		s.Setups,
   247  		s.EpochCommits,
   248  		s.EpochProtocolState,
   249  		s.ProtocolKVStore,
   250  		s.VersionBeacons,
   251  		rootSnapshot,
   252  	)
   253  	require.NoError(t, err)
   254  
   255  	mutableState, err := badgerstate.NewFullConsensusState(
   256  		log,
   257  		tracer,
   258  		consumer,
   259  		state,
   260  		s.Index,
   261  		s.Payloads,
   262  		util.MockBlockTimer(),
   263  		util.MockReceiptValidator(),
   264  		util.MockSealValidator(s.Seals),
   265  	)
   266  	require.NoError(t, err)
   267  
   268  	return &testmock.StateFixture{
   269  		PublicDB:       db,
   270  		SecretsDB:      secretsDB,
   271  		Storage:        s,
   272  		DBDir:          dataDir,
   273  		ProtocolEvents: consumer,
   274  		State:          mutableState,
   275  	}
   276  }
   277  
// CollectionNode returns a mock collection node.
//
// It builds on GenericNode and wires up the collection-role engines (ingestion,
// provider, pusher) plus the epoch manager with its cluster-component factories.
// NOTE: the order of construction matters — the pusher engine is a dependency
// of the builder factory, and protocol-event consumers are registered last.
func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, rootSnapshot protocol.Snapshot) testmock.CollectionNode {

	node := GenericNode(t, hub, identity, rootSnapshot)
	privKeys, err := identity.PrivateKeys()
	require.NoError(t, err)
	// Replace the generic node's local module so it signs with this identity's staking key.
	node.Me, err = local.New(identity.Identity().IdentitySkeleton, privKeys.StakingKey)
	require.NoError(t, err)

	// One transaction mempool per epoch, each backed by a herocache of capacity 1000.
	pools := epochs.NewTransactionPools(
		func(_ uint64) mempool.Transactions {
			return herocache.NewTransactions(1000, node.Log, metrics.NewNoopCollector())
		})
	transactions := storage.NewTransactions(node.Metrics, node.PublicDB)
	collections := storage.NewCollections(node.PublicDB, transactions)
	clusterPayloads := storage.NewClusterPayloads(node.Metrics, node.PublicDB)

	// Ingestion engine accepts incoming transactions, rate-limited per address.
	ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Metrics, node.Me, node.ChainID.Chain(), pools, collectioningest.DefaultConfig(),
		ingest.NewAddressRateLimiter(rate.Limit(1), 10)) // 10 tps
	require.NoError(t, err)

	// Provider engine serves stored collections to access and verification nodes.
	selector := filter.HasRole[flow.Identity](flow.RoleAccess, flow.RoleVerification)
	retrieve := func(collID flow.Identifier) (flow.Entity, error) {
		coll, err := collections.ByID(collID)
		return coll, err
	}
	providerEngine, err := provider.New(
		node.Log,
		node.Metrics,
		node.Net,
		node.Me,
		node.State,
		queue.NewHeroStore(uint32(1000), unittest.Logger(), metrics.NewNoopCollector()),
		uint(1000),
		channels.ProvideCollections,
		selector,
		retrieve)
	require.NoError(t, err)

	// Pusher engine submits collection guarantees to consensus nodes.
	pusherEngine, err := pusher.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me, collections, transactions)
	require.NoError(t, err)

	// Factories below produce the per-epoch cluster consensus components
	// (cluster state, block builder, compliance, sync, hotstuff, message hub).
	clusterStateFactory, err := factories.NewClusterStateFactory(
		node.PublicDB,
		node.Metrics,
		node.Tracer,
	)
	require.NoError(t, err)

	builderFactory, err := factories.NewBuilderFactory(
		node.PublicDB,
		node.State,
		node.Headers,
		node.Tracer,
		node.Metrics,
		pusherEngine, // the builder notifies the pusher about new proposals
		node.Log,
	)
	require.NoError(t, err)

	complianceEngineFactory, err := factories.NewComplianceEngineFactory(
		node.Log,
		node.Net,
		node.Me,
		node.Metrics, node.Metrics, node.Metrics,
		node.State,
		transactions,
		compliance.DefaultConfig(),
	)
	require.NoError(t, err)

	syncCoreFactory, err := factories.NewSyncCoreFactory(node.Log, chainsync.DefaultConfig())
	require.NoError(t, err)

	syncFactory, err := factories.NewSyncEngineFactory(
		node.Log,
		node.Metrics,
		node.Net,
		node.Me,
	)
	require.NoError(t, err)

	// Hotstuff metrics are no-ops in tests, regardless of cluster chain ID.
	createMetrics := func(chainID flow.ChainID) module.HotstuffMetrics {
		return metrics.NewNoopCollector()
	}
	hotstuffFactory, err := factories.NewHotStuffFactory(
		node.Log,
		node.Me,
		node.PublicDB,
		node.State,
		node.Metrics,
		node.Metrics,
		createMetrics,
	)
	require.NoError(t, err)

	messageHubFactory := factories.NewMessageHubFactory(
		node.Log,
		node.Net,
		node.Me,
		node.Metrics,
		node.State,
	)

	factory := factories.NewEpochComponentsFactory(
		node.Me,
		pools,
		builderFactory,
		clusterStateFactory,
		hotstuffFactory,
		complianceEngineFactory,
		syncCoreFactory,
		syncFactory,
		messageHubFactory,
	)

	// Root QC voting is mocked out — it always succeeds.
	rootQCVoter := new(mockmodule.ClusterRootQCVoter)
	rootQCVoter.On("Vote", mock.Anything, mock.Anything).Return(nil)

	engineEventsDistributor := mockcollection.NewEngineEvents(t)
	engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Maybe()
	heights := gadgets.NewHeights()
	node.ProtocolEvents.AddConsumer(heights)

	// The epoch manager drives per-epoch component lifecycles; it subscribes
	// to protocol events so it reacts to epoch transitions.
	epochManager, err := epochmgr.New(
		node.Log,
		node.Me,
		node.State,
		pools,
		rootQCVoter,
		factory,
		heights,
		engineEventsDistributor,
	)
	require.NoError(t, err)
	node.ProtocolEvents.AddConsumer(epochManager)

	return testmock.CollectionNode{
		GenericNode:        node,
		Collections:        collections,
		Transactions:       transactions,
		ClusterPayloads:    clusterPayloads,
		IngestionEngine:    ingestionEngine,
		PusherEngine:       pusherEngine,
		ProviderEngine:     providerEngine,
		EpochManagerEngine: epochManager,
	}
}
   426  
   427  func ConsensusNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, identities []*flow.Identity, chainID flow.ChainID) testmock.ConsensusNode {
   428  
   429  	node := GenericNodeFromParticipants(t, hub, identity, identities, chainID)
   430  
   431  	resultsDB := storage.NewExecutionResults(node.Metrics, node.PublicDB)
   432  	receiptsDB := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, resultsDB, storage.DefaultCacheSize)
   433  
   434  	guarantees, err := stdmap.NewGuarantees(1000)
   435  	require.NoError(t, err)
   436  
   437  	receipts := consensusMempools.NewExecutionTree()
   438  
   439  	seals := stdmap.NewIncorporatedResultSeals(1000)
   440  	pendingReceipts := stdmap.NewPendingReceipts(node.Headers, 1000)
   441  
   442  	ingestionCore := consensusingest.NewCore(node.Log, node.Tracer, node.Metrics, node.State,
   443  		node.Headers, guarantees)
   444  	// receive collections
   445  	ingestionEngine, err := consensusingest.New(node.Log, node.Metrics, node.Net, node.Me, ingestionCore)
   446  	require.Nil(t, err)
   447  
   448  	// request receipts from execution nodes
   449  	receiptRequester, err := requester.New(node.Log, node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} })
   450  	require.Nil(t, err)
   451  
   452  	assigner, err := chunks.NewChunkAssigner(flow.DefaultChunkAssignmentAlpha, node.State)
   453  	require.Nil(t, err)
   454  
   455  	receiptValidator := validation.NewReceiptValidator(
   456  		node.State,
   457  		node.Headers,
   458  		node.Index,
   459  		resultsDB,
   460  		node.Seals,
   461  	)
   462  
   463  	sealingEngine, err := sealing.NewEngine(
   464  		node.Log,
   465  		node.Tracer,
   466  		node.Metrics,
   467  		node.Metrics,
   468  		node.Metrics,
   469  		&tracker.NoopSealingTracker{},
   470  		node.Net,
   471  		node.Me,
   472  		node.Headers,
   473  		node.Payloads,
   474  		resultsDB,
   475  		node.Index,
   476  		node.State,
   477  		node.Seals,
   478  		assigner,
   479  		seals,
   480  		unittest.NewSealingConfigs(flow.DefaultRequiredApprovalsForSealConstruction),
   481  	)
   482  	require.NoError(t, err)
   483  
   484  	matchingConfig := matching.DefaultConfig()
   485  
   486  	matchingCore := matching.NewCore(
   487  		node.Log,
   488  		node.Tracer,
   489  		node.Metrics,
   490  		node.Metrics,
   491  		node.State,
   492  		node.Headers,
   493  		receiptsDB,
   494  		receipts,
   495  		pendingReceipts,
   496  		seals,
   497  		receiptValidator,
   498  		receiptRequester,
   499  		matchingConfig)
   500  
   501  	matchingEngine, err := matching.NewEngine(
   502  		node.Log,
   503  		node.Net,
   504  		node.Me,
   505  		node.Metrics,
   506  		node.Metrics,
   507  		node.State,
   508  		receiptsDB,
   509  		node.Index,
   510  		matchingCore,
   511  	)
   512  	require.NoError(t, err)
   513  
   514  	return testmock.ConsensusNode{
   515  		GenericNode:     node,
   516  		Guarantees:      guarantees,
   517  		Receipts:        receipts,
   518  		Seals:           seals,
   519  		IngestionEngine: ingestionEngine,
   520  		SealingEngine:   sealingEngine,
   521  		MatchingEngine:  matchingEngine,
   522  	}
   523  }
   524  
// CheckerMock is a no-op stand-in used in tests; embedding NoopConsumer
// satisfies the FinalizationConsumer interface without any behavior.
type CheckerMock struct {
	notifications.NoopConsumer // satisfy the FinalizationConsumer interface
}
   528  
   529  func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, identities []*flow.Identity, syncThreshold int, chainID flow.ChainID) testmock.ExecutionNode {
   530  	node := GenericNodeFromParticipants(t, hub, identity, identities, chainID)
   531  
   532  	transactionsStorage := storage.NewTransactions(node.Metrics, node.PublicDB)
   533  	collectionsStorage := storage.NewCollections(node.PublicDB, transactionsStorage)
   534  	eventsStorage := storage.NewEvents(node.Metrics, node.PublicDB)
   535  	serviceEventsStorage := storage.NewServiceEvents(node.Metrics, node.PublicDB)
   536  	txResultStorage := storage.NewTransactionResults(node.Metrics, node.PublicDB, storage.DefaultCacheSize)
   537  	commitsStorage := storage.NewCommits(node.Metrics, node.PublicDB)
   538  	chunkDataPackStorage := storage.NewChunkDataPacks(node.Metrics, node.PublicDB, collectionsStorage, 100)
   539  	results := storage.NewExecutionResults(node.Metrics, node.PublicDB)
   540  	receipts := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize)
   541  	myReceipts := storage.NewMyExecutionReceipts(node.Metrics, node.PublicDB, receipts)
   542  	versionBeacons := storage.NewVersionBeacons(node.PublicDB)
   543  	headersStorage := storage.NewHeaders(node.Metrics, node.PublicDB)
   544  
   545  	checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) {
   546  		return protocol.IsNodeAuthorizedAt(node.State.AtBlockID(blockID), node.Me.NodeID())
   547  	}
   548  
   549  	protoState, ok := node.State.(*badgerstate.ParticipantState)
   550  	require.True(t, ok)
   551  
   552  	followerState, err := badgerstate.NewFollowerState(
   553  		node.Log,
   554  		node.Tracer,
   555  		node.ProtocolEvents,
   556  		protoState.State,
   557  		node.Index,
   558  		node.Payloads,
   559  		blocktimer.DefaultBlockTimer,
   560  	)
   561  	require.NoError(t, err)
   562  
   563  	dbDir := unittest.TempDir(t)
   564  
   565  	metricsCollector := &metrics.NoopCollector{}
   566  
   567  	const (
   568  		capacity           = 100
   569  		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
   570  		checkpointsToKeep  = 1
   571  	)
   572  	diskWal, err := wal.NewDiskWAL(node.Log.With().Str("subcomponent", "wal").Logger(), nil, metricsCollector, dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
   573  	require.NoError(t, err)
   574  
   575  	ls, err := completeLedger.NewLedger(diskWal, capacity, metricsCollector, node.Log.With().Str("component", "ledger").Logger(), completeLedger.DefaultPathFinderVersion)
   576  	require.NoError(t, err)
   577  
   578  	compactor, err := completeLedger.NewCompactor(ls, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metricsCollector)
   579  	require.NoError(t, err)
   580  
   581  	<-compactor.Ready() // Need to start compactor here because BootstrapLedger() updates ledger state.
   582  
   583  	genesisHead, err := node.State.Final().Head()
   584  	require.NoError(t, err)
   585  
   586  	bootstrapper := bootstrapexec.NewBootstrapper(node.Log)
   587  	commit, err := bootstrapper.BootstrapLedger(
   588  		ls,
   589  		unittest.ServiceAccountPublicKey,
   590  		node.ChainID.Chain(),
   591  		fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply))
   592  	require.NoError(t, err)
   593  
   594  	matchTrie, err := ls.FindTrieByStateCommit(commit)
   595  	require.NoError(t, err)
   596  	require.NotNil(t, matchTrie)
   597  
   598  	const bootstrapCheckpointFile = "bootstrap-checkpoint"
   599  	checkpointFile := filepath.Join(dbDir, bootstrapCheckpointFile)
   600  	err = wal.StoreCheckpointV6([]*trie.MTrie{matchTrie}, dbDir, bootstrapCheckpointFile, zerolog.Nop(), 1)
   601  	require.NoError(t, err)
   602  
   603  	rootResult, rootSeal, err := protoState.Sealed().SealedResult()
   604  	require.NoError(t, err)
   605  
   606  	require.Equal(t, fmt.Sprintf("%x", rootSeal.FinalState), fmt.Sprintf("%x", commit))
   607  	require.Equal(t, rootSeal.ResultID, rootResult.ID())
   608  
   609  	err = bootstrapper.BootstrapExecutionDatabase(node.PublicDB, rootSeal)
   610  	require.NoError(t, err)
   611  
   612  	registerDir := unittest.TempPebblePath(t)
   613  	pebbledb, err := storagepebble.OpenRegisterPebbleDB(registerDir)
   614  	require.NoError(t, err)
   615  
   616  	checkpointHeight := uint64(0)
   617  	require.NoError(t, esbootstrap.ImportRegistersFromCheckpoint(node.Log, checkpointFile, checkpointHeight, matchTrie.RootHash(), pebbledb, 2))
   618  
   619  	diskStore, err := storagepebble.NewRegisters(pebbledb)
   620  	require.NoError(t, err)
   621  
   622  	reader := finalizedreader.NewFinalizedReader(headersStorage, checkpointHeight)
   623  	registerStore, err := storehouse.NewRegisterStore(
   624  		diskStore,
   625  		nil, // TOOD(leo): replace with real WAL
   626  		reader,
   627  		node.Log,
   628  		storehouse.NewNoopNotifier(),
   629  	)
   630  	require.NoError(t, err)
   631  
   632  	storehouseEnabled := true
   633  	execState := executionState.NewExecutionState(
   634  		ls, commitsStorage, node.Blocks, node.Headers, collectionsStorage, chunkDataPackStorage, results, myReceipts, eventsStorage, serviceEventsStorage, txResultStorage, node.PublicDB, node.Tracer,
   635  		// TODO: test with register store
   636  		registerStore,
   637  		storehouseEnabled,
   638  	)
   639  
   640  	requestEngine, err := requester.New(
   641  		node.Log, node.Metrics, node.Net, node.Me, node.State,
   642  		channels.RequestCollections,
   643  		filter.HasRole[flow.Identity](flow.RoleCollection),
   644  		func() flow.Entity { return &flow.Collection{} },
   645  	)
   646  	require.NoError(t, err)
   647  
   648  	pusherEngine, err := executionprovider.New(
   649  		node.Log,
   650  		node.Tracer,
   651  		node.Net,
   652  		node.State,
   653  		execState,
   654  		metricsCollector,
   655  		checkAuthorizedAtBlock,
   656  		queue.NewHeroStore(uint32(1000), unittest.Logger(), metrics.NewNoopCollector()),
   657  		executionprovider.DefaultChunkDataPackRequestWorker,
   658  		executionprovider.DefaultChunkDataPackQueryTimeout,
   659  		executionprovider.DefaultChunkDataPackDeliveryTimeout,
   660  	)
   661  	require.NoError(t, err)
   662  
   663  	blockFinder := environment.NewBlockFinder(node.Headers)
   664  
   665  	vmCtx := fvm.NewContext(
   666  		fvm.WithLogger(node.Log),
   667  		fvm.WithChain(node.ChainID.Chain()),
   668  		fvm.WithBlocks(blockFinder),
   669  	)
   670  	committer := committer.NewLedgerViewCommitter(ls, node.Tracer)
   671  
   672  	bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore())))
   673  	trackerStorage := mocktracker.NewMockStorage()
   674  
   675  	prov := exedataprovider.NewProvider(
   676  		zerolog.Nop(),
   677  		metrics.NewNoopCollector(),
   678  		execution_data.DefaultSerializer,
   679  		bservice,
   680  		trackerStorage,
   681  	)
   682  
   683  	computationEngine, err := computation.New(
   684  		node.Log,
   685  		node.Metrics,
   686  		node.Tracer,
   687  		node.Me,
   688  		node.State,
   689  		vmCtx,
   690  		committer,
   691  		prov,
   692  		computation.ComputationConfig{
   693  			QueryConfig:          query.NewDefaultConfig(),
   694  			DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize,
   695  			MaxConcurrency:       1,
   696  		},
   697  	)
   698  	require.NoError(t, err)
   699  
   700  	syncCore, err := chainsync.New(node.Log, chainsync.DefaultConfig(), metrics.NewChainSyncCollector(genesisHead.ChainID), genesisHead.ChainID)
   701  	require.NoError(t, err)
   702  
   703  	followerDistributor := pubsub.NewFollowerDistributor()
   704  	require.NoError(t, err)
   705  
   706  	// disabled by default
   707  	uploader := uploader.NewManager(node.Tracer)
   708  
   709  	_, err = build.Semver()
   710  	require.ErrorIs(t, err, build.UndefinedVersionError)
   711  	ver := semver.New("0.0.0")
   712  
   713  	latestFinalizedBlock, err := node.State.Final().Head()
   714  	require.NoError(t, err)
   715  
   716  	unit := engine.NewUnit()
   717  	stopControl := stop.NewStopControl(
   718  		unit,
   719  		time.Second,
   720  		node.Log,
   721  		execState,
   722  		node.Headers,
   723  		versionBeacons,
   724  		ver,
   725  		latestFinalizedBlock,
   726  		false,
   727  		true,
   728  	)
   729  
   730  	fetcher := exeFetcher.NewCollectionFetcher(node.Log, requestEngine, node.State, false)
   731  	loader := loader.NewUnexecutedLoader(node.Log, node.State, node.Headers, execState)
   732  	rootHead, rootQC := getRoot(t, &node)
   733  	ingestionEngine, err := ingestion.New(
   734  		unit,
   735  		node.Log,
   736  		node.Net,
   737  		fetcher,
   738  		node.Headers,
   739  		node.Blocks,
   740  		collectionsStorage,
   741  		computationEngine,
   742  		pusherEngine,
   743  		execState,
   744  		node.Metrics,
   745  		node.Tracer,
   746  		false,
   747  		nil,
   748  		uploader,
   749  		stopControl,
   750  		loader,
   751  	)
   752  	require.NoError(t, err)
   753  	requestEngine.WithHandle(ingestionEngine.OnCollection)
   754  
   755  	node.ProtocolEvents.AddConsumer(ingestionEngine)
   756  
   757  	followerCore, finalizer := createFollowerCore(t, &node, followerState, followerDistributor, rootHead, rootQC)
   758  	// mock out hotstuff validator
   759  	validator := new(mockhotstuff.Validator)
   760  	validator.On("ValidateProposal", mock.Anything).Return(nil)
   761  
   762  	core, err := follower.NewComplianceCore(
   763  		node.Log,
   764  		node.Metrics,
   765  		node.Metrics,
   766  		followerDistributor,
   767  		followerState,
   768  		followerCore,
   769  		validator,
   770  		syncCore,
   771  		node.Tracer,
   772  	)
   773  	require.NoError(t, err)
   774  
   775  	finalizedHeader, err := protoState.Final().Head()
   776  	require.NoError(t, err)
   777  	followerEng, err := follower.NewComplianceLayer(
   778  		node.Log,
   779  		node.Net,
   780  		node.Me,
   781  		node.Metrics,
   782  		node.Headers,
   783  		finalizedHeader,
   784  		core,
   785  		compliance.DefaultConfig(),
   786  	)
   787  	require.NoError(t, err)
   788  
   789  	idCache, err := cache.NewProtocolStateIDCache(node.Log, node.State, events.NewDistributor())
   790  	require.NoError(t, err, "could not create finalized snapshot cache")
   791  	spamConfig, err := synchronization.NewSpamDetectionConfig()
   792  	require.NoError(t, err, "could not initialize spam detection config")
   793  	syncEngine, err := synchronization.New(
   794  		node.Log,
   795  		node.Metrics,
   796  		node.Net,
   797  		node.Me,
   798  		node.State,
   799  		node.Blocks,
   800  		followerEng,
   801  		syncCore,
   802  		id.NewIdentityFilterIdentifierProvider(
   803  			filter.And(
   804  				filter.HasRole[flow.Identity](flow.RoleConsensus),
   805  				filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())),
   806  			),
   807  			idCache,
   808  		),
   809  		spamConfig,
   810  		synchronization.WithPollInterval(time.Duration(0)),
   811  	)
   812  	require.NoError(t, err)
   813  	followerDistributor.AddFinalizationConsumer(syncEngine)
   814  
   815  	return testmock.ExecutionNode{
   816  		GenericNode:         node,
   817  		FollowerState:       followerState,
   818  		IngestionEngine:     ingestionEngine,
   819  		FollowerCore:        followerCore,
   820  		FollowerEngine:      followerEng,
   821  		SyncEngine:          syncEngine,
   822  		ExecutionEngine:     computationEngine,
   823  		RequestEngine:       requestEngine,
   824  		ReceiptsEngine:      pusherEngine,
   825  		BadgerDB:            node.PublicDB,
   826  		VM:                  computationEngine.VM(),
   827  		ExecutionState:      execState,
   828  		Ledger:              ls,
   829  		LevelDbDir:          dbDir,
   830  		Collections:         collectionsStorage,
   831  		Finalizer:           finalizer,
   832  		MyExecutionReceipts: myReceipts,
   833  		Compactor:           compactor,
   834  		StorehouseEnabled:   storehouseEnabled,
   835  	}
   836  }
   837  
   838  func getRoot(t *testing.T, node *testmock.GenericNode) (*flow.Header, *flow.QuorumCertificate) {
   839  	rootHead := node.State.Params().FinalizedRoot()
   840  
   841  	signers, err := node.State.AtHeight(0).Identities(filter.HasRole[flow.Identity](flow.RoleConsensus))
   842  	require.NoError(t, err)
   843  
   844  	signerIDs := signers.NodeIDs()
   845  	signerIndices, err := signature.EncodeSignersToIndices(signerIDs, signerIDs)
   846  	require.NoError(t, err)
   847  
   848  	rootQC := &flow.QuorumCertificate{
   849  		View:          rootHead.View,
   850  		BlockID:       rootHead.ID(),
   851  		SignerIndices: signerIndices,
   852  		SigData:       unittest.SignatureFixture(),
   853  	}
   854  
   855  	return rootHead, rootQC
   856  }
   857  
// RoundRobinLeaderSelection is a lightweight test double for the HotStuff
// committee: it serves a fixed identity list for every block/epoch query and
// rotates leadership through that list by view number.
type RoundRobinLeaderSelection struct {
	identities flow.IdentityList // static committee; leader for view v is identities[v % len(identities)]
	me         flow.Identifier   // identifier of this node, returned by Self()
}

// Compile-time checks that the mock satisfies the committee interfaces.
var _ hotstuff.Replicas = (*RoundRobinLeaderSelection)(nil)
var _ hotstuff.DynamicCommittee = (*RoundRobinLeaderSelection)(nil)
   865  
   866  func (s *RoundRobinLeaderSelection) IdentitiesByBlock(_ flow.Identifier) (flow.IdentityList, error) {
   867  	return s.identities, nil
   868  }
   869  
   870  func (s *RoundRobinLeaderSelection) IdentityByBlock(_ flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) {
   871  	id, found := s.identities.ByNodeID(participantID)
   872  	if !found {
   873  		return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID)
   874  	}
   875  
   876  	return id, nil
   877  }
   878  
   879  func (s *RoundRobinLeaderSelection) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) {
   880  	return s.identities.ToSkeleton(), nil
   881  }
   882  
   883  func (s *RoundRobinLeaderSelection) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) {
   884  	id, found := s.identities.ByNodeID(participantID)
   885  	if !found {
   886  		return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID)
   887  	}
   888  	return &id.IdentitySkeleton, nil
   889  }
   890  
   891  func (s *RoundRobinLeaderSelection) LeaderForView(view uint64) (flow.Identifier, error) {
   892  	return s.identities[int(view)%len(s.identities)].NodeID, nil
   893  }
   894  
   895  func (s *RoundRobinLeaderSelection) QuorumThresholdForView(_ uint64) (uint64, error) {
   896  	return committees.WeightThresholdToBuildQC(s.identities.ToSkeleton().TotalWeight()), nil
   897  }
   898  
   899  func (s *RoundRobinLeaderSelection) TimeoutThresholdForView(_ uint64) (uint64, error) {
   900  	return committees.WeightThresholdToTimeout(s.identities.ToSkeleton().TotalWeight()), nil
   901  }
   902  
// Self returns the identifier of this node.
func (s *RoundRobinLeaderSelection) Self() flow.Identifier {
	return s.me
}
   906  
// DKG is not supported by this test double and always returns an error.
func (s *RoundRobinLeaderSelection) DKG(_ uint64) (hotstuff.DKG, error) {
	return nil, fmt.Errorf("error")
}
   910  
   911  func createFollowerCore(
   912  	t *testing.T,
   913  	node *testmock.GenericNode,
   914  	followerState *badgerstate.FollowerState,
   915  	notifier hotstuff.FollowerConsumer,
   916  	rootHead *flow.Header,
   917  	rootQC *flow.QuorumCertificate,
   918  ) (module.HotStuffFollower, *confinalizer.Finalizer) {
   919  	finalizer := confinalizer.NewFinalizer(node.PublicDB, node.Headers, followerState, trace.NewNoopTracer())
   920  
   921  	pending := make([]*flow.Header, 0)
   922  
   923  	// creates a consensus follower with noop consumer as the notifier
   924  	followerCore, err := consensus.NewFollower(
   925  		node.Log,
   926  		node.Metrics,
   927  		node.Headers,
   928  		finalizer,
   929  		notifier,
   930  		rootHead,
   931  		rootQC,
   932  		rootHead,
   933  		pending,
   934  	)
   935  	require.NoError(t, err)
   936  	return followerCore, finalizer
   937  }
   938  
   939  type VerificationOpt func(*testmock.VerificationNode)
   940  
   941  func WithChunkConsumer(chunkConsumer *chunkconsumer.ChunkConsumer) VerificationOpt {
   942  	return func(node *testmock.VerificationNode) {
   943  		node.ChunkConsumer = chunkConsumer
   944  	}
   945  }
   946  
   947  func WithGenericNode(genericNode *testmock.GenericNode) VerificationOpt {
   948  	return func(node *testmock.VerificationNode) {
   949  		node.GenericNode = genericNode
   950  	}
   951  }
   952  
   953  // VerificationNode creates a verification node with all functional engines and actual modules for purpose of
   954  // (integration) testing.
   955  func VerificationNode(t testing.TB,
   956  	hub *stub.Hub,
   957  	verIdentity bootstrap.NodeInfo, // identity of this verification node.
   958  	participants flow.IdentityList, // identity of all nodes in system including this verification node.
   959  	assigner module.ChunkAssigner,
   960  	chunksLimit uint,
   961  	chainID flow.ChainID,
   962  	collector module.VerificationMetrics, // used to enable collecting metrics on happy path integration
   963  	mempoolCollector module.MempoolMetrics, // used to enable collecting metrics on happy path integration
   964  	opts ...VerificationOpt) testmock.VerificationNode {
   965  
   966  	var err error
   967  	var node testmock.VerificationNode
   968  
   969  	for _, apply := range opts {
   970  		apply(&node)
   971  	}
   972  
   973  	if node.GenericNode == nil {
   974  		gn := GenericNodeFromParticipants(t, hub, verIdentity, participants, chainID)
   975  		node.GenericNode = &gn
   976  	}
   977  
   978  	if node.ChunkStatuses == nil {
   979  		node.ChunkStatuses = stdmap.NewChunkStatuses(chunksLimit)
   980  		err = mempoolCollector.Register(metrics.ResourceChunkStatus, node.ChunkStatuses.Size)
   981  		require.Nil(t, err)
   982  	}
   983  
   984  	if node.ChunkRequests == nil {
   985  		node.ChunkRequests = stdmap.NewChunkRequests(chunksLimit)
   986  		err = mempoolCollector.Register(metrics.ResourceChunkRequest, node.ChunkRequests.Size)
   987  		require.NoError(t, err)
   988  	}
   989  
   990  	if node.Results == nil {
   991  		results := storage.NewExecutionResults(node.Metrics, node.PublicDB)
   992  		node.Results = results
   993  		node.Receipts = storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize)
   994  	}
   995  
   996  	if node.ProcessedChunkIndex == nil {
   997  		node.ProcessedChunkIndex = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationChunkIndex)
   998  	}
   999  
  1000  	if node.ChunksQueue == nil {
  1001  		node.ChunksQueue = storage.NewChunkQueue(node.PublicDB)
  1002  		ok, err := node.ChunksQueue.Init(chunkconsumer.DefaultJobIndex)
  1003  		require.NoError(t, err)
  1004  		require.True(t, ok)
  1005  	}
  1006  
  1007  	if node.ProcessedBlockHeight == nil {
  1008  		node.ProcessedBlockHeight = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationBlockHeight)
  1009  	}
  1010  
  1011  	if node.VerifierEngine == nil {
  1012  		vm := fvm.NewVirtualMachine()
  1013  
  1014  		blockFinder := environment.NewBlockFinder(node.Headers)
  1015  
  1016  		vmCtx := fvm.NewContext(
  1017  			fvm.WithLogger(node.Log),
  1018  			fvm.WithChain(node.ChainID.Chain()),
  1019  			fvm.WithBlocks(blockFinder),
  1020  		)
  1021  
  1022  		chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, node.Log)
  1023  
  1024  		approvalStorage := storage.NewResultApprovals(node.Metrics, node.PublicDB)
  1025  
  1026  		node.VerifierEngine, err = verifier.New(node.Log,
  1027  			collector,
  1028  			node.Tracer,
  1029  			node.Net,
  1030  			node.State,
  1031  			node.Me,
  1032  			chunkVerifier,
  1033  			approvalStorage)
  1034  		require.Nil(t, err)
  1035  	}
  1036  
  1037  	if node.RequesterEngine == nil {
  1038  		node.RequesterEngine, err = vereq.New(node.Log,
  1039  			node.State,
  1040  			node.Net,
  1041  			node.Tracer,
  1042  			collector,
  1043  			node.ChunkRequests,
  1044  			vereq.DefaultRequestInterval,
  1045  			// requests are only qualified if their retryAfter is elapsed.
  1046  			vereq.RetryAfterQualifier,
  1047  			// exponential backoff with multiplier of 2, minimum interval of a second, and
  1048  			// maximum interval of an hour.
  1049  			mempool.ExponentialUpdater(
  1050  				vereq.DefaultBackoffMultiplier,
  1051  				vereq.DefaultBackoffMaxInterval,
  1052  				vereq.DefaultBackoffMinInterval),
  1053  			vereq.DefaultRequestTargets)
  1054  
  1055  		require.NoError(t, err)
  1056  	}
  1057  
  1058  	if node.FetcherEngine == nil {
  1059  		node.FetcherEngine = fetcher.New(node.Log,
  1060  			collector,
  1061  			node.Tracer,
  1062  			node.VerifierEngine,
  1063  			node.State,
  1064  			node.ChunkStatuses,
  1065  			node.Headers,
  1066  			node.Blocks,
  1067  			node.Results,
  1068  			node.Receipts,
  1069  			node.RequesterEngine,
  1070  			0,
  1071  		)
  1072  	}
  1073  
  1074  	if node.ChunkConsumer == nil {
  1075  		node.ChunkConsumer, err = chunkconsumer.NewChunkConsumer(node.Log,
  1076  			collector,
  1077  			node.ProcessedChunkIndex,
  1078  			node.ChunksQueue,
  1079  			node.FetcherEngine,
  1080  			chunkconsumer.DefaultChunkWorkers) // defaults number of workers to 3.
  1081  		require.NoError(t, err)
  1082  		err = mempoolCollector.Register(metrics.ResourceChunkConsumer, node.ChunkConsumer.Size)
  1083  		require.NoError(t, err)
  1084  	}
  1085  
  1086  	if node.AssignerEngine == nil {
  1087  		node.AssignerEngine = verificationassigner.New(node.Log,
  1088  			collector,
  1089  			node.Tracer,
  1090  			node.Me,
  1091  			node.State,
  1092  			assigner,
  1093  			node.ChunksQueue,
  1094  			node.ChunkConsumer,
  1095  			0)
  1096  	}
  1097  
  1098  	if node.BlockConsumer == nil {
  1099  		node.BlockConsumer, _, err = blockconsumer.NewBlockConsumer(node.Log,
  1100  			collector,
  1101  			node.ProcessedBlockHeight,
  1102  			node.Blocks,
  1103  			node.State,
  1104  			node.AssignerEngine,
  1105  			blockconsumer.DefaultBlockWorkers)
  1106  		require.NoError(t, err)
  1107  
  1108  		err = mempoolCollector.Register(metrics.ResourceBlockConsumer, node.BlockConsumer.Size)
  1109  		require.NoError(t, err)
  1110  	}
  1111  
  1112  	return node
  1113  }