github.com/koko1123/flow-go-1@v0.29.6/engine/testutil/nodes.go

package testutil

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"path/filepath"
	"testing"
	"time"

	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/koko1123/flow-go-1/network/p2p/cache"

	"github.com/koko1123/flow-go-1/module/mempool/queue"

	"github.com/koko1123/flow-go-1/consensus"
	"github.com/koko1123/flow-go-1/consensus/hotstuff"
	mockhotstuff "github.com/koko1123/flow-go-1/consensus/hotstuff/mocks"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/notifications"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/notifications/pubsub"
	"github.com/onflow/flow-go/crypto"
	"github.com/koko1123/flow-go-1/engine/collection/epochmgr"
	"github.com/koko1123/flow-go-1/engine/collection/epochmgr/factories"
	collectioningest "github.com/koko1123/flow-go-1/engine/collection/ingest"
	"github.com/koko1123/flow-go-1/engine/collection/pusher"
	"github.com/koko1123/flow-go-1/engine/common/follower"
	"github.com/koko1123/flow-go-1/engine/common/provider"
	"github.com/koko1123/flow-go-1/engine/common/requester"
	"github.com/koko1123/flow-go-1/engine/common/synchronization"
	"github.com/koko1123/flow-go-1/engine/consensus/approvals/tracker"
	consensusingest "github.com/koko1123/flow-go-1/engine/consensus/ingestion"
	"github.com/koko1123/flow-go-1/engine/consensus/matching"
	"github.com/koko1123/flow-go-1/engine/consensus/sealing"
	"github.com/koko1123/flow-go-1/engine/execution/computation"
	"github.com/koko1123/flow-go-1/engine/execution/computation/committer"
	"github.com/koko1123/flow-go-1/engine/execution/ingestion"
	"github.com/koko1123/flow-go-1/engine/execution/ingestion/uploader"
	executionprovider "github.com/koko1123/flow-go-1/engine/execution/provider"
	executionState "github.com/koko1123/flow-go-1/engine/execution/state"
	bootstrapexec "github.com/koko1123/flow-go-1/engine/execution/state/bootstrap"
	testmock "github.com/koko1123/flow-go-1/engine/testutil/mock"
	verificationassigner "github.com/koko1123/flow-go-1/engine/verification/assigner"
	"github.com/koko1123/flow-go-1/engine/verification/assigner/blockconsumer"
	"github.com/koko1123/flow-go-1/engine/verification/fetcher"
	"github.com/koko1123/flow-go-1/engine/verification/fetcher/chunkconsumer"
	vereq "github.com/koko1123/flow-go-1/engine/verification/requester"
	"github.com/koko1123/flow-go-1/engine/verification/verifier"
	"github.com/koko1123/flow-go-1/fvm"
	"github.com/koko1123/flow-go-1/fvm/derived"
	"github.com/koko1123/flow-go-1/fvm/environment"
	"github.com/koko1123/flow-go-1/ledger/common/pathfinder"
	completeLedger "github.com/koko1123/flow-go-1/ledger/complete"
	"github.com/koko1123/flow-go-1/ledger/complete/wal"
	"github.com/koko1123/flow-go-1/model/bootstrap"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/model/flow/filter"
	"github.com/koko1123/flow-go-1/module"
	"github.com/koko1123/flow-go-1/module/buffer"
	"github.com/koko1123/flow-go-1/module/chainsync"
	"github.com/koko1123/flow-go-1/module/chunks"
	"github.com/koko1123/flow-go-1/module/executiondatasync/execution_data"
	exedataprovider "github.com/koko1123/flow-go-1/module/executiondatasync/provider"
	mocktracker "github.com/koko1123/flow-go-1/module/executiondatasync/tracker/mock"
	confinalizer "github.com/koko1123/flow-go-1/module/finalizer/consensus"
	"github.com/koko1123/flow-go-1/module/id"
	"github.com/koko1123/flow-go-1/module/irrecoverable"
	"github.com/koko1123/flow-go-1/module/local"
	"github.com/koko1123/flow-go-1/module/mempool"
	consensusMempools "github.com/koko1123/flow-go-1/module/mempool/consensus"
	"github.com/koko1123/flow-go-1/module/mempool/epochs"
	"github.com/koko1123/flow-go-1/module/mempool/herocache"
	"github.com/koko1123/flow-go-1/module/mempool/stdmap"
	"github.com/koko1123/flow-go-1/module/metrics"
	mockmodule "github.com/koko1123/flow-go-1/module/mock"
	"github.com/koko1123/flow-go-1/module/signature"
	requesterunit "github.com/koko1123/flow-go-1/module/state_synchronization/requester/unittest"
	"github.com/koko1123/flow-go-1/module/trace"
	"github.com/koko1123/flow-go-1/module/validation"
	"github.com/koko1123/flow-go-1/network/channels"
	"github.com/koko1123/flow-go-1/network/stub"
	"github.com/koko1123/flow-go-1/state/protocol"
	badgerstate "github.com/koko1123/flow-go-1/state/protocol/badger"
	"github.com/koko1123/flow-go-1/state/protocol/blocktimer"
	"github.com/koko1123/flow-go-1/state/protocol/events"
	"github.com/koko1123/flow-go-1/state/protocol/events/gadgets"
	"github.com/koko1123/flow-go-1/state/protocol/util"
	storage "github.com/koko1123/flow-go-1/storage/badger"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

// GenericNodeFromParticipants is a test helper that creates and returns a generic node.
// The generic node's state is generated from the given participants, resulting in a
// root state snapshot.
//
// CAUTION: Please use GenericNode instead for most use-cases so that multiple nodes
// may share the same root state snapshot.
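//
// Illustrative usage (a sketch; the fixture helpers shown are the ones this package
// relies on elsewhere, and Done is assumed to tear down the node's DBs and temp dirs):
//
//	participants := unittest.CompleteIdentitySet()
//	node := GenericNodeFromParticipants(t, stub.NewNetworkHub(), participants[0], participants, flow.Testnet)
//	defer node.Done()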
func GenericNodeFromParticipants(t testing.TB, hub *stub.Hub, identity *flow.Identity, participants []*flow.Identity, chainID flow.ChainID,
	options ...func(protocol.State)) testmock.GenericNode {
	var i int
	var participant *flow.Identity
	for i, participant = range participants {
		if identity.NodeID == participant.NodeID {
			break
		}
	}

	// creates the logger, metrics collector, and tracer.
	log := unittest.Logger().With().Int("index", i).Hex("node_id", identity.NodeID[:]).Str("role", identity.Role.String()).Logger()
	tracer, err := trace.NewTracer(log, "test", "test", trace.SensitivityCaptureAll)
	require.NoError(t, err)
	metrics := metrics.NewNoopCollector()

	// creates the state fixture and bootstraps it.
	rootSnapshot := unittest.RootSnapshotFixture(participants)
	stateFixture := CompleteStateFixture(t, metrics, tracer, rootSnapshot)

	for _, option := range options {
		option(stateFixture.State)
	}

	return GenericNodeWithStateFixture(t, stateFixture, hub, identity, log, metrics, tracer, chainID)
}

// GenericNode returns a generic test node, containing components shared across
// all node roles. The generic node is used as the core data structure to create
// other types of flow nodes.
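//
// Because the root snapshot is passed in, multiple nodes can share it (sketch):
//
//	root := unittest.RootSnapshotFixture(participants)
//	nodeA := GenericNode(t, hub, participants[0], root)
//	nodeB := GenericNode(t, hub, participants[1], root)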
func GenericNode(
	t testing.TB,
	hub *stub.Hub,
	identity *flow.Identity,
	root protocol.Snapshot,
) testmock.GenericNode {

	log := unittest.Logger().With().
		Hex("node_id", identity.NodeID[:]).
		Str("role", identity.Role.String()).
		Logger()
	metrics := metrics.NewNoopCollector()
	tracer := trace.NewNoopTracer()
	stateFixture := CompleteStateFixture(t, metrics, tracer, root)

	head, err := root.Head()
	require.NoError(t, err)
	chainID := head.ChainID

	return GenericNodeWithStateFixture(t, stateFixture, hub, identity, log, metrics, tracer, chainID)
}

// GenericNodeWithStateFixture is a test helper that creates a generic node with the specified state fixture.
func GenericNodeWithStateFixture(t testing.TB,
	stateFixture *testmock.StateFixture,
	hub *stub.Hub,
	identity *flow.Identity,
	log zerolog.Logger,
	metrics *metrics.NoopCollector,
	tracer module.Tracer,
	chainID flow.ChainID) testmock.GenericNode {

	me := LocalFixture(t, identity)
	net := stub.NewNetwork(t, identity.NodeID, hub)

	parentCtx, cancel := context.WithCancel(context.Background())
	ctx, _ := irrecoverable.WithSignaler(parentCtx)

	return testmock.GenericNode{
		Ctx:            ctx,
		Cancel:         cancel,
		Log:            log,
		Metrics:        metrics,
		Tracer:         tracer,
		PublicDB:       stateFixture.PublicDB,
		SecretsDB:      stateFixture.SecretsDB,
		State:          stateFixture.State,
		Headers:        stateFixture.Storage.Headers,
		Guarantees:     stateFixture.Storage.Guarantees,
		Seals:          stateFixture.Storage.Seals,
		Payloads:       stateFixture.Storage.Payloads,
		Blocks:         stateFixture.Storage.Blocks,
		Me:             me,
		Net:            net,
		DBDir:          stateFixture.DBDir,
		ChainID:        chainID,
		ProtocolEvents: stateFixture.ProtocolEvents,
	}
}

// LocalFixture creates and returns a Local module for the given identity.
func LocalFixture(t testing.TB, identity *flow.Identity) module.Local {

	// Generates a test signing oracle for the node.
	// Disclaimer: it must not be used for practical applications.
	//
	// The node's identity is used as the seed.
	seed, err := json.Marshal(identity)
	require.NoError(t, err)
	// creates the signing key of the node
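	// (BLS key generation requires a sufficiently long seed; using the first 64 bytes
	// of the JSON-encoded identity makes the key deterministic per identity)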
	sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed[:64])
	require.NoError(t, err)

	// sets staking public key of the node
	identity.StakingPubKey = sk.PublicKey()

	me, err := local.New(identity, sk)
	require.NoError(t, err)

	return me
}

// CompleteStateFixture is a test helper that creates, bootstraps, and returns a StateFixture for the sake of unit testing.
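// A typical call mirrors how this file uses it (illustrative):
//
//	fixture := CompleteStateFixture(t, metrics.NewNoopCollector(), trace.NewNoopTracer(), rootSnapshot)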
func CompleteStateFixture(
	t testing.TB,
	metric *metrics.NoopCollector,
	tracer module.Tracer,
	rootSnapshot protocol.Snapshot,
) *testmock.StateFixture {

	dataDir := unittest.TempDir(t)
	publicDBDir := filepath.Join(dataDir, "protocol")
	secretsDBDir := filepath.Join(dataDir, "secrets")
	db := unittest.TypedBadgerDB(t, publicDBDir, storage.InitPublic)
	s := storage.InitAll(metric, db)
	secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storage.InitSecret)
	consumer := events.NewDistributor()

	state, err := badgerstate.Bootstrap(metric, db, s.Headers, s.Seals, s.Results, s.Blocks, s.Setups, s.EpochCommits, s.Statuses, rootSnapshot)
	require.NoError(t, err)

	mutableState, err := badgerstate.NewFullConsensusState(state, s.Index, s.Payloads, tracer, consumer,
		util.MockBlockTimer(), util.MockReceiptValidator(), util.MockSealValidator(s.Seals))
	require.NoError(t, err)

	return &testmock.StateFixture{
		PublicDB:       db,
		SecretsDB:      secretsDB,
		Storage:        s,
		DBDir:          dataDir,
		ProtocolEvents: consumer,
		State:          mutableState,
	}
}

// CollectionNode returns a mock collection node.
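// Illustrative usage (a sketch; the NodeInfo fixture name is an assumption, not
// confirmed by this file):
//
//	sctx, _ := irrecoverable.WithSignaler(context.Background())
//	info := unittest.PrivateNodeInfosFixture(1, unittest.WithRole(flow.RoleCollection))[0]
//	collNode := CollectionNode(t, sctx, hub, info, rootSnapshot)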
func CollectionNode(t *testing.T, ctx irrecoverable.SignalerContext, hub *stub.Hub, identity bootstrap.NodeInfo, rootSnapshot protocol.Snapshot) testmock.CollectionNode {

	node := GenericNode(t, hub, identity.Identity(), rootSnapshot)
	privKeys, err := identity.PrivateKeys()
	require.NoError(t, err)
	node.Me, err = local.New(identity.Identity(), privKeys.StakingKey)
	require.NoError(t, err)

	pools := epochs.NewTransactionPools(
		func(_ uint64) mempool.Transactions {
			return herocache.NewTransactions(1000, node.Log, metrics.NewNoopCollector())
		})
	transactions := storage.NewTransactions(node.Metrics, node.PublicDB)
	collections := storage.NewCollections(node.PublicDB, transactions)
	clusterPayloads := storage.NewClusterPayloads(node.Metrics, node.PublicDB)

	ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Metrics, node.Me, node.ChainID.Chain(), pools, collectioningest.DefaultConfig())
	require.NoError(t, err)

	selector := filter.HasRole(flow.RoleAccess, flow.RoleVerification)
	retrieve := func(collID flow.Identifier) (flow.Entity, error) {
		coll, err := collections.ByID(collID)
		return coll, err
	}
	providerEngine, err := provider.New(
		node.Log,
		node.Metrics,
		node.Net,
		node.Me,
		node.State,
		queue.NewHeroStore(uint32(1000), unittest.Logger(), metrics.NewNoopCollector()),
		uint(1000),
		channels.ProvideCollections,
		selector,
		retrieve)
	require.NoError(t, err)
	// TODO: move this start logic to a more generalized test utility (we need all engines to be startable).
	providerEngine.Start(ctx)

	pusherEngine, err := pusher.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me, collections, transactions)
	require.NoError(t, err)

	clusterStateFactory, err := factories.NewClusterStateFactory(
		node.PublicDB,
		node.Metrics,
		node.Tracer,
	)
	require.NoError(t, err)

	builderFactory, err := factories.NewBuilderFactory(
		node.PublicDB,
		node.Headers,
		node.Tracer,
		node.Metrics,
		pusherEngine,
		node.Log,
	)
	require.NoError(t, err)

	proposalFactory, err := factories.NewProposalEngineFactory(
		node.Log,
		node.Net,
		node.Me,
		node.Metrics, node.Metrics, node.Metrics,
		node.State,
		transactions,
	)
	require.NoError(t, err)

	syncFactory, err := factories.NewSyncEngineFactory(
		node.Log,
		node.Metrics,
		node.Net,
		node.Me,
		chainsync.DefaultConfig(),
	)
	require.NoError(t, err)

	createMetrics := func(chainID flow.ChainID) module.HotstuffMetrics {
		return metrics.NewNoopCollector()
	}
	hotstuffFactory, err := factories.NewHotStuffFactory(
		node.Log,
		node.Me,
		node.PublicDB,
		node.State,
		createMetrics,
		consensus.WithInitialTimeout(time.Second*2),
	)
	require.NoError(t, err)

	factory := factories.NewEpochComponentsFactory(
		node.Me,
		pools,
		builderFactory,
		clusterStateFactory,
		hotstuffFactory,
		proposalFactory,
		syncFactory,
	)

	rootQCVoter := new(mockmodule.ClusterRootQCVoter)
	rootQCVoter.On("Vote", mock.Anything, mock.Anything).Return(nil)

	heights := gadgets.NewHeights()
	node.ProtocolEvents.AddConsumer(heights)

	epochManager, err := epochmgr.New(
		node.Log,
		node.Me,
		node.State,
		pools,
		rootQCVoter,
		factory,
		heights,
	)
	require.NoError(t, err)

	node.ProtocolEvents.AddConsumer(epochManager)

	return testmock.CollectionNode{
		GenericNode:        node,
		Collections:        collections,
		Transactions:       transactions,
		ClusterPayloads:    clusterPayloads,
		IngestionEngine:    ingestionEngine,
		PusherEngine:       pusherEngine,
		ProviderEngine:     providerEngine,
		EpochManagerEngine: epochManager,
	}
}

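// ConsensusNode returns a mock consensus node, wiring real ingestion, sealing,
// and matching engines on top of a generic node.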
func ConsensusNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identities []*flow.Identity, chainID flow.ChainID) testmock.ConsensusNode {

	node := GenericNodeFromParticipants(t, hub, identity, identities, chainID)

	resultsDB := storage.NewExecutionResults(node.Metrics, node.PublicDB)
	receiptsDB := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, resultsDB, storage.DefaultCacheSize)

	guarantees, err := stdmap.NewGuarantees(1000)
	require.NoError(t, err)

	receipts := consensusMempools.NewExecutionTree()

	seals := stdmap.NewIncorporatedResultSeals(1000)
	pendingReceipts := stdmap.NewPendingReceipts(node.Headers, 1000)

	ingestionCore := consensusingest.NewCore(node.Log, node.Tracer, node.Metrics, node.State,
		node.Headers, guarantees)
	// receives collection guarantees
	ingestionEngine, err := consensusingest.New(node.Log, node.Metrics, node.Net, node.Me, ingestionCore)
	require.NoError(t, err)

	// requests receipts from execution nodes
	receiptRequester, err := requester.New(node.Log, node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} })
	require.NoError(t, err)

	assigner, err := chunks.NewChunkAssigner(flow.DefaultChunkAssignmentAlpha, node.State)
	require.NoError(t, err)

	receiptValidator := validation.NewReceiptValidator(node.State, node.Headers, node.Index, resultsDB, node.Seals)

	sealingEngine, err := sealing.NewEngine(
		node.Log,
		node.Tracer,
		node.Metrics,
		node.Metrics,
		node.Metrics,
		&tracker.NoopSealingTracker{},
		node.Net,
		node.Me,
		node.Headers,
		node.Payloads,
		resultsDB,
		node.Index,
		node.State,
		node.Seals,
		assigner,
		seals,
		unittest.NewSealingConfigs(flow.DefaultRequiredApprovalsForSealConstruction),
	)
	require.NoError(t, err)

	matchingConfig := matching.DefaultConfig()

	matchingCore := matching.NewCore(
		node.Log,
		node.Tracer,
		node.Metrics,
		node.Metrics,
		node.State,
		node.Headers,
		receiptsDB,
		receipts,
		pendingReceipts,
		seals,
		receiptValidator,
		receiptRequester,
		matchingConfig)

	matchingEngine, err := matching.NewEngine(
		node.Log,
		node.Net,
		node.Me,
		node.Metrics,
		node.Metrics,
		node.State,
		receiptsDB,
		node.Index,
		matchingCore,
	)
	require.NoError(t, err)

	return testmock.ConsensusNode{
		GenericNode:     node,
		Guarantees:      guarantees,
		Receipts:        receipts,
		Seals:           seals,
		IngestionEngine: ingestionEngine,
		SealingEngine:   sealingEngine,
		MatchingEngine:  matchingEngine,
	}
}

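// ConsensusNodes returns nNodes consensus nodes bootstrapped on a common state,
// padded with dummy identities so that every other role is represented.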
func ConsensusNodes(t *testing.T, hub *stub.Hub, nNodes int, chainID flow.ChainID) []testmock.ConsensusNode {
	conIdentities := unittest.IdentityListFixture(nNodes, unittest.WithRole(flow.RoleConsensus))
	for _, id := range conIdentities {
		t.Log(id.String())
	}

	// add some extra dummy identities so we have one of each role
	others := unittest.IdentityListFixture(5, unittest.WithAllRolesExcept(flow.RoleConsensus))

	identities := append(conIdentities, others...)

	nodes := make([]testmock.ConsensusNode, 0, len(conIdentities))
	for _, identity := range conIdentities {
		nodes = append(nodes, ConsensusNode(t, hub, identity, identities, chainID))
	}

	return nodes
}

type CheckerMock struct {
	notifications.NoopConsumer // satisfy the FinalizationConsumer interface
}

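// ExecutionNode returns a mock execution node, bootstrapping a real ledger (with WAL
// and compactor), execution state, and the ingestion, provider, follower, and sync engines.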
func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identities []*flow.Identity, syncThreshold int, chainID flow.ChainID) testmock.ExecutionNode {
	node := GenericNodeFromParticipants(t, hub, identity, identities, chainID)

	transactionsStorage := storage.NewTransactions(node.Metrics, node.PublicDB)
	collectionsStorage := storage.NewCollections(node.PublicDB, transactionsStorage)
	eventsStorage := storage.NewEvents(node.Metrics, node.PublicDB)
	serviceEventsStorage := storage.NewServiceEvents(node.Metrics, node.PublicDB)
	txResultStorage := storage.NewTransactionResults(node.Metrics, node.PublicDB, storage.DefaultCacheSize)
	commitsStorage := storage.NewCommits(node.Metrics, node.PublicDB)
	chunkDataPackStorage := storage.NewChunkDataPacks(node.Metrics, node.PublicDB, collectionsStorage, 100)
	results := storage.NewExecutionResults(node.Metrics, node.PublicDB)
	receipts := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize)
	myReceipts := storage.NewMyExecutionReceipts(node.Metrics, node.PublicDB, receipts)
	checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) {
		return protocol.IsNodeAuthorizedAt(node.State.AtBlockID(blockID), node.Me.NodeID())
	}

	protoState, ok := node.State.(*badgerstate.MutableState)
	require.True(t, ok)

	followerState, err := badgerstate.NewFollowerState(protoState.State, node.Index, node.Payloads, node.Tracer,
		node.ProtocolEvents, blocktimer.DefaultBlockTimer)
	require.NoError(t, err)

	pendingBlocks := buffer.NewPendingBlocks() // for following main chain consensus

	dbDir := unittest.TempDir(t)

	metricsCollector := &metrics.NoopCollector{}

	const (
		capacity           = 100
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)
	diskWal, err := wal.NewDiskWAL(node.Log.With().Str("subcomponent", "wal").Logger(), nil, metricsCollector, dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(t, err)

	ls, err := completeLedger.NewLedger(diskWal, capacity, metricsCollector, node.Log.With().Str("component", "ledger").Logger(), completeLedger.DefaultPathFinderVersion)
	require.NoError(t, err)

	compactor, err := completeLedger.NewCompactor(ls, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
	require.NoError(t, err)

	<-compactor.Ready() // Need to start compactor here because BootstrapLedger() updates ledger state.

	genesisHead, err := node.State.Final().Head()
	require.NoError(t, err)

	bootstrapper := bootstrapexec.NewBootstrapper(node.Log)
	commit, err := bootstrapper.BootstrapLedger(
		ls,
		unittest.ServiceAccountPublicKey,
		node.ChainID.Chain(),
		fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply))
	require.NoError(t, err)

	err = bootstrapper.BootstrapExecutionDatabase(node.PublicDB, commit, genesisHead)
	require.NoError(t, err)

	execState := executionState.NewExecutionState(
		ls, commitsStorage, node.Blocks, node.Headers, collectionsStorage, chunkDataPackStorage, results, myReceipts, eventsStorage, serviceEventsStorage, txResultStorage, node.PublicDB, node.Tracer,
	)

	requestEngine, err := requester.New(
		node.Log, node.Metrics, node.Net, node.Me, node.State,
		channels.RequestCollections,
		filter.HasRole(flow.RoleCollection),
		func() flow.Entity { return &flow.Collection{} },
	)
	require.NoError(t, err)

	pusherEngine, err := executionprovider.New(
		node.Log,
		node.Tracer,
		node.Net,
		node.State,
		execState,
		metricsCollector,
		checkAuthorizedAtBlock,
		queue.NewHeroStore(uint32(1000), unittest.Logger(), metrics.NewNoopCollector()),
		executionprovider.DefaultChunkDataPackRequestWorker,
		executionprovider.DefaultChunkDataPackQueryTimeout,
		executionprovider.DefaultChunkDataPackDeliveryTimeout,
	)
	require.NoError(t, err)

	blockFinder := environment.NewBlockFinder(node.Headers)

	vmCtx := fvm.NewContext(
		fvm.WithLogger(node.Log),
		fvm.WithChain(node.ChainID.Chain()),
		fvm.WithBlocks(blockFinder),
	)
	committer := committer.NewLedgerViewCommitter(ls, node.Tracer)

	bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore())))
	trackerStorage := mocktracker.NewMockStorage()

	prov := exedataprovider.NewProvider(
		zerolog.Nop(),
		metrics.NewNoopCollector(),
		execution_data.DefaultSerializer,
		bservice,
		trackerStorage,
	)

	computationEngine, err := computation.New(
		node.Log,
		node.Metrics,
		node.Tracer,
		node.Me,
		node.State,
		vmCtx,
		committer,
		prov,
		computation.ComputationConfig{
			DerivedDataCacheSize:     derived.DefaultDerivedDataCacheSize,
			ScriptLogThreshold:       computation.DefaultScriptLogThreshold,
			ScriptExecutionTimeLimit: computation.DefaultScriptExecutionTimeLimit,
		},
	)
	require.NoError(t, err)

	computation := &testmock.ComputerWrap{
		Manager: computationEngine,
	}

	syncCore, err := chainsync.New(node.Log, chainsync.DefaultConfig(), metrics.NewChainSyncCollector())
	require.NoError(t, err)

	deltas, err := ingestion.NewDeltas(1000)
	require.NoError(t, err)

	finalizationDistributor := pubsub.NewFinalizationDistributor()

	latestExecutedHeight, _, err := execState.GetHighestExecutedBlockID(context.TODO())
	require.NoError(t, err)

	// disabled by default
	uploader := uploader.NewManager(node.Tracer)

	rootHead, rootQC := getRoot(t, &node)
	ingestionEngine, err := ingestion.New(
		node.Log,
		node.Net,
		node.Me,
		requestEngine,
		node.State,
		node.Blocks,
		collectionsStorage,
		eventsStorage,
		serviceEventsStorage,
		txResultStorage,
		computation,
		pusherEngine,
		execState,
		node.Metrics,
		node.Tracer,
		false,
		filter.Any,
		deltas,
		syncThreshold,
		false,
		checkAuthorizedAtBlock,
		nil,
		uploader,
		ingestion.NewStopControl(node.Log.With().Str("component", "stop_control").Logger(), false, latestExecutedHeight),
	)
	require.NoError(t, err)
	requestEngine.WithHandle(ingestionEngine.OnCollection)

	node.ProtocolEvents.AddConsumer(ingestionEngine)

	followerCore, finalizer := createFollowerCore(t, &node, followerState, finalizationDistributor, rootHead, rootQC)

	// initialize cleaner for DB
	cleaner := storage.NewCleaner(node.Log, node.PublicDB, node.Metrics, flow.DefaultValueLogGCFrequency)

	followerEng, err := follower.New(node.Log, node.Net, node.Me, node.Metrics, node.Metrics, cleaner,
		node.Headers, node.Payloads, followerState, pendingBlocks, followerCore, syncCore, node.Tracer)
	require.NoError(t, err)

	finalizedHeader, err := synchronization.NewFinalizedHeaderCache(node.Log, node.State, finalizationDistributor)
	require.NoError(t, err)

	idCache, err := cache.NewProtocolStateIDCache(node.Log, node.State, events.NewDistributor())
	require.NoError(t, err, "could not create finalized snapshot cache")
	syncEngine, err := synchronization.New(
		node.Log,
		node.Metrics,
		node.Net,
		node.Me,
		node.Blocks,
		followerEng,
		syncCore,
		finalizedHeader,
		id.NewIdentityFilterIdentifierProvider(
			filter.And(
				filter.HasRole(flow.RoleConsensus),
				filter.Not(filter.HasNodeID(node.Me.NodeID())),
			),
			idCache,
		),
		synchronization.WithPollInterval(time.Duration(0)),
	)
	require.NoError(t, err)

	return testmock.ExecutionNode{
		GenericNode:         node,
		MutableState:        followerState,
		IngestionEngine:     ingestionEngine,
		FollowerEngine:      followerEng,
		SyncEngine:          syncEngine,
		ExecutionEngine:     computation,
		RequestEngine:       requestEngine,
		ReceiptsEngine:      pusherEngine,
		BadgerDB:            node.PublicDB,
		VM:                  computationEngine.VM(),
		ExecutionState:      execState,
		Ledger:              ls,
		LevelDbDir:          dbDir,
		Collections:         collectionsStorage,
		Finalizer:           finalizer,
		MyExecutionReceipts: myReceipts,
		Compactor:           compactor,
	}
}

func getRoot(t *testing.T, node *testmock.GenericNode) (*flow.Header, *flow.QuorumCertificate) {
	rootHead, err := node.State.Params().Root()
	require.NoError(t, err)

	signers, err := node.State.AtHeight(0).Identities(filter.HasRole(flow.RoleConsensus))
	require.NoError(t, err)

	signerIDs := signers.NodeIDs()
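	// encode a signer-indices vector in which the full consensus committee signed the root QC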
	signerIndices, err := signature.EncodeSignersToIndices(signerIDs, signerIDs)
	require.NoError(t, err)

	rootQC := &flow.QuorumCertificate{
		View:          rootHead.View,
		BlockID:       rootHead.ID(),
		SignerIndices: signerIndices,
		SigData:       unittest.SignatureFixture(),
	}

	return rootHead, rootQC
}

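// RoundRobinLeaderSelection is a minimal committee stub for HotStuff tests that
// assigns leaders round-robin by view:
//
//	leader(view) = identities[view % len(identities)]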
type RoundRobinLeaderSelection struct {
	identities flow.IdentityList
	me         flow.Identifier
}

func (s *RoundRobinLeaderSelection) Identities(blockID flow.Identifier) (flow.IdentityList, error) {
	return s.identities, nil
}

func (s *RoundRobinLeaderSelection) Identity(blockID flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) {
	id, found := s.identities.ByNodeID(participantID)
	if !found {
		return nil, fmt.Errorf("participant %v not found", participantID)
	}
	return id, nil
}

func (s *RoundRobinLeaderSelection) LeaderForView(view uint64) (flow.Identifier, error) {
	return s.identities[int(view)%len(s.identities)].NodeID, nil
}

func (s *RoundRobinLeaderSelection) Self() flow.Identifier {
	return s.me
}

func (s *RoundRobinLeaderSelection) DKG(blockID flow.Identifier) (hotstuff.DKG, error) {
	return nil, fmt.Errorf("DKG is not implemented by this test stub")
}

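// createFollowerCore creates a HotStuff follower core and finalizer for the given
// node, using a RoundRobinLeaderSelection committee and a verifier mock that
// accepts all votes and QCs.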
func createFollowerCore(t *testing.T, node *testmock.GenericNode, followerState *badgerstate.FollowerState, notifier hotstuff.FinalizationConsumer,
	rootHead *flow.Header, rootQC *flow.QuorumCertificate) (module.HotStuffFollower, *confinalizer.Finalizer) {

	identities, err := node.State.AtHeight(0).Identities(filter.HasRole(flow.RoleConsensus))
	require.NoError(t, err)

	committee := &RoundRobinLeaderSelection{
		identities: identities,
		me:         node.Me.NodeID(),
	}

	// mock verifier that accepts all votes and QCs
	verifier := &mockhotstuff.Verifier{}
	verifier.On("VerifyVote", mock.Anything, mock.Anything, mock.Anything).Return(nil)
	verifier.On("VerifyQC", mock.Anything, mock.Anything, mock.Anything).Return(nil)

	finalizer := confinalizer.NewFinalizer(node.PublicDB, node.Headers, followerState, trace.NewNoopTracer())

	pending := make([]*flow.Header, 0)

	// creates a consensus follower with the given notifier and no pending blocks
	followerCore, err := consensus.NewFollower(
		node.Log,
		committee,
		node.Headers,
		finalizer,
		verifier,
		notifier,
		rootHead,
		rootQC,
		rootHead,
		pending,
	)
	require.NoError(t, err)
	return followerCore, finalizer
}


type VerificationOpt func(*testmock.VerificationNode)

func WithChunkConsumer(chunkConsumer *chunkconsumer.ChunkConsumer) VerificationOpt {
	return func(node *testmock.VerificationNode) {
		node.ChunkConsumer = chunkConsumer
	}
}

func WithGenericNode(genericNode *testmock.GenericNode) VerificationOpt {
	return func(node *testmock.VerificationNode) {
		node.GenericNode = genericNode
	}
}
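
// Options are applied before any defaults are constructed, so a pre-built component
// can be injected (sketch; existingGenericNode is a hypothetical caller variable):
//
//	node := VerificationNode(t, hub, verIdentity, participants, assigner, chunksLimit,
//		chainID, collector, mempoolCollector, WithGenericNode(&existingGenericNode))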

// VerificationNode creates a verification node with all functional engines and actual modules for the purpose of
// (integration) testing.
func VerificationNode(t testing.TB,
	hub *stub.Hub,
	verIdentity *flow.Identity,     // identity of this verification node
	participants flow.IdentityList, // identities of all nodes in the system, including this verification node
	assigner module.ChunkAssigner,
	chunksLimit uint,
	chainID flow.ChainID,
	collector module.VerificationMetrics,   // used to enable collecting metrics on happy path integration
	mempoolCollector module.MempoolMetrics, // used to enable collecting metrics on happy path integration
	opts ...VerificationOpt) testmock.VerificationNode {

	var err error
	var node testmock.VerificationNode

	for _, apply := range opts {
		apply(&node)
	}

	if node.GenericNode == nil {
		gn := GenericNodeFromParticipants(t, hub, verIdentity, participants, chainID)
		node.GenericNode = &gn
	}

	if node.ChunkStatuses == nil {
		node.ChunkStatuses = stdmap.NewChunkStatuses(chunksLimit)
		err = mempoolCollector.Register(metrics.ResourceChunkStatus, node.ChunkStatuses.Size)
		require.NoError(t, err)
	}

	if node.ChunkRequests == nil {
		node.ChunkRequests = stdmap.NewChunkRequests(chunksLimit)
		err = mempoolCollector.Register(metrics.ResourceChunkRequest, node.ChunkRequests.Size)
		require.NoError(t, err)
	}

	if node.Results == nil {
		results := storage.NewExecutionResults(node.Metrics, node.PublicDB)
		node.Results = results
		node.Receipts = storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize)
	}

	if node.ProcessedChunkIndex == nil {
		node.ProcessedChunkIndex = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationChunkIndex)
	}

	if node.ChunksQueue == nil {
		node.ChunksQueue = storage.NewChunkQueue(node.PublicDB)
		ok, err := node.ChunksQueue.Init(chunkconsumer.DefaultJobIndex)
		require.NoError(t, err)
		require.True(t, ok)
	}

	if node.ProcessedBlockHeight == nil {
		node.ProcessedBlockHeight = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationBlockHeight)
	}

	if node.VerifierEngine == nil {
		vm := fvm.NewVirtualMachine()

		blockFinder := environment.NewBlockFinder(node.Headers)

		vmCtx := fvm.NewContext(
			fvm.WithLogger(node.Log),
			fvm.WithChain(node.ChainID.Chain()),
			fvm.WithBlocks(blockFinder),
		)

		chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, node.Log)

		approvalStorage := storage.NewResultApprovals(node.Metrics, node.PublicDB)

		node.VerifierEngine, err = verifier.New(node.Log,
			collector,
			node.Tracer,
			node.Net,
			node.State,
			node.Me,
			chunkVerifier,
			approvalStorage)
		require.NoError(t, err)
	}

	if node.RequesterEngine == nil {
		node.RequesterEngine, err = vereq.New(node.Log,
			node.State,
			node.Net,
			node.Tracer,
			collector,
			node.ChunkRequests,
			vereq.DefaultRequestInterval,
			// requests are only qualified if their retryAfter has elapsed.
			vereq.RetryAfterQualifier,
			// exponential backoff with a multiplier of 2, a minimum interval of one second, and
			// a maximum interval of one hour.
			mempool.ExponentialUpdater(
				vereq.DefaultBackoffMultiplier,
				vereq.DefaultBackoffMaxInterval,
				vereq.DefaultBackoffMinInterval),
			vereq.DefaultRequestTargets)

		require.NoError(t, err)
	}

	if node.FetcherEngine == nil {
		node.FetcherEngine = fetcher.New(node.Log,
			collector,
			node.Tracer,
			node.VerifierEngine,
			node.State,
			node.ChunkStatuses,
			node.Headers,
			node.Blocks,
			node.Results,
			node.Receipts,
			node.RequesterEngine,
			0,
		)
	}

	if node.ChunkConsumer == nil {
		node.ChunkConsumer = chunkconsumer.NewChunkConsumer(node.Log,
			collector,
			node.ProcessedChunkIndex,
			node.ChunksQueue,
			node.FetcherEngine,
			chunkconsumer.DefaultChunkWorkers) // defaults the number of workers to 3
		err = mempoolCollector.Register(metrics.ResourceChunkConsumer, node.ChunkConsumer.Size)
		require.NoError(t, err)
	}

	if node.AssignerEngine == nil {
		node.AssignerEngine = verificationassigner.New(node.Log,
			collector,
			node.Tracer,
			node.Me,
			node.State,
			assigner,
			node.ChunksQueue,
			node.ChunkConsumer,
			0)
	}

	if node.BlockConsumer == nil {
		node.BlockConsumer, _, err = blockconsumer.NewBlockConsumer(node.Log,
			collector,
			node.ProcessedBlockHeight,
			node.Blocks,
			node.State,
			node.AssignerEngine,
			blockconsumer.DefaultBlockWorkers)
		require.NoError(t, err)

		err = mempoolCollector.Register(metrics.ResourceBlockConsumer, node.BlockConsumer.Size)
		require.NoError(t, err)
	}

	return node
}