github.com/koko1123/flow-go-1@v0.29.6/engine/verification/utils/unittest/helper.go

package verificationtest

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/stretchr/testify/assert"
	testifymock "github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/koko1123/flow-go-1/consensus/hotstuff/model"
	"github.com/koko1123/flow-go-1/engine/testutil"
	enginemock "github.com/koko1123/flow-go-1/engine/testutil/mock"
	"github.com/koko1123/flow-go-1/engine/verification/assigner/blockconsumer"
	"github.com/koko1123/flow-go-1/model/chunks"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/model/flow/filter"
	"github.com/koko1123/flow-go-1/model/messages"
	"github.com/koko1123/flow-go-1/module"
	"github.com/koko1123/flow-go-1/module/metrics"
	"github.com/koko1123/flow-go-1/module/mock"
	msig "github.com/koko1123/flow-go-1/module/signature"
	"github.com/koko1123/flow-go-1/module/trace"
	"github.com/koko1123/flow-go-1/network"
	"github.com/koko1123/flow-go-1/network/channels"
	"github.com/koko1123/flow-go-1/network/mocknetwork"
	"github.com/koko1123/flow-go-1/network/stub"
	"github.com/koko1123/flow-go-1/state/protocol"
	mockprotocol "github.com/koko1123/flow-go-1/state/protocol/mock"
	"github.com/koko1123/flow-go-1/utils/logging"
	"github.com/koko1123/flow-go-1/utils/unittest"
	"github.com/onflow/flow-go/crypto"
)
// MockChunkDataProviderFunc is a test helper function encapsulating the logic of deciding whether to reply to a chunk data pack request.
type MockChunkDataProviderFunc func(*testing.T, CompleteExecutionReceiptList, flow.Identifier, flow.Identifier, network.Conduit) bool
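
// A minimal sketch (not part of the original helper set) of a custom provider
// implementing MockChunkDataProviderFunc: it serves requests only for one
// allowed verifier and stays silent for everyone else. The closure style
// mirrors RespondChunkDataPackRequestAfterNTrials below; `replyOnlyTo` and
// `allowed` are hypothetical names.
//
//	func replyOnlyTo(allowed flow.Identifier) MockChunkDataProviderFunc {
//		return func(t *testing.T, ers CompleteExecutionReceiptList, chunkID, verID flow.Identifier, con network.Conduit) bool {
//			if verID != allowed {
//				return false // pretend to be unresponsive to other verifiers
//			}
//			require.NoError(t, con.Unicast(ers.ChunkDataResponseOf(t, chunkID), verID))
//			return true
//		}
//	}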

// SetupChunkDataPackProvider creates and returns an execution node that only has a chunk data pack provider engine.
//
// The mock chunk provider engine replies to chunk data pack requests by invoking the injected provider method. All chunk data pack
// requests should come from a verification node and should carry one of the assigned chunk IDs. Otherwise, it fails the test.
func SetupChunkDataPackProvider(t *testing.T,
	hub *stub.Hub,
	exeIdentity *flow.Identity,
	participants flow.IdentityList,
	chainID flow.ChainID,
	completeERs CompleteExecutionReceiptList,
	assignedChunkIDs flow.IdentifierList,
	provider MockChunkDataProviderFunc) (*enginemock.GenericNode,
	*mocknetwork.Engine, *sync.WaitGroup) {

	exeNode := testutil.GenericNodeFromParticipants(t, hub, exeIdentity, participants, chainID)
	exeEngine := new(mocknetwork.Engine)

	exeChunkDataConduit, err := exeNode.Net.Register(channels.ProvideChunks, exeEngine)
	assert.Nil(t, err)

	replied := make(map[flow.Identifier]struct{})

	wg := &sync.WaitGroup{}
	wg.Add(len(assignedChunkIDs))

	mu := &sync.Mutex{} // making testify Run thread-safe

	exeEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			originID, ok := args[1].(flow.Identifier)
			require.True(t, ok)
			// request should be dispatched by a verification node.
			require.Contains(t, participants.Filter(filter.HasRole(flow.RoleVerification)).NodeIDs(), originID)

			req, ok := args[2].(*messages.ChunkDataRequest)
			require.True(t, ok)
			require.Contains(t, assignedChunkIDs, req.ChunkID) // only assigned chunks should be requested.

			shouldReply := provider(t, completeERs, req.ChunkID, originID, exeChunkDataConduit)
			_, alreadyReplied := replied[req.ChunkID]
			if shouldReply && !alreadyReplied {
				/*
					the wait group keeps track of unique chunk requests addressed.
					we make it done only upon the first successful request of a chunk.
				*/
				wg.Done()
				replied[req.ChunkID] = struct{}{}
			}
		}).Return(nil)

	return &exeNode, exeEngine, wg
}

// RespondChunkDataPackRequestImmediately immediately qualifies a chunk data request for reply by the chunk data provider.
func RespondChunkDataPackRequestImmediately(t *testing.T,
	completeERs CompleteExecutionReceiptList,
	chunkID flow.Identifier,
	verID flow.Identifier,
	con network.Conduit) bool {

	// finds the chunk data pack of the requested chunk and sends it back.
	res := completeERs.ChunkDataResponseOf(t, chunkID)

	err := con.Unicast(res, verID)
	assert.Nil(t, err)

	log.Debug().
		Hex("origin_id", logging.ID(verID)).
		Hex("chunk_id", logging.ID(chunkID)).
		Msg("chunk data pack request answered by provider")

	return true
}

// RespondChunkDataPackRequestAfterNTrials only qualifies a chunk data request for reply by the chunk data provider after it has been tried n times.
func RespondChunkDataPackRequestAfterNTrials(n int) MockChunkDataProviderFunc {
	tryCount := make(map[flow.Identifier]int)

	return func(t *testing.T, completeERs CompleteExecutionReceiptList, chunkID flow.Identifier, verID flow.Identifier, con network.Conduit) bool {
		tryCount[chunkID]++

		if tryCount[chunkID] >= n {
			// finds the chunk data pack of the requested chunk and sends it back.
			res := completeERs.ChunkDataResponseOf(t, chunkID)

			err := con.Unicast(res, verID)
			assert.Nil(t, err)

			log.Debug().
				Hex("origin_id", logging.ID(verID)).
				Hex("chunk_id", logging.ID(chunkID)).
				Int("trial_time", tryCount[chunkID]).
				Msg("chunk data pack request answered by provider")

			return true
		}

		return false
	}
}
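
// A usage sketch, assuming a test that already has `hub`, `exeIdentity`,
// `participants`, `chainID`, `completeERs`, and `assignedChunkIDs` in scope
// (hypothetical wiring, shown only to illustrate how the pieces compose):
//
//	provider := RespondChunkDataPackRequestAfterNTrials(3) // reply on the 3rd attempt
//	exeNode, exeEngine, exeWG := SetupChunkDataPackProvider(t,
//		hub, exeIdentity, participants, chainID, completeERs, assignedChunkIDs, provider)
//	// ... drive the scenario, then wait for every assigned chunk to be served once:
//	unittest.RequireReturnsBefore(t, exeWG.Wait, 10*time.Second, "no chunk data served")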

// SetupMockConsensusNode creates and returns a mock consensus node (conIdentity) and its registered engine in the
// network (hub). It mocks the Process method of the consensus engine so that, upon receiving a message from one of the
// given verification nodes (verIdentities), it evaluates whether the message is a result approval for a chunk assigned to that verifier.
func SetupMockConsensusNode(t *testing.T,
	log zerolog.Logger,
	hub *stub.Hub,
	conIdentity *flow.Identity,
	verIdentities flow.IdentityList,
	othersIdentity flow.IdentityList,
	completeERs CompleteExecutionReceiptList,
	chainID flow.ChainID,
	assignedChunkIDs flow.IdentifierList) (*enginemock.GenericNode, *mocknetwork.Engine, *sync.WaitGroup) {

	lg := log.With().Str("role", "mock-consensus").Logger()

	wg := &sync.WaitGroup{}
	// each verification node is expected to issue one result approval per assigned chunk,
	// and there are len(verIdentities)-many verification nodes,
	// so a total of len(verIdentities) * len(assignedChunkIDs) result approvals
	// are expected.
	wg.Add(len(verIdentities) * len(assignedChunkIDs))

	// mock the consensus node with a generic node and mocked engine to assert
	// that the result approval is broadcast
	conNode := testutil.GenericNodeFromParticipants(t, hub, conIdentity, othersIdentity, chainID)
	conEngine := new(mocknetwork.Engine)

	// map from verification node ID to the set of result approval IDs seen from it
	resultApprovalSeen := make(map[flow.Identifier]map[flow.Identifier]struct{})
	for _, verIdentity := range verIdentities {
		resultApprovalSeen[verIdentity.NodeID] = make(map[flow.Identifier]struct{})
	}

	// creates a hasher for SPoCK
	hasher := msig.NewBLSHasher(msig.SPOCKTag)
	mu := &sync.Mutex{} // making testify mock thread-safe

	conEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			originID, ok := args[1].(flow.Identifier)
			assert.True(t, ok)

			resultApproval, ok := args[2].(*flow.ResultApproval)
			assert.True(t, ok)

			lg.Debug().
				Hex("result_approval_id", logging.ID(resultApproval.ID())).
				Msg("result approval received")

			// asserts that this result approval has not been seen from this verification node before
			_, ok = resultApprovalSeen[originID][resultApproval.ID()]
			assert.False(t, ok)

			// marks result approval as seen
			resultApprovalSeen[originID][resultApproval.ID()] = struct{}{}

			// result approval should belong to a chunk assigned to the verification node.
			chunk := completeERs.ChunkOf(t, resultApproval.Body.ExecutionResultID, resultApproval.Body.ChunkIndex)
			assert.Contains(t, assignedChunkIDs, chunk.ID())

			// verifies the SPoCK proof of the result approval
			// against the SPoCK secret of the execution result
			//
			// retrieves public key of verification node
			var pk crypto.PublicKey
			found := false
			for _, identity := range verIdentities {
				if originID == identity.NodeID {
					pk = identity.StakingPubKey
					found = true
				}
			}
			require.True(t, found)

			// verifies the SPoCK proof
			valid, err := crypto.SPOCKVerifyAgainstData(
				pk,
				resultApproval.Body.Spock,
				completeERs.ReceiptDataOf(t, chunk.ID()).SpockSecrets[resultApproval.Body.ChunkIndex],
				hasher,
			)
			assert.NoError(t, err)

			if !valid {
				// when the chunk verifier returns a chunk fault, a placeholder
				// signature is generated for that chunk.
				isChunkFaultSignature, err := crypto.SPOCKVerifyAgainstData(
					pk,
					resultApproval.Body.Spock,
					nil, // chunk fault has no SPoCK secret
					hasher,
				)
				assert.NoError(t, err)

				if isChunkFaultSignature {
					assert.Fail(t, "chunk verifier returned chunk fault")
				} else {
					assert.Fail(t, "spock secret mismatch")
				}
			}

			wg.Done()
		}).Return(nil)

	_, err := conNode.Net.Register(channels.ReceiveApprovals, conEngine)
	assert.Nil(t, err)

	return &conNode, conEngine, wg
}

// isSystemChunk returns true if the index corresponds to the system chunk, i.e., the last chunk in
// the receipt.
func isSystemChunk(index uint64, chunkNum int) bool {
	return int(index) == chunkNum-1
}
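
// For example, in a receipt with 5 chunks (indices 0 through 4),
// isSystemChunk(4, 5) is true and isSystemChunk(3, 5) is false.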

func CreateExecutionResult(blockID flow.Identifier, options ...func(result *flow.ExecutionResult, assignments *chunks.Assignment)) (*flow.ExecutionResult, *chunks.Assignment) {
	result := &flow.ExecutionResult{
		BlockID: blockID,
		Chunks:  flow.ChunkList{},
	}
	assignments := chunks.NewAssignment()

	for _, option := range options {
		option(result, assignments)
	}
	return result, assignments
}

func WithChunks(setAssignees ...func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk) func(*flow.ExecutionResult, *chunks.Assignment) {
	return func(result *flow.ExecutionResult, assignment *chunks.Assignment) {
		for i, setAssignee := range setAssignees {
			chunk := setAssignee(result.BlockID, uint64(i), assignment)
			result.Chunks.Insert(chunk)
		}
	}
}

func ChunkWithIndex(blockID flow.Identifier, index int) *flow.Chunk {
	chunk := &flow.Chunk{
		Index: uint64(index),
		ChunkBody: flow.ChunkBody{
			CollectionIndex: uint(index),
			EventCollection: blockID, // ensures chunks from different blocks with the same index will have different chunk IDs
			BlockID:         blockID,
		},
		EndState: unittest.StateCommitmentFixture(),
	}
	return chunk
}

func WithAssignee(assignee flow.Identifier) func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk {
	return func(blockID flow.Identifier, index uint64, assignment *chunks.Assignment) *flow.Chunk {
		chunk := ChunkWithIndex(blockID, int(index))
		fmt.Printf("with assignee: %v, chunk id: %v\n", index, chunk.ID())
		assignment.Add(chunk, flow.IdentifierList{assignee})
		return chunk
	}
}
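
// A composition sketch using the option helpers above (hypothetical
// identifiers, for illustration only): builds a two-chunk execution result
// where both chunks are assigned to the same verifier.
//
//	verID := unittest.IdentifierFixture()
//	blockID := unittest.IdentifierFixture()
//	result, assignment := CreateExecutionResult(blockID,
//		WithChunks(
//			WithAssignee(verID),
//			WithAssignee(verID),
//		),
//	)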

func FromChunkID(chunkID flow.Identifier) flow.ChunkDataPack {
	return flow.ChunkDataPack{
		ChunkID: chunkID,
	}
}

type ChunkAssignerFunc func(chunkIndex uint64, chunks int) bool

// MockChunkAssignmentFixture is a test helper that mocks a chunk assigner for a set of verification nodes, covering the
// execution results in the given complete execution receipts, based on the given chunk assigner function.
//
// It returns the list of chunk locator IDs assigned to the input verification nodes, as well as the list of their chunk IDs.
// All verification nodes are assigned the same chunks.
func MockChunkAssignmentFixture(chunkAssigner *mock.ChunkAssigner,
	verIds flow.IdentityList,
	completeERs CompleteExecutionReceiptList,
	isAssigned ChunkAssignerFunc) (flow.IdentifierList, flow.IdentifierList) {

	expectedLocatorIds := flow.IdentifierList{}
	expectedChunkIds := flow.IdentifierList{}

	// keeps track of duplicate results (receipts that share the same result)
	visited := make(map[flow.Identifier]struct{})

	for _, completeER := range completeERs {
		for _, receipt := range completeER.Receipts {
			a := chunks.NewAssignment()

			_, duplicate := visited[receipt.ExecutionResult.ID()]
			if duplicate {
				// skips mocking chunk assignment for duplicate results
				continue
			}

			for _, chunk := range receipt.ExecutionResult.Chunks {
				if isAssigned(chunk.Index, len(receipt.ExecutionResult.Chunks)) {
					locatorID := chunks.Locator{
						ResultID: receipt.ExecutionResult.ID(),
						Index:    chunk.Index,
					}.ID()
					expectedLocatorIds = append(expectedLocatorIds, locatorID)
					expectedChunkIds = append(expectedChunkIds, chunk.ID())
					a.Add(chunk, verIds.NodeIDs())
				}
			}

			chunkAssigner.On("Assign", &receipt.ExecutionResult, completeER.ContainerBlock.ID()).Return(a, nil)
			visited[receipt.ExecutionResult.ID()] = struct{}{}
		}
	}

	return expectedLocatorIds, expectedChunkIds
}
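
// A usage sketch, assuming a test with `completeERs` and a verifier identity
// `verID` in scope (hypothetical wiring):
//
//	chunkAssigner := &mock.ChunkAssigner{}
//	locatorIDs, chunkIDs := MockChunkAssignmentFixture(chunkAssigner,
//		flow.IdentityList{verID},
//		completeERs,
//		EvenChunkIndexAssigner)
//	// chunkAssigner.Assign(...) now returns the mocked assignment for each result.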

// EvenChunkIndexAssigner is a helper function that returns true for the even indices in [0, chunkNum-1].
// It also returns true if the index corresponds to the system chunk.
func EvenChunkIndexAssigner(index uint64, chunkNum int) bool {
	ok := index%2 == 0 || isSystemChunk(index, chunkNum)
	return ok
}
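
// For instance, with chunkNum = 4 (indices 0 through 3), indices 0, 2, and 3
// are assigned (3 being the system chunk), while index 1 is not.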

// ExtendStateWithFinalizedBlocks is a test helper that extends the execution state and returns the list of blocks.
// It receives a list of complete execution receipt fixtures in the form of (R1,1 <- R1,2 <- ... <- C1) <- (R2,1 <- R2,2 <- ... <- C2) <- .....
// where R and C are the reference and container blocks.
// Reference blocks contain guarantees, and container blocks contain execution receipts for their preceding reference blocks,
// e.g., C1 contains receipts for R1,1, R1,2, etc.
// Note: for the sake of simplicity we do not include guarantees in the container blocks for now.
func ExtendStateWithFinalizedBlocks(t *testing.T, completeExecutionReceipts CompleteExecutionReceiptList,
	state protocol.MutableState) []*flow.Block {
	blocks := make([]*flow.Block, 0)

	// tracks duplicate reference blocks:
	// receipts may share the same execution result, hence
	// the same reference block (which we should not extend the state with twice).
	duplicate := make(map[flow.Identifier]struct{})

	// extends protocol state with the chain of blocks.
	for _, completeER := range completeExecutionReceipts {
		// extends state with reference blocks of the receipts
		for _, receipt := range completeER.ReceiptsData {
			refBlockID := receipt.ReferenceBlock.ID()
			_, dup := duplicate[refBlockID]
			if dup {
				// skips already-extended reference blocks
				continue
			}

			err := state.Extend(context.Background(), receipt.ReferenceBlock)
			require.NoError(t, err, "can not extend block %v", receipt.ReferenceBlock.ID())
			err = state.Finalize(context.Background(), refBlockID)
			require.NoError(t, err)
			blocks = append(blocks, receipt.ReferenceBlock)
			duplicate[refBlockID] = struct{}{}
		}

		// extends state with the container block of the receipts.
		containerBlockID := completeER.ContainerBlock.ID()
		_, dup := duplicate[containerBlockID]
		if dup {
			// skips already-extended container blocks
			continue
		}
		err := state.Extend(context.Background(), completeER.ContainerBlock)
		require.NoError(t, err)
		err = state.Finalize(context.Background(), containerBlockID)
		require.NoError(t, err)
		blocks = append(blocks, completeER.ContainerBlock)
		duplicate[containerBlockID] = struct{}{}
	}

	return blocks
}
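
// A usage sketch, assuming a test that has bootstrapped a mutable protocol
// state `state` whose finalized head is `root` (hypothetical wiring):
//
//	completeERs := CompleteExecutionReceiptChainFixture(t, root, 10)
//	blocks := ExtendStateWithFinalizedBlocks(t, completeERs, state)
//	// `blocks` now holds every reference and container block, in finalization order.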

// MockLastSealedHeight mocks the protocol state for the specified last sealed height.
func MockLastSealedHeight(state *mockprotocol.State, height uint64) {
	snapshot := &mockprotocol.Snapshot{}
	header := unittest.BlockHeaderFixture()
	header.Height = height
	state.On("Sealed").Return(snapshot)
	snapshot.On("Head").Return(header, nil)
}
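
// A usage sketch (hypothetical): pinning the last sealed height that a
// component under test observes through the protocol state:
//
//	state := &mockprotocol.State{}
//	MockLastSealedHeight(state, 42)
//	// state.Sealed().Head() now yields a header with Height == 42.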

func NewVerificationHappyPathTest(t *testing.T,
	authorized bool,
	blockCount int,
	eventRepetition int,
	verCollector module.VerificationMetrics,
	mempoolCollector module.MempoolMetrics,
	retry int,
	ops ...CompleteExecutionReceiptBuilderOpt) {

	withConsumers(t, authorized, blockCount, verCollector, mempoolCollector, RespondChunkDataPackRequestAfterNTrials(retry), func(
		blockConsumer *blockconsumer.BlockConsumer,
		blocks []*flow.Block,
		resultApprovalsWG *sync.WaitGroup,
		chunkDataRequestWG *sync.WaitGroup) {

		for i := 0; i < len(blocks)*eventRepetition; i++ {
			// the consumer is only required to be "notified" that a new finalized block is available.
			// It keeps track of the last finalized block it has read, and reads the next height upon
			// getting notified as follows:
			blockConsumer.OnFinalizedBlock(&model.Block{})
		}

		unittest.RequireReturnsBefore(t, chunkDataRequestWG.Wait, time.Duration(10*retry*blockCount)*time.Second,
			"could not receive chunk data requests on time")
		unittest.RequireReturnsBefore(t, resultApprovalsWG.Wait, time.Duration(2*retry*blockCount)*time.Second,
			"could not receive result approvals on time")

	}, ops...)
}
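
// A usage sketch (hypothetical test), running the happy path over 10 blocks
// with one finalization event per block, no-op collectors, and the provider
// replying on the 3rd request for each chunk:
//
//	func TestVerificationHappyPath(t *testing.T) {
//		collector := &metrics.NoopCollector{}
//		NewVerificationHappyPathTest(t,
//			true, // authorized verification node
//			10,   // block count
//			1,    // event repetition
//			collector, collector,
//			3) // number of trials before the provider replies
//	}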

// withConsumers is a test helper that sets up the following pipeline:
// block reader -> block consumer (3 workers) -> assigner engine -> chunks queue -> chunks consumer (3 workers) -> mock chunk processor
//
// The block consumer operates on a block reader with a chain of the specified number of finalized blocks
// ready to read.
func withConsumers(t *testing.T,
	authorized bool,
	blockCount int,
	verCollector module.VerificationMetrics, // verification metrics collector
	mempoolCollector module.MempoolMetrics, // memory pool metrics collector
	providerFunc MockChunkDataProviderFunc,
	withBlockConsumer func(*blockconsumer.BlockConsumer, []*flow.Block, *sync.WaitGroup, *sync.WaitGroup),
	ops ...CompleteExecutionReceiptBuilderOpt) {

	tracer := trace.NewNoopTracer()

	// bootstraps system with one node of each role.
	s, verID, participants := bootstrapSystem(t, tracer, authorized)
	exeID := participants.Filter(filter.HasRole(flow.RoleExecution))[0]
	conID := participants.Filter(filter.HasRole(flow.RoleConsensus))[0]
	// generates a chain of blocks in the form of root <- R1 <- C1 <- R2 <- C2 <- ... where Rs are distinct reference
	// blocks (i.e., containing guarantees), and Cs are container blocks for their preceding reference block.
	// Container blocks only contain receipts of their preceding reference blocks, but they do not
	// hold any guarantees.
	root, err := s.State.Final().Head()
	require.NoError(t, err)
	chainID := root.ChainID
	ops = append(ops, WithExecutorIDs(
		participants.Filter(filter.HasRole(flow.RoleExecution)).NodeIDs()), func(builder *CompleteExecutionReceiptBuilder) {
		// needed for the guarantees to have the correct chainID and signer indices
		builder.clusterCommittee = participants.Filter(filter.HasRole(flow.RoleCollection))
	})

	completeERs := CompleteExecutionReceiptChainFixture(t, root, blockCount, ops...)
	blocks := ExtendStateWithFinalizedBlocks(t, completeERs, s.State)

	// chunk assignment
	chunkAssigner := &mock.ChunkAssigner{}
	assignedChunkIDs := flow.IdentifierList{}
	if authorized {
		// only the authorized verification node has chunks assigned to it.
		_, assignedChunkIDs = MockChunkAssignmentFixture(chunkAssigner,
			flow.IdentityList{verID},
			completeERs,
			EvenChunkIndexAssigner)
	}

	hub := stub.NewNetworkHub()
	collector := &metrics.NoopCollector{}
	chunksLimit := 100
	genericNode := testutil.GenericNodeWithStateFixture(t,
		s,
		hub,
		verID,
		unittest.Logger().With().Str("role", "verification").Logger(),
		collector,
		tracer,
		chainID)

	// execution node
	exeNode, exeEngine, exeWG := SetupChunkDataPackProvider(t,
		hub,
		exeID,
		participants,
		chainID,
		completeERs,
		assignedChunkIDs,
		providerFunc)

	// consensus node
	conNode, conEngine, conWG := SetupMockConsensusNode(t,
		unittest.Logger(),
		hub,
		conID,
		flow.IdentityList{verID},
		participants,
		completeERs,
		chainID,
		assignedChunkIDs)

	verNode := testutil.VerificationNode(t,
		hub,
		verID,
		participants,
		chunkAssigner,
		uint(chunksLimit),
		chainID,
		verCollector,
		mempoolCollector,
		testutil.WithGenericNode(&genericNode))

	// turns on components and network
	verNet, ok := hub.GetNetwork(verID.NodeID)
	require.True(t, ok)
	unittest.RequireReturnsBefore(t, func() {
		verNet.StartConDev(100*time.Millisecond, true)
	}, 100*time.Millisecond, "failed to start verification network")

	unittest.RequireComponentsReadyBefore(t, 1*time.Second,
		verNode.BlockConsumer,
		verNode.ChunkConsumer,
		verNode.AssignerEngine,
		verNode.FetcherEngine,
		verNode.RequesterEngine,
		verNode.VerifierEngine)

	// plays test scenario
	withBlockConsumer(verNode.BlockConsumer, blocks, conWG, exeWG)

	// tears down engines and nodes
	unittest.RequireReturnsBefore(t, verNet.StopConDev, 100*time.Millisecond, "failed to stop verification network")
	unittest.RequireComponentsDoneBefore(t, 100*time.Millisecond,
		verNode.BlockConsumer,
		verNode.ChunkConsumer,
		verNode.AssignerEngine,
		verNode.FetcherEngine,
		verNode.RequesterEngine,
		verNode.VerifierEngine)

	enginemock.RequireGenericNodesDoneBefore(t, 1*time.Second,
		conNode,
		exeNode)

	if !authorized {
		// in unauthorized mode, no message should be received by the consensus or execution nodes.
		conEngine.AssertNotCalled(t, "Process")
		exeEngine.AssertNotCalled(t, "Process")
	}

	// verifies memory resources are cleaned up across the pipeline
	assert.True(t, verNode.BlockConsumer.Size() == 0)
	assert.True(t, verNode.ChunkConsumer.Size() == 0)
	assert.True(t, verNode.ChunkStatuses.Size() == 0)
	assert.True(t, verNode.ChunkRequests.Size() == 0)
}

// bootstrapSystem is a test helper that bootstraps a flow system with one node of each main role (except execution,
// which gets two nodes). If authorized is set to true, it bootstraps the verification node as an authorized one.
// Otherwise, it bootstraps the verification node as unauthorized in the current epoch.
//
// It returns the state fixture, the verification node's identity, and the list of identities in the system.
func bootstrapSystem(t *testing.T, tracer module.Tracer, authorized bool) (*enginemock.StateFixture, *flow.Identity,
	flow.IdentityList) {
	// creates identities to bootstrap the system with
	verID := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification))
	identities := unittest.CompleteIdentitySet(verID)
	identities = append(identities, unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution))) // adds an extra execution node

	// bootstraps the system
	collector := &metrics.NoopCollector{}
	rootSnapshot := unittest.RootSnapshotFixture(identities)
	stateFixture := testutil.CompleteStateFixture(t, collector, tracer, rootSnapshot)

	if !authorized {
		// creates a new verification node identity that is unauthorized for this epoch
		verID = unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification))
		identities = identities.Union(flow.IdentityList{verID})

		epochBuilder := unittest.NewEpochBuilder(t, stateFixture.State)
		epochBuilder.
			UsingSetupOpts(unittest.WithParticipants(identities)).
			BuildEpoch()
	}

	return stateFixture, verID, identities
}