github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/verification/utils/unittest/helper.go

package verificationtest

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/onflow/crypto"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/stretchr/testify/assert"
	testifymock "github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/slices"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/engine/testutil"
	enginemock "github.com/onflow/flow-go/engine/testutil/mock"
	"github.com/onflow/flow-go/engine/verification/assigner/blockconsumer"
	"github.com/onflow/flow-go/model/bootstrap"
	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/flow/filter"
	"github.com/onflow/flow-go/model/messages"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/module/mock"
	msig "github.com/onflow/flow-go/module/signature"
	"github.com/onflow/flow-go/module/trace"
	"github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/mocknetwork"
	"github.com/onflow/flow-go/network/stub"
	"github.com/onflow/flow-go/state/protocol"
	mockprotocol "github.com/onflow/flow-go/state/protocol/mock"
	protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state"
	"github.com/onflow/flow-go/utils/logging"
	"github.com/onflow/flow-go/utils/unittest"
)

// MockChunkDataProviderFunc is a test helper function encapsulating the logic of whether to reply to a chunk data pack request.
type MockChunkDataProviderFunc func(*testing.T, CompleteExecutionReceiptList, flow.Identifier, flow.Identifier, network.Conduit) bool
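
// A custom provider can be handed to SetupChunkDataPackProvider below. The
// following sketch (illustrative only, not part of the original helpers)
// replies to every request on the spot, much like
// RespondChunkDataPackRequestImmediately, while also counting the requests it
// served; the variable names are made up for the example:
//
//	served := 0
//	countingProvider := func(t *testing.T, ers CompleteExecutionReceiptList,
//		chunkID flow.Identifier, verID flow.Identifier, con network.Conduit) bool {
//		served++
//		res := ers.ChunkDataResponseOf(t, chunkID)
//		require.NoError(t, con.Unicast(res, verID))
//		return true
//	}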

// SetupChunkDataPackProvider creates and returns an execution node that only has a chunk data pack provider engine.
//
// The mock chunk provider engine replies to chunk data pack requests by invoking the injected provider function. All chunk data pack
// requests must come from a verification node and must carry one of the assigned chunk IDs; otherwise, the test fails.
func SetupChunkDataPackProvider(t *testing.T,
	hub *stub.Hub,
	exeIdentity bootstrap.NodeInfo,
	participants flow.IdentityList,
	chainID flow.ChainID,
	completeERs CompleteExecutionReceiptList,
	assignedChunkIDs flow.IdentifierList,
	provider MockChunkDataProviderFunc) (*enginemock.GenericNode,
	*mocknetwork.Engine, *sync.WaitGroup) {

	exeNode := testutil.GenericNodeFromParticipants(t, hub, exeIdentity, participants, chainID)
	exeEngine := new(mocknetwork.Engine)

	exeChunkDataConduit, err := exeNode.Net.Register(channels.ProvideChunks, exeEngine)
	assert.Nil(t, err)

	replied := make(map[flow.Identifier]struct{})

	wg := &sync.WaitGroup{}
	wg.Add(len(assignedChunkIDs))

	mu := &sync.Mutex{} // making testify Run thread-safe

	exeEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			originID, ok := args[1].(flow.Identifier)
			require.True(t, ok)
			// request should be dispatched by a verification node.
			require.Contains(t, participants.Filter(filter.HasRole[flow.Identity](flow.RoleVerification)).NodeIDs(), originID)

			req, ok := args[2].(*messages.ChunkDataRequest)
			require.True(t, ok)
			require.Contains(t, assignedChunkIDs, req.ChunkID) // only assigned chunks should be requested.

			shouldReply := provider(t, completeERs, req.ChunkID, originID, exeChunkDataConduit)
			_, alreadyReplied := replied[req.ChunkID]
			if shouldReply && !alreadyReplied {
				// The wait group tracks the unique chunk requests addressed;
				// it is decremented only upon the first successful reply for a chunk.
				wg.Done()
				replied[req.ChunkID] = struct{}{}
			}
		}).Return(nil)

	return &exeNode, exeEngine, wg
}
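
// Example wiring (a condensed sketch of how withConsumers below uses this
// helper; not a runnable test on its own, as t, hub, participants, and the
// fixtures are assumed to exist):
//
//	exeNode, exeEngine, exeWG := SetupChunkDataPackProvider(t,
//		hub,
//		exeIdentity, // bootstrap.NodeInfo of the execution node
//		participants,
//		chainID,
//		completeERs,      // fixtures the provider answers from
//		assignedChunkIDs, // only these chunk IDs may be requested
//		RespondChunkDataPackRequestImmediately)
//
// Since the wait group is decremented once per unique assigned chunk, a
// returning exeWG.Wait() means every assigned chunk was served at least once.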

// RespondChunkDataPackRequestImmediately immediately qualifies a chunk data request for reply by the chunk data provider.
func RespondChunkDataPackRequestImmediately(t *testing.T,
	completeERs CompleteExecutionReceiptList,
	chunkID flow.Identifier,
	verID flow.Identifier,
	con network.Conduit) bool {

	// finds the chunk data pack of the requested chunk and sends it back.
	res := completeERs.ChunkDataResponseOf(t, chunkID)

	err := con.Unicast(res, verID)
	assert.Nil(t, err)

	log.Debug().
		Hex("origin_id", logging.ID(verID)).
		Hex("chunk_id", logging.ID(chunkID)).
		Msg("chunk data pack request answered by provider")

	return true
}

// RespondChunkDataPackRequestAfterNTrials qualifies a chunk data request for reply by the chunk data provider
// only after the chunk has been requested at least n times.
func RespondChunkDataPackRequestAfterNTrials(n int) MockChunkDataProviderFunc {
	tryCount := make(map[flow.Identifier]int)

	return func(t *testing.T, completeERs CompleteExecutionReceiptList, chunkID flow.Identifier, verID flow.Identifier, con network.Conduit) bool {
		tryCount[chunkID]++

		if tryCount[chunkID] >= n {
			// finds the chunk data pack of the requested chunk and sends it back.
			res := completeERs.ChunkDataResponseOf(t, chunkID)

			err := con.Unicast(res, verID)
			assert.Nil(t, err)

			log.Debug().
				Hex("origin_id", logging.ID(verID)).
				Hex("chunk_id", logging.ID(chunkID)).
				Int("trial_time", tryCount[chunkID]).
				Msg("chunk data pack request answered by provider")

			return true
		}

		return false
	}
}
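
// For instance, RespondChunkDataPackRequestAfterNTrials(3) ignores the first two
// requests for each chunk and replies from the third request onward, while
// RespondChunkDataPackRequestAfterNTrials(1) behaves like
// RespondChunkDataPackRequestImmediately:
//
//	provider := RespondChunkDataPackRequestAfterNTrials(3)
//	// calls 1 and 2 for a given chunk return false (no reply sent),
//	// call 3 unicasts the chunk data response and returns true.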

// SetupMockConsensusNode creates and returns a mock consensus node (conIdentity) and its registered engine in the
// network (hub). It mocks the Process method of the consensus engine: upon receiving a message from one of the
// verification nodes (verIdentities), it evaluates whether the message is a result approval for a chunk assigned
// to that verification node.
func SetupMockConsensusNode(t *testing.T,
	log zerolog.Logger,
	hub *stub.Hub,
	conIdentity bootstrap.NodeInfo,
	verIdentities flow.IdentityList,
	othersIdentity flow.IdentityList,
	completeERs CompleteExecutionReceiptList,
	chainID flow.ChainID,
	assignedChunkIDs flow.IdentifierList) (*enginemock.GenericNode, *mocknetwork.Engine, *sync.WaitGroup) {

	lg := log.With().Str("role", "mock-consensus").Logger()

	wg := &sync.WaitGroup{}
	// each verification node is expected to issue one result approval per assigned chunk,
	// and there are len(verIdentities)-many verification nodes,
	// so a total of len(verIdentities) * len(assignedChunkIDs) result approvals
	// are expected.
	wg.Add(len(verIdentities) * len(assignedChunkIDs))

	// mock the consensus node with a generic node and mocked engine to assert
	// that the result approval is broadcast
	conNode := testutil.GenericNodeFromParticipants(t, hub, conIdentity, othersIdentity, chainID)
	conEngine := new(mocknetwork.Engine)

	// map from verification node ID --> set of seen result approval IDs
	resultApprovalSeen := make(map[flow.Identifier]map[flow.Identifier]struct{})
	for _, verIdentity := range verIdentities {
		resultApprovalSeen[verIdentity.NodeID] = make(map[flow.Identifier]struct{})
	}

	// creates a hasher for spock
	hasher := msig.NewBLSHasher(msig.SPOCKTag)
	mu := &sync.Mutex{} // making testify mock thread-safe

	conEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			originID, ok := args[1].(flow.Identifier)
			assert.True(t, ok)

			resultApproval, ok := args[2].(*flow.ResultApproval)
			assert.True(t, ok)

			lg.Debug().
				Hex("result_approval_id", logging.ID(resultApproval.ID())).
				Msg("result approval received")

			// asserts that this result approval has not been seen from this verification node before
			_, ok = resultApprovalSeen[originID][resultApproval.ID()]
			assert.False(t, ok)

			// marks result approval as seen
			resultApprovalSeen[originID][resultApproval.ID()] = struct{}{}

			// result approval should belong to a chunk assigned to the verification node.
			chunk := completeERs.ChunkOf(t, resultApproval.Body.ExecutionResultID, resultApproval.Body.ChunkIndex)
			assert.Contains(t, assignedChunkIDs, chunk.ID())

			// verifies SPoCK proof of result approval
			// against the SPoCK secret of the execution result
			//
			// retrieves public key of verification node
			var pk crypto.PublicKey
			found := false
			for _, identity := range verIdentities {
				if originID == identity.NodeID {
					pk = identity.StakingPubKey
					found = true
				}
			}
			require.True(t, found)

			// verifies spocks
			valid, err := crypto.SPOCKVerifyAgainstData(
				pk,
				resultApproval.Body.Spock,
				completeERs.ReceiptDataOf(t, chunk.ID()).SpockSecrets[resultApproval.Body.ChunkIndex],
				hasher,
			)
			assert.NoError(t, err)

			if !valid {
				// When the chunk verifier returns a chunk fault, a placeholder
				// signature is generated for that chunk.
				isChunkFaultSignature, err := crypto.SPOCKVerifyAgainstData(
					pk,
					resultApproval.Body.Spock,
					nil, // chunk fault has no spock secret
					hasher,
				)
				assert.NoError(t, err)

				if isChunkFaultSignature {
					assert.Fail(t, "chunk verifier returned chunk fault")
				} else {
					assert.Fail(t, "spock secret mismatch")
				}
			}

			wg.Done()
		}).Return(nil)

	_, err := conNode.Net.Register(channels.ReceiveApprovals, conEngine)
	assert.Nil(t, err)

	return &conNode, conEngine, wg
}
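
// Example wiring (a condensed sketch of how withConsumers below uses this
// helper; not a runnable test on its own):
//
//	conNode, conEngine, conWG := SetupMockConsensusNode(t,
//		unittest.Logger(),
//		hub,
//		conIdentity, // bootstrap.NodeInfo of the consensus node
//		flow.IdentityList{verID.Identity()}, // verification node(s) under test
//		participants,
//		completeERs,
//		chainID,
//		assignedChunkIDs)
//
// conWG.Wait() returns once len(verIdentities) * len(assignedChunkIDs) distinct
// result approvals have been received and their SPoCK proofs verified.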

// isSystemChunk returns true if the index corresponds to the system chunk, i.e., the last chunk in
// the receipt.
func isSystemChunk(index uint64, chunkNum int) bool {
	return int(index) == chunkNum-1
}
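
// For example, in a result with 5 chunks (indices 0 through 4), isSystemChunk(4, 5)
// is true, and isSystemChunk(i, 5) is false for every i < 4.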

func CreateExecutionResult(blockID flow.Identifier, options ...func(result *flow.ExecutionResult, assignments *chunks.Assignment)) (*flow.ExecutionResult, *chunks.Assignment) {
	result := &flow.ExecutionResult{
		BlockID: blockID,
		Chunks:  flow.ChunkList{},
	}
	assignments := chunks.NewAssignment()

	for _, option := range options {
		option(result, assignments)
	}
	return result, assignments
}

func WithChunks(setAssignees ...func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk) func(*flow.ExecutionResult, *chunks.Assignment) {
	return func(result *flow.ExecutionResult, assignment *chunks.Assignment) {
		for i, setAssignee := range setAssignees {
			chunk := setAssignee(result.BlockID, uint64(i), assignment)
			result.Chunks.Insert(chunk)
		}
	}
}

func ChunkWithIndex(blockID flow.Identifier, index int) *flow.Chunk {
	chunk := &flow.Chunk{
		Index: uint64(index),
		ChunkBody: flow.ChunkBody{
			CollectionIndex: uint(index),
			EventCollection: blockID, // ensures chunks from different blocks with the same index will have different chunk IDs
			BlockID:         blockID,
		},
		EndState: unittest.StateCommitmentFixture(),
	}
	return chunk
}

func WithAssignee(assignee flow.Identifier) func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk {
	return func(blockID flow.Identifier, index uint64, assignment *chunks.Assignment) *flow.Chunk {
		chunk := ChunkWithIndex(blockID, int(index))
		fmt.Printf("with assignee: %v, chunk id: %v\n", index, chunk.ID())
		assignment.Add(chunk, flow.IdentifierList{assignee})
		return chunk
	}
}
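
// The three helpers above compose as follows (an illustrative sketch; blockID
// and verID stand for arbitrary fixture identifiers):
//
//	result, assignment := CreateExecutionResult(
//		blockID,
//		WithChunks(
//			WithAssignee(verID),  // chunk with index 0, assigned to verID
//			WithAssignee(verID)), // chunk with index 1, assigned to verID
//	)
//
// Each WithAssignee call builds one chunk via ChunkWithIndex and records its
// assignment, so result.Chunks and the returned assignment stay consistent.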

func FromChunkID(chunkID flow.Identifier) flow.ChunkDataPack {
	return flow.ChunkDataPack{
		ChunkID: chunkID,
	}
}

type ChunkAssignerFunc func(chunkIndex uint64, chunks int) bool

// MockChunkAssignmentFixture is a test helper that mocks a chunk assigner for a set of verification nodes for the
// execution results in the given complete execution receipts, based on the given chunk assigner function.
//
// It returns the list of chunk locator IDs assigned to the input verification nodes, as well as the list of their chunk IDs.
// All verification nodes are assigned the same chunks.
func MockChunkAssignmentFixture(chunkAssigner *mock.ChunkAssigner,
	verIds flow.IdentityList,
	completeERs CompleteExecutionReceiptList,
	isAssigned ChunkAssignerFunc) (flow.IdentifierList, flow.IdentifierList) {

	expectedLocatorIds := flow.IdentifierList{}
	expectedChunkIds := flow.IdentifierList{}

	// keeps track of duplicate results (receipts that share the same result)
	visited := make(map[flow.Identifier]struct{})

	for _, completeER := range completeERs {
		for _, receipt := range completeER.Receipts {
			a := chunks.NewAssignment()

			_, duplicate := visited[receipt.ExecutionResult.ID()]
			if duplicate {
				// skips mocking chunk assignment for duplicate results
				continue
			}

			for _, chunk := range receipt.ExecutionResult.Chunks {
				if isAssigned(chunk.Index, len(receipt.ExecutionResult.Chunks)) {
					locatorID := chunks.Locator{
						ResultID: receipt.ExecutionResult.ID(),
						Index:    chunk.Index,
					}.ID()
					expectedLocatorIds = append(expectedLocatorIds, locatorID)
					expectedChunkIds = append(expectedChunkIds, chunk.ID())
					a.Add(chunk, verIds.NodeIDs())
				}
			}

			chunkAssigner.On("Assign", &receipt.ExecutionResult, completeER.ContainerBlock.ID()).Return(a, nil)
			visited[receipt.ExecutionResult.ID()] = struct{}{}
		}
	}

	return expectedLocatorIds, expectedChunkIds
}
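
// Example (a condensed sketch of how withConsumers below uses this helper):
// assigning the even-indexed chunks of all fixture results to a single
// verification node.
//
//	chunkAssigner := &mock.ChunkAssigner{}
//	locatorIDs, chunkIDs := MockChunkAssignmentFixture(chunkAssigner,
//		flow.IdentityList{verID.Identity()},
//		completeERs,
//		EvenChunkIndexAssigner)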

// EvenChunkIndexAssigner is a helper function that returns true for the even indices in [0, chunkNum-1].
// It also returns true if the index corresponds to the system chunk.
func EvenChunkIndexAssigner(index uint64, chunkNum int) bool {
	ok := index%2 == 0 || isSystemChunk(index, chunkNum)
	return ok
}
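
// For chunkNum = 5 it returns true for indices 0, 2, and 4 (index 4 also being the
// system chunk); for chunkNum = 4 it returns true for 0, 2, and 3, since index 3 is
// the system chunk despite being odd.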

// ExtendStateWithFinalizedBlocks is a test helper to extend the execution state and return the list of blocks.
// It receives a list of complete execution receipt fixtures in the form of (R1,1 <- R1,2 <- ... <- C1) <- (R2,1 <- R2,2 <- ... <- C2) <- .....
// where the Rs are reference blocks and the Cs are container blocks.
// Reference blocks contain guarantees, and container blocks contain execution receipts for their preceding reference blocks,
// e.g., C1 contains receipts for R1,1, R1,2, etc.
// Note: for the sake of simplicity we do not include guarantees in the container blocks for now.
func ExtendStateWithFinalizedBlocks(t *testing.T, completeExecutionReceipts CompleteExecutionReceiptList,
	state protocol.ParticipantState) []*flow.Block {
	blocks := make([]*flow.Block, 0)

	// tracks duplicate reference blocks;
	// receipts may share the same execution result, hence
	// the same reference block (and we should not extend state for it twice).
	duplicate := make(map[flow.Identifier]struct{})

	// extends protocol state with the chain of blocks.
	for _, completeER := range completeExecutionReceipts {
		// extends state with reference blocks of the receipts
		for _, receipt := range completeER.ReceiptsData {
			refBlockID := receipt.ReferenceBlock.ID()
			_, dup := duplicate[refBlockID]
			if dup {
				// skips extending state with an already-seen reference block
				continue
			}

			err := state.Extend(context.Background(), receipt.ReferenceBlock)
			require.NoError(t, err, "can not extend block %v", receipt.ReferenceBlock.ID())
			err = state.Finalize(context.Background(), refBlockID)
			require.NoError(t, err)
			blocks = append(blocks, receipt.ReferenceBlock)
			duplicate[refBlockID] = struct{}{}
		}

		// extends state with container block of receipt.
		containerBlockID := completeER.ContainerBlock.ID()
		_, dup := duplicate[containerBlockID]
		if dup {
			// skips extending state with an already-seen container block
			continue
		}
		err := state.Extend(context.Background(), completeER.ContainerBlock)
		require.NoError(t, err)
		err = state.Finalize(context.Background(), containerBlockID)
		require.NoError(t, err)
		blocks = append(blocks, completeER.ContainerBlock)
		duplicate[containerBlockID] = struct{}{}
	}

	return blocks
}
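
// Example (a condensed sketch following withConsumers below; root,
// protocolStateID, blockCount, sources, ops, and s are assumed to exist):
//
//	completeERs := CompleteExecutionReceiptChainFixture(t, root, protocolStateID, blockCount, sources, ops...)
//	blocks := ExtendStateWithFinalizedBlocks(t, completeERs, s.State)
//	// blocks now holds every distinct reference and container block, finalized in order.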

// MockLastSealedHeight mocks the protocol state for the specified last sealed height.
func MockLastSealedHeight(state *mockprotocol.State, height uint64) {
	snapshot := &mockprotocol.Snapshot{}
	header := unittest.BlockHeaderFixture()
	header.Height = height
	state.On("Sealed").Return(snapshot)
	snapshot.On("Head").Return(header, nil)
}
// NewVerificationHappyPathTest runs the verification happy path on a chain of blockCount finalized blocks,
// with the chunk data pack provider replying to each chunk request only after `retry` trials.
func NewVerificationHappyPathTest(t *testing.T,
	authorized bool,
	blockCount int,
	eventRepetition int,
	verCollector module.VerificationMetrics,
	mempoolCollector module.MempoolMetrics,
	retry int,
	ops ...CompleteExecutionReceiptBuilderOpt) {

	withConsumers(t, authorized, blockCount, verCollector, mempoolCollector, RespondChunkDataPackRequestAfterNTrials(retry), func(
		blockConsumer *blockconsumer.BlockConsumer,
		blocks []*flow.Block,
		resultApprovalsWG *sync.WaitGroup,
		chunkDataRequestWG *sync.WaitGroup) {

		for i := 0; i < len(blocks)*eventRepetition; i++ {
			// the consumer only needs to be "notified" that a new finalized block is available.
			// It keeps track of the last finalized block it has read, and reads the next height upon
			// being notified as follows:
			blockConsumer.OnFinalizedBlock(&model.Block{})
		}

		unittest.RequireReturnsBefore(t, chunkDataRequestWG.Wait, time.Duration(10*retry*blockCount)*time.Second,
			"could not receive chunk data requests on time")
		unittest.RequireReturnsBefore(t, resultApprovalsWG.Wait, time.Duration(2*retry*blockCount)*time.Second,
			"could not receive result approvals on time")

	}, ops...)
}
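
// A typical invocation from a test (a sketch; it assumes metrics.NoopCollector
// satisfies both module.VerificationMetrics and module.MempoolMetrics):
//
//	func TestVerificationHappyPath(t *testing.T) {
//		collector := &metrics.NoopCollector{}
//		NewVerificationHappyPathTest(t,
//			true,      // authorized verification node
//			2,         // two blocks of fixtures
//			1,         // notify the block consumer once per block
//			collector, // verification metrics
//			collector, // mempool metrics
//			3)         // provider replies on the 3rd request per chunk
//	}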

// withConsumers is a test helper that sets up the following pipeline:
// block reader -> block consumer (3 workers) -> assigner engine -> chunks queue -> chunks consumer (3 workers) -> mock chunk processor
//
// The block consumer operates on a block reader with a chain of the specified number of finalized blocks
// ready to read.
func withConsumers(t *testing.T,
	authorized bool,
	blockCount int,
	verCollector module.VerificationMetrics, // verification metrics collector
	mempoolCollector module.MempoolMetrics, // memory pool metrics collector
	providerFunc MockChunkDataProviderFunc,
	withBlockConsumer func(*blockconsumer.BlockConsumer, []*flow.Block, *sync.WaitGroup, *sync.WaitGroup),
	ops ...CompleteExecutionReceiptBuilderOpt) {

	tracer := trace.NewNoopTracer()
	log := zerolog.Nop()

	// bootstraps system with one node of each role (plus an extra execution node).
	s, verID, bootstrapNodesInfo := bootstrapSystem(t, log, tracer, authorized)

	participants := bootstrap.ToIdentityList(bootstrapNodesInfo)
	exeIndex := slices.IndexFunc(bootstrapNodesInfo, func(info bootstrap.NodeInfo) bool {
		return info.Role == flow.RoleExecution
	})
	conIndex := slices.IndexFunc(bootstrapNodesInfo, func(info bootstrap.NodeInfo) bool {
		return info.Role == flow.RoleConsensus
	})
	// generates a chain of blocks in the form of root <- R1 <- C1 <- R2 <- C2 <- ... where the Rs are distinct reference
	// blocks (i.e., they contain guarantees), and the Cs are container blocks for their preceding reference blocks.
	// Container blocks only contain receipts of their preceding reference blocks; they do not
	// hold any guarantees.
	root, err := s.State.Final().Head()
	require.NoError(t, err)
	protocolState, err := s.State.Final().ProtocolState()
	require.NoError(t, err)
	protocolStateID := protocolState.ID()

	chainID := root.ChainID
	ops = append(ops, WithExecutorIDs(
		participants.Filter(filter.HasRole[flow.Identity](flow.RoleExecution)).NodeIDs()), func(builder *CompleteExecutionReceiptBuilder) {
		// needed for the guarantees to have the correct chainID and signer indices
		builder.clusterCommittee = participants.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
	})

	// random sources for all blocks:
	//  - root block (block[0]) is executed with sources[0] (included in QC of child block[1])
	//  - block[i] is executed with sources[i] (included in QC of child block[i+1])
	sources := unittest.RandomSourcesFixture(30)
	completeERs := CompleteExecutionReceiptChainFixture(t, root, protocolStateID, blockCount, sources, ops...)
	blocks := ExtendStateWithFinalizedBlocks(t, completeERs, s.State)

	// chunk assignment
	chunkAssigner := &mock.ChunkAssigner{}
	assignedChunkIDs := flow.IdentifierList{}
	if authorized {
		// only an authorized verification node has chunks assigned to it.
		_, assignedChunkIDs = MockChunkAssignmentFixture(chunkAssigner,
			flow.IdentityList{verID.Identity()},
			completeERs,
			EvenChunkIndexAssigner)
	}

	hub := stub.NewNetworkHub()
	collector := &metrics.NoopCollector{}
	chunksLimit := 100
	genericNode := testutil.GenericNodeWithStateFixture(t,
		s,
		hub,
		verID,
		unittest.Logger().With().Str("role", "verification").Logger(),
		collector,
		tracer,
		chainID)

	// execution node
	exeNode, exeEngine, exeWG := SetupChunkDataPackProvider(t,
		hub,
		bootstrapNodesInfo[exeIndex],
		participants,
		chainID,
		completeERs,
		assignedChunkIDs,
		providerFunc)

	// consensus node
	conNode, conEngine, conWG := SetupMockConsensusNode(t,
		unittest.Logger(),
		hub,
		bootstrapNodesInfo[conIndex],
		flow.IdentityList{verID.Identity()},
		participants,
		completeERs,
		chainID,
		assignedChunkIDs)

	verNode := testutil.VerificationNode(t,
		hub,
		verID,
		participants,
		chunkAssigner,
		uint(chunksLimit),
		chainID,
		verCollector,
		mempoolCollector,
		testutil.WithGenericNode(&genericNode))

	// turns on components and network
	verNet, ok := hub.GetNetwork(verID.NodeID)
	require.True(t, ok)
	unittest.RequireReturnsBefore(t, func() {
		verNet.StartConDev(100*time.Millisecond, true)
	}, 100*time.Millisecond, "failed to start verification network")

	unittest.RequireComponentsReadyBefore(t, 1*time.Second,
		verNode.BlockConsumer,
		verNode.ChunkConsumer,
		verNode.AssignerEngine,
		verNode.FetcherEngine,
		verNode.RequesterEngine,
		verNode.VerifierEngine)

	// plays test scenario
	withBlockConsumer(verNode.BlockConsumer, blocks, conWG, exeWG)

	// tears down engines and nodes
	unittest.RequireReturnsBefore(t, verNet.StopConDev, 100*time.Millisecond, "failed to stop verification network")
	unittest.RequireComponentsDoneBefore(t, 100*time.Millisecond,
		verNode.BlockConsumer,
		verNode.ChunkConsumer,
		verNode.AssignerEngine,
		verNode.FetcherEngine,
		verNode.RequesterEngine,
		verNode.VerifierEngine)

	enginemock.RequireGenericNodesDoneBefore(t, 1*time.Second,
		conNode,
		exeNode)

	if !authorized {
		// in unauthorized mode, no message should be received by the consensus and execution nodes.
		conEngine.AssertNotCalled(t, "Process")
		exeEngine.AssertNotCalled(t, "Process")
	}

	// verifies memory resources are cleaned up across the whole pipeline
	assert.Zero(t, verNode.BlockConsumer.Size())
	assert.Zero(t, verNode.ChunkConsumer.Size())
	assert.Zero(t, verNode.ChunkStatuses.Size())
	assert.Zero(t, verNode.ChunkRequests.Size())
}

// bootstrapSystem is a test helper that bootstraps a Flow system with one node per role, except execution, which has two nodes.
// If authorized is set to true, it bootstraps the verification node as an authorized one.
// Otherwise, it bootstraps the verification node as unauthorized in the current epoch.
//
// It returns the state fixture, the bootstrap info of the verification node, and the bootstrap info of all nodes in the system.
func bootstrapSystem(
	t *testing.T,
	log zerolog.Logger,
	tracer module.Tracer,
	authorized bool,
) (
	*enginemock.StateFixture,
	bootstrap.NodeInfo,
	[]bootstrap.NodeInfo,
) {
	// creates bootstrapNodesInfo to bootstrap the system with
	bootstrapNodesInfo := make([]bootstrap.NodeInfo, 0)
	var verID bootstrap.NodeInfo
	for _, missingRole := range unittest.CompleteIdentitySet() {
		nodeInfo := unittest.PrivateNodeInfoFixture(unittest.WithRole(missingRole.Role))
		if nodeInfo.Role == flow.RoleVerification {
			verID = nodeInfo
		}
		bootstrapNodesInfo = append(bootstrapNodesInfo, nodeInfo)
	}
	bootstrapNodesInfo = append(bootstrapNodesInfo, unittest.PrivateNodeInfoFixture(unittest.WithRole(flow.RoleExecution))) // adds extra execution node
	identities := bootstrap.ToIdentityList(bootstrapNodesInfo)

	// bootstraps the system
	collector := &metrics.NoopCollector{}
	rootSnapshot := unittest.RootSnapshotFixture(identities)
	stateFixture := testutil.CompleteStateFixture(t, log, collector, tracer, rootSnapshot)

	if !authorized {
		// creates a new verification node identity that is unauthorized for this epoch
		verID = unittest.PrivateNodeInfoFixture(unittest.WithRole(flow.RoleVerification))
		bootstrapNodesInfo = append(bootstrapNodesInfo, verID)
		identities = append(identities, verID.Identity())

		mutableProtocolState := protocol_state.NewMutableProtocolState(
			stateFixture.Storage.EpochProtocolState,
			stateFixture.Storage.ProtocolKVStore,
			stateFixture.State.Params(),
			stateFixture.Storage.Headers,
			stateFixture.Storage.Results,
			stateFixture.Storage.Setups,
			stateFixture.Storage.EpochCommits,
		)
		epochBuilder := unittest.NewEpochBuilder(t, mutableProtocolState, stateFixture.State)
		epochBuilder.
			UsingSetupOpts(unittest.WithParticipants(identities.ToSkeleton())).
			BuildEpoch()
	}

	return stateFixture, verID, bootstrapNodesInfo
}