github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/verification/fetcher/engine_test.go

package fetcher_test

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/engine/verification/fetcher"
	mockfetcher "github.com/onflow/flow-go/engine/verification/fetcher/mock"
	vertestutils "github.com/onflow/flow-go/engine/verification/utils/unittest"
	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/verification"
	mempool "github.com/onflow/flow-go/module/mempool/mock"
	module "github.com/onflow/flow-go/module/mock"
	"github.com/onflow/flow-go/module/trace"
	"github.com/onflow/flow-go/network/mocknetwork"
	flowprotocol "github.com/onflow/flow-go/state/protocol"
	protocol "github.com/onflow/flow-go/state/protocol/mock"
	storage "github.com/onflow/flow-go/storage/mock"
	"github.com/onflow/flow-go/utils/unittest"
)

// FetcherEngineTestSuite encapsulates data structures for running unit tests on the fetcher engine.
type FetcherEngineTestSuite struct {
	log                   zerolog.Logger
	metrics               *module.VerificationMetrics
	tracer                *trace.NoopTracer
	verifier              *mocknetwork.Engine                 // the verifier engine
	state                 *protocol.State                     // used to verify the request origin
	pendingChunks         *mempool.ChunkStatuses              // used to store all the pending chunks that are assigned to this node
	blocks                *storage.Blocks                     // used for verifying collection IDs
	headers               *storage.Headers                    // used for building verifiable chunk data
	chunkConsumerNotifier *module.ProcessingNotifier          // used to report that a chunk has been processed
	results               *storage.ExecutionResults           // used to retrieve the execution result of an assigned chunk
	receipts              *storage.ExecutionReceipts          // used to find the executors of a chunk
	requester             *mockfetcher.ChunkDataPackRequester // used to request chunk data packs from the network
}

// setupTest initiates a test suite prior to each test.
func setupTest() *FetcherEngineTestSuite {
	s := &FetcherEngineTestSuite{
		log:                   unittest.Logger(),
		metrics:               &module.VerificationMetrics{},
		tracer:                trace.NewNoopTracer(),
		verifier:              &mocknetwork.Engine{},
		state:                 &protocol.State{},
		pendingChunks:         &mempool.ChunkStatuses{},
		headers:               &storage.Headers{},
		blocks:                &storage.Blocks{},
		chunkConsumerNotifier: &module.ProcessingNotifier{},
		results:               &storage.ExecutionResults{},
		receipts:              &storage.ExecutionReceipts{},
		requester:             &mockfetcher.ChunkDataPackRequester{},
	}

	return s
}

// newFetcherEngineWithStop returns a fetcher engine for testing with its stop-at-height set.
func newFetcherEngineWithStop(s *FetcherEngineTestSuite, stopAtHeight uint64) *fetcher.Engine {
	s.requester.On("WithChunkDataPackHandler", mock.AnythingOfType("*fetcher.Engine")).Return()

	e := fetcher.New(s.log,
		s.metrics,
		s.tracer,
		s.verifier,
		s.state,
		s.pendingChunks,
		s.headers,
		s.blocks,
		s.results,
		s.receipts,
		s.requester,
		stopAtHeight)

	e.WithChunkConsumerNotifier(s.chunkConsumerNotifier)
	return e
}

// newFetcherEngine returns a fetcher engine for testing.
func newFetcherEngine(s *FetcherEngineTestSuite) *fetcher.Engine {
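	// a stop height of zero is used by tests that do not exercise the stop-at-height
	// feature; judging from TestStopAtHeight below, where chunks of blocks at or above
	// a non-zero stop height are skipped, zero is presumably treated as "no stop height".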
	return newFetcherEngineWithStop(s, 0)
}

func TestProcessAssignedChunkHappyPath(t *testing.T) {
	tt := []struct {
		chunks   int
		assigned int
	}{
		{
			chunks:   1, // single chunk, single assigned
			assigned: 1,
		},
		{
			chunks:   2, // two chunks, single assigned
			assigned: 1,
		},
		{
			chunks:   4, // four chunks, two assigned
			assigned: 2,
		},
		{
			chunks:   10, // ten chunks, five assigned
			assigned: 5,
		},
	}

	for _, tc := range tt {
		t.Run(fmt.Sprintf("%d-chunk-%d-assigned", tc.chunks, tc.assigned), func(t *testing.T) {
			testProcessAssignChunkHappyPath(t, tc.chunks, tc.assigned)
		})
	}
}

// testProcessAssignChunkHappyPath evaluates the behavior of the fetcher engine with respect to
// receiving some assigned chunks: it should ask the requester for a chunk data pack for each chunk.
// The test then mocks replying with a chunk data response for what the fetcher engine requested.
// On receiving the response, the fetcher engine should validate it, create a verifiable chunk, and
// pass it to the verifier engine.
// Once the verifier engine returns, the fetcher engine should notify the chunk consumer that it is
// done with this chunk.
func testProcessAssignChunkHappyPath(t *testing.T, chunkNum int, assignedNum int) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with the specified number of chunks and number of assigned chunks.
	// the result has been created by two execution nodes, while two other execution nodes have a conflicting result.
	block, result, statuses, locators, collMap := completeChunkStatusListFixture(t, chunkNum, assignedNum)
	_, _, agrees, disagrees := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)
	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Times(len(locators))

	// the chunks belong to an unsealed block.
	mockBlockSealingStatus(s.state, s.headers, block.Header, false)

	// mocks resources on fetcher engine side.
	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})
	mockBlocksStorage(s.blocks, s.headers, block)
	mockPendingChunksAdd(t, s.pendingChunks, statuses, true)
	mockPendingChunksRemove(t, s.pendingChunks, statuses, true)
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees.Union(disagrees))

	// generates fixtures for the expected chunk data pack requests and responses
	requests := chunkRequestsFixture(result.ID(), statuses, agrees, disagrees)
	chunkDataPacks, verifiableChunks := verifiableChunksFixture(t, statuses, block, result, collMap)

	// fetcher engine should request chunk data for received (assigned) chunk locators
	s.metrics.On("OnChunkDataPackRequestSentByFetcher").Return().Times(len(requests))
	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Times(len(chunkDataPacks))
	requesterWg := mockRequester(t, s.requester, requests, chunkDataPacks,
		func(originID flow.Identifier, response *verification.ChunkDataPackResponse) {

			// mocks replying to the requests by sending a chunk data pack.
			e.HandleChunkDataPack(originID, response)
		})

	// fetcher engine should create and pass a verifiable chunk to the verifier engine upon receiving
	// each chunk data response, and notify the consumer that it is done with processing the chunk.
	s.metrics.On("OnVerifiableChunkSentToVerifier").Return().Times(len(verifiableChunks))
	verifierWG := mockVerifierEngine(t, s.verifier, verifiableChunks)
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs(locators.ToList()))

	// processes assigned chunks in parallel.
	processWG := &sync.WaitGroup{}
	processWG.Add(len(locators))
	for _, locator := range locators {
		go func(l *chunks.Locator) {
			e.ProcessAssignedChunk(l)
			processWG.Done()
		}(locator)
	}

	unittest.RequireReturnsBefore(t, requesterWg.Wait, 1*time.Second, "could not handle received chunk data pack on time")
	unittest.RequireReturnsBefore(t, verifierWG.Wait, 1*time.Second, "could not push verifiable chunk on time")
	unittest.RequireReturnsBefore(t, processWG.Wait, 1*time.Second, "could not process chunks on time")

	mock.AssertExpectationsForObjects(t, s.results, s.requester, s.pendingChunks, s.chunkConsumerNotifier, s.metrics)
}

// TestChunkResponse_RemovingStatusFails evaluates the behavior of the fetcher engine with respect to receiving
// duplicate and concurrent chunk data pack responses for the same chunk.
// The deduplication of concurrent responses happens at the pending chunk statuses mempool, which is protected
// by a mutex lock. So, among any concurrent chunk data pack responses for the same chunk, only one wins the lock
// and can successfully remove the status from the mempool, while the others fail.
// If the fetcher engine fails to remove a pending chunk status, the chunk has already been processed, and hence
// the engine should drop the response gracefully, without notifying the verifier or chunk consumer.
func TestChunkResponse_RemovingStatusFails(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, of which a single chunk is assigned to this fetcher engine.
	block, result, statuses, _, collMap := completeChunkStatusListFixture(t, 2, 1)
	_, _, agrees, _ := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)
	mockBlockSealingStatus(s.state, s.headers, block.Header, false)

	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})
	mockBlocksStorage(s.blocks, s.headers, block)
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees)

	chunkLocatorID := statuses[0].ChunkLocatorID()
	// trying to remove the pending status fails.
	mockPendingChunksRemove(t, s.pendingChunks, statuses, false)

	chunkDataPacks, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Once()
	e.HandleChunkDataPack(agrees[0].NodeID, chunkDataPacks[chunkLocatorID])

	// no verifiable chunk should be passed to verifier engine
	// and chunk consumer should not get any notification
	s.chunkConsumerNotifier.AssertNotCalled(t, "Notify")
	s.verifier.AssertNotCalled(t, "ProcessLocal")
	mock.AssertExpectationsForObjects(t, s.requester, s.pendingChunks, s.verifier, s.chunkConsumerNotifier, s.metrics)
}

// TestProcessAssignChunkSealedAfterRequest evaluates the behavior of the fetcher engine with respect to receiving
// an assigned chunk whose block gets sealed after the chunk has been requested from the requester.
// The requester notifies the fetcher back that the block of the chunk has been sealed.
// The fetcher engine should then remove the chunk request status from memory, and notify the
// chunk consumer that it is done processing this chunk.
func TestProcessAssignChunkSealedAfterRequest(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, of which one chunk is assigned to this fetcher engine.
	// the result has been created by two execution nodes, while two other execution nodes have a conflicting result.
	// also, the chunk belongs to an unsealed block.
	block, result, statuses, locators, collMap := completeChunkStatusListFixture(t, 2, 1)
	_, _, agrees, disagrees := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)
	mockBlockSealingStatus(s.state, s.headers, block.Header, false)
	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Times(len(locators))

	// mocks resources on fetcher engine side.
	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})
	mockPendingChunksAdd(t, s.pendingChunks, statuses, true)
	mockPendingChunksRemove(t, s.pendingChunks, statuses, true)
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees.Union(disagrees))

	// generates fixtures for the expected chunk data pack requests and responses
	requests := chunkRequestsFixture(result.ID(), statuses, agrees, disagrees)
	responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	// fetcher engine should request chunk data for received (assigned) chunk locators.
	// as the response, it receives a notification that the chunk belongs to a sealed block;
	// we mock this as the block getting sealed after the request is dispatched.
	s.metrics.On("OnChunkDataPackRequestSentByFetcher").Return().Times(len(requests))
	requesterWg := mockRequester(t, s.requester, requests, responses, func(originID flow.Identifier,
		response *verification.ChunkDataPackResponse) {
		e.NotifyChunkDataPackSealed(response.Index, response.ResultID)
	})

	// fetcher engine should notify the chunk consumer that it is done with these chunks.
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs(locators.ToList()))

	// processes assigned chunks in parallel.
	processWG := &sync.WaitGroup{}
	processWG.Add(len(locators))
	for _, locator := range locators {
		go func(l *chunks.Locator) {
			e.ProcessAssignedChunk(l)
			processWG.Done()
		}(locator)
	}

	unittest.RequireReturnsBefore(t, requesterWg.Wait, time.Second, "could not handle sealed chunks notification on time")
	unittest.RequireReturnsBefore(t, processWG.Wait, 1*time.Second, "could not process chunks on time")

	mock.AssertExpectationsForObjects(t, s.requester, s.pendingChunks, s.chunkConsumerNotifier, s.metrics)
	// no verifiable chunk should be passed to verifier engine
	s.verifier.AssertNotCalled(t, "ProcessLocal")
}

// TestChunkResponse_InvalidChunkDataPack evaluates the unhappy path of receiving an invalid chunk data response.
// A chunk data response is invalid if its integrity is violated. We consider the collection ID, chunk ID, and
// start state as the necessary conditions for chunk data integrity.
func TestChunkResponse_InvalidChunkDataPack(t *testing.T) {
	tt := []struct {
		alterChunkDataResponse func(*flow.ChunkDataPack)
		mockStateFunc          func(flow.Identity, *protocol.State, flow.Identifier) // mocks state at block identifier for the given identity.
		msg                    string
	}{
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// rewrites the collection with a random one whose ID differs from the
				// original collection ID in the block's guarantee.
				txBody := unittest.TransactionBodyFixture()
				cdp.Collection.Transactions = []*flow.TransactionBody{
					&txBody,
				}
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				// mocks a valid execution node as originID
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "conflicting-collection-with-blocks-storage",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				cdp.ChunkID = unittest.IdentifierFixture()
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				// mocks a valid execution node as originID
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-chunk-ID",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				cdp.StartState = unittest.StateCommitmentFixture()
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				// mocks a valid execution node as originID
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-start-state",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				mockStateAtBlockIDForMissingIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-origin-id",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				identity.EpochParticipationStatus = flow.EpochParticipationStatusJoining
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "participation-status-joining-origin-id",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				identity.EpochParticipationStatus = flow.EpochParticipationStatusLeaving
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "participation-status-leaving-origin-id",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				identity.InitialWeight = 0
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "zero-weight-origin-id",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				identity.Role = flow.RoleVerification
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-origin-role",
		},
	}

	for _, tc := range tt {
		t.Run(tc.msg, func(t *testing.T) {
			testInvalidChunkDataResponse(t, tc.alterChunkDataResponse, tc.mockStateFunc)
		})
	}
}

// testInvalidChunkDataResponse evaluates the unhappy path of receiving
// an invalid chunk data response for an already requested chunk.
// The invalid response should be dropped without any further action. Particularly, the
// notifier and verifier engine should not be called as the result of handling an invalid chunk.
//
// The input alter function alters the chunk data response to break its integrity.
func testInvalidChunkDataResponse(t *testing.T,
	alterChunkDataResponse func(*flow.ChunkDataPack),
	mockStateFunc func(flow.Identity, *protocol.State, flow.Identifier)) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, of which one chunk is assigned to this fetcher engine.
	// the result has been created by two execution nodes, while two other execution nodes have a conflicting result.
	// also, the chunk belongs to an unsealed block.
	block, result, statuses, _, collMap := completeChunkStatusListFixture(t, 2, 1)
	_, _, agrees, _ := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)

	// mocks resources on fetcher engine side.
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockBlocksStorage(s.blocks, s.headers, block)

	chunkLocatorID := statuses[0].ChunkLocatorID()
	responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	// alters the chunk data pack so that it becomes invalid.
	alterChunkDataResponse(responses[chunkLocatorID].Cdp)
	mockStateFunc(*agrees[0], s.state, block.ID())

	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Times(len(responses))
	e.HandleChunkDataPack(agrees[0].NodeID, responses[chunkLocatorID])

	mock.AssertExpectationsForObjects(t, s.pendingChunks, s.metrics)
	// no verifiable chunk should be passed to verifier engine
	// and chunk consumer should not get any notification
	s.chunkConsumerNotifier.AssertNotCalled(t, "Notify")
	s.verifier.AssertNotCalled(t, "ProcessLocal")

	// none of the subsequent calls on the pipeline path should happen when validation fails.
	s.results.AssertNotCalled(t, "ByID")
	s.pendingChunks.AssertNotCalled(t, "Remove")
}

// TestChunkResponse_MissingStatus evaluates that if the fetcher engine receives a chunk data pack response for which
// it does not have any pending status, it drops the response immediately and does not proceed with the handling pipeline.
// Receiving such a chunk data response can happen in the following scenarios:
// - After requesting it from the network, the requester informs the fetcher engine that the chunk belongs to a sealed block.
// - More than one copy of the same response arrives at different times, after the first copy has already been handled.
func TestChunkResponse_MissingStatus(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, of which one chunk is assigned to this fetcher engine.
	// the result has been created by two execution nodes, while two other execution nodes have a conflicting result.
	// also, the chunk belongs to an unsealed block.
	block, result, statuses, _, collMap := completeChunkStatusListFixture(t, 2, 1)
	status := statuses[0]
	responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	chunkLocatorID := statuses[0].ChunkLocatorID()

	// mocks that there is no pending status for this chunk at the fetcher engine.
	s.pendingChunks.On("Get", status.ChunkIndex, result.ID()).Return(nil, false)

	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Times(len(responses))
	e.HandleChunkDataPack(unittest.IdentifierFixture(), responses[chunkLocatorID])

	mock.AssertExpectationsForObjects(t, s.pendingChunks, s.metrics)

	// no verifiable chunk should be passed to verifier engine
	// and chunk consumer should not get any notification
	s.chunkConsumerNotifier.AssertNotCalled(t, "Notify")
	s.verifier.AssertNotCalled(t, "ProcessLocal")

	// none of the subsequent calls on the pipeline path should happen.
	s.results.AssertNotCalled(t, "ByID")
	s.blocks.AssertNotCalled(t, "ByID")
	s.pendingChunks.AssertNotCalled(t, "Remove")
	s.state.AssertNotCalled(t, "AtBlockID")
}

// TestSkipChunkOfSealedBlock evaluates that if the fetcher engine receives a chunk belonging to a sealed block,
// it drops it without processing it any further and notifies the consumer
// that it is done with processing that chunk.
func TestSkipChunkOfSealedBlock(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a single chunk locator, and mocks its corresponding block as sealed.
	block := unittest.BlockFixture()
	result := unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(block.ID()))
	statuses := unittest.ChunkStatusListFixture(t, block.Header.Height, result, 1)
	locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses)
	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Once()

	mockBlockSealingStatus(s.state, s.headers, block.Header, true)
	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})

	// expects the processing notifier to be invoked upon detecting a sealed chunk,
	// which marks the termination of processing a sealed chunk on the fetcher engine
	// side.
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs(locators.ToList()))

	for _, locator := range locators {
		e.ProcessAssignedChunk(locator)
	}

	mock.AssertExpectationsForObjects(t, s.results, s.metrics)
	// we should not request chunk data for a chunk of a sealed block.
	s.requester.AssertNotCalled(t, "Request")
	// we should not try adding a chunk of a sealed block to chunk status mempool.
	s.pendingChunks.AssertNotCalled(t, "Add")
}

// TestStopAtHeight evaluates that the fetcher engine skips processing any chunk belonging to a block
// at or above the configured stop height; in particular, such a chunk is never added to the pending
// chunks mempool.
func TestStopAtHeight(t *testing.T) {
	s := setupTest()

	headerA := unittest.BlockHeaderFixture()
	headerB := unittest.BlockHeaderWithParentFixture(headerA)

	fmt.Printf("A = %d B = %d\n", headerA.Height, headerB.Height)

	// stop at blockB's height, meaning blockA will be the last block to verify
	e := newFetcherEngineWithStop(s, headerB.Height)

	resultA := unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(headerA.ID()))
	resultB := unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(headerB.ID()))

	//statusesA := unittest.ChunkStatusListFixture(t, headerA.Height, resultA, 1)
	//locatorsA := unittest.ChunkStatusListToChunkLocatorFixture(statusesA)
	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return()

	mockBlockSealingStatus(s.state, s.headers, headerA, false)
	mockBlockSealingStatus(s.state, s.headers, headerB, false)
	mockResultsByIDs(s.results, []*flow.ExecutionResult{resultA, resultB})

	locatorA := chunks.Locator{
		ResultID: resultA.ID(),
		Index:    0,
	}
	locatorB := chunks.Locator{
		ResultID: resultB.ID(),
		Index:    0,
	}

	// expects the processing notifier to be invoked for both chunks, which marks
	// the termination of their processing on the fetcher engine side.
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs([]flow.Entity{locatorA, locatorB}))

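	// captures every Add call to inspect the height of the added status; returning
	// false mimics the mempool rejecting the status, so the engine should not proceed
	// to request chunk data for either chunk.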
	s.pendingChunks.On("Add", mock.Anything).Run(func(args mock.Arguments) {
		spew.Dump(args[0].(*verification.ChunkStatus).BlockHeight)
	}).Return(false)

	e.ProcessAssignedChunk(&locatorA)
	e.ProcessAssignedChunk(&locatorB)

	mock.AssertExpectationsForObjects(t, s.results, s.metrics)

	// we should not request a duplicate chunk status.
	s.requester.AssertNotCalled(t, "Request")

	s.pendingChunks.AssertNotCalled(t, "Add", mock.MatchedBy(func(status *verification.ChunkStatus) bool {
		return status.BlockHeight == headerB.Height
	}))

	s.pendingChunks.AssertCalled(t, "Add", mock.MatchedBy(func(status *verification.ChunkStatus) bool {
		return status.BlockHeight == headerA.Height
	}))
}

// mockResultsByIDs mocks the results storage for affirmative querying of result IDs.
// Each result in the list is returned when queried by its ID.
func mockResultsByIDs(results *storage.ExecutionResults, list []*flow.ExecutionResult) {
	for _, result := range list {
		results.On("ByID", result.ID()).Return(result, nil)
	}
}

// mockReceiptsBlockID is a test helper that mocks the execution receipts storage on the ByBlockID method,
// returning two lists of receipts for the given block ID.
// The first set are agree receipts, which have the same result ID as the given result.
// The second set are disagree receipts, which have a result ID different from the given result.
//
// It also returns the list of distinct executor node identities for all those receipts.
func mockReceiptsBlockID(t *testing.T,
	blockID flow.Identifier,
	receipts *storage.ExecutionReceipts,
	result *flow.ExecutionResult,
	agrees int,
	disagrees int) (flow.ExecutionReceiptList, flow.ExecutionReceiptList, flow.IdentityList, flow.IdentityList) {

	agreeReceipts := flow.ExecutionReceiptList{}
	disagreeReceipts := flow.ExecutionReceiptList{}
	agreeExecutors := flow.IdentityList{}
	disagreeExecutors := flow.IdentityList{}

	for i := 0; i < agrees; i++ {
		receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(result))
		require.NotContains(t, agreeExecutors.NodeIDs(), receipt.ExecutorID) // should not have duplicate executors
		agreeExecutors = append(agreeExecutors, unittest.IdentityFixture(
			unittest.WithRole(flow.RoleExecution),
			unittest.WithNodeID(receipt.ExecutorID)))
		agreeReceipts = append(agreeReceipts, receipt)
	}

	for i := 0; i < disagrees; i++ {
		disagreeResult := unittest.ExecutionResultFixture()
		require.NotEqual(t, disagreeResult.ID(), result.ID())

		receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(disagreeResult))
		require.NotContains(t, agreeExecutors.NodeIDs(), receipt.ExecutorID)    // should not have an executor in both lists
		require.NotContains(t, disagreeExecutors.NodeIDs(), receipt.ExecutorID) // should not have duplicate executors
		disagreeExecutors = append(disagreeExecutors, unittest.IdentityFixture(
			unittest.WithRole(flow.RoleExecution),
			unittest.WithNodeID(receipt.ExecutorID)))
		disagreeReceipts = append(disagreeReceipts, receipt)
	}

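	// the storage mock returns agree and disagree receipts together; the engine is
	// expected to tell them apart by comparing each receipt's result ID against the
	// result it is verifying.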
	all := append(agreeReceipts, disagreeReceipts...)

	receipts.On("ByBlockID", blockID).Return(all, nil)
	return agreeReceipts, disagreeReceipts, agreeExecutors, disagreeExecutors
}

// mockStateAtBlockIDForIdentities is a test helper that mocks state at the block ID with the given execution node identities.
func mockStateAtBlockIDForIdentities(state *protocol.State, blockID flow.Identifier, participants flow.IdentityList) {
	snapshot := &protocol.Snapshot{}
	state.On("AtBlockID", blockID).Return(snapshot)
	snapshot.On("Identities", mock.Anything).Return(participants, nil)
	for _, id := range participants {
		snapshot.On("Identity", id.NodeID).Return(id, nil)
	}
}

// mockStateAtBlockIDForMissingIdentities is a test helper that mocks state at the block ID with the given
// execution node identities as missing ones, i.e., they are not part of the state.
func mockStateAtBlockIDForMissingIdentities(state *protocol.State, blockID flow.Identifier, participants flow.IdentityList) {
	snapshot := &protocol.Snapshot{}
	state.On("AtBlockID", blockID).Return(snapshot)
	for _, id := range participants {
		snapshot.On("Identity", id.NodeID).Return(nil, flowprotocol.IdentityNotFoundError{NodeID: id.NodeID})
	}
}

// mockPendingChunksAdd mocks the Add method of pending chunks for expecting only the specified list of chunk statuses.
// Each chunk status should be added only once.
// It returns the specified added boolean as the result of the mocked call.
func mockPendingChunksAdd(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, added bool) {
	mu := &sync.Mutex{}

	pendingChunks.On("Add", mock.Anything).
		Run(func(args mock.Arguments) {
			// to provide mutual exclusion under concurrent invocations.
			mu.Lock()
			defer mu.Unlock()

			actual, ok := args[0].(*verification.ChunkStatus)
			require.True(t, ok)

			// there should be a matching chunk status with the received one.
			actualLocatorID := actual.ChunkLocatorID()

			for _, expected := range list {
				expectedLocatorID := expected.ChunkLocatorID()
				if expectedLocatorID == actualLocatorID {
					require.Equal(t, expected.ExecutionResult, actual.ExecutionResult)
					return
				}
			}

			require.Fail(t, "tried adding an unexpected chunk status to mempool")
		}).Return(added).Times(len(list))
}

// mockPendingChunksRemove mocks the Remove method of pending chunks for expecting only the specified list of chunk statuses.
// Each chunk status should be removed only once.
// It returns the specified removed boolean as the result of the mocked call.
func mockPendingChunksRemove(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, removed bool) {
	mu := &sync.Mutex{}

	pendingChunks.On("Remove", mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			// to provide mutual exclusion under concurrent invocations.
			mu.Lock()
			defer mu.Unlock()

			actualIndex, ok := args[0].(uint64)
			require.True(t, ok)

			actualResultID, ok := args[1].(flow.Identifier)
			require.True(t, ok)

			// there should be a matching chunk status with the received one.
			for _, expected := range list {
				if expected.ChunkIndex == actualIndex && expected.ExecutionResult.ID() == actualResultID {
					return
				}
			}

			require.Fail(t, "tried removing an unexpected chunk status from mempool")
		}).Return(removed).Times(len(list))
}

// mockPendingChunksGet mocks the Get method of pending chunks for expecting only the specified list of chunk statuses.
func mockPendingChunksGet(pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus) {
	mu := &sync.Mutex{}

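	// testify allows Return to take functions whose signatures match the mocked
	// method's return values; they are invoked with the actual call arguments, which
	// lets this mock compute its return values dynamically per invocation.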
	pendingChunks.On("Get", mock.Anything, mock.Anything).Return(
		func(chunkIndex uint64, resultID flow.Identifier) *verification.ChunkStatus {
			// to provide mutual exclusion under concurrent invocations.
			mu.Lock()
			defer mu.Unlock()

			for _, expected := range list {
				if expected.ChunkIndex == chunkIndex && expected.ExecutionResult.ID() == resultID {
					return expected
				}
			}
			return nil
		},
		func(chunkIndex uint64, resultID flow.Identifier) bool {
			for _, expected := range list {
				if expected.ChunkIndex == chunkIndex && expected.ExecutionResult.ID() == resultID {
					return true
				}
			}
			return false
		})
}

// mockVerifierEngine mocks the verifier engine to expect receiving only verifiable chunks matching the specified input.
// Each verifiable chunk should be passed only once.
func mockVerifierEngine(t *testing.T,
	verifier *mocknetwork.Engine,
	verifiableChunks map[flow.Identifier]*verification.VerifiableChunkData) *sync.WaitGroup {
	mu := sync.Mutex{}
	wg := &sync.WaitGroup{}
	wg.Add(len(verifiableChunks))

	seen := make(map[flow.Identifier]struct{})

	verifier.On("ProcessLocal", mock.Anything).
		Run(func(args mock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			vc, ok := args[0].(*verification.VerifiableChunkData)
			require.True(t, ok)

			// verifiable chunk data should be distinct.
			_, ok = seen[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)]
			require.False(t, ok, "duplicated verifiable chunk received")
			seen[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)] = struct{}{}

			// this verifiable chunk should be among the expected ones, and its fields should match our expectations
			expected, ok := verifiableChunks[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)]
			require.True(t, ok, "verifier engine received an unknown verifiable chunk data")

			if vc.IsSystemChunk {
				// system chunk has a nil collection.
				require.Nil(t, vc.ChunkDataPack.Collection)
			} else {
				// non-system chunk has a non-nil collection.
				require.NotNil(t, vc.ChunkDataPack.Collection)
				require.Equal(t, expected.ChunkDataPack.Collection.ID(), vc.ChunkDataPack.Collection.ID())
			}

			require.Equal(t, *expected.ChunkDataPack, *vc.ChunkDataPack)
			require.Equal(t, expected.Result.ID(), vc.Result.ID())
			require.Equal(t, expected.Header.ID(), vc.Header.ID())

			isSystemChunk := fetcher.IsSystemChunk(vc.Chunk.Index, vc.Result)
			require.Equal(t, isSystemChunk, vc.IsSystemChunk)

			endState, err := fetcher.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk)
			require.NoError(t, err)

			require.Equal(t, endState, vc.EndState)
			wg.Done()
		}).Return(nil)

	return wg
}

// mockChunkConsumerNotifier mocks the Notify method of the processing notifier to expect being called
// exactly once per given locator ID.
func mockChunkConsumerNotifier(t *testing.T, notifier *module.ProcessingNotifier, locatorIDs flow.IdentifierList) {
	mu := &sync.Mutex{}
	seen := make(map[flow.Identifier]struct{})
	notifier.On("Notify", mock.Anything).Run(func(args mock.Arguments) {
		// to provide mutual exclusion under concurrent invocations.
		mu.Lock()
		defer mu.Unlock()

		locatorID, ok := args[0].(flow.Identifier)
		require.True(t, ok)
		require.Contains(t, locatorIDs, locatorID, "tried calling notifier on an unexpected locator ID")

		// each chunk should be notified once
		_, ok = seen[locatorID]
		require.False(t, ok)
		seen[locatorID] = struct{}{}

	}).Return().Times(len(locatorIDs))
}

// mockBlockSealingStatus mocks the protocol state sealing status at the height of the given block.
func mockBlockSealingStatus(state *protocol.State, headers *storage.Headers, header *flow.Header, sealed bool) {
	headers.On("ByBlockID", header.ID()).Return(header, nil)
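	// a block counts as sealed iff the last sealed height is at or above the block's
	// height, so mocking the last sealed height one above (or below) the block's
	// height marks it as sealed (or unsealed, respectively).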
	if sealed {
		vertestutils.MockLastSealedHeight(state, header.Height+1)
	} else {
		vertestutils.MockLastSealedHeight(state, header.Height-1)
	}
}

// mockBlocksStorage mocks blocks and headers storages for given block.
func mockBlocksStorage(blocks *storage.Blocks, headers *storage.Headers, block *flow.Block) {
	blockID := block.ID()
	blocks.On("ByID", blockID).Return(block, nil)
	headers.On("ByBlockID", blockID).Return(block.Header, nil)
}

// mockRequester mocks the chunk data pack requester with the given chunk data pack requests.
// Each chunk should be requested exactly once.
// Upon each request, it asynchronously invokes the handler function with the matching chunk data pack response.
func mockRequester(t *testing.T,
	requester *mockfetcher.ChunkDataPackRequester,
	requests map[flow.Identifier]*verification.ChunkDataPackRequest,
	responses map[flow.Identifier]*verification.ChunkDataPackResponse,
	handler func(flow.Identifier, *verification.ChunkDataPackResponse)) *sync.WaitGroup {

	mu := sync.Mutex{}
	wg := &sync.WaitGroup{}
	wg.Add(len(requests))
	requester.On("Request", mock.Anything).
		Run(func(args mock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			actualRequest, ok := args[0].(*verification.ChunkDataPackRequest)
			require.True(t, ok)

			expectedRequest, ok := requests[actualRequest.ID()]
			require.True(t, ok, "requester received an unexpected chunk request")

			require.Equal(t, expectedRequest.Locator, actualRequest.Locator)
			require.Equal(t, expectedRequest.ChunkID, actualRequest.ChunkID)
			require.Equal(t, expectedRequest.Agrees, actualRequest.Agrees)
			require.Equal(t, expectedRequest.Disagrees, actualRequest.Disagrees)
			require.ElementsMatch(t, expectedRequest.Targets, actualRequest.Targets)

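			// replies asynchronously to mimic the requester's network round trip; the
			// first agreeing executor is used as the origin ID of the response.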
			go func() {
				response, ok := responses[actualRequest.ID()]
				require.True(t, ok)

				handler(actualRequest.Agrees[0], response)
				wg.Done()
			}()
		}).Return()

	return wg
}

// chunkDataPackResponsesFixture creates chunk data pack responses for given chunks.
func chunkDataPackResponsesFixture(t *testing.T,
	statuses verification.ChunkStatusList,
	collMap map[flow.Identifier]*flow.Collection,
	result *flow.ExecutionResult,
) map[flow.Identifier]*verification.ChunkDataPackResponse {
	responses := make(map[flow.Identifier]*verification.ChunkDataPackResponse)

	for _, status := range statuses {
		chunkLocatorID := status.ChunkLocatorID()
		responses[chunkLocatorID] = chunkDataPackResponseFixture(t, status.Chunk(), collMap[status.Chunk().ID()], result)
	}

	return responses
}

// chunkDataPackResponseFixture creates a chunk data pack response for given input.
func chunkDataPackResponseFixture(t *testing.T,
	chunk *flow.Chunk,
	collection *flow.Collection,
	result *flow.ExecutionResult) *verification.ChunkDataPackResponse {

	require.Equal(t, collection != nil, !fetcher.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection")

	return &verification.ChunkDataPackResponse{
		Locator: chunks.Locator{
			ResultID: result.ID(),
			Index:    chunk.Index,
		},
		Cdp: unittest.ChunkDataPackFixture(chunk.ID(),
			unittest.WithStartState(chunk.StartState),
			unittest.WithChunkDataPackCollection(collection)),
	}
}

// verifiableChunksFixture is a test helper that creates verifiable chunks and chunk data responses.
func verifiableChunksFixture(t *testing.T,
	statuses verification.ChunkStatusList,
	block *flow.Block,
	result *flow.ExecutionResult,
	collMap map[flow.Identifier]*flow.Collection) (
	map[flow.Identifier]*verification.ChunkDataPackResponse,
	map[flow.Identifier]*verification.VerifiableChunkData) {

	responses := chunkDataPackResponsesFixture(t, statuses, collMap, result)

	verifiableChunks := make(map[flow.Identifier]*verification.VerifiableChunkData)
	for _, status := range statuses {
		chunkLocatorID := status.ChunkLocatorID()

		response, ok := responses[chunkLocatorID]
		require.True(t, ok, "missing chunk data response")

		verifiableChunks[chunkLocatorID] = verifiableChunkFixture(t, status.Chunk(), block, status.ExecutionResult, response.Cdp)
	}

	return responses, verifiableChunks
}

// verifiableChunkFixture is a test helper that creates a verifiable chunk for the given chunk,
// block, result, and chunk data pack.
func verifiableChunkFixture(t *testing.T,
	chunk *flow.Chunk,
	block *flow.Block,
	result *flow.ExecutionResult,
	chunkDataPack *flow.ChunkDataPack) *verification.VerifiableChunkData {

	offsetForChunk, err := fetcher.TransactionOffsetForChunk(result.Chunks, chunk.Index)
	require.NoError(t, err)

	// TODO: add end state
	return &verification.VerifiableChunkData{
		Chunk:             chunk,
		Header:            block.Header,
		Result:            result,
		ChunkDataPack:     chunkDataPack,
		TransactionOffset: offsetForChunk,
	}
}

// chunkRequestsFixture is a test helper that creates and returns chunk data pack requests for the given result and chunk statuses.
// Agrees and disagrees are the lists of execution node identifiers that generated the same and a contradicting execution
// result, respectively, relative to the execution result the chunks belong to.
func chunkRequestsFixture(
	resultID flow.Identifier,
	statuses verification.ChunkStatusList,
	agrees flow.IdentityList,
	disagrees flow.IdentityList) map[flow.Identifier]*verification.ChunkDataPackRequest {

	requests := make(map[flow.Identifier]*verification.ChunkDataPackRequest)
	for _, status := range statuses {
		chunkLocatorID := status.ChunkLocatorID()
		requests[chunkLocatorID] = chunkRequestFixture(resultID, status, agrees, disagrees)
	}

	return requests
}

// chunkRequestFixture creates and returns a chunk request for the given result and chunk status.
//
// Agrees and disagrees are the lists of execution node identifiers that generated the same and a contradicting execution
// result, respectively, relative to the execution result the chunks belong to.
func chunkRequestFixture(resultID flow.Identifier,
	status *verification.ChunkStatus,
	agrees flow.IdentityList,
	disagrees flow.IdentityList) *verification.ChunkDataPackRequest {

	return &verification.ChunkDataPackRequest{
		Locator: chunks.Locator{
			ResultID: resultID,
			Index:    status.ChunkIndex,
		},
		ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{
			ChunkID:   status.Chunk().ID(),
			Height:    status.BlockHeight,
			Agrees:    agrees.NodeIDs(),
			Disagrees: disagrees.NodeIDs(),
			Targets:   agrees.Union(disagrees),
		},
	}
}

// completeChunkStatusListFixture creates a reference block with an execution result associated with it.
// The result has the specified number of chunks, of which a random subset of size `statusCount` is assumed
// assigned to the fetcher engine, and hence has chunk statuses associated with it.
//
// It returns the block, the result, the assigned chunk statuses, their corresponding locators, and a map from assigned chunks to their collections.
func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount int) (*flow.Block,
	*flow.ExecutionResult,
	verification.ChunkStatusList,
	chunks.LocatorMap,
	map[flow.Identifier]*flow.Collection) {
	require.LessOrEqual(t, statusCount, chunkCount)

	// keeps collections of assigned chunks
	collMap := make(map[flow.Identifier]*flow.Collection)

	collections := unittest.CollectionListFixture(chunkCount)

	block := unittest.BlockWithGuaranteesFixture(
		unittest.CollectionGuaranteesWithCollectionIDFixture(collections),
	)

	result := unittest.ExecutionResultFixture(
		unittest.WithBlock(block),
		unittest.WithChunks(uint(chunkCount)))
	statuses := unittest.ChunkStatusListFixture(t, block.Header.Height, result, statusCount)
	locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses)

	for _, status := range statuses {
		if fetcher.IsSystemChunk(status.ChunkIndex, result) {
			// system-chunk should have a nil collection
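			// (the system chunk is the last chunk of a result and executes system
			// transactions that are not part of any collection)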
			continue
		}
		collMap[status.Chunk().ID()] = collections[status.ChunkIndex]
	}

	return block, result, statuses, locators, collMap
}

func TestTransactionOffsetForChunk(t *testing.T) {
	t.Run("first chunk index always returns zero offset", func(t *testing.T) {
		offsetForChunk, err := fetcher.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0)
		require.NoError(t, err)
		assert.Equal(t, uint32(0), offsetForChunk)
	})

	t.Run("offset is calculated", func(t *testing.T) {

		chunksList := []*flow.Chunk{
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 1,
				},
			},
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 2,
				},
			},
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 3,
				},
			},
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 5,
				},
			},
		}

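		// offsets are prefix sums of the preceding chunks' NumberOfTransactions:
		// chunk 0 -> 0, chunk 1 -> 1, chunk 2 -> 1+2 = 3, chunk 3 -> 1+2+3 = 6.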
		offsetForChunk, err := fetcher.TransactionOffsetForChunk(chunksList, 0)
		require.NoError(t, err)
		assert.Equal(t, uint32(0), offsetForChunk)

		offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 1)
		require.NoError(t, err)
		assert.Equal(t, uint32(1), offsetForChunk)

		offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 2)
		require.NoError(t, err)
		assert.Equal(t, uint32(3), offsetForChunk)

		offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 3)
		require.NoError(t, err)
		assert.Equal(t, uint32(6), offsetForChunk)
	})

	t.Run("requesting index beyond length triggers error", func(t *testing.T) {

		chunksList := make([]*flow.Chunk, 2)

		_, err := fetcher.TransactionOffsetForChunk(chunksList, 2)
		require.Error(t, err)
	})
}
  1069  }