github.com/koko1123/flow-go-1@v0.29.6/engine/verification/fetcher/engine_test.go

package fetcher_test

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/rs/zerolog"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/koko1123/flow-go-1/engine/verification/fetcher"
	mockfetcher "github.com/koko1123/flow-go-1/engine/verification/fetcher/mock"
	vertestutils "github.com/koko1123/flow-go-1/engine/verification/utils/unittest"
	"github.com/koko1123/flow-go-1/model/chunks"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/model/verification"
	mempool "github.com/koko1123/flow-go-1/module/mempool/mock"
	module "github.com/koko1123/flow-go-1/module/mock"
	"github.com/koko1123/flow-go-1/module/trace"
	"github.com/koko1123/flow-go-1/network/mocknetwork"
	flowprotocol "github.com/koko1123/flow-go-1/state/protocol"
	protocol "github.com/koko1123/flow-go-1/state/protocol/mock"
	storage "github.com/koko1123/flow-go-1/storage/mock"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

// FetcherEngineTestSuite encapsulates the data structures for running unit tests on the fetcher engine.
type FetcherEngineTestSuite struct {
	log                   zerolog.Logger
	metrics               *module.VerificationMetrics
	tracer                *trace.NoopTracer
	verifier              *mocknetwork.Engine                 // the verifier engine
	state                 *protocol.State                     // used to verify the request origin
	pendingChunks         *mempool.ChunkStatuses              // used to store all the pending chunks that are assigned to this node
	blocks                *storage.Blocks                     // used for verifying collection IDs
	headers               *storage.Headers                    // used for building verifiable chunk data
	chunkConsumerNotifier *module.ProcessingNotifier          // used to report that a chunk has been processed
	results               *storage.ExecutionResults           // used to retrieve the execution result of an assigned chunk
	receipts              *storage.ExecutionReceipts          // used to find the executors of a chunk
	requester             *mockfetcher.ChunkDataPackRequester // used to request chunk data packs from the network
}

// setupTest initiates a test suite prior to each test.
func setupTest() *FetcherEngineTestSuite {
	s := &FetcherEngineTestSuite{
		log:                   unittest.Logger(),
		metrics:               &module.VerificationMetrics{},
		tracer:                trace.NewNoopTracer(),
		verifier:              &mocknetwork.Engine{},
		state:                 &protocol.State{},
		pendingChunks:         &mempool.ChunkStatuses{},
		headers:               &storage.Headers{},
		blocks:                &storage.Blocks{},
		chunkConsumerNotifier: &module.ProcessingNotifier{},
		results:               &storage.ExecutionResults{},
		receipts:              &storage.ExecutionReceipts{},
		requester:             &mockfetcher.ChunkDataPackRequester{},
	}

	return s
}

// newFetcherEngineWithStop returns a fetcher engine for testing with its stop-at height set.
func newFetcherEngineWithStop(s *FetcherEngineTestSuite, stopAtHeight uint64) *fetcher.Engine {
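	// fetcher.New is expected to register the constructed engine as the requester's
	// chunk data pack handler, hence the call is mocked before the engine is built.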
	s.requester.On("WithChunkDataPackHandler", mock.AnythingOfType("*fetcher.Engine")).Return()

	e := fetcher.New(s.log,
		s.metrics,
		s.tracer,
		s.verifier,
		s.state,
		s.pendingChunks,
		s.headers,
		s.blocks,
		s.results,
		s.receipts,
		s.requester,
		stopAtHeight)

	e.WithChunkConsumerNotifier(s.chunkConsumerNotifier)
	return e
}

// newFetcherEngine returns a fetcher engine for testing.
func newFetcherEngine(s *FetcherEngineTestSuite) *fetcher.Engine {
	return newFetcherEngineWithStop(s, 0)
}

func TestProcessAssignedChunkHappyPath(t *testing.T) {
	tt := []struct {
		chunks   int
		assigned int
	}{
		{
			chunks:   1, // single chunk, single assigned
			assigned: 1,
		},
		{
			chunks:   2, // two chunks, single assigned
			assigned: 1,
		},
		{
			chunks:   4, // four chunks, two assigned
			assigned: 2,
		},
		{
			chunks:   10, // ten chunks, five assigned
			assigned: 5,
		},
	}

	for _, tc := range tt {
		t.Run(fmt.Sprintf("%d-chunk-%d-assigned", tc.chunks, tc.assigned), func(t *testing.T) {
			testProcessAssignChunkHappyPath(t, tc.chunks, tc.assigned)
		})
	}
}

// testProcessAssignChunkHappyPath evaluates the behavior of the fetcher engine upon receiving assigned chunks:
// it should ask the requester for a chunk data pack for each chunk.
// The test then mocks a chunk data response for each request the fetcher engine made.
// On receiving a response, the fetcher engine should validate it, create a verifiable chunk, and pass the
// verifiable chunk to the verifier engine.
// Once the verifier engine returns, the fetcher engine should notify the chunk consumer that it is done with
// this chunk.
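//
// At a high level, the happy path exercised here is (a sketch, matching the mocks set up below):
//
//	ProcessAssignedChunk(locator)
//	  -> pendingChunks.Add(status)           // track the assigned chunk
//	  -> requester.Request(request)          // ask the network for its chunk data pack
//	HandleChunkDataPack(originID, response)
//	  -> validate response, pendingChunks.Remove(...)
//	  -> verifier.ProcessLocal(verifiableChunk)
//	  -> chunkConsumerNotifier.Notify(locatorID)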
func testProcessAssignChunkHappyPath(t *testing.T, chunkNum int, assignedNum int) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with the specified number of chunks and assigned chunks.
	// also, the result has been created by two execution nodes, while two others have a result conflicting with it.
	block, result, statuses, locators, collMap := completeChunkStatusListFixture(t, chunkNum, assignedNum)
	_, _, agrees, disagrees := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)
	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Times(len(locators))

	// the chunks belong to an unsealed block.
	mockBlockSealingStatus(s.state, s.headers, block.Header, false)

	// mocks resources on fetcher engine side.
	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})
	mockBlocksStorage(s.blocks, s.headers, block)
	mockPendingChunksAdd(t, s.pendingChunks, statuses, true)
	mockPendingChunksRemove(t, s.pendingChunks, statuses, true)
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees.Union(disagrees))

	// generates and mocks requesting chunk data pack fixture
	requests := chunkRequestsFixture(result.ID(), statuses, agrees, disagrees)
	chunkDataPacks, verifiableChunks := verifiableChunksFixture(t, statuses, block, result, collMap)

	// fetcher engine should request chunk data for received (assigned) chunk locators
	s.metrics.On("OnChunkDataPackRequestSentByFetcher").Return().Times(len(requests))
	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Times(len(chunkDataPacks))
	requesterWg := mockRequester(t, s.requester, requests, chunkDataPacks,
		func(originID flow.Identifier, response *verification.ChunkDataPackResponse) {

			// mocks replying to the requests by sending a chunk data pack.
			e.HandleChunkDataPack(originID, response)
		})

	// fetcher engine should create and pass a verifiable chunk to the verifier engine upon receiving each
	// chunk data response, and notify the consumer that it is done with processing the chunk.
	s.metrics.On("OnVerifiableChunkSentToVerifier").Return().Times(len(verifiableChunks))
	verifierWG := mockVerifierEngine(t, s.verifier, verifiableChunks)
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs(locators.ToList()))

	// processes the assigned chunk locators in parallel.
	processWG := &sync.WaitGroup{}
	processWG.Add(len(locators))
	for _, locator := range locators {
		go func(l *chunks.Locator) {
			e.ProcessAssignedChunk(l)
			processWG.Done()
		}(locator)
	}

	unittest.RequireReturnsBefore(t, requesterWg.Wait, 1*time.Second, "could not handle received chunk data pack on time")
	unittest.RequireReturnsBefore(t, verifierWG.Wait, 1*time.Second, "could not push verifiable chunk on time")
	unittest.RequireReturnsBefore(t, processWG.Wait, 1*time.Second, "could not process chunks on time")

	mock.AssertExpectationsForObjects(t, s.results, s.requester, s.pendingChunks, s.chunkConsumerNotifier, s.metrics)
}

// TestChunkResponse_RemovingStatusFails evaluates the behavior of the fetcher engine with respect to receiving duplicate
// and concurrent chunk data pack responses for the same chunk.
// Deduplication of concurrent responses happens at the pending chunk statuses mempool, which is protected by a mutex lock.
// So, among concurrent chunk data pack responses for the same chunk, only one wins the lock and can successfully remove
// the status from the mempool, while the others fail.
// If the fetcher engine fails to remove a pending chunk status, the chunk has already been processed, and hence the engine
// should drop the response gracefully, without notifying the verifier or the chunk consumer.
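//
// In other words, pendingChunks.Remove acts as the test-and-set that deduplicates concurrent
// responses; a sketch of the presumed guard inside HandleChunkDataPack:
//
//	if !pendingChunks.Remove(response.Index, response.ResultID) {
//	    return // another response for this chunk won the race; drop this one silently
//	}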
func TestChunkResponse_RemovingStatusFails(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, one of which is assigned to this fetcher engine.
	block, result, statuses, _, collMap := completeChunkStatusListFixture(t, 2, 1)
	_, _, agrees, _ := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)
	mockBlockSealingStatus(s.state, s.headers, block.Header, false)

	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})
	mockBlocksStorage(s.blocks, s.headers, block)
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees)

	chunkLocatorID := statuses[0].ChunkLocatorID()
	// trying to remove the pending status fails.
	mockPendingChunksRemove(t, s.pendingChunks, statuses, false)

	chunkDataPacks, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Once()
	e.HandleChunkDataPack(agrees[0].NodeID, chunkDataPacks[chunkLocatorID])

	// no verifiable chunk should be passed to the verifier engine,
	// and the chunk consumer should not get any notification.
	s.chunkConsumerNotifier.AssertNotCalled(t, "Notify")
	s.verifier.AssertNotCalled(t, "ProcessLocal")
	mock.AssertExpectationsForObjects(t, s.requester, s.pendingChunks, s.verifier, s.chunkConsumerNotifier, s.metrics)
}

// TestProcessAssignChunkSealedAfterRequest evaluates the behavior of the fetcher engine with respect to an assigned chunk
// whose block gets sealed after its chunk data pack has been requested from the requester.
// The requester notifies the fetcher that the block for the chunk has been sealed.
// The fetcher engine should then remove the chunk request status from memory, and notify the
// chunk consumer that it is done processing this chunk.
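//
// The sealed-notification path exercised here is, roughly (matching the mocks below):
//
//	requester -> e.NotifyChunkDataPackSealed(index, resultID)
//	  -> pendingChunks.Remove(index, resultID)
//	  -> chunkConsumerNotifier.Notify(locatorID)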
func TestProcessAssignChunkSealedAfterRequest(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, one of which is assigned to this fetcher engine.
	// also, the result has been created by two execution nodes, while two others have a result conflicting with it.
	// the chunk belongs to an unsealed block.
	block, result, statuses, locators, collMap := completeChunkStatusListFixture(t, 2, 1)
	_, _, agrees, disagrees := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)
	mockBlockSealingStatus(s.state, s.headers, block.Header, false)
	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Times(len(locators))

	// mocks resources on fetcher engine side.
	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})
	mockPendingChunksAdd(t, s.pendingChunks, statuses, true)
	mockPendingChunksRemove(t, s.pendingChunks, statuses, true)
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees.Union(disagrees))

	// generates and mocks requesting chunk data pack fixture
	requests := chunkRequestsFixture(result.ID(), statuses, agrees, disagrees)
	responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	// the fetcher engine should request chunk data for the received (assigned) chunk locators.
	// as the response, it receives a notification that the chunk belongs to a sealed block;
	// we mock this as the block getting sealed after the request has been dispatched.
	s.metrics.On("OnChunkDataPackRequestSentByFetcher").Return().Times(len(requests))
	requesterWg := mockRequester(t, s.requester, requests, responses, func(originID flow.Identifier,
		response *verification.ChunkDataPackResponse) {
		e.NotifyChunkDataPackSealed(response.Index, response.ResultID)
	})

	// fetcher engine should notify the chunk consumer that it is done with these chunks.
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs(locators.ToList()))

	// processes the assigned chunk locators in parallel.
	processWG := &sync.WaitGroup{}
	processWG.Add(len(locators))
	for _, locator := range locators {
		go func(l *chunks.Locator) {
			e.ProcessAssignedChunk(l)
			processWG.Done()
		}(locator)
	}

	unittest.RequireReturnsBefore(t, requesterWg.Wait, time.Second, "could not handle sealed chunks notification on time")
	unittest.RequireReturnsBefore(t, processWG.Wait, 1*time.Second, "could not process chunks on time")

	mock.AssertExpectationsForObjects(t, s.requester, s.pendingChunks, s.chunkConsumerNotifier, s.metrics)
	// no verifiable chunk should be passed to the verifier engine
	s.verifier.AssertNotCalled(t, "ProcessLocal")
}

// TestChunkResponse_InvalidChunkDataPack evaluates the unhappy path of receiving an invalid chunk data response.
// A chunk data response is invalid if its integrity is violated. We consider the collection ID, chunk ID, and start state
// as the necessary conditions for chunk data integrity.
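// Each table case below either violates one of these integrity conditions, or presents an
// invalid origin for the response: an unknown, zero-weight, or non-execution-role identity.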
func TestChunkResponse_InvalidChunkDataPack(t *testing.T) {
	tt := []struct {
		alterChunkDataResponse func(*flow.ChunkDataPack)
		mockStateFunc          func(flow.Identity, *protocol.State, flow.Identifier) // mocks state at block identifier for the given identity.
		msg                    string
	}{
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// re-writes the collection with a random one whose ID differs from the
				// original collection ID in the block's guarantee.
				txBody := unittest.TransactionBodyFixture()
				cdp.Collection.Transactions = []*flow.TransactionBody{
					&txBody,
				}
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				// mocks a valid execution node as originID
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "conflicting-collection-with-blocks-storage",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				cdp.ChunkID = unittest.IdentifierFixture()
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				// mocks a valid execution node as originID
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-chunk-ID",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				cdp.StartState = unittest.StateCommitmentFixture()
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				// mocks a valid execution node as originID
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-start-state",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter the chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				mockStateAtBlockIDForMissingIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-origin-id",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter the chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				identity.Weight = 0
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "zero-weight-origin-id",
		},
		{
			alterChunkDataResponse: func(cdp *flow.ChunkDataPack) {
				// we don't alter the chunk data pack content
			},
			mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) {
				identity.Role = flow.RoleVerification
				mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity})
			},
			msg: "invalid-origin-role",
		},
	}

	for _, tc := range tt {
		t.Run(tc.msg, func(t *testing.T) {
			testInvalidChunkDataResponse(t, tc.alterChunkDataResponse, tc.mockStateFunc)
		})
	}
}

// testInvalidChunkDataResponse evaluates the unhappy path of receiving
// an invalid chunk data response for an already requested chunk.
// The invalid response should be dropped without any further action. In particular, the
// notifier and verifier engine should not be called as a result of handling an invalid chunk.
//
// The input alter function alters the chunk data response to break its integrity.
func testInvalidChunkDataResponse(t *testing.T,
	alterChunkDataResponse func(*flow.ChunkDataPack),
	mockStateFunc func(flow.Identity, *protocol.State, flow.Identifier)) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, one of which is assigned to this fetcher engine.
	// also, the result has been created by two execution nodes, while two others have a result conflicting with it.
	// the chunk belongs to an unsealed block.
	block, result, statuses, _, collMap := completeChunkStatusListFixture(t, 2, 1)
	_, _, agrees, _ := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2)

	// mocks resources on fetcher engine side.
	mockPendingChunksGet(s.pendingChunks, statuses)
	mockBlocksStorage(s.blocks, s.headers, block)

	chunkLocatorID := statuses[0].ChunkLocatorID()
	responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	// alters the chunk data pack so that it becomes invalid.
	alterChunkDataResponse(responses[chunkLocatorID].Cdp)
	mockStateFunc(*agrees[0], s.state, block.ID())

	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Times(len(responses))
	e.HandleChunkDataPack(agrees[0].NodeID, responses[chunkLocatorID])

	mock.AssertExpectationsForObjects(t, s.pendingChunks, s.metrics)
	// no verifiable chunk should be passed to the verifier engine,
	// and the chunk consumer should not get any notification.
	s.chunkConsumerNotifier.AssertNotCalled(t, "Notify")
	s.verifier.AssertNotCalled(t, "ProcessLocal")

	// none of the subsequent calls on the pipeline path should happen when validation fails.
	s.results.AssertNotCalled(t, "ByID")
	s.pendingChunks.AssertNotCalled(t, "Remove")
}

// TestChunkResponse_MissingStatus evaluates that if the fetcher engine receives a chunk data pack response for which
// it does not have any pending status, it drops the response immediately and does not proceed with the handling pipeline.
// Receiving such a chunk data response can happen in the following scenarios:
// - After requesting it from the network, the requester informs the fetcher engine that the chunk belongs to a sealed block.
// - More than one copy of the same response arrives at different times, after the first copy has already been handled.
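//
// A sketch of the presumed guard in HandleChunkDataPack that this test exercises:
//
//	status, ok := pendingChunks.Get(response.Index, response.ResultID)
//	if !ok {
//	    return // no pending status for this chunk; drop the response
//	}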
func TestChunkResponse_MissingStatus(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a result with 2 chunks, one of which is assigned to this fetcher engine.
	// also, the result has been created by two execution nodes, while two others have a result conflicting with it.
	// the chunk belongs to an unsealed block.
	block, result, statuses, _, collMap := completeChunkStatusListFixture(t, 2, 1)
	status := statuses[0]
	responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap)

	chunkLocatorID := statuses[0].ChunkLocatorID()

	// mocks that there is no pending status for this chunk at the fetcher engine.
	s.pendingChunks.On("Get", status.ChunkIndex, result.ID()).Return(nil, false)

	s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Times(len(responses))
	e.HandleChunkDataPack(unittest.IdentifierFixture(), responses[chunkLocatorID])

	mock.AssertExpectationsForObjects(t, s.pendingChunks, s.metrics)

	// no verifiable chunk should be passed to the verifier engine,
	// and the chunk consumer should not get any notification.
	s.chunkConsumerNotifier.AssertNotCalled(t, "Notify")
	s.verifier.AssertNotCalled(t, "ProcessLocal")

	// none of the subsequent calls on the pipeline path should happen.
	s.results.AssertNotCalled(t, "ByID")
	s.blocks.AssertNotCalled(t, "ByID")
	s.pendingChunks.AssertNotCalled(t, "Remove")
	s.state.AssertNotCalled(t, "AtBlockID")
}

// TestSkipChunkOfSealedBlock evaluates that if the fetcher engine receives a chunk belonging to a sealed block,
// it drops the chunk without processing it any further, and notifies the consumer
// that it is done with processing that chunk.
func TestSkipChunkOfSealedBlock(t *testing.T) {
	s := setupTest()
	e := newFetcherEngine(s)

	// creates a single chunk locator, and mocks its corresponding block as sealed.
	block := unittest.BlockFixture()
	result := unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(block.ID()))
	statuses := unittest.ChunkStatusListFixture(t, block.Header.Height, result, 1)
	locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses)
	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Once()

	mockBlockSealingStatus(s.state, s.headers, block.Header, true)
	mockResultsByIDs(s.results, []*flow.ExecutionResult{result})

	// expects the processing notifier being invoked upon detection of a sealed chunk,
	// which marks the termination of processing a sealed chunk on the fetcher engine
	// side.
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs(locators.ToList()))

	for _, locator := range locators {
		e.ProcessAssignedChunk(locator)
	}

	mock.AssertExpectationsForObjects(t, s.results, s.metrics)
	// we should not request a duplicate chunk status.
	s.requester.AssertNotCalled(t, "Request")
	// we should not try adding a chunk of a sealed block to the chunk status mempool.
	s.pendingChunks.AssertNotCalled(t, "Add")
}

// TestStopAtHeight evaluates that the fetcher engine drops assigned chunks whose block height
// is at or above the configured stop-at height: such chunks are never added to the pending chunks
// mempool, while chunks below the stop-at height are. In both cases the engine notifies the
// consumer that it is done with the chunk.
func TestStopAtHeight(t *testing.T) {
	s := setupTest()

	headerA := unittest.BlockHeaderFixture()
	headerB := unittest.BlockHeaderWithParentFixture(headerA)

	// stop at blockB, meaning blockA will be the last block to verify
	e := newFetcherEngineWithStop(s, headerB.Height)

	resultA := unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(headerA.ID()))
	resultB := unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(headerB.ID()))

	s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return()

	mockBlockSealingStatus(s.state, s.headers, headerA, false)
	mockBlockSealingStatus(s.state, s.headers, headerB, false)
	mockResultsByIDs(s.results, []*flow.ExecutionResult{resultA, resultB})

	locatorA := chunks.Locator{
		ResultID: resultA.ID(),
		Index:    0,
	}
	locatorB := chunks.Locator{
		ResultID: resultB.ID(),
		Index:    0,
	}

	// expects the processing notifier being invoked for both chunks,
	// which marks the termination of their processing on the fetcher engine
	// side.
	mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs([]flow.Entity{locatorA, locatorB}))

	s.pendingChunks.On("Add", mock.Anything).Return(false)

	e.ProcessAssignedChunk(&locatorA)
	e.ProcessAssignedChunk(&locatorB)

	mock.AssertExpectationsForObjects(t, s.results, s.metrics)

	// we should not request a duplicate chunk status.
	s.requester.AssertNotCalled(t, "Request")

	s.pendingChunks.AssertNotCalled(t, "Add", mock.MatchedBy(func(status *verification.ChunkStatus) bool {
		return status.BlockHeight == headerB.Height
	}))

	s.pendingChunks.AssertCalled(t, "Add", mock.MatchedBy(func(status *verification.ChunkStatus) bool {
		return status.BlockHeight == headerA.Height
	}))
}

// mockResultsByIDs mocks the results storage for affirmative querying of result IDs.
// Each result in the given list can be queried by its ID.
func mockResultsByIDs(results *storage.ExecutionResults, list []*flow.ExecutionResult) {
	for _, result := range list {
		results.On("ByID", result.ID()).Return(result, nil)
	}
}

// mockReceiptsBlockID is a test helper that mocks the ByBlockID method of the execution receipts storage.
// It returns two lists of receipts for the given block ID:
// the first set are agree receipts, which have the same result ID as the given result;
// the second set are disagree receipts, which have a result ID different from the given result.
//
// It also returns the lists of distinct executor node identities for all those receipts.
func mockReceiptsBlockID(t *testing.T,
	blockID flow.Identifier,
	receipts *storage.ExecutionReceipts,
	result *flow.ExecutionResult,
	agrees int,
	disagrees int) (flow.ExecutionReceiptList, flow.ExecutionReceiptList, flow.IdentityList, flow.IdentityList) {

	agreeReceipts := flow.ExecutionReceiptList{}
	disagreeReceipts := flow.ExecutionReceiptList{}
	agreeExecutors := flow.IdentityList{}
	disagreeExecutors := flow.IdentityList{}

	for i := 0; i < agrees; i++ {
		receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(result))
		require.NotContains(t, agreeExecutors.NodeIDs(), receipt.ExecutorID) // should not have duplicate executors
		agreeExecutors = append(agreeExecutors, unittest.IdentityFixture(
			unittest.WithRole(flow.RoleExecution),
			unittest.WithNodeID(receipt.ExecutorID)))
		agreeReceipts = append(agreeReceipts, receipt)
	}

	for i := 0; i < disagrees; i++ {
		disagreeResult := unittest.ExecutionResultFixture()
		require.NotEqual(t, disagreeResult.ID(), result.ID())

		receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(disagreeResult))
		require.NotContains(t, agreeExecutors.NodeIDs(), receipt.ExecutorID)    // should not have an executor in both lists
		require.NotContains(t, disagreeExecutors.NodeIDs(), receipt.ExecutorID) // should not have duplicate executors
		disagreeExecutors = append(disagreeExecutors, unittest.IdentityFixture(
			unittest.WithRole(flow.RoleExecution),
			unittest.WithNodeID(receipt.ExecutorID)))
		disagreeReceipts = append(disagreeReceipts, receipt)
	}

	all := append(agreeReceipts, disagreeReceipts...)

	receipts.On("ByBlockID", blockID).Return(all, nil)
	return agreeReceipts, disagreeReceipts, agreeExecutors, disagreeExecutors
}

// mockStateAtBlockIDForIdentities is a test helper that mocks the state at the block ID with the given execution node identities.
func mockStateAtBlockIDForIdentities(state *protocol.State, blockID flow.Identifier, participants flow.IdentityList) {
	snapshot := &protocol.Snapshot{}
	state.On("AtBlockID", blockID).Return(snapshot)
	snapshot.On("Identities", mock.Anything).Return(participants, nil)
	for _, id := range participants {
		snapshot.On("Identity", id.NodeID).Return(id, nil)
	}
}

// mockStateAtBlockIDForMissingIdentities is a test helper that mocks the state at the block ID with the given execution node
// identities as missing ones, i.e., they are not part of the state.
func mockStateAtBlockIDForMissingIdentities(state *protocol.State, blockID flow.Identifier, participants flow.IdentityList) {
	snapshot := &protocol.Snapshot{}
	state.On("AtBlockID", blockID).Return(snapshot)
	for _, id := range participants {
		snapshot.On("Identity", id.NodeID).Return(nil, flowprotocol.IdentityNotFoundError{NodeID: id.NodeID})
	}
}

// mockPendingChunksAdd mocks the Add method of the pending chunks mempool, expecting only the specified list of chunk statuses.
// Each chunk status should be added only once.
// The mock returns the specified `added` boolean as its result.
func mockPendingChunksAdd(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, added bool) {
	mu := &sync.Mutex{}

	pendingChunks.On("Add", mock.Anything).
		Run(func(args mock.Arguments) {
			// to provide mutual exclusion under concurrent invocations.
			mu.Lock()
			defer mu.Unlock()

			actual, ok := args[0].(*verification.ChunkStatus)
			require.True(t, ok)

			// there should be a matching chunk status with the received one.
			actualLocatorID := actual.ChunkLocatorID()

			for _, expected := range list {
				expectedLocatorID := expected.ChunkLocatorID()
				if expectedLocatorID == actualLocatorID {
					require.Equal(t, expected.ExecutionResult, actual.ExecutionResult)
					return
				}
			}

			require.Fail(t, "tried adding an unexpected chunk status to the mempool")
		}).Return(added).Times(len(list))
}

// mockPendingChunksRemove mocks the Remove method of the pending chunks mempool, expecting only the specified list of chunk statuses.
// Each chunk status should be removed only once.
// The mock returns the specified `removed` boolean as its result.
func mockPendingChunksRemove(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, removed bool) {
	mu := &sync.Mutex{}

	pendingChunks.On("Remove", mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			// to provide mutual exclusion under concurrent invocations.
			mu.Lock()
			defer mu.Unlock()

			actualIndex, ok := args[0].(uint64)
			require.True(t, ok)

			actualResultID, ok := args[1].(flow.Identifier)
			require.True(t, ok)

			// there should be a matching chunk status with the received one.
			for _, expected := range list {
				if expected.ChunkIndex == actualIndex && expected.ExecutionResult.ID() == actualResultID {
					return
				}
			}

			require.Fail(t, "tried removing an unexpected chunk status from the mempool")
		}).Return(removed).Times(len(list))
}

// mockPendingChunksGet mocks the Get method of the pending chunks mempool, expecting only the specified list of chunk statuses.
func mockPendingChunksGet(pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus) {
	mu := &sync.Mutex{}

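	// Passing functions to Return lets the mock compute its return values per invocation:
	// mocks generated by mockery presumably invoke a stored function whose signature matches
	// the return type, feeding it the call's actual arguments.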
	pendingChunks.On("Get", mock.Anything, mock.Anything).Return(
		func(chunkIndex uint64, resultID flow.Identifier) *verification.ChunkStatus {
			// to provide mutual exclusion under concurrent invocations.
			mu.Lock()
			defer mu.Unlock()

			for _, expected := range list {
				if expected.ChunkIndex == chunkIndex && expected.ExecutionResult.ID() == resultID {
					return expected
				}
			}
			return nil
		},
		func(chunkIndex uint64, resultID flow.Identifier) bool {
			for _, expected := range list {
				if expected.ChunkIndex == chunkIndex && expected.ExecutionResult.ID() == resultID {
					return true
				}
			}
			return false
		})
}

// mockVerifierEngine mocks the verifier engine to expect receiving verifiable chunk data matching the specified input.
// Each verifiable chunk should be passed only once.
func mockVerifierEngine(t *testing.T,
	verifier *mocknetwork.Engine,
	verifiableChunks map[flow.Identifier]*verification.VerifiableChunkData) *sync.WaitGroup {
	mu := sync.Mutex{}
	wg := &sync.WaitGroup{}
	wg.Add(len(verifiableChunks))

	seen := make(map[flow.Identifier]struct{})

	verifier.On("ProcessLocal", mock.Anything).
		Run(func(args mock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			vc, ok := args[0].(*verification.VerifiableChunkData)
			require.True(t, ok)

			// verifiable chunk data should be distinct.
			_, ok = seen[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)]
			require.False(t, ok, "duplicated verifiable chunk received")
			seen[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)] = struct{}{}

			// we should expect this verifiable chunk, and its fields should match our expectation.
			expected, ok := verifiableChunks[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)]
			require.True(t, ok, "verifier engine received an unknown verifiable chunk data")

			if vc.IsSystemChunk {
				// a system chunk has a nil collection.
				require.Nil(t, vc.ChunkDataPack.Collection)
			} else {
				// a non-system chunk has a non-nil collection.
				require.NotNil(t, vc.ChunkDataPack.Collection)
				require.Equal(t, expected.ChunkDataPack.Collection.ID(), vc.ChunkDataPack.Collection.ID())
			}

			require.Equal(t, *expected.ChunkDataPack, *vc.ChunkDataPack)
			require.Equal(t, expected.Result.ID(), vc.Result.ID())
			require.Equal(t, expected.Header.ID(), vc.Header.ID())

			isSystemChunk := fetcher.IsSystemChunk(vc.Chunk.Index, vc.Result)
			require.Equal(t, isSystemChunk, vc.IsSystemChunk)

			endState, err := fetcher.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk)
			require.NoError(t, err)

			require.Equal(t, endState, vc.EndState)
			wg.Done()
		}).Return(nil)

	return wg
}

// mockChunkConsumerNotifier mocks the Notify method of the processing notifier to expect being called exactly once per
// given locator ID.
func mockChunkConsumerNotifier(t *testing.T, notifier *module.ProcessingNotifier, locatorIDs flow.IdentifierList) {
	mu := &sync.Mutex{}
	seen := make(map[flow.Identifier]struct{})
	notifier.On("Notify", mock.Anything).Run(func(args mock.Arguments) {
		// to provide mutual exclusion under concurrent invocations.
		mu.Lock()
		defer mu.Unlock()

		locatorID, ok := args[0].(flow.Identifier)
		require.True(t, ok)
		require.Contains(t, locatorIDs, locatorID, "tried calling notifier on an unexpected locator ID")

		// each chunk should be notified once
		_, ok = seen[locatorID]
		require.False(t, ok)
		seen[locatorID] = struct{}{}

	}).Return().Times(len(locatorIDs))
}

// mockBlockSealingStatus mocks the protocol state's sealing status at the height of the given block.
func mockBlockSealingStatus(state *protocol.State, headers *storage.Headers, header *flow.Header, sealed bool) {
	headers.On("ByBlockID", header.ID()).Return(header, nil)
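	// a block presumably counts as sealed when the last sealed height is at or above its
	// height, so mocking the last sealed height one above (or below) the block flips its status.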
	if sealed {
		vertestutils.MockLastSealedHeight(state, header.Height+1)
	} else {
		vertestutils.MockLastSealedHeight(state, header.Height-1)
	}
}

// mockBlocksStorage mocks the blocks and headers storages for the given block.
func mockBlocksStorage(blocks *storage.Blocks, headers *storage.Headers, block *flow.Block) {
	blockID := block.ID()
	blocks.On("ByID", blockID).Return(block, nil)
	headers.On("ByBlockID", blockID).Return(block.Header, nil)
}

// mockRequester mocks the chunk data pack requester with the given chunk data pack requests.
// Each chunk should be requested exactly once.
// On reply, it invokes the handler function with the chunk data pack response corresponding to the requested chunk.
func mockRequester(t *testing.T,
	requester *mockfetcher.ChunkDataPackRequester,
	requests map[flow.Identifier]*verification.ChunkDataPackRequest,
	responses map[flow.Identifier]*verification.ChunkDataPackResponse,
	handler func(flow.Identifier, *verification.ChunkDataPackResponse)) *sync.WaitGroup {

	mu := sync.Mutex{}
	wg := &sync.WaitGroup{}
	wg.Add(len(requests))
	requester.On("Request", mock.Anything).
		Run(func(args mock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			actualRequest, ok := args[0].(*verification.ChunkDataPackRequest)
			require.True(t, ok)

			expectedRequest, ok := requests[actualRequest.ID()]
			require.True(t, ok, "requester received an unexpected chunk request")

			require.Equal(t, expectedRequest.Locator, actualRequest.Locator)
			require.Equal(t, expectedRequest.ChunkID, actualRequest.ChunkID)
			require.Equal(t, expectedRequest.Agrees, actualRequest.Agrees)
			require.Equal(t, expectedRequest.Disagrees, actualRequest.Disagrees)
			require.ElementsMatch(t, expectedRequest.Targets, actualRequest.Targets)

			go func() {
				response, ok := responses[actualRequest.ID()]
				require.True(t, ok)

				handler(actualRequest.Agrees[0], response)
				wg.Done()
			}()
		}).Return()

	return wg
}

// chunkDataPackResponsesFixture creates chunk data pack responses for given chunks.
func chunkDataPackResponsesFixture(t *testing.T,
	statuses verification.ChunkStatusList,
	collMap map[flow.Identifier]*flow.Collection,
	result *flow.ExecutionResult,
) map[flow.Identifier]*verification.ChunkDataPackResponse {
	responses := make(map[flow.Identifier]*verification.ChunkDataPackResponse)

	for _, status := range statuses {
		chunkLocatorID := status.ChunkLocatorID()
		responses[chunkLocatorID] = chunkDataPackResponseFixture(t, status.Chunk(), collMap[status.Chunk().ID()], result)
	}

	return responses
}

// chunkDataPackResponseFixture creates a chunk data pack response for given input.
func chunkDataPackResponseFixture(t *testing.T,
	chunk *flow.Chunk,
	collection *flow.Collection,
	result *flow.ExecutionResult) *verification.ChunkDataPackResponse {

	require.Equal(t, collection != nil, !fetcher.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection")

	return &verification.ChunkDataPackResponse{
		Locator: chunks.Locator{
			ResultID: result.ID(),
			Index:    chunk.Index,
		},
		Cdp: unittest.ChunkDataPackFixture(chunk.ID(),
			unittest.WithStartState(chunk.StartState),
			unittest.WithChunkDataPackCollection(collection)),
	}
}

// verifiableChunksFixture is a test helper that creates verifiable chunks and chunk data responses.
func verifiableChunksFixture(t *testing.T,
	statuses verification.ChunkStatusList,
	block *flow.Block,
	result *flow.ExecutionResult,
	collMap map[flow.Identifier]*flow.Collection) (
	map[flow.Identifier]*verification.ChunkDataPackResponse,
	map[flow.Identifier]*verification.VerifiableChunkData) {

	responses := chunkDataPackResponsesFixture(t, statuses, collMap, result)

	verifiableChunks := make(map[flow.Identifier]*verification.VerifiableChunkData)
	for _, status := range statuses {
		chunkLocatorID := status.ChunkLocatorID()

		response, ok := responses[chunkLocatorID]
		require.True(t, ok, "missing chunk data response")

		verifiableChunks[chunkLocatorID] = verifiableChunkFixture(t, status.Chunk(), block, status.ExecutionResult, response.Cdp)
	}

	return responses, verifiableChunks
}

// verifiableChunkFixture is a test helper that creates a verifiable chunk for the given chunk,
// block, result, and chunk data pack.
func verifiableChunkFixture(t *testing.T,
	chunk *flow.Chunk,
	block *flow.Block,
	result *flow.ExecutionResult,
	chunkDataPack *flow.ChunkDataPack) *verification.VerifiableChunkData {

	offsetForChunk, err := fetcher.TransactionOffsetForChunk(result.Chunks, chunk.Index)
	require.NoError(t, err)

	// TODO: add end state
	return &verification.VerifiableChunkData{
		Chunk:             chunk,
		Header:            block.Header,
		Result:            result,
		ChunkDataPack:     chunkDataPack,
		TransactionOffset: offsetForChunk,
	}
}

// chunkRequestsFixture is a test helper that creates and returns chunk data pack requests for the given result and chunk statuses.
// Agrees and disagrees are the lists of execution node identifiers that generated the same and a contradicting execution result,
// respectively, relative to the execution result the chunks belong to.
func chunkRequestsFixture(
	resultID flow.Identifier,
	statuses verification.ChunkStatusList,
	agrees flow.IdentityList,
	disagrees flow.IdentityList) map[flow.Identifier]*verification.ChunkDataPackRequest {

	requests := make(map[flow.Identifier]*verification.ChunkDataPackRequest)
	for _, status := range statuses {
		chunkLocatorID := status.ChunkLocatorID()
		requests[chunkLocatorID] = chunkRequestFixture(resultID, status, agrees, disagrees)
	}

	return requests
}

// chunkRequestFixture creates and returns a chunk request for the given result and chunk status.
//
// Agrees and disagrees are the lists of execution node identifiers that generated the same and a contradicting execution result,
// respectively, relative to the execution result the chunks belong to.
func chunkRequestFixture(resultID flow.Identifier,
	status *verification.ChunkStatus,
	agrees flow.IdentityList,
	disagrees flow.IdentityList) *verification.ChunkDataPackRequest {

	return &verification.ChunkDataPackRequest{
		Locator: chunks.Locator{
			ResultID: resultID,
			Index:    status.ChunkIndex,
		},
		ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{
			ChunkID:   status.Chunk().ID(),
			Height:    status.BlockHeight,
			Agrees:    agrees.NodeIDs(),
			Disagrees: disagrees.NodeIDs(),
			Targets:   agrees.Union(disagrees),
		},
	}
}

// completeChunkStatusListFixture creates a reference block with an execution result associated with it.
// The result has the specified number of chunks, of which a random subset of size `statusCount` is assumed
// assigned to the fetcher engine, and hence has chunk statuses associated with it.
//
// It returns the block, result, assigned chunk statuses, their corresponding locators, and a map from the chunks to their collections.
func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount int) (*flow.Block,
	*flow.ExecutionResult,
	verification.ChunkStatusList,
	chunks.LocatorMap,
	map[flow.Identifier]*flow.Collection) {
	require.LessOrEqual(t, statusCount, chunkCount)

	// keeps collections of assigned chunks
	collMap := make(map[flow.Identifier]*flow.Collection)

	collections := unittest.CollectionListFixture(chunkCount)

	block := unittest.BlockWithGuaranteesFixture(
		unittest.CollectionGuaranteesWithCollectionIDFixture(collections),
	)

	result := unittest.ExecutionResultFixture(
		unittest.WithBlock(block),
		unittest.WithChunks(uint(chunkCount)))
	statuses := unittest.ChunkStatusListFixture(t, block.Header.Height, result, statusCount)
	locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses)

	for _, status := range statuses {
		if fetcher.IsSystemChunk(status.ChunkIndex, result) {
			// a system chunk should have a nil collection
			continue
		}
		collMap[status.Chunk().ID()] = collections[status.ChunkIndex]
	}

	return block, result, statuses, locators, collMap
}

func TestTransactionOffsetForChunk(t *testing.T) {
	t.Run("first chunk index always returns zero offset", func(t *testing.T) {
		offsetForChunk, err := fetcher.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0)
		require.NoError(t, err)
		assert.Equal(t, uint32(0), offsetForChunk)
	})

	t.Run("offset is calculated", func(t *testing.T) {

		chunksList := []*flow.Chunk{
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 1,
				},
			},
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 2,
				},
			},
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 3,
				},
			},
			{
				ChunkBody: flow.ChunkBody{
					NumberOfTransactions: 5,
				},
			},
		}

		offsetForChunk, err := fetcher.TransactionOffsetForChunk(chunksList, 0)
		require.NoError(t, err)
		assert.Equal(t, uint32(0), offsetForChunk)

		offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 1)
		require.NoError(t, err)
		assert.Equal(t, uint32(1), offsetForChunk)

		offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 2)
		require.NoError(t, err)
		assert.Equal(t, uint32(3), offsetForChunk)

		offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 3)
		require.NoError(t, err)
		assert.Equal(t, uint32(6), offsetForChunk)
	})
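
	// Note: the expected offsets above are the prefix sums of NumberOfTransactions over the
	// preceding chunks: chunk 0 -> 0, chunk 1 -> 1, chunk 2 -> 1+2 = 3, chunk 3 -> 1+2+3 = 6.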

	t.Run("requesting index beyond length triggers error", func(t *testing.T) {

		chunksList := make([]*flow.Chunk, 2)

		_, err := fetcher.TransactionOffsetForChunk(chunksList, 2)
		require.Error(t, err)
	})
}