github.com/koko1123/flow-go-1@v0.29.6/engine/verification/fetcher/engine.go

package fetcher

import (
	"context"
	"fmt"

	"github.com/rs/zerolog"
	"go.opentelemetry.io/otel/attribute"

	"github.com/koko1123/flow-go-1/engine"
	"github.com/koko1123/flow-go-1/model/chunks"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/model/flow/filter"
	"github.com/koko1123/flow-go-1/model/verification"
	"github.com/koko1123/flow-go-1/module"
	"github.com/koko1123/flow-go-1/module/mempool"
	"github.com/koko1123/flow-go-1/module/trace"
	"github.com/koko1123/flow-go-1/network"
	"github.com/koko1123/flow-go-1/state/protocol"
	"github.com/koko1123/flow-go-1/storage"
	"github.com/koko1123/flow-go-1/utils/logging"
)

// Engine implements the fetcher engine functionality. It works between a chunk consumer queue and a verifier engine.
// Its input is an assigned chunk locator from the chunk consumer it is subscribed to.
//
// Its output is a verifiable chunk that it passes to the verifier engine.
//
// Fetcher engine is an AssignedChunkProcessor implementation: it receives the chunks assigned to this verification node from the chunk consumer.
// The assigned chunks are passed on to concurrent executions of its ProcessAssignedChunk method.
//
// On receiving an assigned chunk, the engine requests its chunk data pack through the requester that is attached to it.
// On receiving a chunk data pack response, the fetcher engine validates it, shapes a verifiable chunk out of it, and passes the
// verifiable chunk to the verifier engine.
type Engine struct {
	// common
	unit  *engine.Unit
	state protocol.State // used to verify the origin ID of a chunk data response, and the sealing status.

	// monitoring
	log     zerolog.Logger
	tracer  module.Tracer
	metrics module.VerificationMetrics

	// memory and storage
	pendingChunks mempool.ChunkStatuses     // stores all pending chunks whose chunk data packs have been requested from the requester.
	blocks        storage.Blocks            // used for verifying the collection ID.
	headers       storage.Headers           // used for building verifiable chunk data.
	results       storage.ExecutionResults  // used to retrieve the execution result of an assigned chunk.
	receipts      storage.ExecutionReceipts // used to find the executor IDs of a chunk, for requesting its chunk data pack.

	// output interfaces
	verifier              network.Engine            // used to push verifiable chunks down the verification pipeline.
	requester             ChunkDataPackRequester    // used to request chunk data packs from the network.
	chunkConsumerNotifier module.ProcessingNotifier // used to notify the chunk consumer that the engine is done processing a chunk.

	stopAtHeight uint64
}

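// New creates a fetcher engine with the given dependencies, and registers the
// new engine as the chunk data pack handler of the provided requester.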
func New(
	log zerolog.Logger,
	metrics module.VerificationMetrics,
	tracer module.Tracer,
	verifier network.Engine,
	state protocol.State,
	pendingChunks mempool.ChunkStatuses,
	headers storage.Headers,
	blocks storage.Blocks,
	results storage.ExecutionResults,
	receipts storage.ExecutionReceipts,
	requester ChunkDataPackRequester,
	stopAtHeight uint64,
) *Engine {
	e := &Engine{
		unit:          engine.NewUnit(),
		metrics:       metrics,
		tracer:        tracer,
		log:           log.With().Str("engine", "fetcher").Logger(),
		verifier:      verifier,
		state:         state,
		pendingChunks: pendingChunks,
		blocks:        blocks,
		headers:       headers,
		results:       results,
		receipts:      receipts,
		requester:     requester,
		stopAtHeight:  stopAtHeight,
	}

	e.requester.WithChunkDataPackHandler(e)

	return e
}
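
// A minimal wiring sketch (illustrative only; the surrounding variable names
// are assumptions, not part of this file). The consumer notifier must be set
// before Ready is called, since Ready fails fatally without it:
//
//	e := fetcher.New(log, metrics, tracer, verifierEngine, state,
//		pendingChunks, headers, blocks, results, receipts, requesterEngine, 0)
//	e.WithChunkConsumerNotifier(chunkConsumer)
//	<-e.Ready() // also waits for the attached requester to be ready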

// WithChunkConsumerNotifier sets the processing notifier of the fetcher.
// The fetcher engine uses this notifier to inform the chunk consumer that it is done processing a given chunk, and
// is ready to receive a new chunk to process.
func (e *Engine) WithChunkConsumerNotifier(notifier module.ProcessingNotifier) {
	e.chunkConsumerNotifier = notifier
}

// Ready initializes the engine and returns a channel that is closed when the initialization is done.
func (e *Engine) Ready() <-chan struct{} {
	if e.chunkConsumerNotifier == nil {
		e.log.Fatal().Msg("missing chunk consumer notifier callback in verification fetcher engine")
	}
	return e.unit.Ready(func() {
		<-e.requester.Ready()
	})
}

// Done terminates the engine and returns a channel that is closed when the termination is done.
func (e *Engine) Done() <-chan struct{} {
	return e.unit.Done(func() {
		<-e.requester.Done()
	})
}

// ProcessAssignedChunk is the entry point of the fetcher engine.
// It pushes the assigned chunk down the pipeline.
// Through the pipeline, the chunk data pack for this chunk is requested,
// a verifiable chunk is shaped for it,
// and the verifiable chunk is pushed to the verifier engine for verification.
//
// It should not block, since multiple chunk consumer workers might be calling it concurrently.
// It requests the chunk data pack; once the chunk data pack is received, the verifier engine verifies the chunk.
// Once a chunk has been processed, this engine calls the processing notifier callback to notify
// the chunk consumer so that it can process the next chunk.
func (e *Engine) ProcessAssignedChunk(locator *chunks.Locator) {
	locatorID := locator.ID()
	lg := e.log.With().
		Hex("locator_id", logging.ID(locatorID)).
		Hex("result_id", logging.ID(locator.ResultID)).
		Uint64("chunk_index", locator.Index).
		Logger()

	e.metrics.OnAssignedChunkReceivedAtFetcher()

	// retrieves result and chunk using the locator
	result, err := e.results.ByID(locator.ResultID)
	if err != nil {
		// a missing result for a chunk locator is a fatal error, potentially a database leak.
		lg.Fatal().Err(err).Msg("could not retrieve result for chunk locator")
	}
	chunk := result.Chunks[locator.Index]
	chunkID := chunk.ID()

	lg = lg.With().
		Hex("chunk_id", logging.ID(chunkID)).
		Hex("block_id", logging.ID(chunk.ChunkBody.BlockID)).
		Logger()
	lg.Debug().Msg("result and chunk for locator retrieved")

	requested, blockHeight, err := e.processAssignedChunkWithTracing(chunk, result, locatorID)
	lg = lg.With().Uint64("block_height", blockHeight).Logger()

	if err != nil {
		lg.Fatal().Err(err).Msg("could not process assigned chunk")
	}

	lg.Info().Bool("requested", requested).Msg("assigned chunk processed successfully")

	if requested {
		e.metrics.OnChunkDataPackRequestSentByFetcher()
	}
}

// processAssignedChunkWithTracing encapsulates the logic of processing an assigned chunk with tracing enabled.
func (e *Engine) processAssignedChunkWithTracing(chunk *flow.Chunk, result *flow.ExecutionResult, chunkLocatorID flow.Identifier) (bool, uint64, error) {
	span, _ := e.tracer.StartBlockSpan(e.unit.Ctx(), result.BlockID, trace.VERProcessAssignedChunk)
	span.SetAttributes(attribute.Int("collection_index", int(chunk.CollectionIndex)))
	defer span.End()

	return e.processAssignedChunk(chunk, result, chunkLocatorID)
}

// processAssignedChunk receives an assigned chunk and its result, and requests its chunk data pack from the requester.
// The boolean return value indicates whether the chunk data pack was requested.
func (e *Engine) processAssignedChunk(chunk *flow.Chunk, result *flow.ExecutionResult, chunkLocatorID flow.Identifier) (bool, uint64, error) {
	// skips processing a chunk if it belongs to a sealed block.
	chunkID := chunk.ID()
	sealed, blockHeight, err := e.blockIsSealed(chunk.ChunkBody.BlockID)
	if err != nil {
		return false, 0, fmt.Errorf("could not determine whether block has been sealed: %w", err)
	}
	if sealed {
		e.chunkConsumerNotifier.Notify(chunkLocatorID) // tells consumer that we are done with this chunk.
		return false, blockHeight, nil
	}

	// skips the chunk if it verifies a block at or above the stop height
	if e.stopAtHeight > 0 && blockHeight >= e.stopAtHeight {
		e.log.Warn().Msgf("skipping chunk %s - height %d at or above the requested stop height (%d)", chunkID, blockHeight, e.stopAtHeight)
		e.chunkConsumerNotifier.Notify(chunkLocatorID) // tells consumer that we are done with this chunk.
		return false, blockHeight, nil
	}

	// adds the chunk status as a pending chunk to the mempool.
	status := &verification.ChunkStatus{
		ChunkIndex:      chunk.Index,
		ExecutionResult: result,
		BlockHeight:     blockHeight,
	}
	added := e.pendingChunks.Add(status)
	if !added {
		return false, blockHeight, nil
	}

	err = e.requestChunkDataPack(chunk.Index, chunkID, result.ID(), chunk.BlockID)
	if err != nil {
		return false, blockHeight, fmt.Errorf("could not request chunk data pack: %w", err)
	}

	// requesting a chunk data pack is asynchronous, i.e., once the engine reaches this point,
	// it gracefully waits (without blocking) for the requester
	// until it either delivers the requested chunk data pack
	// or cancels the request (when the chunk belongs to a sealed block).
	//
	// Both these events happen through the requester module calling the fetcher's callbacks.
	// It is during those callbacks that we notify the consumer that we are done with this job.
	return true, blockHeight, nil
}

// HandleChunkDataPack is called by the chunk requester module every time a new requested chunk data pack arrives.
// The chunks are supposed to be deduplicated by the requester.
// So an invocation of this method indicates the arrival of a distinct requested chunk.
func (e *Engine) HandleChunkDataPack(originID flow.Identifier, response *verification.ChunkDataPackResponse) {
	lg := e.log.With().
		Hex("origin_id", logging.ID(originID)).
		Hex("chunk_id", logging.ID(response.Cdp.ChunkID)).
		Logger()

	if response.Cdp.Collection != nil {
		// non-system chunk data packs have a non-nil collection
		lg = lg.With().
			Hex("collection_id", logging.ID(response.Cdp.Collection.ID())).
			Logger()
		lg.Info().Msg("chunk data pack arrived")
	} else {
		lg.Info().Msg("system chunk data pack arrived")
	}

	e.metrics.OnChunkDataPackArrivedAtFetcher()

	// make sure we still need it
	status, exists := e.pendingChunks.Get(response.Index, response.ResultID)
	if !exists {
		lg.Debug().Msg("could not fetch pending status from mempool, dropping chunk data")
		return
	}

	resultID := status.ExecutionResult.ID()
	lg = lg.With().
		Hex("block_id", logging.ID(status.ExecutionResult.BlockID)).
		Uint64("block_height", status.BlockHeight).
		Hex("result_id", logging.ID(resultID)).
		Uint64("chunk_index", status.ChunkIndex).
		Bool("system_chunk", IsSystemChunk(status.ChunkIndex, status.ExecutionResult)).
		Logger()

	span, ctx := e.tracer.StartBlockSpan(context.Background(), status.ExecutionResult.BlockID, trace.VERFetcherHandleChunkDataPack)
	defer span.End()

	processed, err := e.handleChunkDataPackWithTracing(ctx, originID, status, response.Cdp)
	if IsChunkDataPackValidationError(err) {
		lg.Error().Err(err).Msg("could not validate chunk data pack")
		return
	}

	if err != nil {
		// TODO: byzantine fault
		lg.Fatal().Err(err).Msg("could not handle chunk data pack")
		return
	}

	if processed {
		e.metrics.OnVerifiableChunkSentToVerifier()

		// we need to report that the job has been finished eventually
		e.chunkConsumerNotifier.Notify(status.ChunkLocatorID())
		lg.Info().Msg("verifiable chunk pushed to verifier engine")
	}
}

// handleChunkDataPackWithTracing encapsulates the logic of handling a chunk data pack with tracing enabled.
//
// The boolean return value indicates whether the chunk data pack passed validation and its verifiable chunk
// was submitted to the verifier.
// A returned ChunkDataPackValidationError is non-critical (i.e., expected); any other returned error
// is critical and unexpected, and should lead the program to halt.
func (e *Engine) handleChunkDataPackWithTracing(
	ctx context.Context,
	originID flow.Identifier,
	status *verification.ChunkStatus,
	chunkDataPack *flow.ChunkDataPack) (bool, error) {

	// make sure the chunk data pack is valid
	err := e.validateChunkDataPackWithTracing(ctx, status.ChunkIndex, originID, chunkDataPack, status.ExecutionResult)
	if err != nil {
		// the collection of a system chunk data pack is nil, so guard against a
		// nil dereference when reporting the validation error.
		collectionID := flow.ZeroID
		if chunkDataPack.Collection != nil {
			collectionID = chunkDataPack.Collection.ID()
		}
		return false, NewChunkDataPackValidationError(originID,
			status.ExecutionResult.ID(),
			status.ChunkIndex,
			chunkDataPack.ID(),
			chunkDataPack.ChunkID,
			collectionID,
			err)
	}

	processed, err := e.handleValidatedChunkDataPack(ctx, status, chunkDataPack)
	if err != nil {
		return processed, fmt.Errorf("could not handle validated chunk data pack: %w", err)
	}

	return processed, nil
}

// handleValidatedChunkDataPack receives a validated chunk data pack, removes its status from the mempool, and pushes a verifiable chunk for it to the
// verifier engine.
// The boolean return value indicates whether the verifiable chunk was pushed to the verifier.
func (e *Engine) handleValidatedChunkDataPack(ctx context.Context,
	status *verification.ChunkStatus,
	chunkDataPack *flow.ChunkDataPack) (bool, error) {

	removed := e.pendingChunks.Remove(status.ChunkIndex, status.ExecutionResult.ID())
	if !removed {
		// we deduplicate the chunk data responses at this point; reaching here means a
		// duplicate chunk data response is being processed concurrently, so we give up
		// on processing the current one.
		return false, nil
	}

	// pushes the chunk data pack to the verifier, and waits for it to be verified.
	chunk := status.ExecutionResult.Chunks[status.ChunkIndex]
	err := e.pushToVerifierWithTracing(ctx, chunk, status.ExecutionResult, chunkDataPack)
	if err != nil {
		return false, fmt.Errorf("could not push the chunk to verifier engine: %w", err)
	}

	return true, nil
}

// validateChunkDataPackWithTracing encapsulates the logic of validating a chunk data pack with tracing enabled.
func (e *Engine) validateChunkDataPackWithTracing(ctx context.Context,
	chunkIndex uint64,
	senderID flow.Identifier,
	chunkDataPack *flow.ChunkDataPack,
	result *flow.ExecutionResult) error {

	var err error
	e.tracer.WithSpanFromContext(ctx, trace.VERFetcherValidateChunkDataPack, func() {
		err = e.validateChunkDataPack(chunkIndex, senderID, chunkDataPack, result)
	})

	return err
}

// validateChunkDataPack validates the integrity of a received chunk data pack as well as the authenticity of its sender.
// Regarding integrity: the chunk data pack should have a matching start state with the chunk itself, as well as a matching collection ID with the
// given collection.
//
// Regarding authenticity: the chunk data pack should come from a sender that is an authorized execution node at the block of the chunk.
func (e *Engine) validateChunkDataPack(chunkIndex uint64,
	senderID flow.Identifier,
	chunkDataPack *flow.ChunkDataPack,
	result *flow.ExecutionResult) error {

	chunk := result.Chunks[chunkIndex]
	// 1. chunk ID of the chunk data pack should match the chunk ID on the execution result
	expectedChunkID := chunk.ID()
	if chunkDataPack.ChunkID != expectedChunkID {
		return fmt.Errorf("chunk ID of chunk data pack does not match corresponding chunk on execution result, expected: %x, got: %x",
			expectedChunkID, chunkDataPack.ChunkID)
	}

	// 2. sender must be an authorized execution node at that block
	blockID := chunk.BlockID
	authorized := e.validateAuthorizedExecutionNodeAtBlockID(senderID, blockID)
	if !authorized {
		return fmt.Errorf("unauthorized execution node sender at block ID: %x, resultID: %x, chunk ID: %x",
			blockID,
			result.ID(),
			chunk.ID())
	}

	// 3. start state must match
	if chunkDataPack.StartState != chunk.ChunkBody.StartState {
		return engine.NewInvalidInputErrorf("expecting chunk data pack's start state: %x, but got: %x, block ID: %x, resultID: %x, chunk ID: %x",
			chunk.ChunkBody.StartState,
			chunkDataPack.StartState,
			blockID,
			result.ID(),
			chunk.ID())
	}

	// 4. collection ID must match
	err := e.validateCollectionID(chunkDataPack, result, chunk)
	if err != nil {
		return fmt.Errorf("could not validate collection: %w", err)
	}

	return nil
}

// validateCollectionID returns an error for an invalid collection of a chunk data pack,
// and returns nil otherwise.
func (e Engine) validateCollectionID(
	chunkDataPack *flow.ChunkDataPack,
	result *flow.ExecutionResult,
	chunk *flow.Chunk) error {

	if IsSystemChunk(chunk.Index, result) {
		return e.validateSystemChunkCollection(chunkDataPack)
	}

	return e.validateNonSystemChunkCollection(chunkDataPack, chunk)
}

// validateSystemChunkCollection returns nil if the system chunk data pack has a nil collection.
func (e Engine) validateSystemChunkCollection(chunkDataPack *flow.ChunkDataPack) error {
	// the collection of a system chunk should be nil
	if chunkDataPack.Collection != nil {
		return engine.NewInvalidInputErrorf("non-nil collection for system chunk, collection ID: %v, len: %d",
			chunkDataPack.Collection.ID(), chunkDataPack.Collection.Len())
	}

	return nil
}

// validateNonSystemChunkCollection returns nil if the collection matches the non-system chunk data pack.
// A collection is valid against a non-system chunk if it has a matching ID with the
// collection ID of the corresponding guarantee of the chunk in the referenced block payload.
func (e Engine) validateNonSystemChunkCollection(chunkDataPack *flow.ChunkDataPack, chunk *flow.Chunk) error {
	collID := chunkDataPack.Collection.ID()

	block, err := e.blocks.ByID(chunk.BlockID)
	if err != nil {
		return fmt.Errorf("could not get block: %w", err)
	}

	if block.Payload.Guarantees[chunk.Index].CollectionID != collID {
		return engine.NewInvalidInputErrorf("mismatch collection id with guarantee, expected: %v, got: %v",
			block.Payload.Guarantees[chunk.Index].CollectionID,
			collID)
	}

	return nil
}

// validateAuthorizedExecutionNodeAtBlockID validates the sender ID of a chunk data pack response as an authorized
// execution node at the given block ID.
func (e Engine) validateAuthorizedExecutionNodeAtBlockID(senderID flow.Identifier, blockID flow.Identifier) bool {
	snapshot := e.state.AtBlockID(blockID)
	valid, err := protocol.IsNodeAuthorizedWithRoleAt(snapshot, senderID, flow.RoleExecution)

	if err != nil {
		e.log.Fatal().
			Err(err).
			Hex("block_id", logging.ID(blockID)).
			Hex("sender_id", logging.ID(senderID)).
			Msg("could not validate sender identity at specified block ID snapshot as execution node")
	}

	return valid
}

// NotifyChunkDataPackSealed is called by the ChunkDataPackRequester to notify the ChunkDataPackHandler that the specified chunk
// has been sealed and hence the requester will no longer request it.
//
// When the requester calls this callback method, it will never return a chunk data pack for this specified chunk to the handler (i.e.,
// through HandleChunkDataPack).
func (e *Engine) NotifyChunkDataPackSealed(chunkIndex uint64, resultID flow.Identifier) {
	lg := e.log.With().
		Uint64("chunk_index", chunkIndex).
		Hex("result_id", logging.ID(resultID)).
		Logger()

	// we need to report that the job has been finished eventually
	status, exists := e.pendingChunks.Get(chunkIndex, resultID)
	if !exists {
		lg.Debug().
			Msg("could not fetch pending status for sealed chunk from mempool, dropping chunk data")
		return
	}

	chunkLocatorID := status.ChunkLocatorID()
	lg = lg.With().
		Uint64("block_height", status.BlockHeight).
		Hex("result_id", logging.ID(status.ExecutionResult.ID())).Logger()
	removed := e.pendingChunks.Remove(chunkIndex, resultID)

	e.chunkConsumerNotifier.Notify(chunkLocatorID)
	lg.Info().
		Bool("removed", removed).
		Msg("discarded fetching chunk of an already sealed block and notified consumer")
}

// pushToVerifierWithTracing encapsulates the logic of pushing a verifiable chunk to the verifier engine with tracing enabled.
func (e *Engine) pushToVerifierWithTracing(
	ctx context.Context,
	chunk *flow.Chunk,
	result *flow.ExecutionResult,
	chunkDataPack *flow.ChunkDataPack) error {

	var err error
	e.tracer.WithSpanFromContext(ctx, trace.VERFetcherPushToVerifier, func() {
		err = e.pushToVerifier(chunk, result, chunkDataPack)
	})

	return err
}

// pushToVerifier makes verifiable chunk data out of the input and passes it to the verifier for verification.
//
// When this method returns without any error, it means that the verification of the chunk at the verifier engine is done (either successfully
// or unsuccessfully).
func (e *Engine) pushToVerifier(chunk *flow.Chunk,
	result *flow.ExecutionResult,
	chunkDataPack *flow.ChunkDataPack) error {

	header, err := e.headers.ByBlockID(chunk.BlockID)
	if err != nil {
		return fmt.Errorf("could not get block header: %w", err)
	}

	vchunk, err := e.makeVerifiableChunkData(chunk, header, result, chunkDataPack)
	if err != nil {
		return fmt.Errorf("could not make verifiable chunk data: %w", err)
	}

	err = e.verifier.ProcessLocal(vchunk)
	if err != nil {
		return fmt.Errorf("verifier could not verify chunk: %w", err)
	}

	return nil
}

// makeVerifiableChunkData creates and returns verifiable chunk data for the given chunk.
// The verifier engine, which is the last engine in the verification pipeline, uses this verifiable
// chunk data to verify the chunk.
func (e *Engine) makeVerifiableChunkData(chunk *flow.Chunk,
	header *flow.Header,
	result *flow.ExecutionResult,
	chunkDataPack *flow.ChunkDataPack,
) (*verification.VerifiableChunkData, error) {

	// the system chunk is the last chunk
	isSystemChunk := IsSystemChunk(chunk.Index, result)

	endState, err := EndStateCommitment(result, chunk.Index, isSystemChunk)
	if err != nil {
		return nil, fmt.Errorf("could not compute end state of chunk: %w", err)
	}

	transactionOffset, err := TransactionOffsetForChunk(result.Chunks, chunk.Index)
	if err != nil {
		return nil, fmt.Errorf("cannot compute transaction offset for chunk: %w", err)
	}

	return &verification.VerifiableChunkData{
		IsSystemChunk:     isSystemChunk,
		Chunk:             chunk,
		Header:            header,
		Result:            result,
		ChunkDataPack:     chunkDataPack,
		EndState:          endState,
		TransactionOffset: transactionOffset,
	}, nil
}

// requestChunkDataPack creates and dispatches a chunk data pack request to the requester engine.
func (e *Engine) requestChunkDataPack(chunkIndex uint64, chunkID flow.Identifier, resultID flow.Identifier, blockID flow.Identifier) error {
	agrees, disagrees, err := e.getAgreeAndDisagreeExecutors(blockID, resultID)
	if err != nil {
		return fmt.Errorf("could not segregate the agree and disagree executors for result: %x of block: %x: %w", resultID, blockID, err)
	}

	header, err := e.headers.ByBlockID(blockID)
	if err != nil {
		return fmt.Errorf("could not get header for block: %x: %w", blockID, err)
	}

	allExecutors, err := e.state.AtBlockID(blockID).Identities(filter.HasRole(flow.RoleExecution))
	if err != nil {
		return fmt.Errorf("could not fetch execution node ids at block %x: %w", blockID, err)
	}

	request := &verification.ChunkDataPackRequest{
		Locator: chunks.Locator{
			ResultID: resultID,
			Index:    chunkIndex,
		},
		ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{
			ChunkID:   chunkID,
			Height:    header.Height,
			Agrees:    agrees,
			Disagrees: disagrees,
			Targets:   allExecutors,
		},
	}

	e.requester.Request(request)

	return nil
}

// getAgreeAndDisagreeExecutors segregates the execution node identifiers for the given block into agree and
// disagree sets, based on the given execution result ID.
// The agree set contains the executors who produced receipts with the same result as the given result ID.
// The disagree set contains the executors who produced receipts with a different result than the given result ID.
func (e *Engine) getAgreeAndDisagreeExecutors(blockID flow.Identifier, resultID flow.Identifier) (flow.IdentifierList, flow.IdentifierList, error) {
	receipts, err := e.receipts.ByBlockID(blockID)
	if err != nil {
		return nil, nil, fmt.Errorf("could not retrieve receipts for block: %v: %w", blockID, err)
	}

	agrees, disagrees := executorsOf(receipts, resultID)
	return agrees, disagrees, nil
}

// blockIsSealed returns whether the block with the given ID is sealed, along with the block's height.
func (e Engine) blockIsSealed(blockID flow.Identifier) (bool, uint64, error) {
	// TODO: as an optimization, we can keep a record of the last sealed height in a local variable.
	header, err := e.headers.ByBlockID(blockID)
	if err != nil {
		return false, 0, fmt.Errorf("could not get block: %w", err)
	}

	lastSealed, err := e.state.Sealed().Head()
	if err != nil {
		return false, 0, fmt.Errorf("could not get last sealed: %w", err)
	}

	sealed := header.Height <= lastSealed.Height
	return sealed, header.Height, nil
}

// executorsOf segregates the executors of the given receipts based on the given execution result ID.
// The agree set contains the executors who produced receipts with the same result as the given result ID.
// The disagree set contains the executors who produced receipts with a different result than the given result ID.
func executorsOf(receipts []*flow.ExecutionReceipt, resultID flow.Identifier) (flow.IdentifierList, flow.IdentifierList) {
	var agrees flow.IdentifierList
	var disagrees flow.IdentifierList

	for _, receipt := range receipts {
		executor := receipt.ExecutorID

		if receipt.ExecutionResult.ID() == resultID {
			agrees = append(agrees, executor)
		} else {
			disagrees = append(disagrees, executor)
		}
	}

	return agrees, disagrees
}
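
// As an illustration (hypothetical identifiers): if executors E1 and E2 issued
// receipts committing to result R, and executor E3 issued a receipt for a
// different result R', then executorsOf(receipts, R) yields
// agrees = [E1, E2] and disagrees = [E3].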

// EndStateCommitment computes the end state of the given chunk.
func EndStateCommitment(result *flow.ExecutionResult, chunkIndex uint64, systemChunk bool) (flow.StateCommitment, error) {
	var endState flow.StateCommitment
	if systemChunk {
		var err error
		// the last chunk in a result is the system chunk and takes the final state commitment
		endState, err = result.FinalStateCommitment()
		if err != nil {
			return flow.DummyStateCommitment, fmt.Errorf("can not read final state commitment, likely a bug: %w", err)
		}
	} else {
		// any chunk except the last takes the subsequent chunk's start state
		endState = result.Chunks[chunkIndex+1].StartState
	}

	return endState, nil
}
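
// For example (hypothetical result with three chunks, where index 2 is the
// system chunk):
//
//	EndStateCommitment(result, 0, false) // == result.Chunks[1].StartState
//	EndStateCommitment(result, 1, false) // == result.Chunks[2].StartState
//	EndStateCommitment(result, 2, true)  // == result.FinalStateCommitment()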

// TransactionOffsetForChunk calculates the transaction offset for a given chunk, which is the index of the first
// transaction of this chunk within the whole block.
func TransactionOffsetForChunk(chunks flow.ChunkList, chunkIndex uint64) (uint32, error) {
	if int(chunkIndex) > len(chunks)-1 {
		return 0, fmt.Errorf("chunk list out of bounds, len %d asked for chunk %d", len(chunks), chunkIndex)
	}
	var offset uint32 = 0
	for i := 0; i < int(chunkIndex); i++ {
		offset += uint32(chunks[i].NumberOfTransactions)
	}
	return offset, nil
}
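
// For instance (illustrative numbers), if the chunks of a block contain 3, 5,
// and 2 transactions respectively, then:
//
//	TransactionOffsetForChunk(chunks, 0) // == 0
//	TransactionOffsetForChunk(chunks, 1) // == 3
//	TransactionOffsetForChunk(chunks, 2) // == 3 + 5 == 8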

// IsSystemChunk returns true if `chunkIndex` points to a system chunk in `result`.
// Otherwise, it returns false.
// In the current version, a chunk is a system chunk if it is the last chunk of the
// execution result.
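// For example, in a result with four chunks, only chunkIndex == 3 denotes the system chunk.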
func IsSystemChunk(chunkIndex uint64, result *flow.ExecutionResult) bool {
	return chunkIndex == uint64(len(result.Chunks)-1)
}