github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/module/builder/consensus/builder.go

package consensus

import (
	"context"
	"fmt"
	"time"

	"github.com/dgraph-io/badger/v2"
	otelTrace "go.opentelemetry.io/otel/trace"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/flow/filter/id"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/mempool"
	"github.com/onflow/flow-go/module/trace"
	"github.com/onflow/flow-go/state/fork"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/state/protocol/blocktimer"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/badger/operation"
)

// Builder is the builder for consensus block payloads. Upon providing a payload
// hash, it also memorizes which entities were included in the payload.
type Builder struct {
	metrics              module.MempoolMetrics
	tracer               module.Tracer
	db                   *badger.DB
	state                protocol.ParticipantState
	seals                storage.Seals
	headers              storage.Headers
	index                storage.Index
	blocks               storage.Blocks
	resultsDB            storage.ExecutionResults
	receiptsDB           storage.ExecutionReceipts
	guarPool             mempool.Guarantees
	sealPool             mempool.IncorporatedResultSeals
	recPool              mempool.ExecutionTree
	mutableProtocolState protocol.MutableProtocolState
	cfg                  Config
}

// NewBuilder creates a new block builder.
func NewBuilder(
	metrics module.MempoolMetrics,
	db *badger.DB,
	state protocol.ParticipantState,
	headers storage.Headers,
	seals storage.Seals,
	index storage.Index,
	blocks storage.Blocks,
	resultsDB storage.ExecutionResults,
	receiptsDB storage.ExecutionReceipts,
	mutableProtocolState protocol.MutableProtocolState,
	guarPool mempool.Guarantees,
	sealPool mempool.IncorporatedResultSeals,
	recPool mempool.ExecutionTree,
	tracer module.Tracer,
	options ...func(*Config),
) (*Builder, error) {

	blockTimer, err := blocktimer.NewBlockTimer(500*time.Millisecond, 10*time.Second)
	if err != nil {
		return nil, fmt.Errorf("could not create default block timer: %w", err)
	}

	// initialize default config
	cfg := Config{
		blockTimer:        blockTimer,
		maxSealCount:      100,
		maxGuaranteeCount: 100,
		maxReceiptCount:   200,
		expiry:            flow.DefaultTransactionExpiry,
	}

	// apply option parameters
	for _, option := range options {
		option(&cfg)
	}

	b := &Builder{
		metrics:              metrics,
		db:                   db,
		tracer:               tracer,
		state:                state,
		headers:              headers,
		seals:                seals,
		index:                index,
		blocks:               blocks,
		resultsDB:            resultsDB,
		receiptsDB:           receiptsDB,
		guarPool:             guarPool,
		sealPool:             sealPool,
		recPool:              recPool,
		mutableProtocolState: mutableProtocolState,
		cfg:                  cfg,
	}

	err = b.repopulateExecutionTree()
	if err != nil {
		return nil, fmt.Errorf("could not repopulate execution tree: %w", err)
	}

	return b, nil
}
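
// Example (editor's illustrative sketch, not part of the original code): from within
// this package (e.g. a test), a Builder could be constructed with the caller's
// storages, mempools, and tracer, tuning limits via the variadic functional options.
// The variable names (metrics, db, state, ...) are placeholders for values the
// caller already holds; the Config fields are the ones defined in this package.
//
//	builder, err := NewBuilder(
//		metrics, db, state, headers, seals, index, blocks,
//		resultsDB, receiptsDB, mutableProtocolState,
//		guarPool, sealPool, recPool, tracer,
//		func(cfg *Config) { cfg.maxSealCount = 50 },      // cap seals per payload
//		func(cfg *Config) { cfg.maxGuaranteeCount = 50 }, // cap guarantees per payload
//	)
//	if err != nil {
//		return fmt.Errorf("could not create consensus builder: %w", err)
//	}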

// BuildOn creates a new block proposal on top of the provided parent. The
// caller-provided setter function is applied to fill in consensus-specific header
// fields (such as the view), and the sign function signs the completed header,
// before the block is stored and the protocol state is extended with it.
func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) error, sign func(*flow.Header) error) (*flow.Header, error) {

	// since we don't know the blockID when building the block, we track the
	// time indirectly and insert the span directly at the end

	startTime := time.Now()

	// get the collection guarantees to insert in the payload
	insertableGuarantees, err := b.getInsertableGuarantees(parentID)
	if err != nil {
		return nil, fmt.Errorf("could not insert guarantees: %w", err)
	}

	// get the receipts to insert in the payload
	insertableReceipts, err := b.getInsertableReceipts(parentID)
	if err != nil {
		return nil, fmt.Errorf("could not insert receipts: %w", err)
	}

	// get the seals to insert in the payload
	insertableSeals, err := b.getInsertableSeals(parentID)
	if err != nil {
		return nil, fmt.Errorf("could not insert seals: %w", err)
	}

	// assemble the block proposal
	proposal, err := b.createProposal(parentID,
		insertableGuarantees,
		insertableSeals,
		insertableReceipts,
		setter,
		sign)
	if err != nil {
		return nil, fmt.Errorf("could not assemble proposal: %w", err)
	}

	span, ctx := b.tracer.StartBlockSpan(context.Background(), proposal.ID(), trace.CONBuilderBuildOn, otelTrace.WithTimestamp(startTime))
	defer span.End()

	err = b.state.Extend(ctx, proposal)
	if err != nil {
		return nil, fmt.Errorf("could not extend state with built proposal: %w", err)
	}

	return proposal.Header, nil
}
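
// Example (editor's illustrative sketch, not part of the original code): BuildOn is
// driven by the consensus (HotStuff) layer. A caller supplies the parent block ID
// plus two callbacks: a setter that fills in consensus-specific header fields (such
// as the view), and a sign callback that signs the completed header. The field and
// variable names below (view, signer) are placeholders.
//
//	header, err := builder.BuildOn(parentID,
//		func(h *flow.Header) error {
//			h.View = view // consensus-specific fields, set by the caller
//			return nil
//		},
//		func(h *flow.Header) error {
//			return signer.Sign(h) // hypothetical signer
//		},
//	)
//	if err != nil {
//		return fmt.Errorf("could not build block on %x: %w", parentID, err)
//	}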

// repopulateExecutionTree restores the latest state of the execution tree mempool
// based on local chain state information. Repopulating the execution tree is split
// into two parts:
// 1) traverse all finalized blocks backwards, from the last finalized block down to
//    the last sealed block, i.e. the height range [lastSealedHeight, lastFinalizedHeight];
// 2) traverse all unfinalized (pending) blocks descending from the last finalized block.
// For each block traversed, we collect its execution results and add them to the execution tree.
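// Worked example (editor's note, heights are illustrative): if the last sealed block
// is at height 100 and the last finalized block is at height 103, part (1) walks
// 103 -> 102 -> 101 (excluding the sealed block 100 itself), and part (2) visits all
// known pending descendants of block 103, adding every stored receipt along the way.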
func (b *Builder) repopulateExecutionTree() error {
	finalizedSnapshot := b.state.Final()
	finalized, err := finalizedSnapshot.Head()
	if err != nil {
		return fmt.Errorf("could not retrieve finalized block: %w", err)
	}
	finalizedID := finalized.ID()

	// Get the latest sealed block on this fork, i.e. the highest
	// block for which there is a finalized seal.
	latestSeal, err := b.seals.HighestInFork(finalizedID)
	if err != nil {
		return fmt.Errorf("could not retrieve latest seal in fork with head %x: %w", finalizedID, err)
	}
	latestSealedBlockID := latestSeal.BlockID
	latestSealedBlock, err := b.headers.ByBlockID(latestSealedBlockID)
	if err != nil {
		return fmt.Errorf("could not retrieve latest sealed block (%x): %w", latestSeal.BlockID, err)
	}
	sealedResult, err := b.resultsDB.ByID(latestSeal.ResultID)
	if err != nil {
		return fmt.Errorf("could not retrieve sealed result (%x): %w", latestSeal.ResultID, err)
	}

	// prune execution tree to minimum height (while the tree is still empty, for max efficiency)
	err = b.recPool.PruneUpToHeight(latestSealedBlock.Height)
	if err != nil {
		return fmt.Errorf("could not prune execution tree to height %d: %w", latestSealedBlock.Height, err)
	}

	// At initialization, the execution tree is empty. However, during normal operations, we
	// generally query the tree for "all receipts, whose results are derived from the latest
	// sealed and finalized result". This requires the execution tree to know what the latest
	// sealed and finalized result is, so we add it here.
	// Note: we only add the sealed and finalized result, without any Execution Receipts. This
	// is sufficient to create a vertex in the tree. Thereby, we can traverse the tree, starting
	// from the sealed and finalized result, to find derived results and their respective receipts.
	err = b.recPool.AddResult(sealedResult, latestSealedBlock)
	if err != nil {
		return fmt.Errorf("failed to add sealed result as vertex to ExecutionTree (%x): %w", latestSeal.ResultID, err)
	}

	// receiptCollector adds _all known_ receipts for the given block to the execution tree
	receiptCollector := func(header *flow.Header) error {
		receipts, err := b.receiptsDB.ByBlockID(header.ID())
		if err != nil {
			return fmt.Errorf("could not retrieve execution receipts for block %x: %w", header.ID(), err)
		}
		for _, receipt := range receipts {
			_, err = b.recPool.AddReceipt(receipt, header)
			if err != nil {
				return fmt.Errorf("could not add receipt (%x) to execution tree: %w", receipt.ID(), err)
			}
		}
		return nil
	}

	// Traverse the chain backwards and add all known receipts for any finalized, unsealed block
	// to the execution tree. Thereby, we add a superset of all unsealed execution results to the
	// execution tree.
	err = fork.TraverseBackward(b.headers, finalizedID, receiptCollector, fork.ExcludingBlock(latestSealedBlockID))
	if err != nil {
		return fmt.Errorf("failed to traverse unsealed, finalized blocks: %w", err)
	}

	// At this point the execution tree is filled with all results for blocks (lastSealedBlock, lastFinalizedBlock].
	// Now, we add all known receipts for any valid block that descends from the latest finalized block:
	validPending, err := finalizedSnapshot.Descendants()
	if err != nil {
		return fmt.Errorf("could not retrieve valid pending blocks from finalized snapshot: %w", err)
	}
	for _, blockID := range validPending {
		block, err := b.headers.ByBlockID(blockID)
		if err != nil {
			return fmt.Errorf("could not retrieve header for unfinalized block %x: %w", blockID, err)
		}
		err = receiptCollector(block)
		if err != nil {
			return fmt.Errorf("failed to add receipts for unfinalized block %x at height %d: %w", blockID, block.Height, err)
		}
	}

	return nil
}

// getInsertableGuarantees returns the list of CollectionGuarantees that should
// be inserted in the next payload. It looks in the collection mempool and
// applies the following filters:
//
// 1) If it was already included in the fork, skip.
//
// 2) If it references an unknown block, skip.
//
// 3) If the referenced block has an expired height, skip.
//
// 4) Otherwise, this guarantee can be included in the payload.
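// Illustration (editor's note): a guarantee whose reference block lies on the fork
// and is within the expiry window passes; one already included in an ancestor
// payload is skipped as a duplicate (filter 1), and one referencing a block that is
// unknown or older than the expiry window is skipped (filters 2 and 3).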
func (b *Builder) getInsertableGuarantees(parentID flow.Identifier) ([]*flow.CollectionGuarantee, error) {

	// we look back only as far as the expiry limit for the height we are building
	// at; any guarantee with a reference block below that height can no longer be
	// included anyway
	parent, err := b.headers.ByBlockID(parentID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve parent: %w", err)
	}
	height := parent.Height + 1
	limit := height - uint64(b.cfg.expiry)
	if limit > height { // overflow check
		limit = 0
	}
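	// Editor's note (illustrative numbers): with an expiry of 600 blocks and a parent
	// at height 999, we build at height 1000 and only accept reference blocks at
	// height >= 400. If the chain is younger than the expiry window, the subtraction
	// would wrap around, so the overflow check above resets limit to 0 (and the
	// root-height clamp below raises it if needed).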

	// look up the root height so we don't look too far back;
	// initially this is the genesis block height (aka 0).
	var rootHeight uint64
	err = b.db.View(operation.RetrieveRootHeight(&rootHeight))
	if err != nil {
		return nil, fmt.Errorf("could not retrieve root block height: %w", err)
	}
	if limit < rootHeight {
		limit = rootHeight
	}

	// blockLookup keeps track of the blocks from limit to parent
	blockLookup := make(map[flow.Identifier]struct{})

	// collLookup keeps track of the collections contained in blocks between
	// limit and parent
	collLookup := make(map[flow.Identifier]struct{})

	// loop through the fork backwards, from parent to limit (inclusive),
	// and keep track of blocks and collections visited on the way
	forkScanner := func(header *flow.Header) error {
		ancestorID := header.ID()
		blockLookup[ancestorID] = struct{}{}

		index, err := b.index.ByBlockID(ancestorID)
		if err != nil {
			return fmt.Errorf("could not get ancestor payload (%x): %w", ancestorID, err)
		}

		for _, collID := range index.CollectionIDs {
			collLookup[collID] = struct{}{}
		}

		return nil
	}
	err = fork.TraverseBackward(b.headers, parentID, forkScanner, fork.IncludingHeight(limit))
	if err != nil {
		return nil, fmt.Errorf("internal error building set of CollectionGuarantees on fork: %w", err)
	}

	// go through the mempool and collect valid collections
	var guarantees []*flow.CollectionGuarantee
	for _, guarantee := range b.guarPool.All() {
		// add at most <maxGuaranteeCount> collection guarantees to a new block proposal,
		// to prevent the block payload from becoming too big or computationally heavy
		// for the execution nodes
		if uint(len(guarantees)) >= b.cfg.maxGuaranteeCount {
			break
		}

		collID := guarantee.ID()

		// skip collections that are already included in a block on the fork
		_, duplicated := collLookup[collID]
		if duplicated {
			continue
		}

		// skip guarantees whose reference block is unknown or outside the expiry window
		_, ok := blockLookup[guarantee.ReferenceBlockID]
		if !ok {
			continue
		}

		guarantees = append(guarantees, guarantee)
	}

	return guarantees, nil
}

// getInsertableSeals returns the list of Seals from the mempool that should be
// inserted in the next payload.
// Per protocol definition, a specific result is only incorporated _once_ in each fork.
// Specifically, the result is incorporated in the block that contains a receipt committing
// to a result for the _first time_ in the respective fork.
// We can seal a result if and only if _all_ of the following conditions are satisfied:
//
//   - (0) We have collected a sufficient number of approvals for each of the result's chunks.
//   - (1) The result must have been previously incorporated in the fork, which we are extending.
//     Note: The protocol dictates that all incorporated results must be for ancestor blocks
//     in the respective fork. Hence, a result being incorporated in the fork, implies
//     that the result must be for a block in this fork.
//   - (2) The result must be for an _unsealed_ block.
//   - (3) The result's parent must have been previously sealed (either by a seal in an ancestor
//     block or by a seal included earlier in the block that we are constructing).
//
// To limit block size, we cap the number of seals to maxSealCount.
func (b *Builder) getInsertableSeals(parentID flow.Identifier) ([]*flow.Seal, error) {
	// get the latest seal in the fork, which we are extending, and
	// the corresponding block, whose result is sealed
	// Note: the last seal might not be included in a finalized block yet
	lastSeal, err := b.seals.HighestInFork(parentID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve latest seal in the fork, which we are extending: %w", err)
	}
	latestSealedBlockID := lastSeal.BlockID
	latestSealedBlock, err := b.headers.ByBlockID(latestSealedBlockID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve sealed block %x: %w", lastSeal.BlockID, err)
	}
	latestSealedHeight := latestSealedBlock.Height

	// STEP I: Collect the seals for all results that satisfy (0), (1), and (2).
	//         This will give us a _superset_ of all seals that can be included.
	// Implementation:
	//  * We walk the fork backwards and check each block for incorporated results.
	//    - Therefore, all results that we encounter satisfy condition (1).
	//  * We only consider results, whose executed block has a height _strictly larger_
	//    than the lastSealedHeight.
	//    - Thereby, we guarantee that condition (2) is satisfied.
	//  * We only consider results for which we have a candidate seal in the sealPool.
	//    - Thereby, we guarantee that condition (0) is satisfied, because candidate seals
	//      are only generated and stored in the mempool once sufficient approvals are collected.
	// Furthermore, condition (2) imposes a limit on how far we have to walk back:
	//  * A result can only be incorporated in a child of the block that it computes.
	//    Therefore, we only have to inspect the results incorporated in unsealed blocks.
	sealsSuperset := make(map[uint64][]*flow.IncorporatedResultSeal) // map: executedBlock.Height -> candidate Seals
	sealCollector := func(header *flow.Header) error {
		blockID := header.ID()
		if blockID == parentID {
			// Important protocol edge case: There must be at least one block in between the block incorporating
			// a result and the block sealing the result. This is because we need the Source of Randomness for
			// the block that _incorporates_ the result, to compute the verifier assignment. Therefore, we require
			// that the block _incorporating_ the result has at least one child in the fork, _before_ we include
			// the seal. Thereby, we guarantee that a verifier assignment can be computed without needing
			// information from the block that we are just constructing. Hence, we don't consider results for
			// sealing that were incorporated in the immediate parent which we are extending.
			return nil
		}

		index, err := b.index.ByBlockID(blockID)
		if err != nil {
			return fmt.Errorf("could not retrieve index for block %x: %w", blockID, err)
		}

		// enforce condition (1): only consider seals for results that are incorporated in the fork
		for _, resultID := range index.ResultIDs {
			result, err := b.resultsDB.ByID(resultID)
			if err != nil {
				return fmt.Errorf("could not retrieve execution result %x: %w", resultID, err)
			}

			// re-assemble the IncorporatedResult because we need its ID to
			// check if it is in the seal mempool.
			incorporatedResult := flow.NewIncorporatedResult(
				blockID,
				result,
			)

			// enforce condition (0): candidate seals are only constructed once sufficient
			// approvals have been collected. Hence, any incorporated result for which we
			// find a candidate seal satisfies condition (0)
			irSeal, ok := b.sealPool.ByID(incorporatedResult.ID())
			if !ok {
				continue
			}

			// enforce condition (2): the block is unsealed (in this fork) if and only if
			// its height is _strictly larger_ than the lastSealedHeight.
			executedBlock, err := b.headers.ByBlockID(incorporatedResult.Result.BlockID)
			if err != nil {
				return fmt.Errorf("could not get header of block %x: %w", incorporatedResult.Result.BlockID, err)
			}
			if executedBlock.Height <= latestSealedHeight {
				continue
			}

			// The following is a subtle but important protocol edge case: There can be multiple
			// candidate seals for the same block. We have to include all to guarantee sealing liveness!
			sealsSuperset[executedBlock.Height] = append(sealsSuperset[executedBlock.Height], irSeal)
		}

		return nil
	}
	err = fork.TraverseBackward(b.headers, parentID, sealCollector, fork.ExcludingBlock(latestSealedBlockID))
	if err != nil {
		return nil, fmt.Errorf("internal error traversing unsealed section of fork: %w", err)
	}
	// All the seals in sealsSuperset are for results that satisfy (0), (1), and (2).

	// STEP II: Select only the seals from sealsSuperset that also satisfy condition (3).
	// We do this by starting with the last sealed result in the fork. Then, we check whether we
	// have a seal for the child block (at latestSealedBlock.Height + 1) which connects to the
	// sealed result. If we find such a seal, we can now consider the child block sealed.
	// We continue until we stop finding a seal for the child.
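	// Editor's worked example (illustrative heights): if lastSeal is for height 100 and
	// sealsSuperset holds candidate seals for heights 101, 102, and 104, the loop first
	// picks a seal for 101 whose result descends from the sealed result, then one for
	// 102, and stops at height 103 because no candidate seal is available there; the
	// seal for 104 is left for a later block.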
	seals := make([]*flow.Seal, 0, len(sealsSuperset))
	for {
		// cap the number of seals
		if uint(len(seals)) >= b.cfg.maxSealCount {
			break
		}

		// enforce condition (3):
		candidateSeal, ok := connectingSeal(sealsSuperset[latestSealedHeight+1], lastSeal)
		if !ok {
			break
		}
		seals = append(seals, candidateSeal)
		lastSeal = candidateSeal
		latestSealedHeight += 1
	}
	return seals, nil
}

// connectingSeal looks through `sealsForNextBlock` and returns the first candidate
// seal whose sealed result directly descends from the lastSealed result (i.e. whose
// PreviousResultID matches lastSealed.ResultID). The boolean return indicates
// whether such a seal was found.
func connectingSeal(sealsForNextBlock []*flow.IncorporatedResultSeal, lastSealed *flow.Seal) (*flow.Seal, bool) {
	for _, candidateSeal := range sealsForNextBlock {
		if candidateSeal.IncorporatedResult.Result.PreviousResultID == lastSealed.ResultID {
			return candidateSeal.Seal, true
		}
	}
	return nil, false
}

// InsertableReceipts bundles the receipt metadata and execution results that the
// builder will insert into the payload under construction.
type InsertableReceipts struct {
	receipts []*flow.ExecutionReceiptMeta
	results  []*flow.ExecutionResult
}

// getInsertableReceipts constructs:
//   - (i)  the meta information of the ExecutionReceipts (i.e. ExecutionReceiptMeta)
//     that should be inserted in the next payload
//   - (ii) the ExecutionResults the receipts from step (i) commit to
//     (deduplicated w.r.t. the block under construction as well as ancestor blocks)
//
// It looks in the receipts mempool and applies the following filters:
//
// 1) If it doesn't correspond to an unsealed block on the fork, skip it.
//
// 2) If it was already included in the fork, skip it.
//
// 3) Otherwise, this receipt can be included in the payload.
//
// Receipts have to be ordered by block height.
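// Illustration (editor's note): a receipt whose executed block is already sealed, or
// lies on a different fork, fails filter (1); a receipt whose ID already appears in
// an ancestor payload of this fork fails filter (2); anything else is a candidate,
// subject to the maxReceiptCount cap applied in toInsertables.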
func (b *Builder) getInsertableReceipts(parentID flow.Identifier) (*InsertableReceipts, error) {

	// Get the latest sealed block on this fork, i.e. the highest block for which
	// there is a seal in this fork. This block is not necessarily finalized.
	latestSeal, err := b.seals.HighestInFork(parentID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve parent seal (%x): %w", parentID, err)
	}
	sealedBlockID := latestSeal.BlockID

	// ancestors is used to keep the IDs of the ancestor blocks we iterate through.
	// We use it to skip receipts that are not for unsealed blocks in the fork.
	ancestors := make(map[flow.Identifier]struct{})

	// includedReceipts is a set of all receipts that are contained in unsealed blocks along the fork.
	includedReceipts := make(map[flow.Identifier]struct{})

	// includedResults is a set of all unsealed results that were incorporated into the fork
	includedResults := make(map[flow.Identifier]struct{})

	// loop through the fork backwards, from parent to last sealed (inclusive),
	// and keep track of blocks and receipts visited on the way.
	forkScanner := func(ancestor *flow.Header) error {
		ancestorID := ancestor.ID()
		ancestors[ancestorID] = struct{}{}

		index, err := b.index.ByBlockID(ancestorID)
		if err != nil {
			return fmt.Errorf("could not get payload index of block %x: %w", ancestorID, err)
		}
		for _, recID := range index.ReceiptIDs {
			includedReceipts[recID] = struct{}{}
		}
		for _, resID := range index.ResultIDs {
			includedResults[resID] = struct{}{}
		}

		return nil
	}
	err = fork.TraverseBackward(b.headers, parentID, forkScanner, fork.IncludingBlock(sealedBlockID))
	if err != nil {
		return nil, fmt.Errorf("internal error building set of insertable receipts on fork: %w", err)
	}

	isResultForUnsealedBlock := isResultForBlock(ancestors)
	isReceiptUniqueAndUnsealed := isNoDupAndNotSealed(includedReceipts, sealedBlockID)
	// find all receipts:
	// 1) whose result connects all the way to the last sealed result
	// 2) that are unique (never seen in unsealed blocks)
	receipts, err := b.recPool.ReachableReceipts(latestSeal.ResultID, isResultForUnsealedBlock, isReceiptUniqueAndUnsealed)
	// Occurrence of UnknownExecutionResultError:
	// Populating the execution tree with receipts from incoming blocks happens concurrently in
	// matching.Core. Hence, the following edge case can occur (rarely): matching.Core is
	// just in the process of populating the Execution Tree with the receipts from the
	// latest blocks, while the builder is already trying to build on top. In this rare
	// situation, the Execution Tree might not yet know the latest sealed result.
	// TODO: we should probably remove this edge case by _synchronously_ populating
	//       the Execution Tree in the Fork's finalizationCallback
	if err != nil && !mempool.IsUnknownExecutionResultError(err) {
		return nil, fmt.Errorf("failed to retrieve reachable receipts from mempool: %w", err)
	}

	insertables := toInsertables(receipts, includedResults, b.cfg.maxReceiptCount)
	return insertables, nil
}

// toInsertables separates the provided receipts into ExecutionReceiptMeta and
// ExecutionResult. Results that are in includedResults are skipped.
// We also limit the number of receipts to maxReceiptCount.
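// Example (editor's note): if two receipts from different executors commit to the
// same result, both ExecutionReceiptMetas are kept but the shared ExecutionResult is
// emitted only once (and not at all if it already appears in includedResults).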
func toInsertables(receipts []*flow.ExecutionReceipt, includedResults map[flow.Identifier]struct{}, maxReceiptCount uint) *InsertableReceipts {
	results := make([]*flow.ExecutionResult, 0)

	count := uint(len(receipts))
	// don't collect more than maxReceiptCount receipts
	if count > maxReceiptCount {
		count = maxReceiptCount
	}

	filteredReceipts := make([]*flow.ExecutionReceiptMeta, 0, count)

	for i := uint(0); i < count; i++ {
		receipt := receipts[i]
		meta := receipt.Meta()
		resultID := meta.ResultID
		if _, inserted := includedResults[resultID]; !inserted {
			results = append(results, &receipt.ExecutionResult)
			includedResults[resultID] = struct{}{}
		}

		filteredReceipts = append(filteredReceipts, meta)
	}

	return &InsertableReceipts{
		receipts: filteredReceipts,
		results:  results,
	}
}

// createProposal assembles a block proposal on top of the given parent, with the
// provided guarantees, seals, and receipts/results as payload. The setter callback
// fills in consensus-specific header fields and the sign callback signs the
// completed header.
func (b *Builder) createProposal(parentID flow.Identifier,
	guarantees []*flow.CollectionGuarantee,
	seals []*flow.Seal,
	insertableReceipts *InsertableReceipts,
	setter func(*flow.Header) error,
	sign func(*flow.Header) error,
) (*flow.Block, error) {

	parent, err := b.headers.ByBlockID(parentID)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve parent: %w", err)
	}

	timestamp := b.cfg.blockTimer.Build(parent.Timestamp)

	// construct default block on top of the provided parent
	header := &flow.Header{
		ChainID:     parent.ChainID,
		ParentID:    parentID,
		Height:      parent.Height + 1,
		Timestamp:   timestamp,
		PayloadHash: flow.ZeroID,
	}

	// Apply the custom fields setter of the consensus algorithm. We must do this before
	// evolving the protocol state (which applies service events), since we need to know
	// the correct view of the block.
	err = setter(header)
	if err != nil {
		return nil, fmt.Errorf("could not apply setter: %w", err)
	}

	// Evolve the Protocol State starting from the parent block's state. Information that may change the state is:
	// the candidate block's view and Service Events from execution results sealed in the candidate block.
	protocolStateID, _, err := b.mutableProtocolState.EvolveState(header.ParentID, header.View, seals)
	if err != nil {
		return nil, fmt.Errorf("evolving protocol state failed: %w", err)
	}

	proposal := &flow.Block{
		Header: header,
	}
	proposal.SetPayload(flow.Payload{
		Guarantees:      guarantees,
		Seals:           seals,
		Receipts:        insertableReceipts.receipts,
		Results:         insertableReceipts.results,
		ProtocolStateID: protocolStateID,
	})

	// sign the proposal
	err = sign(header)
	if err != nil {
		return nil, fmt.Errorf("could not sign the proposal: %w", err)
	}

	return proposal, nil
}

// isResultForBlock constructs a mempool.BlockFilter that accepts only blocks whose ID is part of the given set.
func isResultForBlock(blockIDs map[flow.Identifier]struct{}) mempool.BlockFilter {
	blockIdFilter := id.InSet(blockIDs)
	return func(h *flow.Header) bool {
		return blockIdFilter(h.ID())
	}
}

// isNoDupAndNotSealed constructs a mempool.ReceiptFilter for discarding receipts that
// * are duplicates
// * or are for the sealed block
func isNoDupAndNotSealed(includedReceipts map[flow.Identifier]struct{}, sealedBlockID flow.Identifier) mempool.ReceiptFilter {
	return func(receipt *flow.ExecutionReceipt) bool {
		if _, duplicate := includedReceipts[receipt.ID()]; duplicate {
			return false
		}
		if receipt.ExecutionResult.BlockID == sealedBlockID {
			return false
		}
		return true
	}
}