github.com/koko1123/flow-go-1@v0.29.6/module/builder/consensus/builder.go

// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED

package consensus

import (
    "context"
    "fmt"
    "time"

    "github.com/dgraph-io/badger/v3"
    otelTrace "go.opentelemetry.io/otel/trace"

    "github.com/koko1123/flow-go-1/model/flow"
    "github.com/koko1123/flow-go-1/model/flow/filter/id"
    "github.com/koko1123/flow-go-1/module"
    "github.com/koko1123/flow-go-1/module/mempool"
    "github.com/koko1123/flow-go-1/module/trace"
    "github.com/koko1123/flow-go-1/state/fork"
    "github.com/koko1123/flow-go-1/state/protocol"
    "github.com/koko1123/flow-go-1/state/protocol/blocktimer"
    "github.com/koko1123/flow-go-1/storage"
    "github.com/koko1123/flow-go-1/storage/badger/operation"
)

// Builder is the builder for consensus block payloads. Upon providing a payload
// hash, it also memorizes which entities were included into the payload.
type Builder struct {
    metrics    module.MempoolMetrics
    tracer     module.Tracer
    db         *badger.DB
    state      protocol.MutableState
    seals      storage.Seals
    headers    storage.Headers
    index      storage.Index
    blocks     storage.Blocks
    resultsDB  storage.ExecutionResults
    receiptsDB storage.ExecutionReceipts
    guarPool   mempool.Guarantees
    sealPool   mempool.IncorporatedResultSeals
    recPool    mempool.ExecutionTree
    cfg        Config
}

// NewBuilder creates a new block builder.
func NewBuilder(
    metrics module.MempoolMetrics,
    db *badger.DB,
    state protocol.MutableState,
    headers storage.Headers,
    seals storage.Seals,
    index storage.Index,
    blocks storage.Blocks,
    resultsDB storage.ExecutionResults,
    receiptsDB storage.ExecutionReceipts,
    guarPool mempool.Guarantees,
    sealPool mempool.IncorporatedResultSeals,
    recPool mempool.ExecutionTree,
    tracer module.Tracer,
    options ...func(*Config),
) (*Builder, error) {

    blockTimer, err := blocktimer.NewBlockTimer(500*time.Millisecond, 10*time.Second)
    if err != nil {
        return nil, fmt.Errorf("could not create default block timer: %w", err)
    }

    // initialize default config
    cfg := Config{
        blockTimer:        blockTimer,
        maxSealCount:      100,
        maxGuaranteeCount: 100,
        maxReceiptCount:   200,
        expiry:            flow.DefaultTransactionExpiry,
    }

    // apply option parameters
    for _, option := range options {
        option(&cfg)
    }

    b := &Builder{
        metrics:    metrics,
        db:         db,
        tracer:     tracer,
        state:      state,
        headers:    headers,
        seals:      seals,
        index:      index,
        blocks:     blocks,
        resultsDB:  resultsDB,
        receiptsDB: receiptsDB,
        guarPool:   guarPool,
        sealPool:   sealPool,
        recPool:    recPool,
        cfg:        cfg,
    }

    err = b.repopulateExecutionTree()
    if err != nil {
        return nil, fmt.Errorf("could not repopulate execution tree: %w", err)
    }

    return b, nil
}
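// Illustration (not part of the original file): callers tune the limits above through
// the variadic options. A minimal sketch, assuming an option helper along the lines of
// WithMaxSealCount exists in this package's config (any such helper is simply a
// func(*Config) that mutates the corresponding field):
//
//	builder, err := consensus.NewBuilder(
//	    metrics, db, state, headers, seals, index, blocks,
//	    resultsDB, receiptsDB, guarPool, sealPool, recPool, tracer,
//	    consensus.WithMaxSealCount(50), // hypothetical option helper: cap seals per payload at 50
//	)
//	if err != nil {
//	    return fmt.Errorf("could not create builder: %w", err)
//	}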
// BuildOn creates a new block header on top of the provided parent, using the
// given view and applying the custom setter function to allow the caller to
// make changes to the header before storing it.
func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) error) (*flow.Header, error) {

    // since we don't know the blockID when building the block we track the
    // time indirectly and insert the span directly at the end

    startTime := time.Now()

    // get the collection guarantees to insert in the payload
    insertableGuarantees, err := b.getInsertableGuarantees(parentID)
    if err != nil {
        return nil, fmt.Errorf("could not insert guarantees: %w", err)
    }

    // get the receipts to insert in the payload
    insertableReceipts, err := b.getInsertableReceipts(parentID)
    if err != nil {
        return nil, fmt.Errorf("could not insert receipts: %w", err)
    }

    // get the seals to insert in the payload
    insertableSeals, err := b.getInsertableSeals(parentID)
    if err != nil {
        return nil, fmt.Errorf("could not insert seals: %w", err)
    }

    // assemble the block proposal
    proposal, err := b.createProposal(parentID,
        insertableGuarantees,
        insertableSeals,
        insertableReceipts,
        setter)
    if err != nil {
        return nil, fmt.Errorf("could not assemble proposal: %w", err)
    }

    span, ctx := b.tracer.StartBlockSpan(context.Background(), proposal.ID(), trace.CONBuilderBuildOn, otelTrace.WithTimestamp(startTime))
    defer span.End()

    err = b.state.Extend(ctx, proposal)
    if err != nil {
        return nil, fmt.Errorf("could not extend state with built proposal: %w", err)
    }

    return proposal.Header, nil
}
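// Illustration (not part of the original file): the consensus algorithm supplies the
// `setter` callback to stamp its own fields onto the header before the proposal is
// extended onto the protocol state. A minimal sketch, assuming `currentView` and
// `nodeID` are hypothetical variables held by the caller:
//
//	header, err := builder.BuildOn(parentID, func(h *flow.Header) error {
//	    h.View = currentView  // view in which the proposal is made
//	    h.ProposerID = nodeID // identifier of this consensus node
//	    return nil
//	})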
// repopulateExecutionTree restores the latest state of the execution tree mempool based on local chain state information.
// Repopulating the execution tree is split into two parts:
// 1) traverse backwards through all finalized blocks, starting from the last finalized block, until we reach the last sealed block: [lastSealedHeight, lastFinalizedHeight]
// 2) traverse forward through all unfinalized (pending) blocks, starting from the last finalized block.
// For each block that we traverse, we collect its execution results and add them to the execution tree.
func (b *Builder) repopulateExecutionTree() error {
    finalizedSnapshot := b.state.Final()
    finalized, err := finalizedSnapshot.Head()
    if err != nil {
        return fmt.Errorf("could not retrieve finalized block: %w", err)
    }
    finalizedID := finalized.ID()

    // Get the latest sealed block on this fork, i.e. the highest
    // block for which there is a finalized seal.
    latestSeal, err := b.seals.HighestInFork(finalizedID)
    if err != nil {
        return fmt.Errorf("could not retrieve latest seal in fork with head %x: %w", finalizedID, err)
    }
    latestSealedBlockID := latestSeal.BlockID
    latestSealedBlock, err := b.headers.ByBlockID(latestSealedBlockID)
    if err != nil {
        return fmt.Errorf("could not retrieve latest sealed block (%x): %w", latestSeal.BlockID, err)
    }
    sealedResult, err := b.resultsDB.ByID(latestSeal.ResultID)
    if err != nil {
        return fmt.Errorf("could not retrieve sealed result (%x): %w", latestSeal.ResultID, err)
    }

    // prune execution tree to minimum height (while the tree is still empty, for max efficiency)
    err = b.recPool.PruneUpToHeight(latestSealedBlock.Height)
    if err != nil {
        return fmt.Errorf("could not prune execution tree to height %d: %w", latestSealedBlock.Height, err)
    }

    // At initialization, the execution tree is empty. However, during normal operations, we
    // generally query the tree for "all receipts whose results are derived from the latest
    // sealed and finalized result". This requires the execution tree to know what the latest
    // sealed and finalized result is, so we add it here.
    // Note: we only add the sealed and finalized result, without any Execution Receipts. This
    // is sufficient to create a vertex in the tree. Thereby, we can traverse the tree, starting
    // from the sealed and finalized result, to find derived results and their respective receipts.
    err = b.recPool.AddResult(sealedResult, latestSealedBlock)
    if err != nil {
        return fmt.Errorf("failed to add sealed result as vertex to ExecutionTree (%x): %w", latestSeal.ResultID, err)
    }

    // receiptCollector adds _all known_ receipts for the given block to the execution tree
    receiptCollector := func(header *flow.Header) error {
        receipts, err := b.receiptsDB.ByBlockID(header.ID())
        if err != nil {
            return fmt.Errorf("could not retrieve execution receipts for block %x: %w", header.ID(), err)
        }
        for _, receipt := range receipts {
            _, err = b.recPool.AddReceipt(receipt, header)
            if err != nil {
                return fmt.Errorf("could not add receipt (%x) to execution tree: %w", receipt.ID(), err)
            }
        }
        return nil
    }

    // Traverse the chain backwards and add all known receipts for any finalized, unsealed block to the execution tree.
    // Thereby, we add a superset of all unsealed execution results to the execution tree.
    err = fork.TraverseBackward(b.headers, finalizedID, receiptCollector, fork.ExcludingBlock(latestSealedBlockID))
    if err != nil {
        return fmt.Errorf("failed to traverse unsealed, finalized blocks: %w", err)
    }

    // At this point, the execution tree is filled with all results for blocks in the range (lastSealedBlock, lastFinalizedBlock].
    // Now, we add all known receipts for any valid block that descends from the latest finalized block:
    validPending, err := finalizedSnapshot.ValidDescendants()
    if err != nil {
        return fmt.Errorf("could not retrieve valid pending blocks from finalized snapshot: %w", err)
    }
    for _, blockID := range validPending {
        block, err := b.headers.ByBlockID(blockID)
        if err != nil {
            return fmt.Errorf("could not retrieve header for unfinalized block %x: %w", blockID, err)
        }
        err = receiptCollector(block)
        if err != nil {
            return fmt.Errorf("failed to add receipts for unfinalized block %x at height %d: %w", blockID, block.Height, err)
        }
    }

    return nil
}
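// Illustration (not part of the original file): the guarantee expiry window computed
// below is purely height-based. For example, suppose cfg.expiry is 600 and the parent
// is at height 10_000; then height = 10_001 and limit = 10_001 - 600 = 9_401, so only
// guarantees whose reference block lies at a height in [9_401, 10_000] can pass the
// blockLookup check. The overflow and root-height checks clamp this window on young chains.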
// getInsertableGuarantees returns the list of CollectionGuarantees that should
// be inserted in the next payload. It looks in the collection mempool and
// applies the following filters:
//
// 1) If it was already included in the fork, skip.
//
// 2) If it references an unknown block, skip.
//
// 3) If the referenced block has an expired height, skip.
//
// 4) Otherwise, this guarantee can be included in the payload.
func (b *Builder) getInsertableGuarantees(parentID flow.Identifier) ([]*flow.CollectionGuarantee, error) {

    // we look back only as far as the expiry limit for the current height we
    // are building for; any guarantee with a reference block before that can
    // not be included anymore anyway
    parent, err := b.headers.ByBlockID(parentID)
    if err != nil {
        return nil, fmt.Errorf("could not retrieve parent: %w", err)
    }
    height := parent.Height + 1
    limit := height - uint64(b.cfg.expiry)
    if limit > height { // overflow check
        limit = 0
    }

    // look up the root height so we don't look too far back;
    // initially this is the genesis block height (aka 0).
    var rootHeight uint64
    err = b.db.View(operation.RetrieveRootHeight(&rootHeight))
    if err != nil {
        return nil, fmt.Errorf("could not retrieve root block height: %w", err)
    }
    if limit < rootHeight {
        limit = rootHeight
    }

    // blockLookup keeps track of the blocks from limit to parent
    blockLookup := make(map[flow.Identifier]struct{})

    // receiptLookup keeps track of the IDs of the collections guaranteed in
    // blocks between limit and parent
    receiptLookup := make(map[flow.Identifier]struct{})

    // loop through the fork backwards, from parent to limit (inclusive),
    // and keep track of blocks and collections visited on the way
    forkScanner := func(header *flow.Header) error {
        ancestorID := header.ID()
        blockLookup[ancestorID] = struct{}{}

        index, err := b.index.ByBlockID(ancestorID)
        if err != nil {
            return fmt.Errorf("could not get ancestor payload (%x): %w", ancestorID, err)
        }

        for _, collID := range index.CollectionIDs {
            receiptLookup[collID] = struct{}{}
        }

        return nil
    }
    err = fork.TraverseBackward(b.headers, parentID, forkScanner, fork.IncludingHeight(limit))
    if err != nil {
        return nil, fmt.Errorf("internal error building set of CollectionGuarantees on fork: %w", err)
    }

    // go through mempool and collect valid collections
    var guarantees []*flow.CollectionGuarantee
    for _, guarantee := range b.guarPool.All() {
        // add at most <maxGuaranteeCount> collection guarantees to a new block proposal
        // in order to prevent the block payload from being too big or computationally
        // heavy for the execution nodes
        if uint(len(guarantees)) >= b.cfg.maxGuaranteeCount {
            break
        }

        collID := guarantee.ID()

        // skip collections that are already included in a block on the fork
        _, duplicated := receiptLookup[collID]
        if duplicated {
            continue
        }

        // skip collections for blocks that are not within the limit
        _, ok := blockLookup[guarantee.ReferenceBlockID]
        if !ok {
            continue
        }

        guarantees = append(guarantees, guarantee)
    }

    return guarantees, nil
}

// getInsertableSeals returns the list of Seals from the mempool that should be
// inserted in the next payload.
// Per protocol definition, a specific result is only incorporated _once_ in each fork.
// Specifically, the result is incorporated in the block that contains a receipt committing
// to a result for the _first time_ in the respective fork.
// We can seal a result if and only if _all_ of the following conditions are satisfied:
//
// - (0) We have collected a sufficient number of approvals for each of the result's chunks.
// - (1) The result must have been previously incorporated in the fork which we are extending.
//       Note: The protocol dictates that all incorporated results must be for ancestor blocks
//       in the respective fork. Hence, a result being incorporated in the fork implies
//       that the result must be for a block in this fork.
// - (2) The result must be for an _unsealed_ block.
// - (3) The result's parent must have been previously sealed (either by a seal in an ancestor
//       block or by a seal included earlier in the block that we are constructing).
//
// To limit block size, we cap the number of seals to maxSealCount.
func (b *Builder) getInsertableSeals(parentID flow.Identifier) ([]*flow.Seal, error) {
    // get the latest seal in the fork that we are extending, and the
    // corresponding block whose result is sealed.
    // Note: the last seal might not be included in a finalized block yet
    lastSeal, err := b.seals.HighestInFork(parentID)
    if err != nil {
        return nil, fmt.Errorf("could not retrieve latest seal in the fork, which we are extending: %w", err)
    }
    latestSealedBlockID := lastSeal.BlockID
    latestSealedBlock, err := b.headers.ByBlockID(latestSealedBlockID)
    if err != nil {
        return nil, fmt.Errorf("could not retrieve sealed block %x: %w", lastSeal.BlockID, err)
    }
    latestSealedHeight := latestSealedBlock.Height

    // STEP I: Collect the seals for all results that satisfy (0), (1), and (2).
    // This will give us a _superset_ of all seals that can be included.
    // Implementation:
    //  * We walk the fork backwards and check each block for incorporated results.
    //    - Therefore, all results that we encounter satisfy condition (1).
    //  * We only consider results whose executed block has a height _strictly larger_
    //    than the lastSealedHeight.
    //    - Thereby, we guarantee that condition (2) is satisfied.
    //  * We only consider results for which we have a candidate seal in the sealPool.
    //    - Thereby, we guarantee that condition (0) is satisfied, because candidate seals
    //      are only generated and stored in the mempool once sufficient approvals are collected.
    // Furthermore, condition (2) imposes a limit on how far we have to walk back:
    //  * A result can only be incorporated in a child of the block that it computes.
    //    Therefore, we only have to inspect the results incorporated in unsealed blocks.
    sealsSuperset := make(map[uint64][]*flow.IncorporatedResultSeal) // map: executedBlock.Height -> candidate Seals
    sealCollector := func(header *flow.Header) error {
        blockID := header.ID()
        if blockID == parentID {
            // Important protocol edge case: There must be at least one block in between the block incorporating
            // a result and the block sealing the result. This is because we need the Source of Randomness for
            // the block that _incorporates_ the result, to compute the verifier assignment. Therefore, we require
            // that the block _incorporating_ the result has at least one child in the fork, _before_ we include
            // the seal. Thereby, we guarantee that a verifier assignment can be computed without needing
            // information from the block that we are just constructing. Hence, we don't consider results for
            // sealing that were incorporated in the immediate parent which we are extending.
            return nil
        }

        index, err := b.index.ByBlockID(blockID)
        if err != nil {
            return fmt.Errorf("could not retrieve index for block %x: %w", blockID, err)
        }

        // enforce condition (1): only consider seals for results that are incorporated in the fork
        for _, resultID := range index.ResultIDs {
            result, err := b.resultsDB.ByID(resultID)
            if err != nil {
                return fmt.Errorf("could not retrieve execution result %x: %w", resultID, err)
            }

            // re-assemble the IncorporatedResult because we need its ID to
            // check if it is in the seal mempool.
            incorporatedResult := flow.NewIncorporatedResult(
                blockID,
                result,
            )

            // enforce condition (0): candidate seals are only constructed once sufficient
            // approvals have been collected. Hence, any incorporated result for which we
            // find a candidate seal satisfies condition (0)
            irSeal, ok := b.sealPool.ByID(incorporatedResult.ID())
            if !ok {
                continue
            }

            // enforce condition (2): the block is unsealed (in this fork) if and only if
            // its height is _strictly larger_ than the lastSealedHeight.
            executedBlock, err := b.headers.ByBlockID(incorporatedResult.Result.BlockID)
            if err != nil {
                return fmt.Errorf("could not get header of block %x: %w", incorporatedResult.Result.BlockID, err)
            }
            if executedBlock.Height <= latestSealedHeight {
                continue
            }

            // The following is a subtle but important protocol edge case: There can be multiple
            // candidate seals for the same block. We have to include all of them to guarantee sealing liveness!
            sealsSuperset[executedBlock.Height] = append(sealsSuperset[executedBlock.Height], irSeal)
        }

        return nil
    }
    err = fork.TraverseBackward(b.headers, parentID, sealCollector, fork.ExcludingBlock(latestSealedBlockID))
    if err != nil {
        return nil, fmt.Errorf("internal error traversing unsealed section of fork: %w", err)
    }
    // All the seals in sealsSuperset are for results that satisfy (0), (1), and (2).

    // STEP II: Select only the seals from sealsSuperset that also satisfy condition (3).
    // We do this by starting with the last sealed result in the fork. Then, we check whether we
    // have a seal for the child block (at latestSealedBlock.Height + 1) which connects to the
    // sealed result. If we find such a seal, we can now consider the child block sealed.
    // We continue until we stop finding a seal for the child.
    seals := make([]*flow.Seal, 0, len(sealsSuperset))
    for {
        // cap the number of seals
        if uint(len(seals)) >= b.cfg.maxSealCount {
            break
        }

        // enforce condition (3):
        candidateSeal, ok := connectingSeal(sealsSuperset[latestSealedHeight+1], lastSeal)
        if !ok {
            break
        }
        seals = append(seals, candidateSeal)
        lastSeal = candidateSeal
        latestSealedHeight += 1
    }
    return seals, nil
}
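// Illustration (not part of the original file): suppose the last sealed block is at
// height 100 and sealsSuperset holds candidate seals for heights 101, 102 and 104.
// The STEP II loop above includes the seals for 101 and 102 (each connecting to the
// previously sealed result via PreviousResultID) and then stops, because no candidate
// seal for height 103 exists; the seal for 104 cannot be included in this proposal.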
// connectingSeal looks through `sealsForNextBlock`. It checks whether the
// sealed result directly descends from the lastSealed result.
func connectingSeal(sealsForNextBlock []*flow.IncorporatedResultSeal, lastSealed *flow.Seal) (*flow.Seal, bool) {
    for _, candidateSeal := range sealsForNextBlock {
        if candidateSeal.IncorporatedResult.Result.PreviousResultID == lastSealed.ResultID {
            return candidateSeal.Seal, true
        }
    }
    return nil, false
}

type InsertableReceipts struct {
    receipts []*flow.ExecutionReceiptMeta
    results  []*flow.ExecutionResult
}

// getInsertableReceipts constructs:
//   - (i) the meta information of the ExecutionReceipts (i.e. ExecutionReceiptMeta)
//     that should be inserted in the next payload
//   - (ii) the ExecutionResults the receipts from step (i) commit to
//     (deduplicated w.r.t. the block under construction as well as ancestor blocks)
//
// It looks in the receipts mempool and applies the following filter:
//
// 1) If it doesn't correspond to an unsealed block on the fork, skip it.
//
// 2) If it was already included in the fork, skip it.
//
// 3) Otherwise, this receipt can be included in the payload.
//
// Receipts have to be ordered by block height.
func (b *Builder) getInsertableReceipts(parentID flow.Identifier) (*InsertableReceipts, error) {

    // Get the latest sealed block on this fork, i.e. the highest block for which
    // there is a seal in this fork. This block is not necessarily finalized.
    latestSeal, err := b.seals.HighestInFork(parentID)
    if err != nil {
        return nil, fmt.Errorf("could not retrieve parent seal (%x): %w", parentID, err)
    }
    sealedBlockID := latestSeal.BlockID

    // ancestors is used to keep the IDs of the ancestor blocks we iterate through.
    // We use it to skip receipts that are not for unsealed blocks in the fork.
    ancestors := make(map[flow.Identifier]struct{})

    // includedReceipts is a set of all receipts that are contained in unsealed blocks along the fork.
    includedReceipts := make(map[flow.Identifier]struct{})

    // includedResults is a set of all unsealed results that were incorporated into the fork.
    includedResults := make(map[flow.Identifier]struct{})

    // loop through the fork backwards, from parent to last sealed (inclusive),
    // and keep track of blocks and receipts visited on the way.
    forkScanner := func(ancestor *flow.Header) error {
        ancestorID := ancestor.ID()
        ancestors[ancestorID] = struct{}{}

        index, err := b.index.ByBlockID(ancestorID)
        if err != nil {
            return fmt.Errorf("could not get payload index of block %x: %w", ancestorID, err)
        }
        for _, recID := range index.ReceiptIDs {
            includedReceipts[recID] = struct{}{}
        }
        for _, resID := range index.ResultIDs {
            includedResults[resID] = struct{}{}
        }

        return nil
    }
    err = fork.TraverseBackward(b.headers, parentID, forkScanner, fork.IncludingBlock(sealedBlockID))
    if err != nil {
        return nil, fmt.Errorf("internal error building set of insertable receipts on fork: %w", err)
    }

    isResultForUnsealedBlock := isResultForBlock(ancestors)
    isReceiptUniqueAndUnsealed := isNoDupAndNotSealed(includedReceipts, sealedBlockID)
    // find all receipts:
    // 1) whose results connect all the way to the last sealed result
    // 2) which are unique (never seen in unsealed blocks)
    receipts, err := b.recPool.ReachableReceipts(latestSeal.ResultID, isResultForUnsealedBlock, isReceiptUniqueAndUnsealed)
    // Occurrence of UnknownExecutionResultError:
    // Populating the execution tree with receipts from incoming blocks happens concurrently in
    // matching.Core. Hence, the following edge case can occur (rarely): matching.Core is
    // just in the process of populating the Execution Tree with the receipts from the
    // latest blocks, while the builder is already trying to build on top. In this rare
    // situation, the Execution Tree might not yet know the latest sealed result.
    // TODO: we should probably remove this edge case by _synchronously_ populating
    // the Execution Tree in the Fork's finalizationCallback
    if err != nil && !mempool.IsUnknownExecutionResultError(err) {
        return nil, fmt.Errorf("failed to retrieve reachable receipts from mempool: %w", err)
    }

    insertables := toInsertables(receipts, includedResults, b.cfg.maxReceiptCount)
    return insertables, nil
}
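// Illustration (not part of the original file): if two Execution Nodes issue receipts
// committing to the same result, toInsertables (below) emits two ExecutionReceiptMeta
// entries but only a single copy of the shared ExecutionResult, because results already
// present in includedResults, or added earlier in the loop, are skipped.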
// toInsertables separates the provided receipts into ExecutionReceiptMeta and
// ExecutionResult. Results that are in includedResults are skipped.
// We also limit the number of receipts to maxReceiptCount.
func toInsertables(receipts []*flow.ExecutionReceipt, includedResults map[flow.Identifier]struct{}, maxReceiptCount uint) *InsertableReceipts {
    results := make([]*flow.ExecutionResult, 0)

    count := uint(len(receipts))
    // don't collect more than maxReceiptCount receipts
    if count > maxReceiptCount {
        count = maxReceiptCount
    }

    filteredReceipts := make([]*flow.ExecutionReceiptMeta, 0, count)

    for i := uint(0); i < count; i++ {
        receipt := receipts[i]
        meta := receipt.Meta()
        resultID := meta.ResultID
        if _, inserted := includedResults[resultID]; !inserted {
            results = append(results, &receipt.ExecutionResult)
            includedResults[resultID] = struct{}{}
        }

        filteredReceipts = append(filteredReceipts, meta)
    }

    return &InsertableReceipts{
        receipts: filteredReceipts,
        results:  results,
    }
}

// createProposal assembles a block with the provided header and payload
// information.
func (b *Builder) createProposal(parentID flow.Identifier,
    guarantees []*flow.CollectionGuarantee,
    seals []*flow.Seal,
    insertableReceipts *InsertableReceipts,
    setter func(*flow.Header) error) (*flow.Block, error) {

    // build the payload so we can get the hash
    payload := &flow.Payload{
        Guarantees: guarantees,
        Seals:      seals,
        Receipts:   insertableReceipts.receipts,
        Results:    insertableReceipts.results,
    }

    parent, err := b.headers.ByBlockID(parentID)
    if err != nil {
        return nil, fmt.Errorf("could not retrieve parent: %w", err)
    }

    timestamp := b.cfg.blockTimer.Build(parent.Timestamp)

    // construct default block on top of the provided parent
    header := &flow.Header{
        ChainID:     parent.ChainID,
        ParentID:    parentID,
        Height:      parent.Height + 1,
        Timestamp:   timestamp,
        PayloadHash: payload.Hash(),
    }

    // apply the custom fields setter of the consensus algorithm
    err = setter(header)
    if err != nil {
        return nil, fmt.Errorf("could not apply setter: %w", err)
    }

    proposal := &flow.Block{
        Header:  header,
        Payload: payload,
    }

    return proposal, nil
}

// isResultForBlock constructs a mempool.BlockFilter that accepts only blocks whose ID is part of the given set.
func isResultForBlock(blockIDs map[flow.Identifier]struct{}) mempool.BlockFilter {
    blockIdFilter := id.InSet(blockIDs)
    return func(h *flow.Header) bool {
        return blockIdFilter(h.ID())
    }
}

// isNoDupAndNotSealed constructs a mempool.ReceiptFilter for discarding receipts that
// * are duplicates
// * or are for the sealed block
func isNoDupAndNotSealed(includedReceipts map[flow.Identifier]struct{}, sealedBlockID flow.Identifier) mempool.ReceiptFilter {
    return func(receipt *flow.ExecutionReceipt) bool {
        if _, duplicate := includedReceipts[receipt.ID()]; duplicate {
            return false
        }
        if receipt.ExecutionResult.BlockID == sealedBlockID {
            return false
        }
        return true
    }
}