package requester_test

import (
	"context"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/dgraph-io/badger/v2"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
	"github.com/onflow/flow-go/engine/access/subscription"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/blobs"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data/cache"
	exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/module/mempool/herocache"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/module/state_synchronization"
	"github.com/onflow/flow-go/module/state_synchronization/requester"
	synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest"
	"github.com/onflow/flow-go/state/protocol"
	statemock "github.com/onflow/flow-go/state/protocol/mock"
	bstorage "github.com/onflow/flow-go/storage/badger"
	"github.com/onflow/flow-go/utils/unittest"
)

// ExecutionDataRequesterSuite exercises the execution data requester against
// mocked storage, protocol state, and downloader implementations. The
// datastore/blobstore pair backs the generated execution data fixtures, and
// mockSnapshot lets each test advance the "latest sealed" header on demand.
type ExecutionDataRequesterSuite struct {
	suite.Suite

	blobstore   blobs.Blobstore
	datastore   datastore.Batching
	db          *badger.DB
	downloader  *exedatamock.Downloader
	distributor *requester.ExecutionDataDistributor

	// run is the default scenario (100 blocks, no special behavior) used by
	// tests that don't build their own edTestRun
	run edTestRun

	mockSnapshot *mockSnapshot
}

func TestExecutionDataRequesterSuite(t *testing.T) {
	t.Parallel()
	suite.Run(t, new(ExecutionDataRequesterSuite))
}

// SetupTest resets the in-memory datastore/blobstore and restores the default
// test run before each test.
func (suite *ExecutionDataRequesterSuite) SetupTest() {
	suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore())
	suite.blobstore = blobs.NewBlobstore(suite.datastore)

	suite.run = edTestRun{
		"",
		100,
		func(_ int) map[uint64]testExecutionDataCallback {
			return map[uint64]testExecutionDataCallback{}
		},
	}
}

// testExecutionDataServiceEntry configures how the mocked downloader responds
// to a Get for one specific execution data ID.
type testExecutionDataServiceEntry struct {
	// When set, the response from this callback will be returned for any calls to Get
	// Note: this callback is called twice by mockery, once for the execution data and once for the error
	fn testExecutionDataCallback
	// When set (and fn is unset), this error will be returned for any calls to Get for this ED
	Err error
	// Otherwise, the execution data will be returned directly with no error
	ExecutionData *execution_data.BlockExecutionData
}

// specialBlockGenerator produces per-height callbacks used to inject failures,
// delays, or pauses for specific blocks in a test run.
type specialBlockGenerator func(int) map[uint64]testExecutionDataCallback

// edTestRun describes one requester test scenario.
type edTestRun struct {
	name          string
	blockCount    int
	specialBlocks specialBlockGenerator
}

type testExecutionDataCallback func(*execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error)

// mockDownloader builds a Downloader mock whose Get responses are driven by
// edStore: unknown IDs yield a BlobNotFoundError, otherwise the entry's
// callback, error, or execution data is returned (in that priority order).
func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) *exedatamock.Downloader {
	downloader := new(exedatamock.Downloader)

	get := func(id flow.Identifier) (*execution_data.BlockExecutionData, error) {
		ed, has := edStore[id]

		// return not found
		if !has {
			return nil, execution_data.NewBlobNotFoundError(flow.IdToCid(id))
		}

		// use a callback. this is useful for injecting a pause or custom error behavior
		if ed.fn != nil {
			return ed.fn(ed.ExecutionData)
		}

		// return a custom error
		if ed.Err != nil {
			return nil, ed.Err
		}

		// return the specific execution data
		return ed.ExecutionData, nil
	}

	// mockery invokes each Return function separately, so get() runs twice per
	// Get call — once for the value and once for the error
	downloader.On("Get", mock.Anything, mock.AnythingOfType("flow.Identifier")).
		Return(
			func(ctx context.Context, id flow.Identifier) *execution_data.BlockExecutionData {
				ed, _ := get(id)
				return ed
			},
			func(ctx context.Context, id flow.Identifier) error {
				_, err := get(id)
				return err
			},
		).
		Maybe() // Maybe() needed to get call count

	noop := module.NoopReadyDoneAware{}
	downloader.On("Ready").
		Return(func() <-chan struct{} { return noop.Ready() }).
		Maybe() // Maybe() needed to get call count

	return downloader
}

// mockProtocolState returns a protocol state mock whose Sealed() snapshot is
// suite.mockSnapshot, seeded with the genesis header. Tests advance the sealed
// head later via suite.mockSnapshot.set.
func (suite *ExecutionDataRequesterSuite) mockProtocolState(blocksByHeight map[uint64]*flow.Block) *statemock.State {
	state := new(statemock.State)

	suite.mockSnapshot = new(mockSnapshot)
	suite.mockSnapshot.set(blocksByHeight[0].Header, nil) // genesis block

	state.On("Sealed").Return(suite.mockSnapshot).Maybe()
	return state
}

// TestRequesterProcessesBlocks tests that the requester processes all blocks and sends notifications
// in order.
func (suite *ExecutionDataRequesterSuite) TestRequesterProcessesBlocks() {

	tests := []edTestRun{
		// Test that blocks are processed in order
		{
			"happy path",
			100,
			func(_ int) map[uint64]testExecutionDataCallback {
				return map[uint64]testExecutionDataCallback{}
			},
		},
		// Tests that blocks that are missed are properly retried and notifications are received in order
		{
			"requests blocks with some missed",
			100,
			generateBlocksWithSomeMissed,
		},
		// Tests that blocks that are missed are properly retried and backfilled
		{
			"requests blocks with some delayed",
			100,
			generateBlocksWithRandomDelays,
		},
	}

	for _, run := range tests {
		suite.Run(run.name, func() {
			unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) {
				suite.db = db

				// fresh datastore/blobstore per subtest so fixtures don't leak between runs
				suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore())
				suite.blobstore = blobs.NewBlobstore(suite.datastore)

				testData := suite.generateTestData(run.blockCount, run.specialBlocks(run.blockCount))
				edr, fd := suite.prepareRequesterTest(testData)
				fetchedExecutionData := suite.runRequesterTest(edr, fd, testData)

				verifyFetchedExecutionData(suite.T(), fetchedExecutionData, testData)

				suite.T().Log("Shutting down test")
			})
		})
	}
}

// TestRequesterResumesAfterRestart tests that the requester will pick up where it left off after a
// restart, without skipping any blocks
func (suite *ExecutionDataRequesterSuite) TestRequesterResumesAfterRestart() {
	suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore())
	suite.blobstore = blobs.NewBlobstore(suite.datastore)

	testData := suite.generateTestData(suite.run.blockCount, suite.run.specialBlocks(suite.run.blockCount))

	// test runs the requester up to stopHeight, then restarts a fresh component
	// over the same badger db and resumes from resumeHeight
	test := func(stopHeight, resumeHeight uint64) {
		testData.fetchedExecutionData = nil

		unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) {
			suite.db = db

			// Process half of the blocks
			edr, fd := suite.prepareRequesterTest(testData)
			testData.stopHeight = stopHeight
			testData.resumeHeight = 0
			testData.fetchedExecutionData = suite.runRequesterTest(edr, fd, testData)

			// Stand up a new component using the same datastore, and make sure all remaining
			// blocks are processed
			edr, fd = suite.prepareRequesterTest(testData)
			testData.stopHeight = 0
			testData.resumeHeight = resumeHeight
			fetchedExecutionData := suite.runRequesterTest(edr, fd, testData)

			verifyFetchedExecutionData(suite.T(), fetchedExecutionData, testData)

			suite.T().Log("Shutting down test")
		})
	}

	suite.Run("requester resumes processing with no gap", func() {
		stopHeight := testData.startHeight + uint64(suite.run.blockCount)/2
		resumeHeight := stopHeight + 1
		test(stopHeight, resumeHeight)
	})

	suite.Run("requester resumes processing with gap", func() {
		stopHeight := testData.startHeight + uint64(suite.run.blockCount)/2
		resumeHeight := testData.endHeight
		test(stopHeight, resumeHeight)
	})
}

// TestRequesterCatchesUp tests that the requester processes all heights when it starts with a
// backlog of sealed blocks.
func (suite *ExecutionDataRequesterSuite) TestRequesterCatchesUp() {
	unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) {
		suite.db = db

		suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore())
		suite.blobstore = blobs.NewBlobstore(suite.datastore)

		testData := suite.generateTestData(suite.run.blockCount, suite.run.specialBlocks(suite.run.blockCount))

		// start processing with all seals available
		edr, fd := suite.prepareRequesterTest(testData)
		testData.resumeHeight = testData.endHeight
		fetchedExecutionData := suite.runRequesterTest(edr, fd, testData)

		verifyFetchedExecutionData(suite.T(), fetchedExecutionData, testData)

		suite.T().Log("Shutting down test")
	})
}

// TestRequesterPausesAndResumes tests that the requester pauses when it downloads maxSearchAhead
// blocks beyond the last processed block, and resumes when it catches up.
func (suite *ExecutionDataRequesterSuite) TestRequesterPausesAndResumes() {
	unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) {
		suite.db = db

		pauseHeight := uint64(10)
		maxSearchAhead := uint64(5)

		// Downloads will succeed immediately for all blocks except pauseHeight, which will hang
		// until the resume() is called.
		generate, resume := generatePauseResume(pauseHeight)

		testData := suite.generateTestData(suite.run.blockCount, generate(suite.run.blockCount))
		testData.maxSearchAhead = maxSearchAhead
		testData.waitTimeout = time.Second * 10

		// calculate the expected number of blocks that should be downloaded before resuming.
		// the test should download all blocks up to pauseHeight, then maxSearchAhead blocks beyond.
		// the pause block itself is excluded.
		expectedDownloads := pauseHeight + maxSearchAhead - 1

		edr, fd := suite.prepareRequesterTest(testData)
		fetchedExecutionData := suite.runRequesterTestPauseResume(edr, fd, testData, int(expectedDownloads), resume)

		verifyFetchedExecutionData(suite.T(), fetchedExecutionData, testData)

		suite.T().Log("Shutting down test")
	})
}

// TestRequesterHalts tests that the requester handles halting correctly when it encounters an
// invalid block
func (suite *ExecutionDataRequesterSuite) TestRequesterHalts() {
	unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) {
		suite.db = db

		suite.run.blockCount = 10
		suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore())
		suite.blobstore = blobs.NewBlobstore(suite.datastore)

		// generate a block that will return a malformed blob error. causing the requester to halt
		generate, expectedErr := generateBlocksWithHaltingError(suite.run.blockCount)
		testData := suite.generateTestData(suite.run.blockCount, generate(suite.run.blockCount))

		// start processing with all seals available
		edr, followerDistributor := suite.prepareRequesterTest(testData)
		testData.resumeHeight = testData.endHeight
		testData.expectedIrrecoverable = expectedErr
		fetchedExecutionData := suite.runRequesterTestHalts(edr, followerDistributor, testData)
		assert.Less(suite.T(), len(fetchedExecutionData), testData.sealedCount)

		suite.T().Log("Shutting down test")
	})
}

// generateBlocksWithSomeMissed makes every 5th block fail with a not-found
// error a random number of times before eventually succeeding, exercising the
// requester's retry logic.
func generateBlocksWithSomeMissed(blockCount int) map[uint64]testExecutionDataCallback {
	missing := map[uint64]testExecutionDataCallback{}

	// every 5th block fails to download n times before succeeding
	for i := uint64(0); i < uint64(blockCount); i++ {
		if i%5 > 0 {
			continue
		}

		failures := rand.Intn(3) + 1
		attempts := 0
		missing[i] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) {
			if attempts < failures*2 { // this func is run twice for every attempt by the mock (once for ExecutionData one for errors)
				attempts++
				// This should fail the first n fetch attempts
				time.Sleep(time.Duration(rand.Intn(25)) * time.Millisecond)
				return nil, &execution_data.BlobNotFoundError{}
			}

			return ed, nil
		}
	}

	return missing
}

// generateBlocksWithRandomDelays injects a random short delay into the
// download of every fifth block.
func generateBlocksWithRandomDelays(blockCount int) map[uint64]testExecutionDataCallback {
	// delay every fifth block by a random amount
	delays := map[uint64]testExecutionDataCallback{}
	for i := uint64(0); i < uint64(blockCount); i++ {
		if i%5 > 0 {
			continue
		}

		delays[i] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) {
			time.Sleep(time.Duration(rand.Intn(25)) * time.Millisecond)
			return ed, nil
		}
	}

	return delays
}

// generateBlocksWithHaltingError configures one block to always fail with a
// MalformedDataError, and returns both the generator and the expected error.
func generateBlocksWithHaltingError(blockCount int) (specialBlockGenerator, error) {
	// return a MalformedDataError on the block five heights before the end
	height := uint64(blockCount - 5)
	err := fmt.Errorf("halting error: %w", &execution_data.MalformedDataError{})

	generate := func(int) map[uint64]testExecutionDataCallback {
		return map[uint64]testExecutionDataCallback{
			height: func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) {
				return nil, err
			},
		}
	}
	return generate, err
}

// generatePauseResume blocks the download of pauseHeight on a channel until
// the returned resume() function is called.
func generatePauseResume(pauseHeight uint64) (specialBlockGenerator, func()) {
	pause := make(chan struct{})

	blocks := map[uint64]testExecutionDataCallback{}
	blocks[pauseHeight] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) {
		<-pause
		return ed, nil
	}

	generate := func(int) map[uint64]testExecutionDataCallback { return blocks }
	resume := func() { close(pause) }

	return generate, resume
}

// prepareRequesterTest wires up a requester with mocked headers, results,
// seals, protocol state, and downloader built from cfg, and subscribes it to a
// fresh FollowerDistributor for finalization events.
func (suite *ExecutionDataRequesterSuite) prepareRequesterTest(cfg *fetchTestRun) (state_synchronization.ExecutionDataRequester, *pubsub.FollowerDistributor) {
	logger := unittest.Logger()
	metrics := metrics.NewNoopCollector()

	headers := synctest.MockBlockHeaderStorage(
		synctest.WithByID(cfg.blocksByID),
		synctest.WithByHeight(cfg.blocksByHeight),
		synctest.WithBlockIDByHeight(cfg.blocksByHeight),
	)
	results := synctest.MockResultsStorage(
		synctest.WithResultByID(cfg.resultsByID),
	)
	seals := synctest.MockSealsStorage(
		synctest.WithSealsByBlockID(cfg.sealsByBlockID),
	)
	state := suite.mockProtocolState(cfg.blocksByHeight)

	suite.downloader = mockDownloader(cfg.executionDataEntries)
	suite.distributor = requester.NewExecutionDataDistributor()

	heroCache := herocache.NewBlockExecutionData(subscription.DefaultCacheSize, logger, metrics)
	cache := cache.NewExecutionDataCache(suite.downloader, headers, seals, results, heroCache)

	followerDistributor := pubsub.NewFollowerDistributor()
	processedHeight := bstorage.NewConsumerProgress(suite.db, module.ConsumeProgressExecutionDataRequesterBlockHeight)
	processedNotification := bstorage.NewConsumerProgress(suite.db, module.ConsumeProgressExecutionDataRequesterNotification)

	edr, err := requester.New(
		logger,
		metrics,
		suite.downloader,
		cache,
		processedHeight,
		processedNotification,
		state,
		headers,
		requester.ExecutionDataConfig{
			InitialBlockHeight: cfg.startHeight - 1,
			MaxSearchAhead:     cfg.maxSearchAhead,
			FetchTimeout:       cfg.fetchTimeout,
			RetryDelay:         cfg.retryDelay,
			MaxRetryDelay:      cfg.maxRetryDelay,
		},
		suite.distributor,
	)
	require.NoError(suite.T(), err)

	followerDistributor.AddOnBlockFinalizedConsumer(edr.OnBlockFinalized)

	return edr, followerDistributor
}

// runRequesterTestHalts runs the requester against cfg and asserts that it
// stops emitting notifications after hitting the configured halting error.
func (suite *ExecutionDataRequesterSuite) runRequesterTestHalts(edr state_synchronization.ExecutionDataRequester, followerDistributor *pubsub.FollowerDistributor, cfg *fetchTestRun) receivedExecutionData {
	// make sure test helper goroutines are cleaned up
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx)

	testDone := make(chan struct{})
	fetchedExecutionData := cfg.FetchedExecutionData()

	// collect all execution data notifications
	suite.distributor.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData))

	edr.Start(signalerCtx)
	unittest.RequireCloseBefore(suite.T(), edr.Ready(), cfg.waitTimeout, "timed out waiting for requester to be ready")

	// Send blocks through followerDistributor
	suite.finalizeBlocks(cfg, followerDistributor)

	// testDone should never close because the requester halted
	unittest.RequireNeverClosedWithin(suite.T(), testDone, 100*time.Millisecond, "finished sending notifications unexpectedly")
	suite.T().Log("All notifications received")

	cancel()
	unittest.RequireCloseBefore(suite.T(), edr.Done(), cfg.waitTimeout, "timed out waiting for requester to shutdown")

	return fetchedExecutionData
}

// runRequesterTestPauseResume runs the requester with a paused block, verifies
// exactly expectedDownloads Get calls were made while paused, then calls
// resume() and waits for all notifications.
func (suite *ExecutionDataRequesterSuite) runRequesterTestPauseResume(edr state_synchronization.ExecutionDataRequester, followerDistributor *pubsub.FollowerDistributor, cfg *fetchTestRun, expectedDownloads int, resume func()) receivedExecutionData {
	// make sure test helper goroutines are cleaned up
	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx)

	testDone := make(chan struct{})
	fetchedExecutionData := cfg.FetchedExecutionData()

	// collect all execution data notifications
	suite.distributor.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData))

	edr.Start(signalerCtx)
	unittest.RequireCloseBefore(suite.T(), edr.Ready(), cfg.waitTimeout, "timed out waiting for requester to be ready")

	// Send all blocks through followerDistributor
	suite.finalizeBlocks(cfg, followerDistributor)

	// requester should pause downloads until resume is called, so testDone should not be closed
	unittest.RequireNeverClosedWithin(suite.T(), testDone, 500*time.Millisecond, "finished unexpectedly")

	// confirm the expected number of downloads were attempted
	suite.downloader.AssertNumberOfCalls(suite.T(), "Get", expectedDownloads)

	suite.T().Log("Resuming")
	resume()

	// Pause until we've received all of the expected notifications
	unittest.RequireCloseBefore(suite.T(), testDone, cfg.waitTimeout, "timed out waiting for notifications")
	suite.T().Log("All notifications received")

	cancel()
	unittest.RequireCloseBefore(suite.T(), edr.Done(), cfg.waitTimeout, "timed out waiting for requester to shutdown")

	return fetchedExecutionData
}

// runRequesterTest runs the requester for cfg's happy path: finalize all
// blocks, wait for every expected notification, then shut down cleanly.
func (suite *ExecutionDataRequesterSuite) runRequesterTest(edr state_synchronization.ExecutionDataRequester, followerDistributor *pubsub.FollowerDistributor, cfg *fetchTestRun) receivedExecutionData {
	// make sure test helper goroutines are cleaned up
	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx)

	// wait for all notifications
	testDone := make(chan struct{})

	fetchedExecutionData := cfg.FetchedExecutionData()

	// collect all execution data notifications
	suite.distributor.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData))

	edr.Start(signalerCtx)
	unittest.RequireCloseBefore(suite.T(), edr.Ready(), cfg.waitTimeout, "timed out waiting for requester to be ready")

	// Send blocks through followerDistributor
	suite.finalizeBlocks(cfg, followerDistributor)

	// Pause until we've received all of the expected notifications
	unittest.RequireCloseBefore(suite.T(), testDone, cfg.waitTimeout, "timed out waiting for notifications")
	suite.T().Log("All notifications received")

	cancel()
	unittest.RequireCloseBefore(suite.T(), edr.Done(), cfg.waitTimeout, "timed out waiting for requester to shutdown")

	return fetchedExecutionData
}

// consumeExecutionDataNotifications returns a consumer that records each
// notification into fetchedExecutionData, flags duplicates and unknown blocks,
// and calls done() once the last expected seal's notification arrives.
func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData) func(ed *execution_data.BlockExecutionDataEntity) {
	return func(ed *execution_data.BlockExecutionDataEntity) {
		if _, has := fetchedExecutionData[ed.BlockID]; has {
			suite.T().Errorf("duplicate execution data for block %s", ed.BlockID)
			return
		}

		fetchedExecutionData[ed.BlockID] = ed.BlockExecutionData
		if _, ok := cfg.blocksByID[ed.BlockID]; !ok {
			suite.T().Errorf("unknown execution data for block %s", ed.BlockID)
			return
		}

		suite.T().Logf("notified of execution data for block %v height %d (%d/%d)", ed.BlockID, cfg.blocksByID[ed.BlockID].Header.Height, len(fetchedExecutionData), cfg.sealedCount)

		if cfg.IsLastSeal(ed.BlockID) {
			done()
		}
	}
}

// finalizeBlocks walks heights from cfg's start to endHeight, updating the
// mocked sealed snapshot from each block's first seal and emitting a finalized
// event per block; it stops early at cfg.stopHeight when set.
func (suite *ExecutionDataRequesterSuite) finalizeBlocks(cfg *fetchTestRun, followerDistributor *pubsub.FollowerDistributor) {
	for i := cfg.StartHeight(); i <= cfg.endHeight; i++ {
		b := cfg.blocksByHeight[i]

		suite.T().Log(">>>> Finalizing block", b.ID(), b.Header.Height)

		if len(b.Payload.Seals) > 0 {
			seal := b.Payload.Seals[0]
			sealedHeader := cfg.blocksByID[seal.BlockID].Header

			suite.mockSnapshot.set(sealedHeader, nil)
			suite.T().Log(">>>> Sealing block", sealedHeader.ID(), sealedHeader.Height)
		}

		followerDistributor.OnFinalizedBlock(&model.Block{}) // actual block is unused

		if cfg.stopHeight == i {
			break
		}
	}
}

type receivedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData

// fetchTestRun holds all fixture data and configuration for one requester test run.
type fetchTestRun struct {
	sealedCount              int
	startHeight              uint64
	endHeight                uint64
	blocksByHeight           map[uint64]*flow.Block
	blocksByID               map[flow.Identifier]*flow.Block
	resultsByID              map[flow.Identifier]*flow.ExecutionResult
	resultsByBlockID         map[flow.Identifier]*flow.ExecutionResult
	sealsByBlockID           map[flow.Identifier]*flow.Seal
	executionDataByID        map[flow.Identifier]*execution_data.BlockExecutionData
	executionDataEntries     map[flow.Identifier]*testExecutionDataServiceEntry
	executionDataIDByBlockID map[flow.Identifier]flow.Identifier
	expectedIrrecoverable    error

	// stopHeight/resumeHeight control the restart tests; zero means unset
	stopHeight           uint64
	resumeHeight         uint64
	fetchedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData
	waitTimeout          time.Duration

	maxSearchAhead uint64
	fetchTimeout   time.Duration
	retryDelay     time.Duration
	maxRetryDelay  time.Duration
}

// StartHeight returns resumeHeight when set, otherwise the run's startHeight.
func (r *fetchTestRun) StartHeight() uint64 {
	if r.resumeHeight > 0 {
		return r.resumeHeight
	}
	return r.startHeight
}

// StopHeight returns stopHeight when set, otherwise the run's endHeight.
func (r *fetchTestRun) StopHeight() uint64 {
	if r.stopHeight > 0 {
		return r.stopHeight
	}
	return r.endHeight
}

// FetchedExecutionData returns the existing accumulator map, or a fresh one
// sized for the expected number of sealed blocks.
func (r *fetchTestRun) FetchedExecutionData() receivedExecutionData {
	if r.fetchedExecutionData == nil {
		return make(receivedExecutionData, r.sealedCount)
	}
	return r.fetchedExecutionData
}

// IsLastSeal returns true if the provided blockID is the last expected sealed block for the test
func (r *fetchTestRun) IsLastSeal(blockID flow.Identifier) bool {
	stopHeight := r.StopHeight()
	lastSeal := r.blocksByHeight[stopHeight].Payload.Seals[0].BlockID
	return lastSeal == r.blocksByID[blockID].ID()
}

// generateTestData builds a chain of blockCount blocks (genesis at height 0)
// with results, seals, and execution data fixtures, wiring the per-height
// specialHeightFuncs into the mocked downloader's entries.
func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, specialHeightFuncs map[uint64]testExecutionDataCallback) *fetchTestRun {
	edsEntries := map[flow.Identifier]*testExecutionDataServiceEntry{}
	blocksByHeight := map[uint64]*flow.Block{}
	blocksByID := map[flow.Identifier]*flow.Block{}
	resultsByID := map[flow.Identifier]*flow.ExecutionResult{}
	resultsByBlockID := map[flow.Identifier]*flow.ExecutionResult{}
	sealsByBlockID := map[flow.Identifier]*flow.Seal{}
	executionDataByID := map[flow.Identifier]*execution_data.BlockExecutionData{}
	executionDataIDByBlockID := map[flow.Identifier]flow.Identifier{}

	sealedCount := blockCount - 4 // with the default 100 blocks: seals for blocks 1-96
	firstSeal := blockCount - sealedCount

	// genesis is block 0, we start syncing from block 1
	startHeight := uint64(1)
	endHeight := uint64(blockCount) - 1

	// instantiate ExecutionDataService to generate correct CIDs
	eds := execution_data.NewExecutionDataStore(suite.blobstore, execution_data.DefaultSerializer)

	var previousBlock *flow.Block
	var previousResult *flow.ExecutionResult
	for i := 0; i < blockCount; i++ {
		var seals []*flow.Header

		if i >= firstSeal {
			sealedBlock := blocksByHeight[uint64(i-firstSeal+1)]
			seals = []*flow.Header{
				sealedBlock.Header, // block 0 doesn't get sealed (it's pre-sealed in the genesis state)
			}

			sealsByBlockID[sealedBlock.ID()] = unittest.Seal.Fixture(
				unittest.Seal.WithBlockID(sealedBlock.ID()),
				unittest.Seal.WithResult(resultsByBlockID[sealedBlock.ID()]),
			)

			suite.T().Logf("block %d has seals for %d", i, seals[0].Height)
		}

		height := uint64(i)
		block := buildBlock(height, previousBlock, seals)

		ed := unittest.BlockExecutionDataFixture(unittest.WithBlockExecutionDataBlockID(block.ID()))

		cid, err := eds.Add(context.Background(), ed)
		require.NoError(suite.T(), err)

		result := buildResult(block, cid, previousResult)

		blocksByHeight[height] = block
		blocksByID[block.ID()] = block
		resultsByBlockID[block.ID()] = result
		resultsByID[result.ID()] = result

		// ignore all the data we don't need to verify the test
		if i > 0 && i <= sealedCount {
			executionDataByID[block.ID()] = ed
			edsEntries[cid] = &testExecutionDataServiceEntry{ExecutionData: ed}
			if fn, has := specialHeightFuncs[height]; has {
				edsEntries[cid].fn = fn
			}

			executionDataIDByBlockID[block.ID()] = cid
		}

		previousBlock = block
		previousResult = result
	}

	return &fetchTestRun{
		sealedCount:              sealedCount,
		startHeight:              startHeight,
		endHeight:                endHeight,
		blocksByHeight:           blocksByHeight,
		blocksByID:               blocksByID,
		resultsByBlockID:         resultsByBlockID,
		resultsByID:              resultsByID,
		sealsByBlockID:           sealsByBlockID,
		executionDataByID:        executionDataByID,
		executionDataEntries:     edsEntries,
		executionDataIDByBlockID: executionDataIDByBlockID,
		waitTimeout:              time.Second * 5,

		maxSearchAhead: requester.DefaultMaxSearchAhead,
		fetchTimeout:   requester.DefaultFetchTimeout,
		retryDelay:     1 * time.Millisecond,
		maxRetryDelay:  15 * time.Millisecond,
	}
}

// buildBlock returns the genesis fixture for height 0, otherwise a child of
// parent carrying the given seals (if any).
func buildBlock(height uint64, parent *flow.Block, seals []*flow.Header) *flow.Block {
	if parent == nil {
		return unittest.GenesisFixture()
	}

	if len(seals) == 0 {
		return unittest.BlockWithParentFixture(parent.Header)
	}

	return unittest.BlockWithParentAndSeals(parent.Header, seals)
}

// buildResult creates an execution result for block pointing at cid, chained
// to previousResult when one exists.
func buildResult(block *flow.Block, cid flow.Identifier, previousResult *flow.ExecutionResult) *flow.ExecutionResult {
	opts := []func(result *flow.ExecutionResult){
		unittest.WithBlock(block),
		unittest.WithExecutionDataID(cid),
	}

	if previousResult != nil {
		opts = append(opts, unittest.WithPreviousResult(*previousResult))
	}

	return unittest.ExecutionResultFixture(opts...)
}

// verifyFetchedExecutionData asserts that exactly the expected execution data
// was received for every sealed height in cfg.
func verifyFetchedExecutionData(t *testing.T, actual receivedExecutionData, cfg *fetchTestRun) {
	expected := cfg.executionDataByID
	assert.Len(t, actual, len(expected))

	for i := 0; i < cfg.sealedCount; i++ {
		height := cfg.startHeight + uint64(i)
		block := cfg.blocksByHeight[height]
		blockID := block.ID()

		expectedED := expected[blockID]
		actualED, has := actual[blockID]
		assert.True(t, has, "missing execution data for block %v height %d", blockID, height)
		if has {
			assert.Equal(t, expectedED, actualED, "execution data for block %v doesn't match", blockID)
		}
	}
}

// mockSnapshot is a thread-safe stub of protocol.Snapshot whose Head can be
// updated mid-test to simulate the sealed head advancing.
type mockSnapshot struct {
	header *flow.Header
	err    error
	mu     sync.Mutex
}

var _ protocol.Snapshot = &mockSnapshot{}

// set updates the header/error pair returned by subsequent Head calls.
func (m *mockSnapshot) set(header *flow.Header, err error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.header = header
	m.err = err
}

func (m *mockSnapshot) Head() (*flow.Header, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	return m.header, m.err
}

// none of these are used in this test
func (m *mockSnapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { return nil, nil }
func (m *mockSnapshot) Identities(selector flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) {
	return nil, nil
}
func (m *mockSnapshot) Identity(nodeID flow.Identifier) (*flow.Identity, error) { return nil, nil }
func (m *mockSnapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) {
	return nil, nil, nil
}
func (m *mockSnapshot) Commit() (flow.StateCommitment, error) { return flow.DummyStateCommitment, nil }
func (m *mockSnapshot) SealingSegment() (*flow.SealingSegment, error) { return nil, nil }
func (m *mockSnapshot) Descendants() ([]flow.Identifier, error)       { return nil, nil }
// Remaining protocol.Snapshot methods — unused stubs returning zero values.
func (m *mockSnapshot) RandomSource() ([]byte, error) { return nil, nil }
func (m *mockSnapshot) Phase() (flow.EpochPhase, error) { return flow.EpochPhaseUndefined, nil }
func (m *mockSnapshot) Epochs() protocol.EpochQuery { return nil }
func (m *mockSnapshot) Params() protocol.GlobalParams { return nil }
func (m *mockSnapshot) EpochProtocolState() (protocol.DynamicProtocolState, error) { return nil, nil }
func (m *mockSnapshot) ProtocolState() (protocol.KVStoreReader, error) { return nil, nil }
func (m *mockSnapshot) VersionBeacon() (*flow.SealedVersionBeacon, error) { return nil, nil }