github.com/koko1123/flow-go-1@v0.29.6/engine/verification/requester/requester_test.go

package requester_test

import (
	"sync"
	"testing"
	"time"

	"github.com/rs/zerolog"
	testifymock "github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	mockfetcher "github.com/koko1123/flow-go-1/engine/verification/fetcher/mock"
	"github.com/koko1123/flow-go-1/engine/verification/requester"
	vertestutils "github.com/koko1123/flow-go-1/engine/verification/utils/unittest"
	"github.com/koko1123/flow-go-1/model/chunks"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/model/messages"
	"github.com/koko1123/flow-go-1/model/verification"
	"github.com/koko1123/flow-go-1/module"
	flowmempool "github.com/koko1123/flow-go-1/module/mempool"
	mempool "github.com/koko1123/flow-go-1/module/mempool/mock"
	"github.com/koko1123/flow-go-1/module/mock"
	"github.com/koko1123/flow-go-1/module/trace"
	"github.com/koko1123/flow-go-1/network/channels"
	"github.com/koko1123/flow-go-1/network/mocknetwork"
	protocol "github.com/koko1123/flow-go-1/state/protocol/mock"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

// RequesterEngineTestSuite encapsulates data structures for running unit tests on the requester engine.
type RequesterEngineTestSuite struct {
	// modules
	log             zerolog.Logger
	handler         *mockfetcher.ChunkDataPackHandler // contains callbacks for handling received chunk data packs.
	pendingRequests *mempool.ChunkRequests            // used to store all the pending chunks that are assigned to this node
	state           *protocol.State                   // used to check the last sealed height
	con             *mocknetwork.Conduit              // used to send chunk data requests and receive responses
	tracer          module.Tracer
	metrics         *mock.VerificationMetrics

	// identities
	verIdentity *flow.Identity // verification node

	// parameters
	requestTargets uint64
	retryInterval  time.Duration // determines the retry interval for chunk data requests.
}

// setupTest initiates a test suite prior to each test.
func setupTest() *RequesterEngineTestSuite {
	r := &RequesterEngineTestSuite{
		log:             unittest.Logger(),
		tracer:          trace.NewNoopTracer(),
		metrics:         &mock.VerificationMetrics{},
		handler:         &mockfetcher.ChunkDataPackHandler{},
		retryInterval:   100 * time.Millisecond,
		requestTargets:  2,
		pendingRequests: &mempool.ChunkRequests{},
		state:           &protocol.State{},
		verIdentity:     unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)),
		con:             &mocknetwork.Conduit{},
	}

	return r
}

// newRequesterEngine returns a requester engine for testing.
func newRequesterEngine(t *testing.T, s *RequesterEngineTestSuite) *requester.Engine {
	net := &mocknetwork.Network{}
	// mocking the network registration of the engine
	net.On("Register", channels.RequestChunks, testifymock.Anything).
		Return(s.con, nil).
		Once()

	e, err := requester.New(s.log,
		s.state,
		net,
		s.tracer,
		s.metrics,
		s.pendingRequests,
		s.retryInterval,
		// requests are only qualified if their retryAfter has elapsed.
		requester.RetryAfterQualifier,
		// exponential backoff with a multiplier of 2, a minimum interval of a second, and
		// a maximum interval of an hour.
		flowmempool.ExponentialUpdater(2, time.Hour, time.Second),
		s.requestTargets)
	require.NoError(t, err)
	testifymock.AssertExpectationsForObjects(t, net)

	e.WithChunkDataPackHandler(s.handler)

	return e
}
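
// The configuration above pairs requester.RetryAfterQualifier with
// flowmempool.ExponentialUpdater(2, time.Hour, time.Second): per the comments, a request is re-dispatched only
// once its retry-after window has elapsed, and that window grows exponentially with a multiplier of 2, a minimum
// of one second, and a cap of one hour. The two helpers below are minimal sketches of those rules for illustration
// only; they are hypothetical and not part of the requester or mempool packages.
func exampleRetryAfterQualifier(lastAttempt time.Time, retryAfter time.Duration) bool {
	// a request qualifies for dispatch once lastAttempt + retryAfter lies in the past.
	return time.Now().After(lastAttempt.Add(retryAfter))
}

func exampleExponentialBackoff(attempts uint64) time.Duration {
	interval := time.Second // minimum interval
	for i := uint64(0); i < attempts; i++ {
		interval *= 2 // multiplier of 2 per attempt
		if interval >= time.Hour {
			return time.Hour // capped at the maximum interval
		}
	}
	return interval
}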

// TestHandleChunkDataPack_Request evaluates the happy path of submitting a request to the requester engine.
// The request is added to the pending requests mempool, and the metrics are updated.
func TestHandleChunkDataPack_Request(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	request := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(unittest.IdentifierFixture()))
	s.pendingRequests.On("Add", request).Return(true).Once()
	s.metrics.On("OnChunkDataPackRequestReceivedByRequester").Return().Once()

	e.Request(request)

	testifymock.AssertExpectationsForObjects(t, s.pendingRequests, s.metrics)
}

// TestHandleChunkDataPack_HappyPath evaluates the happy path of receiving a requested chunk data pack.
// The chunk data pack should be passed to the registered handler, and the resources should be cleaned up.
func TestHandleChunkDataPack_HappyPath(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	response := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture())
	request := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(response.ChunkDataPack.ChunkID))
	originID := unittest.IdentifierFixture()

	// we remove the pending request on receiving this response
	locators := chunks.LocatorMap{}
	locators[chunks.ChunkLocatorID(request.ResultID, request.Index)] = &chunks.Locator{
		ResultID: request.ResultID,
		Index:    request.Index,
	}
	s.pendingRequests.On("PopAll", response.ChunkDataPack.ChunkID).Return(locators, true).Once()

	s.handler.On("HandleChunkDataPack", originID, &verification.ChunkDataPackResponse{
		Locator: chunks.Locator{
			ResultID: request.ResultID,
			Index:    request.Index,
		},
		Cdp: &response.ChunkDataPack,
	}).Return().Once()
	s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once()
	s.metrics.On("OnChunkDataPackSentToFetcher").Return().Once()

	err := e.Process(channels.RequestChunks, originID, response)
	require.Nil(t, err)

	testifymock.AssertExpectationsForObjects(t, s.con, s.handler, s.pendingRequests, s.metrics)
}
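
// The happy-path test above hand-builds the chunks.LocatorMap that PopAll is expected to return: each pending
// request is keyed by chunks.ChunkLocatorID(resultID, index) and mapped to its chunks.Locator. A minimal sketch
// of that conversion for a whole request list (a hypothetical helper, mirroring the mocked behavior rather than
// production code):
func exampleLocatorMap(requests verification.ChunkDataPackRequestList) chunks.LocatorMap {
	locators := chunks.LocatorMap{}
	for _, request := range requests {
		locators[chunks.ChunkLocatorID(request.ResultID, request.Index)] = &chunks.Locator{
			ResultID: request.ResultID,
			Index:    request.Index,
		}
	}
	return locators
}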

// TestHandleChunkDataPack_HappyPath_Multiple evaluates the happy path of receiving several requested chunk data packs.
// Each chunk data pack should be handled once by being passed to the registered handler,
// the chunk ID and collection ID should match the response, and the resources should be cleaned up.
func TestHandleChunkDataPack_HappyPath_Multiple(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	// creates a list of chunk data pack responses
	count := 10
	requests := unittest.ChunkDataPackRequestListFixture(count)
	originID := unittest.IdentifierFixture()
	chunkIDs := toChunkIDs(t, requests)
	responses := unittest.ChunkDataResponseMessageListFixture(chunkIDs)

	// we remove the pending requests on receiving these responses
	mockPendingRequestsPopAll(t, s.pendingRequests, requests)
	// we pass each chunk data pack and its collection to the chunk data pack handler
	handlerWG := mockChunkDataPackHandler(t, s.handler, requests)

	s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Times(len(responses))
	s.metrics.On("OnChunkDataPackSentToFetcher").Return().Times(len(responses))

	for _, response := range responses {
		err := e.Process(channels.RequestChunks, originID, response)
		require.Nil(t, err)
	}

	unittest.RequireReturnsBefore(t, handlerWG.Wait, 100*time.Millisecond, "could not handle chunk data responses on time")
	testifymock.AssertExpectationsForObjects(t, s.con, s.metrics)
}

// TestHandleChunkDataPack_FailedRequestRemoval evaluates that failing to remove a received chunk data pack's request
// from memory terminates the handling of that chunk data pack without passing it to the handler.
// The request for a chunk data pack may already have been removed from memory when duplicate copies of a requested
// chunk data pack arrive concurrently. The mutex lock on the pending requests mempool then allows only one of those
// copies to remove the request and be passed to the handler, while the handling of the other copies terminates gracefully.
func TestHandleChunkDataPack_FailedRequestRemoval(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	response := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture())
	originID := unittest.IdentifierFixture()

	// however, by the time we try to remove it, the request is gone.
	// this can happen when duplicate chunk data packs arrive concurrently.
	// the concurrency is safe due to the pending requests mempool's mutex lock.
	s.pendingRequests.On("PopAll", response.ChunkDataPack.ChunkID).Return(nil, false).Once()
	s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once()

	err := e.Process(channels.RequestChunks, originID, response)
	require.Nil(t, err)

	testifymock.AssertExpectationsForObjects(t, s.pendingRequests, s.con, s.metrics)
	s.handler.AssertNotCalled(t, "HandleChunkDataPack")
}

// TestRequestPendingChunkSealedBlock evaluates that the requester engine drops pending requests for chunks belonging to
// sealed blocks, and also notifies the handler that the requested chunk has been sealed, so it no longer requests
// it from the network.
func TestRequestPendingChunkSealedBlock(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	// creates a single chunk request that belongs to a sealed height.
	agrees := unittest.IdentifierListFixture(2)
	disagrees := unittest.IdentifierListFixture(3)
	requests := unittest.ChunkDataPackRequestListFixture(1,
		unittest.WithHeight(5),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))
	vertestutils.MockLastSealedHeight(s.state, 10)
	s.pendingRequests.On("All").Return(requests.UniqueRequestInfo())
	// checks that the chunk data pack request is never attempted since its block has been sealed.
	s.metrics.On("SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester", uint64(0)).Return().Once()

	unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time")

	mockPendingRequestsPopAll(t, s.pendingRequests, requests)
	notifierWG := mockNotifyBlockSealedHandler(t, s.handler, requests)

	unittest.RequireReturnsBefore(t, notifierWG.Wait, time.Duration(2)*s.retryInterval, "could not notify the handler on time")

	unittest.RequireCloseBefore(t, e.Done(), time.Second, "could not stop engine on time")
	// the requester does not call Publish to disseminate the request for this chunk.
	s.con.AssertNotCalled(t, "Publish")
}
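
// The test above encodes the sealed-block policy exercised throughout this suite: on each timer cycle the
// requester compares a pending request's block height against the last sealed height, and requests whose block
// is already sealed are popped and reported to the handler instead of being published. A minimal sketch of that
// per-request decision (a hypothetical predicate; the tests only exercise heights strictly below and strictly
// above the sealed height):
func exampleDropAsSealed(requestHeight uint64, lastSealedHeight uint64) bool {
	return requestHeight <= lastSealedHeight
}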

// TestCompleteRequestingUnsealedChunkLifeCycle evaluates the complete life cycle of a chunk request received by the requester.
// The requester should submit the request to the network (on its timer overflow), receive the response back, and send it to
// the registered handler.
//
// It should also clean the request from memory.
func TestCompleteRequestingUnsealedChunkLifeCycle(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	sealedHeight := uint64(10)
	// Creates a single chunk request with its corresponding response.
	// The chunk belongs to an unsealed block.
	agrees := unittest.IdentifierListFixture(2)
	disagrees := unittest.IdentifierListFixture(3)
	requests := unittest.ChunkDataPackRequestListFixture(1,
		unittest.WithHeightGreaterThan(sealedHeight),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))
	response := unittest.ChunkDataResponseMsgFixture(requests[0].ChunkID)

	// mocks the requester pipeline
	vertestutils.MockLastSealedHeight(s.state, sealedHeight)
	s.pendingRequests.On("All").Return(requests.UniqueRequestInfo())
	handlerWG := mockChunkDataPackHandler(t, s.handler, requests)
	mockPendingRequestsPopAll(t, s.pendingRequests, requests)

	// makes all chunk requests qualified for dispatch instantly
	requestHistoryWG, updateHistoryWG := mockPendingRequestInfoAndUpdate(t,
		s.pendingRequests,
		requests,
		verification.ChunkDataPackRequestList{},
		verification.ChunkDataPackRequestList{},
		1)
	s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Times(len(requests))
	s.metrics.On("OnChunkDataPackRequestDispatchedInNetworkByRequester").Return().Times(len(requests))
	s.metrics.On("OnChunkDataPackSentToFetcher").Return().Times(len(requests))
	s.metrics.On("SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester", uint64(1)).Return().Once()

	unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time")

	// we wait until the engine submits the chunk request to the network, and then deliver the response.
	conduitWG := mockConduitForChunkDataPackRequest(t, s.con, requests, 1, func(request *messages.ChunkDataRequest) {
		err := e.Process(channels.RequestChunks, requests[0].Agrees[0], response)
		require.NoError(t, err)
	})
	unittest.RequireReturnsBefore(t, requestHistoryWG.Wait, time.Duration(2)*s.retryInterval, "could not check chunk requests qualification on time")
	unittest.RequireReturnsBefore(t, updateHistoryWG.Wait, s.retryInterval, "could not update chunk request history on time")
	unittest.RequireReturnsBefore(t, conduitWG.Wait, time.Duration(2)*s.retryInterval, "could not request chunks from network")
	unittest.RequireReturnsBefore(t, handlerWG.Wait, 100*time.Second, "could not handle chunk data responses on time")

	unittest.RequireCloseBefore(t, e.Done(), time.Second, "could not stop engine on time")
	testifymock.AssertExpectationsForObjects(t, s.metrics)
}

// TestRequestPendingChunkSealedBlock_Hybrid evaluates the situation where the requester has some pending chunk requests belonging to sealed blocks
// (i.e., sealed chunks), and some pending chunk requests belonging to unsealed blocks (i.e., unsealed chunks).
//
// On timer, the requester should submit the pending requests for unsealed chunks to the network, while dropping the requests for the
// sealed chunks and notifying the handler.
func TestRequestPendingChunkSealedBlock_Hybrid(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	sealedHeight := uint64(10)
	// creates 2 chunk data pack requests that belong to a sealed height, and
	// 3 that belong to an unsealed height.
	agrees := unittest.IdentifierListFixture(2)
	disagrees := unittest.IdentifierListFixture(3)
	sealedRequests := unittest.ChunkDataPackRequestListFixture(2,
		unittest.WithHeight(sealedHeight-1),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))
	unsealedRequests := unittest.ChunkDataPackRequestListFixture(3,
		unittest.WithHeightGreaterThan(sealedHeight),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))
	requests := append(sealedRequests, unsealedRequests...)

	vertestutils.MockLastSealedHeight(s.state, sealedHeight)
	s.pendingRequests.On("All").Return(requests.UniqueRequestInfo())

	// makes all (unsealed) chunk requests qualified for dispatch instantly
	requestHistoryWG, updateHistoryWG := mockPendingRequestInfoAndUpdate(t,
		s.pendingRequests,
		unsealedRequests,
		verification.ChunkDataPackRequestList{},
		verification.ChunkDataPackRequestList{},
		1)
	s.metrics.On("OnChunkDataPackRequestDispatchedInNetworkByRequester").Return().Times(len(unsealedRequests))
	// each unsealed height is requested only once, hence the maximum is updated only once from 0 -> 1
	s.metrics.On("SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester", testifymock.Anything).Return().Once()

	unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time")

	// sealed requests should be removed and the handler should be notified.
	mockPendingRequestsPopAll(t, s.pendingRequests, sealedRequests)
	notifierWG := mockNotifyBlockSealedHandler(t, s.handler, sealedRequests)
	// unsealed requests should be submitted to the network once
	conduitWG := mockConduitForChunkDataPackRequest(t, s.con, unsealedRequests, 1, func(*messages.ChunkDataRequest) {})

	unittest.RequireReturnsBefore(t, requestHistoryWG.Wait, time.Duration(2)*s.retryInterval, "could not check chunk requests qualification on time")
	unittest.RequireReturnsBefore(t, updateHistoryWG.Wait, s.retryInterval, "could not update chunk request history on time")
	unittest.RequireReturnsBefore(t, notifierWG.Wait, time.Duration(2)*s.retryInterval, "could not notify the handler on time")
	unittest.RequireReturnsBefore(t, conduitWG.Wait, time.Duration(2)*s.retryInterval, "could not request chunks from network")
	unittest.RequireCloseBefore(t, e.Done(), time.Second, "could not stop engine on time")

	testifymock.AssertExpectationsForObjects(t, s.metrics)
}

// TestReceivingChunkDataResponseForDuplicateChunkRequests evaluates the happy path of receiving a chunk data pack response
// for duplicate chunk data pack requests.
// On receiving the chunk data pack, the requester engine should send a chunk data response to the chunk handler for each
// of those pending duplicate chunk data requests.
// Note that by duplicate chunk data requests we mean chunk requests for the same chunk ID that belong to
// distinct execution results.
func TestReceivingChunkDataResponseForDuplicateChunkRequests(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	resultA, _, _, _ := vertestutils.ExecutionResultForkFixture(t)

	duplicateChunkID := resultA.Chunks[0].ID()
	responseA := unittest.ChunkDataResponseMsgFixture(duplicateChunkID)

	requestA := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(duplicateChunkID))
	requestB := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(duplicateChunkID))

	requests := verification.ChunkDataPackRequestList{requestA, requestB}
	originID := unittest.IdentifierFixture()

	mockPendingRequestsPopAll(t, s.pendingRequests, requests)
	handlerWG := mockChunkDataPackHandler(t, s.handler, requests)

	s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once()
	s.metrics.On("OnChunkDataPackSentToFetcher").Return().Twice()

	err := e.Process(channels.RequestChunks, originID, responseA)
	require.Nil(t, err)

	unittest.RequireReturnsBefore(t, handlerWG.Wait, time.Second, "could not handle chunk data responses on time")
	testifymock.AssertExpectationsForObjects(t, s.con, s.metrics)
}

// TestHandleChunkDataPack_DuplicateChunkIDs_Sealed evaluates that on receiving duplicate chunk data requests belonging to a sealed
// block, the requester engine calls the chunk handler once for each of those requests, notifying it of the sealed block.
//
// Note that by duplicate chunk data requests we mean chunk requests for the same chunk ID that belong to
// distinct execution results.
func TestHandleChunkDataPack_DuplicateChunkIDs_Sealed(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	// mocks the requester pipeline
	sealedHeight := uint64(10)
	vertestutils.MockLastSealedHeight(s.state, sealedHeight)

	resultA, _, _, _ := vertestutils.ExecutionResultForkFixture(t)
	duplicateChunkID := resultA.Chunks[0].ID()
	requestA := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(duplicateChunkID), unittest.WithHeight(uint64(sealedHeight-1)))
	requestB := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(duplicateChunkID), unittest.WithHeight(uint64(sealedHeight-1)))
	requests := verification.ChunkDataPackRequestList{requestA, requestB}

	// the pending requests are popped once the requester observes that their block has been sealed
	s.pendingRequests.On("All").Return(requests.UniqueRequestInfo())
	mockPendingRequestsPopAll(t, s.pendingRequests, requests)
	notifierWG := mockNotifyBlockSealedHandler(t, s.handler, requests)

	// checks that the chunk data pack request is never attempted since its block has been sealed.
	s.metrics.On("SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester", uint64(0)).Return().Once()

	unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time")

	unittest.RequireReturnsBefore(t, notifierWG.Wait, time.Duration(2)*s.retryInterval, "could not notify the handler on time")

	unittest.RequireCloseBefore(t, e.Done(), time.Second, "could not stop engine on time")

	testifymock.AssertExpectationsForObjects(t, s.metrics)
	// the requester does not call Publish to disseminate the request for this chunk.
	s.con.AssertNotCalled(t, "Publish")
}
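
// The two duplicate-request tests above rely on the distinction spelled out in their comments: duplicate chunk
// data pack requests share a chunk ID but originate from distinct execution results, so they map to distinct
// chunk locators and therefore to distinct handler invocations. A minimal sketch of that distinction (a
// hypothetical helper for illustration only):
func exampleAreDuplicateRequests(requestA, requestB *verification.ChunkDataPackRequest) bool {
	sameChunkID := requestA.ChunkID == requestB.ChunkID
	distinctLocators := chunks.ChunkLocatorID(requestA.ResultID, requestA.Index) !=
		chunks.ChunkLocatorID(requestB.ResultID, requestB.Index)
	return sameChunkID && distinctLocators
}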

// TestRequestPendingChunkDataPack evaluates the happy path of having pending chunk data pack requests.
// The chunks belong to non-sealed blocks.
// On the timer interval, the chunk requests should be dispatched to the set of execution nodes that agree with the execution
// result the chunk belongs to.
func TestRequestPendingChunkDataPack(t *testing.T) {
	testRequestPendingChunkDataPack(t, 1, 1)   // one request, one attempt
	testRequestPendingChunkDataPack(t, 10, 1)  // 10 requests, one attempt each
	testRequestPendingChunkDataPack(t, 10, 10) // 10 requests, 10 attempts each
}

// testRequestPendingChunkDataPack is a test helper that evaluates the happy path of having a number of pending chunk requests.
// The test waits long enough that the required number of attempts is made on the chunks.
// The chunks belong to non-sealed blocks.
func testRequestPendingChunkDataPack(t *testing.T, count int, attempts int) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	// creates `count` chunk requests, each with 2 agree targets and 3 disagree targets.
	// each chunk belongs to a block at a height greater than 5, while the last sealed block is at height 5, so
	// the chunk requests should be dispatched.
	agrees := unittest.IdentifierListFixture(2)
	disagrees := unittest.IdentifierListFixture(3)
	requests := unittest.ChunkDataPackRequestListFixture(count,
		unittest.WithHeightGreaterThan(5),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))
	vertestutils.MockLastSealedHeight(s.state, 5)
	s.pendingRequests.On("All").Return(requests.UniqueRequestInfo())

	// makes all chunk requests qualified for dispatch instantly
	requestHistory, updateHistoryWG := mockPendingRequestInfoAndUpdate(t,
		s.pendingRequests,
		requests,
		verification.ChunkDataPackRequestList{},
		verification.ChunkDataPackRequestList{},
		attempts)

	s.metrics.On("OnChunkDataPackRequestDispatchedInNetworkByRequester").Return().Times(count * attempts)
	s.metrics.On("SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester", testifymock.Anything).Run(func(args testifymock.Arguments) {
		actualAttempts, ok := args[0].(uint64)
		require.True(t, ok)

		require.LessOrEqual(t, actualAttempts, uint64(attempts))
	}).Return().Times(attempts)

	unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time")

	conduitWG := mockConduitForChunkDataPackRequest(t, s.con, requests, attempts, func(*messages.ChunkDataRequest) {})
	unittest.RequireReturnsBefore(t, requestHistory.Wait, time.Duration(2*attempts)*s.retryInterval, "could not check chunk requests qualification on time")
	unittest.RequireReturnsBefore(t, updateHistoryWG.Wait, s.retryInterval, "could not update chunk request history on time")
	unittest.RequireReturnsBefore(t, conduitWG.Wait, time.Duration(2*attempts)*s.retryInterval, "could not request and handle chunks on time")

	unittest.RequireCloseBefore(t, e.Done(), time.Second, "could not stop engine on time")
	testifymock.AssertExpectationsForObjects(t, s.pendingRequests, s.metrics)
}

// TestDispatchingRequests_Hybrid evaluates the behavior of the requester when it has requests with different dispatch timelines:
// some requests should be dispatched to the network instantly, some others are old and planned for late dispatch (outside this
// test's timeline), and some others should not be dispatched at all since they are no longer needed (and will be cleaned up on
// the next iteration).
//
// The test evaluates that only the instantly planned requests are dispatched to the network.
func TestDispatchingRequests_Hybrid(t *testing.T) {
	s := setupTest()
	e := newRequesterEngine(t, s)

	// Generates 30 requests, 10 of each type.
	//
	// The requests belong to chunks of blocks at heights greater than 5, while the last sealed block is at height 5, so
	// the chunk requests are eligible for dispatch.
	agrees := unittest.IdentifierListFixture(2)
	disagrees := unittest.IdentifierListFixture(3)
	vertestutils.MockLastSealedHeight(s.state, 5)
	// models new requests that are just added to the mempool and are ready to be dispatched.
	instantQualifiedRequests := unittest.ChunkDataPackRequestListFixture(10,
		unittest.WithHeightGreaterThan(5),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))
	// models old requests that have stayed in the mempool for a long time and are not dispatched anytime soon.
	lateQualifiedRequests := unittest.ChunkDataPackRequestListFixture(10,
		unittest.WithHeightGreaterThan(5),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))
	// models requests whose chunk data packs arrive during dispatch processing and hence
	// no longer need to be dispatched.
	disQualifiedRequests := unittest.ChunkDataPackRequestListFixture(10,
		unittest.WithHeightGreaterThan(5),
		unittest.WithAgrees(agrees),
		unittest.WithDisagrees(disagrees))

	allRequests := append(instantQualifiedRequests, lateQualifiedRequests...)
	allRequests = append(allRequests, disQualifiedRequests...)
	s.pendingRequests.On("All").Return(allRequests.UniqueRequestInfo())

	attempts := 10 // waits for 10 iterations of the requester's onTimer cycle.
	requestHistoryWG, updateHistoryWG := mockPendingRequestInfoAndUpdate(t,
		s.pendingRequests,
		instantQualifiedRequests,
		lateQualifiedRequests,
		disQualifiedRequests,
		attempts)

	unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time")

	// mocks that only instantly qualified requests are dispatched to the network.
	conduitWG := mockConduitForChunkDataPackRequest(t, s.con, instantQualifiedRequests, attempts, func(*messages.ChunkDataRequest) {})
	s.metrics.On("OnChunkDataPackRequestDispatchedInNetworkByRequester").Return().Times(len(instantQualifiedRequests) * attempts)
	// each instantly qualified one is requested only once, hence the maximum is updated only once from 0 -> 1, and
	// is kept at 1 during all cycles of this test.
	s.metrics.On("SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester", uint64(1)).Return()

	unittest.RequireReturnsBefore(t, requestHistoryWG.Wait, time.Duration(2*attempts)*s.retryInterval,
		"could not check chunk requests qualification on time")
	unittest.RequireReturnsBefore(t, updateHistoryWG.Wait, time.Duration(2*attempts)*s.retryInterval,
		"could not update chunk request history on time")
	unittest.RequireReturnsBefore(t, conduitWG.Wait, time.Duration(2*attempts)*s.retryInterval,
		"could not request and handle chunks on time")
	unittest.RequireCloseBefore(t, e.Done(), time.Second, "could not stop engine on time")

	testifymock.AssertExpectationsForObjects(t, s.pendingRequests, s.metrics)
}
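
// The hybrid dispatching test above leans on the qualification rule sketched after newRequesterEngine: a pending
// request is dispatched only once its last attempt plus its retry-after duration has elapsed. With the values
// mocked by mockPendingRequestInfoAndUpdate below (last tried an hour ago with a 1ms retry-after for instantly
// qualified requests, versus last tried just now with a 1h retry-after for late qualified ones), the two classes
// resolve as follows (illustrative only, reusing the hypothetical exampleRetryAfterQualifier from above):
func exampleHybridQualification() (instant bool, late bool) {
	instant = exampleRetryAfterQualifier(time.Now().Add(-1*time.Hour), 1*time.Millisecond) // true
	late = exampleRetryAfterQualifier(time.Now(), time.Hour)                               // false
	return instant, late
}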

// toChunkIDs is a test helper that extracts chunk IDs from chunk data pack requests.
func toChunkIDs(t *testing.T, requests verification.ChunkDataPackRequestList) flow.IdentifierList {
	var chunkIDs flow.IdentifierList
	for _, request := range requests {
		require.NotContains(t, chunkIDs, request.ChunkID, "duplicate chunk ID found in fixture")
		chunkIDs = append(chunkIDs, request.ChunkID)
	}
	return chunkIDs
}

// mockConduitForChunkDataPackRequest mocks the given conduit for requesting chunk data packs for the given chunk IDs.
// Each chunk should be requested exactly `count` times.
// Upon request, the given request handler is invoked.
// Also, the entire process should not take longer than the specified timeout.
func mockConduitForChunkDataPackRequest(t *testing.T,
	con *mocknetwork.Conduit,
	reqList verification.ChunkDataPackRequestList,
	count int,
	requestHandler func(*messages.ChunkDataRequest)) *sync.WaitGroup {

	// counts the number of requests for each chunk data pack
	reqCount := make(map[flow.Identifier]int)
	reqMap := make(map[flow.Identifier]*verification.ChunkDataPackRequest)
	for _, request := range reqList {
		reqCount[request.ChunkID] = 0
		reqMap[request.ChunkID] = request
	}
	wg := &sync.WaitGroup{}

	// guards against race conditions in concurrent invocations of Run
	mutex := &sync.Mutex{}
	wg.Add(count * len(reqList))

	con.On("Publish", testifymock.Anything, testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			mutex.Lock()
			defer mutex.Unlock()

			// the chunk ID requested from the network should belong to the list of chunk requests the engine received.
			// also, it should not be requested more than the maximum number of times (count).
			req, ok := args[0].(*messages.ChunkDataRequest)
			require.True(t, ok)
			require.True(t, reqList.ContainsChunkID(req.ChunkID))
			require.LessOrEqual(t, reqCount[req.ChunkID], count)
			reqCount[req.ChunkID]++

			// requested chunk IDs should only be sent to agreeing execution nodes
			target1, ok := args[1].(flow.Identifier)
			require.True(t, ok)
			require.Contains(t, reqMap[req.ChunkID].Agrees, target1)

			target2, ok := args[2].(flow.Identifier)
			require.True(t, ok)
			require.Contains(t, reqMap[req.ChunkID].Agrees, target2)

			go func() {
				requestHandler(req)
				wg.Done()
			}()

		}).Return(nil)

	return wg
}

// mockChunkDataPackHandler mocks the chunk data pack handler for receiving a set of chunk data pack responses.
// It evaluates that each (chunkIndex, resultID) pair is passed exactly once.
func mockChunkDataPackHandler(t *testing.T, handler *mockfetcher.ChunkDataPackHandler, requests verification.ChunkDataPackRequestList) *sync.WaitGroup {
	handledLocators := make(map[flow.Identifier]struct{})

	wg := sync.WaitGroup{}
	wg.Add(len(requests))
	handler.On("HandleChunkDataPack", testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			_, ok := args[0].(flow.Identifier)
			require.True(t, ok)
			response, ok := args[1].(*verification.ChunkDataPackResponse)
			require.True(t, ok)

			// we should have already requested this chunk data pack.
			require.True(t, requests.ContainsLocator(response.ResultID, response.Index))
			require.True(t, requests.ContainsChunkID(response.Cdp.ChunkID))

			// invocations should be distinct per chunk locator
			locatorID := chunks.ChunkLocatorID(response.ResultID, response.Index)
			_, ok = handledLocators[locatorID]
			require.False(t, ok)

			handledLocators[locatorID] = struct{}{}

			wg.Done()
		}).Return()

	return &wg
}

// mockNotifyBlockSealedHandler mocks the chunk data pack handler for being notified that a set of chunks has been sealed.
// It evaluates that each chunk locator is notified only once.
func mockNotifyBlockSealedHandler(t *testing.T, handler *mockfetcher.ChunkDataPackHandler, requests verification.ChunkDataPackRequestList) *sync.WaitGroup {

	wg := &sync.WaitGroup{}
	wg.Add(len(requests))
	// the map keeps track of distinct invocations per chunk locator
	seen := make(map[flow.Identifier]struct{})
	handler.On("NotifyChunkDataPackSealed", testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			chunkIndex, ok := args[0].(uint64)
			require.True(t, ok)
			resultID, ok := args[1].(flow.Identifier)
			require.True(t, ok)

			// we should have already requested this chunk data pack.
			require.True(t, requests.ContainsLocator(resultID, chunkIndex))

			// invocations should be distinct per chunk locator
			locatorID := chunks.ChunkLocatorID(resultID, chunkIndex)
			_, ok = seen[locatorID]
			require.False(t, ok)
			seen[locatorID] = struct{}{}

			wg.Done()
		}).Return()

	return wg
}

// mockPendingRequestsPopAll mocks the chunk requests mempool so that all requests associated with a given
// chunk ID can be popped only once.
func mockPendingRequestsPopAll(t *testing.T, pendingRequests *mempool.ChunkRequests, requests verification.ChunkDataPackRequestList) {
	// the map keeps track of distinct invocations per chunk ID
	seen := make(map[flow.Identifier]struct{})

	pendingRequests.On("PopAll", testifymock.Anything).
		Return(
			func(chunkID flow.Identifier) chunks.LocatorMap {
				locators := make(chunks.LocatorMap)

				// the chunk ID must not have been seen before
				_, ok := seen[chunkID]
				require.False(t, ok)

				for _, request := range requests {
					if request.ChunkID == chunkID {
						locator := request.Locator
						locators[locator.ID()] = &locator
					}
				}

				seen[chunkID] = struct{}{}
				return locators
			},
			func(chunkID flow.Identifier) bool {
				for _, request := range requests {
					if request.ChunkID == chunkID {
						return true
					}
				}

				return false
			},
		)
}

// mockPendingRequestInfoAndUpdate mocks the pending requests mempool regarding three sets of chunk IDs: the instantly
// qualified, late qualified, and disqualified ones.
// The chunk IDs in the instantly qualified requests are instantly qualified for dispatch in the networking layer.
// The chunk IDs in the late qualified requests are postponed to a much later time for dispatch. The postponement is set so long
// that they never get the chance to be dispatched within the test time, e.g., 1 hour.
// The chunk IDs in the disqualified requests are not dispatched at all.
//
// The disqualified ones represent the set of chunk requests that are cleaned from memory during the requester engine's
// onTimer iteration, and are no longer needed.
func mockPendingRequestInfoAndUpdate(t *testing.T,
	pendingRequests *mempool.ChunkRequests,
	instantQualifiedReqs verification.ChunkDataPackRequestList,
	lateQualifiedReqs verification.ChunkDataPackRequestList,
	disQualifiedReqs verification.ChunkDataPackRequestList,
	attempts int) (*sync.WaitGroup, *sync.WaitGroup) {

	historyWG := &sync.WaitGroup{}

	// for the purpose of this test, and since the mempool is mocked, we assume disqualified requests reside in the
	// mempool, so their qualification is checked (and rejected) on each attempt iteration.
	totalRequestHistory := attempts * (len(instantQualifiedReqs) + len(lateQualifiedReqs) + len(disQualifiedReqs))
	historyWG.Add(totalRequestHistory)

	pendingRequests.On("RequestHistory", testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			// type assertion of the input.
			chunkID, ok := args[0].(flow.Identifier)
			require.True(t, ok)

			// the chunk ID should be one of the expected ones.
			require.True(t,
				instantQualifiedReqs.ContainsChunkID(chunkID) ||
					lateQualifiedReqs.ContainsChunkID(chunkID) ||
					disQualifiedReqs.ContainsChunkID(chunkID))

			historyWG.Done()

		}).Return(
		// number of attempts
		func(chunkID flow.Identifier) uint64 {
			if instantQualifiedReqs.ContainsChunkID(chunkID) || lateQualifiedReqs.ContainsChunkID(chunkID) {
				return uint64(1)
			}

			return uint64(0)

		}, // last tried timestamp
		func(chunkID flow.Identifier) time.Time {
			if instantQualifiedReqs.ContainsChunkID(chunkID) {
				// mocks the last try as long enough ago that they instantly get qualified.
				return time.Now().Add(-1 * time.Hour)
			}

			if lateQualifiedReqs.ContainsChunkID(chunkID) {
				return time.Now()
			}

			return time.Time{}
		}, // retry-after duration
		func(chunkID flow.Identifier) time.Duration {
			if instantQualifiedReqs.ContainsChunkID(chunkID) {
				// mocks a very short retry-after so they instantly get qualified.
				return 1 * time.Millisecond
			}

			if lateQualifiedReqs.ContainsChunkID(chunkID) {
				// mocks a long retry-after so they do not qualify anytime soon.
				return time.Hour
			}

			return 0

		}, // request info existence
		func(chunkID flow.Identifier) bool {
			if instantQualifiedReqs.ContainsChunkID(chunkID) || lateQualifiedReqs.ContainsChunkID(chunkID) {
				return true
			}

			return false
		},
	)

	updateWG := &sync.WaitGroup{}
	updateWG.Add(len(instantQualifiedReqs) * attempts)
	pendingRequests.On("UpdateRequestHistory", testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			// type assertion of the inputs.
			chunkID, ok := args[0].(flow.Identifier)
			require.True(t, ok)

			_, ok = args[1].(flowmempool.ChunkRequestHistoryUpdaterFunc)
			require.True(t, ok)

			// checks that only instantly qualified chunk requests reach this step,
			// i.e., the invocation of UpdateRequestHistory.
			require.True(t, instantQualifiedReqs.ContainsChunkID(chunkID))
			require.False(t, lateQualifiedReqs.ContainsChunkID(chunkID))
			require.False(t, disQualifiedReqs.ContainsChunkID(chunkID))

			updateWG.Done()

		}).
		Return(uint64(1), time.Now(), 1*time.Millisecond, true)

	return historyWG, updateWG
}