github.com/onflow/flow-go@v0.33.17/engine/verification/utils/unittest/helper.go

package verificationtest

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/stretchr/testify/assert"
	testifymock "github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/crypto"
	"github.com/onflow/flow-go/engine/testutil"
	enginemock "github.com/onflow/flow-go/engine/testutil/mock"
	"github.com/onflow/flow-go/engine/verification/assigner/blockconsumer"
	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/flow/filter"
	"github.com/onflow/flow-go/model/messages"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/module/mock"
	msig "github.com/onflow/flow-go/module/signature"
	"github.com/onflow/flow-go/module/trace"
	"github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/mocknetwork"
	"github.com/onflow/flow-go/network/stub"
	"github.com/onflow/flow-go/state/protocol"
	mockprotocol "github.com/onflow/flow-go/state/protocol/mock"
	"github.com/onflow/flow-go/utils/logging"
	"github.com/onflow/flow-go/utils/unittest"
)

// MockChunkDataProviderFunc is a test helper function encapsulating the logic of whether to reply to a chunk data pack request.
type MockChunkDataProviderFunc func(*testing.T, CompleteExecutionReceiptList, flow.Identifier, flow.Identifier, network.Conduit) bool
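// Compile-time checks (added for illustration) that the ready-made reply policies
// defined below satisfy MockChunkDataProviderFunc.
var (
	_ MockChunkDataProviderFunc = RespondChunkDataPackRequestImmediately
	_ MockChunkDataProviderFunc = RespondChunkDataPackRequestAfterNTrials(1)
)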
// SetupChunkDataPackProvider creates and returns an execution node that only has a chunk data pack provider engine.
//
// The mock chunk provider engine replies to chunk data pack requests by invoking the injected provider method. All chunk data pack
// requests must come from a verification node and must carry one of the assigned chunk IDs; otherwise, the test fails.
func SetupChunkDataPackProvider(t *testing.T,
	hub *stub.Hub,
	exeIdentity *flow.Identity,
	participants flow.IdentityList,
	chainID flow.ChainID,
	completeERs CompleteExecutionReceiptList,
	assignedChunkIDs flow.IdentifierList,
	provider MockChunkDataProviderFunc) (*enginemock.GenericNode, *mocknetwork.Engine, *sync.WaitGroup) {

	exeNode := testutil.GenericNodeFromParticipants(t, hub, exeIdentity, participants, chainID)
	exeEngine := new(mocknetwork.Engine)

	exeChunkDataConduit, err := exeNode.Net.Register(channels.ProvideChunks, exeEngine)
	assert.Nil(t, err)

	replied := make(map[flow.Identifier]struct{})

	wg := &sync.WaitGroup{}
	wg.Add(len(assignedChunkIDs))

	mu := &sync.Mutex{} // making testify Run thread-safe

	exeEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			originID, ok := args[1].(flow.Identifier)
			require.True(t, ok)
			// request should be dispatched by a verification node.
			require.Contains(t, participants.Filter(filter.HasRole(flow.RoleVerification)).NodeIDs(), originID)

			req, ok := args[2].(*messages.ChunkDataRequest)
			require.True(t, ok)
			require.Contains(t, assignedChunkIDs, req.ChunkID) // only assigned chunks should be requested.

			shouldReply := provider(t, completeERs, req.ChunkID, originID, exeChunkDataConduit)
			_, alreadyReplied := replied[req.ChunkID]
			if shouldReply && !alreadyReplied {
				/*
					the wait group keeps track of unique chunk requests served;
					we mark it done only upon the first successful reply for a chunk.
				*/
				wg.Done()
				replied[req.ChunkID] = struct{}{}
			}
		}).Return(nil)

	return &exeNode, exeEngine, wg
}

// RespondChunkDataPackRequestImmediately immediately qualifies a chunk data request for reply by the chunk data provider.
func RespondChunkDataPackRequestImmediately(t *testing.T,
	completeERs CompleteExecutionReceiptList,
	chunkID flow.Identifier,
	verID flow.Identifier,
	con network.Conduit) bool {

	// finds the chunk data pack of the requested chunk and sends it back.
	res := completeERs.ChunkDataResponseOf(t, chunkID)

	err := con.Unicast(res, verID)
	assert.Nil(t, err)

	log.Debug().
		Hex("origin_id", logging.ID(verID)).
		Hex("chunk_id", logging.ID(chunkID)).
		Msg("chunk data pack request answered by provider")

	return true
}

// RespondChunkDataPackRequestAfterNTrials only qualifies a chunk data request for reply by the chunk data provider
// once that chunk has been requested n times.
func RespondChunkDataPackRequestAfterNTrials(n int) MockChunkDataProviderFunc {
	tryCount := make(map[flow.Identifier]int)

	return func(t *testing.T, completeERs CompleteExecutionReceiptList, chunkID flow.Identifier, verID flow.Identifier, con network.Conduit) bool {
		tryCount[chunkID]++

		if tryCount[chunkID] >= n {
			// finds the chunk data pack of the requested chunk and sends it back.
			res := completeERs.ChunkDataResponseOf(t, chunkID)

			err := con.Unicast(res, verID)
			assert.Nil(t, err)

			log.Debug().
				Hex("origin_id", logging.ID(verID)).
				Hex("chunk_id", logging.ID(chunkID)).
				Int("trial_time", tryCount[chunkID]).
				Msg("chunk data pack request answered by provider")

			return true
		}

		return false
	}
}
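// A usage sketch for the retry policy above (hypothetical values, not part of the
// original suite): a provider built with n = 3 leaves the first two requests for a
// chunk unanswered and replies on the third, which exercises the requester's retry
// logic:
//
//	provider := RespondChunkDataPackRequestAfterNTrials(3)
//	provider(t, completeERs, chunkID, verID, con) // false: trial 1, no reply
//	provider(t, completeERs, chunkID, verID, con) // false: trial 2, no reply
//	provider(t, completeERs, chunkID, verID, con) // true: trial 3, unicasts the chunk data pack to verID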
// SetupMockConsensusNode creates and returns a mock consensus node (conIdentity) and its registered engine in the
// network (hub). It mocks the Process method of the consensus engine to receive messages from the given
// verification nodes (verIdentities) and evaluates whether each is a result approval for a chunk assigned to that verifier node.
func SetupMockConsensusNode(t *testing.T,
	log zerolog.Logger,
	hub *stub.Hub,
	conIdentity *flow.Identity,
	verIdentities flow.IdentityList,
	othersIdentity flow.IdentityList,
	completeERs CompleteExecutionReceiptList,
	chainID flow.ChainID,
	assignedChunkIDs flow.IdentifierList) (*enginemock.GenericNode, *mocknetwork.Engine, *sync.WaitGroup) {

	lg := log.With().Str("role", "mock-consensus").Logger()

	wg := &sync.WaitGroup{}
	// each verification node is expected to issue one result approval per assigned chunk,
	// and there are `len(verIdentities)`-many verification nodes,
	// so there is a total of len(verIdentities) * len(assignedChunkIDs) expected
	// result approvals.
	wg.Add(len(verIdentities) * len(assignedChunkIDs))

	// mocks the consensus node with a generic node and a mocked engine to assert
	// that the result approval is broadcast.
	conNode := testutil.GenericNodeFromParticipants(t, hub, conIdentity, othersIdentity, chainID)
	conEngine := new(mocknetwork.Engine)

	// map from verification node ID --> set of seen result approval IDs
	resultApprovalSeen := make(map[flow.Identifier]map[flow.Identifier]struct{})
	for _, verIdentity := range verIdentities {
		resultApprovalSeen[verIdentity.NodeID] = make(map[flow.Identifier]struct{})
	}

	// creates a hasher for SPoCK
	hasher := msig.NewBLSHasher(msig.SPOCKTag)
	mu := &sync.Mutex{} // making testify mock thread-safe

	conEngine.On("Process", testifymock.AnythingOfType("channels.Channel"), testifymock.Anything, testifymock.Anything).
		Run(func(args testifymock.Arguments) {
			mu.Lock()
			defer mu.Unlock()

			originID, ok := args[1].(flow.Identifier)
			assert.True(t, ok)

			resultApproval, ok := args[2].(*flow.ResultApproval)
			assert.True(t, ok)

			lg.Debug().
				Hex("result_approval_id", logging.ID(resultApproval.ID())).
				Msg("result approval received")

			// asserts that the result approval has not been seen from this verification node before.
			_, ok = resultApprovalSeen[originID][resultApproval.ID()]
			assert.False(t, ok)

			// marks result approval as seen
			resultApprovalSeen[originID][resultApproval.ID()] = struct{}{}

			// result approval should belong to a chunk assigned to the verification node.
			chunk := completeERs.ChunkOf(t, resultApproval.Body.ExecutionResultID, resultApproval.Body.ChunkIndex)
			assert.Contains(t, assignedChunkIDs, chunk.ID())

			// verifies the SPoCK proof of the result approval
			// against the SPoCK secret of the execution result
			//
			// retrieves public key of verification node
			var pk crypto.PublicKey
			found := false
			for _, identity := range verIdentities {
				if originID == identity.NodeID {
					pk = identity.StakingPubKey
					found = true
				}
			}
			require.True(t, found)

			// verifies the SPoCK proof
			valid, err := crypto.SPOCKVerifyAgainstData(
				pk,
				resultApproval.Body.Spock,
				completeERs.ReceiptDataOf(t, chunk.ID()).SpockSecrets[resultApproval.Body.ChunkIndex],
				hasher,
			)
			assert.NoError(t, err)

			if !valid {
				// when the chunk verifier returns a chunk fault, a placeholder
				// signature is generated for that chunk.
				isChunkFaultSignature, err := crypto.SPOCKVerifyAgainstData(
					pk,
					resultApproval.Body.Spock,
					nil, // chunk fault has no SPoCK secret
					hasher,
				)
				assert.NoError(t, err)

				if isChunkFaultSignature {
					assert.Fail(t, "chunk verifier returned chunk fault")
				} else {
					assert.Fail(t, "spock secret mismatch")
				}
			}

			wg.Done()
		}).Return(nil)

	_, err := conNode.Net.Register(channels.ReceiveApprovals, conEngine)
	assert.Nil(t, err)

	return &conNode, conEngine, wg
}

// isSystemChunk returns true if the index corresponds to the system chunk, i.e., the last chunk in
// the receipt.
func isSystemChunk(index uint64, chunkNum int) bool {
	return int(index) == chunkNum-1
}

func CreateExecutionResult(blockID flow.Identifier, options ...func(result *flow.ExecutionResult, assignments *chunks.Assignment)) (*flow.ExecutionResult, *chunks.Assignment) {
	result := &flow.ExecutionResult{
		BlockID: blockID,
		Chunks:  flow.ChunkList{},
	}
	assignments := chunks.NewAssignment()

	for _, option := range options {
		option(result, assignments)
	}
	return result, assignments
}

func WithChunks(setAssignees ...func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk) func(*flow.ExecutionResult, *chunks.Assignment) {
	return func(result *flow.ExecutionResult, assignment *chunks.Assignment) {
		for i, setAssignee := range setAssignees {
			chunk := setAssignee(result.BlockID, uint64(i), assignment)
			result.Chunks.Insert(chunk)
		}
	}
}

func ChunkWithIndex(blockID flow.Identifier, index int) *flow.Chunk {
	chunk := &flow.Chunk{
		Index: uint64(index),
		ChunkBody: flow.ChunkBody{
			CollectionIndex: uint(index),
			EventCollection: blockID, // ensures chunks from different blocks with the same index have different chunk IDs
			BlockID:         blockID,
		},
		EndState: unittest.StateCommitmentFixture(),
	}
	return chunk
}

func WithAssignee(assignee flow.Identifier) func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk {
	return func(blockID flow.Identifier, index uint64, assignment *chunks.Assignment) *flow.Chunk {
		chunk := ChunkWithIndex(blockID, int(index))
		fmt.Printf("with assignee: %v, chunk id: %v\n", index, chunk.ID())
		assignment.Add(chunk, flow.IdentifierList{assignee})
		return chunk
	}
}

func FromChunkID(chunkID flow.Identifier) flow.ChunkDataPack {
	return flow.ChunkDataPack{
		ChunkID: chunkID,
	}
}
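// resultWithTwoAssignedChunks is a minimal sketch (hypothetical helper, added for
// illustration; not used by the original tests) of how the builders above compose:
// CreateExecutionResult applies WithChunks, which indexes each chunk and delegates
// to WithAssignee to record the assignment.
func resultWithTwoAssignedChunks(blockID flow.Identifier, verID flow.Identifier) (*flow.ExecutionResult, *chunks.Assignment) {
	return CreateExecutionResult(
		blockID,
		WithChunks(
			WithAssignee(verID), // chunk at index 0, assigned to verID
			WithAssignee(verID), // chunk at index 1, assigned to verID
		),
	)
}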
type ChunkAssignerFunc func(chunkIndex uint64, chunks int) bool

// MockChunkAssignmentFixture is a test helper that mocks a chunk assigner for a set of verification nodes for the
// execution results in the given complete execution receipts, based on the given chunk assigner function.
//
// It returns the list of chunk locator IDs assigned to the input verification nodes, as well as the list of their chunk IDs.
// All verification nodes are assigned the same chunks.
func MockChunkAssignmentFixture(chunkAssigner *mock.ChunkAssigner,
	verIds flow.IdentityList,
	completeERs CompleteExecutionReceiptList,
	isAssigned ChunkAssignerFunc) (flow.IdentifierList, flow.IdentifierList) {

	expectedLocatorIds := flow.IdentifierList{}
	expectedChunkIds := flow.IdentifierList{}

	// keeps track of duplicate results (receipts that share the same result)
	visited := make(map[flow.Identifier]struct{})

	for _, completeER := range completeERs {
		for _, receipt := range completeER.Receipts {
			a := chunks.NewAssignment()

			_, duplicate := visited[receipt.ExecutionResult.ID()]
			if duplicate {
				// skips mocking chunk assignment for duplicate results
				continue
			}

			for _, chunk := range receipt.ExecutionResult.Chunks {
				if isAssigned(chunk.Index, len(receipt.ExecutionResult.Chunks)) {
					locatorID := chunks.Locator{
						ResultID: receipt.ExecutionResult.ID(),
						Index:    chunk.Index,
					}.ID()
					expectedLocatorIds = append(expectedLocatorIds, locatorID)
					expectedChunkIds = append(expectedChunkIds, chunk.ID())
					a.Add(chunk, verIds.NodeIDs())
				}
			}

			chunkAssigner.On("Assign", &receipt.ExecutionResult, completeER.ContainerBlock.ID()).Return(a, nil)
			visited[receipt.ExecutionResult.ID()] = struct{}{}
		}
	}

	return expectedLocatorIds, expectedChunkIds
}

// EvenChunkIndexAssigner is a helper function that returns true for the even indices in [0, chunkNum-1].
// It also returns true if the index corresponds to the system chunk.
func EvenChunkIndexAssigner(index uint64, chunkNum int) bool {
	return index%2 == 0 || isSystemChunk(index, chunkNum)
}
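// For illustration, with chunkNum = 4 (indices 0..3): indices 0 and 2 qualify for
// being even, index 3 qualifies as the system chunk, and index 1 is the only one
// skipped:
//
//	EvenChunkIndexAssigner(0, 4) // true  (even)
//	EvenChunkIndexAssigner(1, 4) // false
//	EvenChunkIndexAssigner(2, 4) // true  (even)
//	EvenChunkIndexAssigner(3, 4) // true  (system chunk)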
// ExtendStateWithFinalizedBlocks is a test helper to extend the protocol state with finalized blocks and return the list of blocks.
// It receives a list of complete execution receipt fixtures in the form of (R1,1 <- R1,2 <- ... <- C1) <- (R2,1 <- R2,2 <- ... <- C2) <- ...,
// where R and C are the reference and container blocks.
// Reference blocks contain guarantees, and container blocks contain execution receipts for their preceding reference blocks,
// e.g., C1 contains receipts for R1,1, R1,2, etc.
// Note: for the sake of simplicity we do not include guarantees in the container blocks for now.
func ExtendStateWithFinalizedBlocks(t *testing.T, completeExecutionReceipts CompleteExecutionReceiptList,
	state protocol.ParticipantState) []*flow.Block {
	blocks := make([]*flow.Block, 0)

	// tracks duplicate reference blocks;
	// receipts may share the same execution result, in which case
	// their reference block is the same (and we should not extend state for it twice).
	duplicate := make(map[flow.Identifier]struct{})

	// extends protocol state with the chain of blocks.
	for _, completeER := range completeExecutionReceipts {
		// extends state with the reference blocks of the receipts
		for _, receipt := range completeER.ReceiptsData {
			refBlockID := receipt.ReferenceBlock.ID()
			_, dup := duplicate[refBlockID]
			if dup {
				// skips extending state with an already-processed reference block
				continue
			}

			err := state.Extend(context.Background(), receipt.ReferenceBlock)
			require.NoError(t, err, fmt.Errorf("can not extend block %v: %w", receipt.ReferenceBlock.ID(), err))
			err = state.Finalize(context.Background(), refBlockID)
			require.NoError(t, err)
			blocks = append(blocks, receipt.ReferenceBlock)
			duplicate[refBlockID] = struct{}{}
		}

		// extends state with the container block of the receipt.
		containerBlockID := completeER.ContainerBlock.ID()
		_, dup := duplicate[containerBlockID]
		if dup {
			// skips extending state with an already-processed container block
			continue
		}
		err := state.Extend(context.Background(), completeER.ContainerBlock)
		require.NoError(t, err)
		err = state.Finalize(context.Background(), containerBlockID)
		require.NoError(t, err)
		blocks = append(blocks, completeER.ContainerBlock)
		duplicate[containerBlockID] = struct{}{}
	}

	return blocks
}

// MockLastSealedHeight mocks the protocol state for the specified last sealed height.
func MockLastSealedHeight(state *mockprotocol.State, height uint64) {
	snapshot := &mockprotocol.Snapshot{}
	header := unittest.BlockHeaderFixture()
	header.Height = height
	state.On("Sealed").Return(snapshot)
	snapshot.On("Head").Return(header, nil)
}
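// mockSealedHeightExample is a minimal sketch (hypothetical, not part of the original
// suite) of how MockLastSealedHeight is consumed: after mocking, the state reports
// the requested height as the last sealed one.
func mockSealedHeightExample(t *testing.T) {
	state := &mockprotocol.State{}
	MockLastSealedHeight(state, 42)

	header, err := state.Sealed().Head()
	require.NoError(t, err)
	require.Equal(t, uint64(42), header.Height)
}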
func NewVerificationHappyPathTest(t *testing.T,
	authorized bool,
	blockCount int,
	eventRepetition int,
	verCollector module.VerificationMetrics,
	mempoolCollector module.MempoolMetrics,
	retry int,
	ops ...CompleteExecutionReceiptBuilderOpt) {

	withConsumers(t, authorized, blockCount, verCollector, mempoolCollector, RespondChunkDataPackRequestAfterNTrials(retry), func(
		blockConsumer *blockconsumer.BlockConsumer,
		blocks []*flow.Block,
		resultApprovalsWG *sync.WaitGroup,
		chunkDataRequestWG *sync.WaitGroup) {

		for i := 0; i < len(blocks)*eventRepetition; i++ {
			// the consumer is only required to be "notified" that a new finalized block is available.
			// It keeps track of the last finalized block it has read, and reads the next height upon
			// being notified, as follows:
			blockConsumer.OnFinalizedBlock(&model.Block{})
		}

		unittest.RequireReturnsBefore(t, chunkDataRequestWG.Wait, time.Duration(10*retry*blockCount)*time.Second,
			"could not receive chunk data requests on time")
		unittest.RequireReturnsBefore(t, resultApprovalsWG.Wait, time.Duration(2*retry*blockCount)*time.Second,
			"could not receive result approvals on time")

	}, ops...)
}
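// A typical invocation of the happy-path helper above (sketch with hypothetical
// argument values, mirroring how verification tests drive it): one finalized block,
// each finalization event delivered once, and a provider that replies on the first
// request:
//
//	NewVerificationHappyPathTest(t, true, 1, 1, &metrics.NoopCollector{}, &metrics.NoopCollector{}, 1)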
// withConsumers is a test helper that sets up the following pipeline:
// block reader -> block consumer (3 workers) -> assigner engine -> chunks queue -> chunks consumer (3 workers) -> mock chunk processor
//
// The block consumer operates on a block reader with a chain of the specified number of finalized blocks
// ready to read.
func withConsumers(t *testing.T,
	authorized bool,
	blockCount int,
	verCollector module.VerificationMetrics, // verification metrics collector
	mempoolCollector module.MempoolMetrics, // memory pool metrics collector
	providerFunc MockChunkDataProviderFunc,
	withBlockConsumer func(*blockconsumer.BlockConsumer, []*flow.Block, *sync.WaitGroup, *sync.WaitGroup),
	ops ...CompleteExecutionReceiptBuilderOpt) {

	tracer := trace.NewNoopTracer()
	log := zerolog.Nop()

	// bootstraps system with one node of each role (and an extra execution node).
	s, verID, participants := bootstrapSystem(t, log, tracer, authorized)
	exeID := participants.Filter(filter.HasRole(flow.RoleExecution))[0]
	conID := participants.Filter(filter.HasRole(flow.RoleConsensus))[0]
	// generates a chain of blocks in the form of root <- R1 <- C1 <- R2 <- C2 <- ..., where Rs are distinct reference
	// blocks (i.e., containing guarantees), and Cs are container blocks for their preceding reference block.
	// Container blocks only contain receipts of their preceding reference blocks; they do not
	// hold any guarantees.
	root, err := s.State.Final().Head()
	require.NoError(t, err)
	chainID := root.ChainID
	ops = append(ops, WithExecutorIDs(
		participants.Filter(filter.HasRole(flow.RoleExecution)).NodeIDs()), func(builder *CompleteExecutionReceiptBuilder) {
		// needed for the guarantees to have the correct chainID and signer indices
		builder.clusterCommittee = participants.Filter(filter.HasRole(flow.RoleCollection))
	})

	// random sources for all blocks:
	//  - the root block (block[0]) is executed with sources[0] (included in the QC of child block[1])
	//  - block[i] is executed with sources[i] (included in the QC of child block[i+1])
	sources := unittest.RandomSourcesFixture(30)
	completeERs := CompleteExecutionReceiptChainFixture(t, root, blockCount, sources, ops...)
	blocks := ExtendStateWithFinalizedBlocks(t, completeERs, s.State)

	// chunk assignment
	chunkAssigner := &mock.ChunkAssigner{}
	assignedChunkIDs := flow.IdentifierList{}
	if authorized {
		// only an authorized verification node has chunks assigned to it.
		_, assignedChunkIDs = MockChunkAssignmentFixture(chunkAssigner,
			flow.IdentityList{verID},
			completeERs,
			EvenChunkIndexAssigner)
	}

	hub := stub.NewNetworkHub()
	collector := &metrics.NoopCollector{}
	chunksLimit := 100
	genericNode := testutil.GenericNodeWithStateFixture(t,
		s,
		hub,
		verID,
		unittest.Logger().With().Str("role", "verification").Logger(),
		collector,
		tracer,
		chainID)

	// execution node
	exeNode, exeEngine, exeWG := SetupChunkDataPackProvider(t,
		hub,
		exeID,
		participants,
		chainID,
		completeERs,
		assignedChunkIDs,
		providerFunc)

	// consensus node
	conNode, conEngine, conWG := SetupMockConsensusNode(t,
		unittest.Logger(),
		hub,
		conID,
		flow.IdentityList{verID},
		participants,
		completeERs,
		chainID,
		assignedChunkIDs)

	verNode := testutil.VerificationNode(t,
		hub,
		verID,
		participants,
		chunkAssigner,
		uint(chunksLimit),
		chainID,
		verCollector,
		mempoolCollector,
		testutil.WithGenericNode(&genericNode))

	// turns on components and network
	verNet, ok := hub.GetNetwork(verID.NodeID)
	require.True(t, ok)
	unittest.RequireReturnsBefore(t, func() {
		verNet.StartConDev(100*time.Millisecond, true)
	}, 100*time.Millisecond, "failed to start verification network")

	unittest.RequireComponentsReadyBefore(t, 1*time.Second,
		verNode.BlockConsumer,
		verNode.ChunkConsumer,
		verNode.AssignerEngine,
		verNode.FetcherEngine,
		verNode.RequesterEngine,
		verNode.VerifierEngine)

	// plays test scenario
	withBlockConsumer(verNode.BlockConsumer, blocks, conWG, exeWG)

	// tears down engines and nodes
	unittest.RequireReturnsBefore(t, verNet.StopConDev, 100*time.Millisecond, "failed to stop verification network")
	unittest.RequireComponentsDoneBefore(t, 100*time.Millisecond,
		verNode.BlockConsumer,
		verNode.ChunkConsumer,
		verNode.AssignerEngine,
		verNode.FetcherEngine,
		verNode.RequesterEngine,
		verNode.VerifierEngine)

	enginemock.RequireGenericNodesDoneBefore(t, 1*time.Second,
		conNode,
		exeNode)

	if !authorized {
		// in unauthorized mode, no message should be received by the consensus and execution nodes.
		conEngine.AssertNotCalled(t, "Process")
		exeEngine.AssertNotCalled(t, "Process")
	}

	// verifies that memory resources are cleaned up across the pipeline
	assert.Zero(t, verNode.BlockConsumer.Size())
	assert.Zero(t, verNode.ChunkConsumer.Size())
	assert.Zero(t, verNode.ChunkStatuses.Size())
	assert.Zero(t, verNode.ChunkRequests.Size())
}

// bootstrapSystem is a test helper that bootstraps a Flow system with one node of each main role (except execution, which has two nodes).
// If authorized is set to true, it bootstraps the verification node as an authorized one;
// otherwise, it bootstraps the verification node as unauthorized in the current epoch.
//
// It returns the state fixture, the verification node's identity, and the list of identities in the system.
func bootstrapSystem(
	t *testing.T,
	log zerolog.Logger,
	tracer module.Tracer,
	authorized bool,
) (
	*enginemock.StateFixture,
	*flow.Identity,
	flow.IdentityList,
) {
	// creates identities to bootstrap system with
	verID := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification))
	identities := unittest.CompleteIdentitySet(verID)
	identities = append(identities, unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution))) // adds an extra execution node

	// bootstraps the system
	collector := &metrics.NoopCollector{}
	rootSnapshot := unittest.RootSnapshotFixture(identities)
	stateFixture := testutil.CompleteStateFixture(t, log, collector, tracer, rootSnapshot)

	if !authorized {
		// creates a new verification node identity that is unauthorized for this epoch
		verID = unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification))
		identities = identities.Union(flow.IdentityList{verID})

		epochBuilder := unittest.NewEpochBuilder(t, stateFixture.State)
		epochBuilder.
			UsingSetupOpts(unittest.WithParticipants(identities)).
			BuildEpoch()
	}

	return stateFixture, verID, identities
}
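// Compile-time check (added for illustration) that EvenChunkIndexAssigner satisfies
// ChunkAssignerFunc.
var _ ChunkAssignerFunc = EvenChunkIndexAssigner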