github.com/onflow/flow-go@v0.33.17/engine/testutil/nodes.go (about) 1 package testutil 2 3 import ( 4 "context" 5 "encoding/json" 6 "fmt" 7 "math" 8 "path/filepath" 9 "testing" 10 "time" 11 12 "github.com/coreos/go-semver/semver" 13 "github.com/ipfs/go-datastore" 14 dssync "github.com/ipfs/go-datastore/sync" 15 blockstore "github.com/ipfs/go-ipfs-blockstore" 16 "github.com/rs/zerolog" 17 "github.com/stretchr/testify/mock" 18 "github.com/stretchr/testify/require" 19 "go.uber.org/atomic" 20 21 "github.com/onflow/flow-go/cmd/build" 22 "github.com/onflow/flow-go/consensus" 23 "github.com/onflow/flow-go/consensus/hotstuff" 24 "github.com/onflow/flow-go/consensus/hotstuff/committees" 25 mockhotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" 26 "github.com/onflow/flow-go/consensus/hotstuff/model" 27 "github.com/onflow/flow-go/consensus/hotstuff/notifications" 28 "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" 29 "github.com/onflow/flow-go/crypto" 30 "github.com/onflow/flow-go/engine" 31 "github.com/onflow/flow-go/engine/collection/epochmgr" 32 "github.com/onflow/flow-go/engine/collection/epochmgr/factories" 33 collectioningest "github.com/onflow/flow-go/engine/collection/ingest" 34 mockcollection "github.com/onflow/flow-go/engine/collection/mock" 35 "github.com/onflow/flow-go/engine/collection/pusher" 36 "github.com/onflow/flow-go/engine/common/follower" 37 "github.com/onflow/flow-go/engine/common/provider" 38 "github.com/onflow/flow-go/engine/common/requester" 39 "github.com/onflow/flow-go/engine/common/synchronization" 40 "github.com/onflow/flow-go/engine/consensus/approvals/tracker" 41 consensusingest "github.com/onflow/flow-go/engine/consensus/ingestion" 42 "github.com/onflow/flow-go/engine/consensus/matching" 43 "github.com/onflow/flow-go/engine/consensus/sealing" 44 "github.com/onflow/flow-go/engine/execution/computation" 45 "github.com/onflow/flow-go/engine/execution/computation/committer" 46 "github.com/onflow/flow-go/engine/execution/computation/query" 47 "github.com/onflow/flow-go/engine/execution/ingestion" 48 exeFetcher "github.com/onflow/flow-go/engine/execution/ingestion/fetcher" 49 "github.com/onflow/flow-go/engine/execution/ingestion/loader" 50 "github.com/onflow/flow-go/engine/execution/ingestion/stop" 51 "github.com/onflow/flow-go/engine/execution/ingestion/uploader" 52 executionprovider "github.com/onflow/flow-go/engine/execution/provider" 53 executionState "github.com/onflow/flow-go/engine/execution/state" 54 bootstrapexec "github.com/onflow/flow-go/engine/execution/state/bootstrap" 55 esbootstrap "github.com/onflow/flow-go/engine/execution/state/bootstrap" 56 "github.com/onflow/flow-go/engine/execution/storehouse" 57 testmock "github.com/onflow/flow-go/engine/testutil/mock" 58 verificationassigner "github.com/onflow/flow-go/engine/verification/assigner" 59 "github.com/onflow/flow-go/engine/verification/assigner/blockconsumer" 60 "github.com/onflow/flow-go/engine/verification/fetcher" 61 "github.com/onflow/flow-go/engine/verification/fetcher/chunkconsumer" 62 vereq "github.com/onflow/flow-go/engine/verification/requester" 63 "github.com/onflow/flow-go/engine/verification/verifier" 64 "github.com/onflow/flow-go/fvm" 65 "github.com/onflow/flow-go/fvm/environment" 66 "github.com/onflow/flow-go/fvm/storage/derived" 67 "github.com/onflow/flow-go/ledger/common/pathfinder" 68 completeLedger "github.com/onflow/flow-go/ledger/complete" 69 "github.com/onflow/flow-go/ledger/complete/mtrie/trie" 70 "github.com/onflow/flow-go/ledger/complete/wal" 71 "github.com/onflow/flow-go/model/bootstrap" 72 "github.com/onflow/flow-go/model/flow" 73 "github.com/onflow/flow-go/model/flow/filter" 74 "github.com/onflow/flow-go/module" 75 "github.com/onflow/flow-go/module/chainsync" 76 "github.com/onflow/flow-go/module/chunks" 77 "github.com/onflow/flow-go/module/compliance" 78 "github.com/onflow/flow-go/module/executiondatasync/execution_data" 79 exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider" 80 mocktracker "github.com/onflow/flow-go/module/executiondatasync/tracker/mock" 81 "github.com/onflow/flow-go/module/finalizedreader" 82 confinalizer "github.com/onflow/flow-go/module/finalizer/consensus" 83 "github.com/onflow/flow-go/module/id" 84 "github.com/onflow/flow-go/module/irrecoverable" 85 "github.com/onflow/flow-go/module/local" 86 "github.com/onflow/flow-go/module/mempool" 87 consensusMempools "github.com/onflow/flow-go/module/mempool/consensus" 88 "github.com/onflow/flow-go/module/mempool/epochs" 89 "github.com/onflow/flow-go/module/mempool/herocache" 90 "github.com/onflow/flow-go/module/mempool/queue" 91 "github.com/onflow/flow-go/module/mempool/stdmap" 92 "github.com/onflow/flow-go/module/metrics" 93 mockmodule "github.com/onflow/flow-go/module/mock" 94 "github.com/onflow/flow-go/module/signature" 95 requesterunit "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" 96 "github.com/onflow/flow-go/module/trace" 97 "github.com/onflow/flow-go/module/validation" 98 "github.com/onflow/flow-go/network/channels" 99 "github.com/onflow/flow-go/network/p2p/cache" 100 "github.com/onflow/flow-go/network/stub" 101 "github.com/onflow/flow-go/state/protocol" 102 badgerstate "github.com/onflow/flow-go/state/protocol/badger" 103 "github.com/onflow/flow-go/state/protocol/blocktimer" 104 "github.com/onflow/flow-go/state/protocol/events" 105 "github.com/onflow/flow-go/state/protocol/events/gadgets" 106 "github.com/onflow/flow-go/state/protocol/util" 107 storage "github.com/onflow/flow-go/storage/badger" 108 storagepebble "github.com/onflow/flow-go/storage/pebble" 109 "github.com/onflow/flow-go/utils/unittest" 110 ) 111 112 // GenericNodeFromParticipants is a test helper that creates and returns a generic node. 113 // The generic node's state is generated from the given participants, resulting in a 114 // root state snapshot. 115 // 116 // CAUTION: Please use GenericNode instead for most use-cases so that multiple nodes 117 // may share the same root state snapshot. 118 func GenericNodeFromParticipants(t testing.TB, hub *stub.Hub, identity *flow.Identity, participants []*flow.Identity, chainID flow.ChainID, 119 options ...func(protocol.State)) testmock.GenericNode { 120 var i int 121 var participant *flow.Identity 122 for i, participant = range participants { 123 if identity.NodeID == participant.NodeID { 124 break 125 } 126 } 127 128 // creates logger, metrics collector and tracer. 129 log := unittest.Logger().With().Int("index", i).Hex("node_id", identity.NodeID[:]).Str("role", identity.Role.String()).Logger() 130 tracer, err := trace.NewTracer(log, "test", "test", trace.SensitivityCaptureAll) 131 require.NoError(t, err) 132 metrics := metrics.NewNoopCollector() 133 134 // creates state fixture and bootstrap it. 135 rootSnapshot := unittest.RootSnapshotFixtureWithChainID(participants, chainID) 136 stateFixture := CompleteStateFixture(t, log, metrics, tracer, rootSnapshot) 137 138 require.NoError(t, err) 139 for _, option := range options { 140 option(stateFixture.State) 141 } 142 143 return GenericNodeWithStateFixture(t, stateFixture, hub, identity, log, metrics, tracer, chainID) 144 } 145 146 // GenericNode returns a generic test node, containing components shared across 147 // all node roles. The generic node is used as the core data structure to create 148 // other types of flow nodes. 149 func GenericNode( 150 t testing.TB, 151 hub *stub.Hub, 152 identity *flow.Identity, 153 root protocol.Snapshot, 154 ) testmock.GenericNode { 155 156 log := unittest.Logger().With(). 157 Hex("node_id", identity.NodeID[:]). 158 Str("role", identity.Role.String()). 159 Logger() 160 metrics := metrics.NewNoopCollector() 161 tracer := trace.NewNoopTracer() 162 stateFixture := CompleteStateFixture(t, log, metrics, tracer, root) 163 164 head, err := root.Head() 165 require.NoError(t, err) 166 chainID := head.ChainID 167 168 return GenericNodeWithStateFixture(t, stateFixture, hub, identity, log, metrics, tracer, chainID) 169 } 170 171 // GenericNodeWithStateFixture is a test helper that creates a generic node with specified state fixture. 172 func GenericNodeWithStateFixture(t testing.TB, 173 stateFixture *testmock.StateFixture, 174 hub *stub.Hub, 175 identity *flow.Identity, 176 log zerolog.Logger, 177 metrics *metrics.NoopCollector, 178 tracer module.Tracer, 179 chainID flow.ChainID) testmock.GenericNode { 180 181 me := LocalFixture(t, identity) 182 net := stub.NewNetwork(t, identity.NodeID, hub) 183 184 parentCtx, cancel := context.WithCancel(context.Background()) 185 ctx, errs := irrecoverable.WithSignaler(parentCtx) 186 187 return testmock.GenericNode{ 188 Ctx: ctx, 189 Cancel: cancel, 190 Errs: errs, 191 Log: log, 192 Metrics: metrics, 193 Tracer: tracer, 194 PublicDB: stateFixture.PublicDB, 195 SecretsDB: stateFixture.SecretsDB, 196 State: stateFixture.State, 197 Headers: stateFixture.Storage.Headers, 198 Guarantees: stateFixture.Storage.Guarantees, 199 Seals: stateFixture.Storage.Seals, 200 Payloads: stateFixture.Storage.Payloads, 201 Blocks: stateFixture.Storage.Blocks, 202 QuorumCertificates: stateFixture.Storage.QuorumCertificates, 203 Me: me, 204 Net: net, 205 DBDir: stateFixture.DBDir, 206 ChainID: chainID, 207 ProtocolEvents: stateFixture.ProtocolEvents, 208 } 209 } 210 211 // LocalFixture creates and returns a Local module for given identity. 212 func LocalFixture(t testing.TB, identity *flow.Identity) module.Local { 213 214 // Generates test signing oracle for the nodes 215 // Disclaimer: it should not be used for practical applications 216 // 217 // uses identity of node as its seed 218 seed, err := json.Marshal(identity) 219 require.NoError(t, err) 220 // creates signing key of the node 221 sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed[:64]) 222 require.NoError(t, err) 223 224 // sets staking public key of the node 225 identity.StakingPubKey = sk.PublicKey() 226 227 me, err := local.New(identity, sk) 228 require.NoError(t, err) 229 230 return me 231 } 232 233 // CompleteStateFixture is a test helper that creates, bootstraps, and returns a StateFixture for sake of unit testing. 234 func CompleteStateFixture( 235 t testing.TB, 236 log zerolog.Logger, 237 metric *metrics.NoopCollector, 238 tracer module.Tracer, 239 rootSnapshot protocol.Snapshot, 240 ) *testmock.StateFixture { 241 242 dataDir := unittest.TempDir(t) 243 publicDBDir := filepath.Join(dataDir, "protocol") 244 secretsDBDir := filepath.Join(dataDir, "secrets") 245 db := unittest.TypedBadgerDB(t, publicDBDir, storage.InitPublic) 246 s := storage.InitAll(metric, db) 247 secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storage.InitSecret) 248 consumer := events.NewDistributor() 249 250 state, err := badgerstate.Bootstrap( 251 metric, 252 db, 253 s.Headers, 254 s.Seals, 255 s.Results, 256 s.Blocks, 257 s.QuorumCertificates, 258 s.Setups, 259 s.EpochCommits, 260 s.Statuses, 261 s.VersionBeacons, 262 rootSnapshot, 263 ) 264 require.NoError(t, err) 265 266 mutableState, err := badgerstate.NewFullConsensusState( 267 log, 268 tracer, 269 consumer, 270 state, 271 s.Index, 272 s.Payloads, 273 util.MockBlockTimer(), 274 util.MockReceiptValidator(), 275 util.MockSealValidator(s.Seals), 276 ) 277 require.NoError(t, err) 278 279 return &testmock.StateFixture{ 280 PublicDB: db, 281 SecretsDB: secretsDB, 282 Storage: s, 283 DBDir: dataDir, 284 ProtocolEvents: consumer, 285 State: mutableState, 286 } 287 } 288 289 // CollectionNode returns a mock collection node. 290 func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, rootSnapshot protocol.Snapshot) testmock.CollectionNode { 291 292 node := GenericNode(t, hub, identity.Identity(), rootSnapshot) 293 privKeys, err := identity.PrivateKeys() 294 require.NoError(t, err) 295 node.Me, err = local.New(identity.Identity(), privKeys.StakingKey) 296 require.NoError(t, err) 297 298 pools := epochs.NewTransactionPools( 299 func(_ uint64) mempool.Transactions { 300 return herocache.NewTransactions(1000, node.Log, metrics.NewNoopCollector()) 301 }) 302 transactions := storage.NewTransactions(node.Metrics, node.PublicDB) 303 collections := storage.NewCollections(node.PublicDB, transactions) 304 clusterPayloads := storage.NewClusterPayloads(node.Metrics, node.PublicDB) 305 306 ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Metrics, node.Me, node.ChainID.Chain(), pools, collectioningest.DefaultConfig()) 307 require.NoError(t, err) 308 309 selector := filter.HasRole(flow.RoleAccess, flow.RoleVerification) 310 retrieve := func(collID flow.Identifier) (flow.Entity, error) { 311 coll, err := collections.ByID(collID) 312 return coll, err 313 } 314 providerEngine, err := provider.New( 315 node.Log, 316 node.Metrics, 317 node.Net, 318 node.Me, 319 node.State, 320 queue.NewHeroStore(uint32(1000), unittest.Logger(), metrics.NewNoopCollector()), 321 uint(1000), 322 channels.ProvideCollections, 323 selector, 324 retrieve) 325 require.NoError(t, err) 326 327 pusherEngine, err := pusher.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me, collections, transactions) 328 require.NoError(t, err) 329 330 clusterStateFactory, err := factories.NewClusterStateFactory( 331 node.PublicDB, 332 node.Metrics, 333 node.Tracer, 334 ) 335 require.NoError(t, err) 336 337 builderFactory, err := factories.NewBuilderFactory( 338 node.PublicDB, 339 node.State, 340 node.Headers, 341 node.Tracer, 342 node.Metrics, 343 pusherEngine, 344 node.Log, 345 ) 346 require.NoError(t, err) 347 348 complianceEngineFactory, err := factories.NewComplianceEngineFactory( 349 node.Log, 350 node.Net, 351 node.Me, 352 node.Metrics, node.Metrics, node.Metrics, 353 node.State, 354 transactions, 355 compliance.DefaultConfig(), 356 ) 357 require.NoError(t, err) 358 359 syncCoreFactory, err := factories.NewSyncCoreFactory(node.Log, chainsync.DefaultConfig()) 360 require.NoError(t, err) 361 362 syncFactory, err := factories.NewSyncEngineFactory( 363 node.Log, 364 node.Metrics, 365 node.Net, 366 node.Me, 367 ) 368 require.NoError(t, err) 369 370 createMetrics := func(chainID flow.ChainID) module.HotstuffMetrics { 371 return metrics.NewNoopCollector() 372 } 373 hotstuffFactory, err := factories.NewHotStuffFactory( 374 node.Log, 375 node.Me, 376 node.PublicDB, 377 node.State, 378 node.Metrics, 379 node.Metrics, 380 createMetrics, 381 ) 382 require.NoError(t, err) 383 384 messageHubFactory := factories.NewMessageHubFactory( 385 node.Log, 386 node.Net, 387 node.Me, 388 node.Metrics, 389 node.State, 390 ) 391 392 factory := factories.NewEpochComponentsFactory( 393 node.Me, 394 pools, 395 builderFactory, 396 clusterStateFactory, 397 hotstuffFactory, 398 complianceEngineFactory, 399 syncCoreFactory, 400 syncFactory, 401 messageHubFactory, 402 ) 403 404 rootQCVoter := new(mockmodule.ClusterRootQCVoter) 405 rootQCVoter.On("Vote", mock.Anything, mock.Anything).Return(nil) 406 407 engineEventsDistributor := mockcollection.NewEngineEvents(t) 408 engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Maybe() 409 heights := gadgets.NewHeights() 410 node.ProtocolEvents.AddConsumer(heights) 411 412 epochManager, err := epochmgr.New( 413 node.Log, 414 node.Me, 415 node.State, 416 pools, 417 rootQCVoter, 418 factory, 419 heights, 420 engineEventsDistributor, 421 ) 422 require.NoError(t, err) 423 node.ProtocolEvents.AddConsumer(epochManager) 424 425 return testmock.CollectionNode{ 426 GenericNode: node, 427 Collections: collections, 428 Transactions: transactions, 429 ClusterPayloads: clusterPayloads, 430 IngestionEngine: ingestionEngine, 431 PusherEngine: pusherEngine, 432 ProviderEngine: providerEngine, 433 EpochManagerEngine: epochManager, 434 } 435 } 436 437 func ConsensusNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identities []*flow.Identity, chainID flow.ChainID) testmock.ConsensusNode { 438 439 node := GenericNodeFromParticipants(t, hub, identity, identities, chainID) 440 441 resultsDB := storage.NewExecutionResults(node.Metrics, node.PublicDB) 442 receiptsDB := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, resultsDB, storage.DefaultCacheSize) 443 444 guarantees, err := stdmap.NewGuarantees(1000) 445 require.NoError(t, err) 446 447 receipts := consensusMempools.NewExecutionTree() 448 449 seals := stdmap.NewIncorporatedResultSeals(1000) 450 pendingReceipts := stdmap.NewPendingReceipts(node.Headers, 1000) 451 452 ingestionCore := consensusingest.NewCore(node.Log, node.Tracer, node.Metrics, node.State, 453 node.Headers, guarantees) 454 // receive collections 455 ingestionEngine, err := consensusingest.New(node.Log, node.Metrics, node.Net, node.Me, ingestionCore) 456 require.Nil(t, err) 457 458 // request receipts from execution nodes 459 receiptRequester, err := requester.New(node.Log, node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} }) 460 require.Nil(t, err) 461 462 assigner, err := chunks.NewChunkAssigner(flow.DefaultChunkAssignmentAlpha, node.State) 463 require.Nil(t, err) 464 465 receiptValidator := validation.NewReceiptValidator( 466 node.State, 467 node.Headers, 468 node.Index, 469 resultsDB, 470 node.Seals, 471 ) 472 473 sealingEngine, err := sealing.NewEngine( 474 node.Log, 475 node.Tracer, 476 node.Metrics, 477 node.Metrics, 478 node.Metrics, 479 &tracker.NoopSealingTracker{}, 480 node.Net, 481 node.Me, 482 node.Headers, 483 node.Payloads, 484 resultsDB, 485 node.Index, 486 node.State, 487 node.Seals, 488 assigner, 489 seals, 490 unittest.NewSealingConfigs(flow.DefaultRequiredApprovalsForSealConstruction), 491 ) 492 require.NoError(t, err) 493 494 matchingConfig := matching.DefaultConfig() 495 496 matchingCore := matching.NewCore( 497 node.Log, 498 node.Tracer, 499 node.Metrics, 500 node.Metrics, 501 node.State, 502 node.Headers, 503 receiptsDB, 504 receipts, 505 pendingReceipts, 506 seals, 507 receiptValidator, 508 receiptRequester, 509 matchingConfig) 510 511 matchingEngine, err := matching.NewEngine( 512 node.Log, 513 node.Net, 514 node.Me, 515 node.Metrics, 516 node.Metrics, 517 node.State, 518 receiptsDB, 519 node.Index, 520 matchingCore, 521 ) 522 require.NoError(t, err) 523 524 return testmock.ConsensusNode{ 525 GenericNode: node, 526 Guarantees: guarantees, 527 Receipts: receipts, 528 Seals: seals, 529 IngestionEngine: ingestionEngine, 530 SealingEngine: sealingEngine, 531 MatchingEngine: matchingEngine, 532 } 533 } 534 535 func ConsensusNodes(t *testing.T, hub *stub.Hub, nNodes int, chainID flow.ChainID) []testmock.ConsensusNode { 536 conIdentities := unittest.IdentityListFixture(nNodes, unittest.WithRole(flow.RoleConsensus)) 537 for _, id := range conIdentities { 538 t.Log(id.String()) 539 } 540 541 // add some extra dummy identities so we have one of each role 542 others := unittest.IdentityListFixture(5, unittest.WithAllRolesExcept(flow.RoleConsensus)) 543 544 identities := append(conIdentities, others...) 545 546 nodes := make([]testmock.ConsensusNode, 0, len(conIdentities)) 547 for _, identity := range conIdentities { 548 nodes = append(nodes, ConsensusNode(t, hub, identity, identities, chainID)) 549 } 550 551 return nodes 552 } 553 554 type CheckerMock struct { 555 notifications.NoopConsumer // satisfy the FinalizationConsumer interface 556 } 557 558 func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identities []*flow.Identity, syncThreshold int, chainID flow.ChainID) testmock.ExecutionNode { 559 node := GenericNodeFromParticipants(t, hub, identity, identities, chainID) 560 561 transactionsStorage := storage.NewTransactions(node.Metrics, node.PublicDB) 562 collectionsStorage := storage.NewCollections(node.PublicDB, transactionsStorage) 563 eventsStorage := storage.NewEvents(node.Metrics, node.PublicDB) 564 serviceEventsStorage := storage.NewServiceEvents(node.Metrics, node.PublicDB) 565 txResultStorage := storage.NewTransactionResults(node.Metrics, node.PublicDB, storage.DefaultCacheSize) 566 commitsStorage := storage.NewCommits(node.Metrics, node.PublicDB) 567 chunkDataPackStorage := storage.NewChunkDataPacks(node.Metrics, node.PublicDB, collectionsStorage, 100) 568 results := storage.NewExecutionResults(node.Metrics, node.PublicDB) 569 receipts := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize) 570 myReceipts := storage.NewMyExecutionReceipts(node.Metrics, node.PublicDB, receipts) 571 versionBeacons := storage.NewVersionBeacons(node.PublicDB) 572 headersStorage := storage.NewHeaders(node.Metrics, node.PublicDB) 573 574 checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) { 575 return protocol.IsNodeAuthorizedAt(node.State.AtBlockID(blockID), node.Me.NodeID()) 576 } 577 578 protoState, ok := node.State.(*badgerstate.ParticipantState) 579 require.True(t, ok) 580 581 followerState, err := badgerstate.NewFollowerState( 582 node.Log, 583 node.Tracer, 584 node.ProtocolEvents, 585 protoState.State, 586 node.Index, 587 node.Payloads, 588 blocktimer.DefaultBlockTimer, 589 ) 590 require.NoError(t, err) 591 592 dbDir := unittest.TempDir(t) 593 594 metricsCollector := &metrics.NoopCollector{} 595 596 const ( 597 capacity = 100 598 checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. 599 checkpointsToKeep = 1 600 ) 601 diskWal, err := wal.NewDiskWAL(node.Log.With().Str("subcomponent", "wal").Logger(), nil, metricsCollector, dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize) 602 require.NoError(t, err) 603 604 ls, err := completeLedger.NewLedger(diskWal, capacity, metricsCollector, node.Log.With().Str("compontent", "ledger").Logger(), completeLedger.DefaultPathFinderVersion) 605 require.NoError(t, err) 606 607 compactor, err := completeLedger.NewCompactor(ls, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metricsCollector) 608 require.NoError(t, err) 609 610 <-compactor.Ready() // Need to start compactor here because BootstrapLedger() updates ledger state. 611 612 genesisHead, err := node.State.Final().Head() 613 require.NoError(t, err) 614 615 bootstrapper := bootstrapexec.NewBootstrapper(node.Log) 616 commit, err := bootstrapper.BootstrapLedger( 617 ls, 618 unittest.ServiceAccountPublicKey, 619 node.ChainID.Chain(), 620 fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply)) 621 require.NoError(t, err) 622 623 matchTrie, err := ls.FindTrieByStateCommit(commit) 624 require.NoError(t, err) 625 require.NotNil(t, matchTrie) 626 627 const bootstrapCheckpointFile = "bootstrap-checkpoint" 628 checkpointFile := filepath.Join(dbDir, bootstrapCheckpointFile) 629 err = wal.StoreCheckpointV6([]*trie.MTrie{matchTrie}, dbDir, bootstrapCheckpointFile, zerolog.Nop(), 1) 630 require.NoError(t, err) 631 632 rootResult, rootSeal, err := protoState.Sealed().SealedResult() 633 require.NoError(t, err) 634 635 require.Equal(t, fmt.Sprintf("%x", rootSeal.FinalState), fmt.Sprintf("%x", commit)) 636 require.Equal(t, rootSeal.ResultID, rootResult.ID()) 637 638 err = bootstrapper.BootstrapExecutionDatabase(node.PublicDB, rootSeal) 639 require.NoError(t, err) 640 641 registerDir := unittest.TempPebblePath(t) 642 pebbledb, err := storagepebble.OpenRegisterPebbleDB(registerDir) 643 require.NoError(t, err) 644 645 checkpointHeight := uint64(0) 646 require.NoError(t, esbootstrap.ImportRegistersFromCheckpoint(node.Log, checkpointFile, checkpointHeight, matchTrie.RootHash(), pebbledb, 2)) 647 648 diskStore, err := storagepebble.NewRegisters(pebbledb) 649 require.NoError(t, err) 650 651 reader := finalizedreader.NewFinalizedReader(headersStorage, checkpointHeight) 652 registerStore, err := storehouse.NewRegisterStore( 653 diskStore, 654 nil, // TOOD(leo): replace with real WAL 655 reader, 656 node.Log, 657 storehouse.NewNoopNotifier(), 658 ) 659 require.NoError(t, err) 660 661 storehouseEnabled := true 662 execState := executionState.NewExecutionState( 663 ls, commitsStorage, node.Blocks, node.Headers, collectionsStorage, chunkDataPackStorage, results, myReceipts, eventsStorage, serviceEventsStorage, txResultStorage, node.PublicDB, node.Tracer, 664 // TODO: test with register store 665 registerStore, 666 storehouseEnabled, 667 ) 668 669 requestEngine, err := requester.New( 670 node.Log, node.Metrics, node.Net, node.Me, node.State, 671 channels.RequestCollections, 672 filter.HasRole(flow.RoleCollection), 673 func() flow.Entity { return &flow.Collection{} }, 674 ) 675 require.NoError(t, err) 676 677 pusherEngine, err := executionprovider.New( 678 node.Log, 679 node.Tracer, 680 node.Net, 681 node.State, 682 execState, 683 metricsCollector, 684 checkAuthorizedAtBlock, 685 queue.NewHeroStore(uint32(1000), unittest.Logger(), metrics.NewNoopCollector()), 686 executionprovider.DefaultChunkDataPackRequestWorker, 687 executionprovider.DefaultChunkDataPackQueryTimeout, 688 executionprovider.DefaultChunkDataPackDeliveryTimeout, 689 ) 690 require.NoError(t, err) 691 692 blockFinder := environment.NewBlockFinder(node.Headers) 693 694 vmCtx := fvm.NewContext( 695 fvm.WithLogger(node.Log), 696 fvm.WithChain(node.ChainID.Chain()), 697 fvm.WithBlocks(blockFinder), 698 ) 699 committer := committer.NewLedgerViewCommitter(ls, node.Tracer) 700 701 bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore()))) 702 trackerStorage := mocktracker.NewMockStorage() 703 704 prov := exedataprovider.NewProvider( 705 zerolog.Nop(), 706 metrics.NewNoopCollector(), 707 execution_data.DefaultSerializer, 708 bservice, 709 trackerStorage, 710 ) 711 712 computationEngine, err := computation.New( 713 node.Log, 714 node.Metrics, 715 node.Tracer, 716 node.Me, 717 node.State, 718 vmCtx, 719 committer, 720 prov, 721 computation.ComputationConfig{ 722 QueryConfig: query.NewDefaultConfig(), 723 DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, 724 MaxConcurrency: 1, 725 }, 726 ) 727 require.NoError(t, err) 728 729 syncCore, err := chainsync.New(node.Log, chainsync.DefaultConfig(), metrics.NewChainSyncCollector(genesisHead.ChainID), genesisHead.ChainID) 730 require.NoError(t, err) 731 732 followerDistributor := pubsub.NewFollowerDistributor() 733 require.NoError(t, err) 734 735 // disabled by default 736 uploader := uploader.NewManager(node.Tracer) 737 738 _, err = build.Semver() 739 require.ErrorIs(t, err, build.UndefinedVersionError) 740 ver := semver.New("0.0.0") 741 742 latestFinalizedBlock, err := node.State.Final().Head() 743 require.NoError(t, err) 744 745 unit := engine.NewUnit() 746 stopControl := stop.NewStopControl( 747 unit, 748 time.Second, 749 node.Log, 750 execState, 751 node.Headers, 752 versionBeacons, 753 ver, 754 latestFinalizedBlock, 755 false, 756 true, 757 ) 758 759 fetcher := exeFetcher.NewCollectionFetcher(node.Log, requestEngine, node.State, false) 760 loader := loader.NewUnexecutedLoader(node.Log, node.State, node.Headers, execState) 761 rootHead, rootQC := getRoot(t, &node) 762 ingestionEngine, err := ingestion.New( 763 unit, 764 node.Log, 765 node.Net, 766 node.Me, 767 fetcher, 768 node.Headers, 769 node.Blocks, 770 collectionsStorage, 771 computationEngine, 772 pusherEngine, 773 execState, 774 node.Metrics, 775 node.Tracer, 776 false, 777 nil, 778 uploader, 779 stopControl, 780 loader, 781 ) 782 require.NoError(t, err) 783 requestEngine.WithHandle(ingestionEngine.OnCollection) 784 785 node.ProtocolEvents.AddConsumer(ingestionEngine) 786 787 followerCore, finalizer := createFollowerCore(t, &node, followerState, followerDistributor, rootHead, rootQC) 788 // mock out hotstuff validator 789 validator := new(mockhotstuff.Validator) 790 validator.On("ValidateProposal", mock.Anything).Return(nil) 791 792 core, err := follower.NewComplianceCore( 793 node.Log, 794 node.Metrics, 795 node.Metrics, 796 followerDistributor, 797 followerState, 798 followerCore, 799 validator, 800 syncCore, 801 node.Tracer, 802 ) 803 require.NoError(t, err) 804 805 finalizedHeader, err := protoState.Final().Head() 806 require.NoError(t, err) 807 followerEng, err := follower.NewComplianceLayer( 808 node.Log, 809 node.Net, 810 node.Me, 811 node.Metrics, 812 node.Headers, 813 finalizedHeader, 814 core, 815 compliance.DefaultConfig(), 816 ) 817 require.NoError(t, err) 818 819 idCache, err := cache.NewProtocolStateIDCache(node.Log, node.State, events.NewDistributor()) 820 require.NoError(t, err, "could not create finalized snapshot cache") 821 spamConfig, err := synchronization.NewSpamDetectionConfig() 822 require.NoError(t, err, "could not initialize spam detection config") 823 syncEngine, err := synchronization.New( 824 node.Log, 825 node.Metrics, 826 node.Net, 827 node.Me, 828 node.State, 829 node.Blocks, 830 followerEng, 831 syncCore, 832 id.NewIdentityFilterIdentifierProvider( 833 filter.And( 834 filter.HasRole(flow.RoleConsensus), 835 filter.Not(filter.HasNodeID(node.Me.NodeID())), 836 ), 837 idCache, 838 ), 839 spamConfig, 840 synchronization.WithPollInterval(time.Duration(0)), 841 ) 842 require.NoError(t, err) 843 followerDistributor.AddFinalizationConsumer(syncEngine) 844 845 return testmock.ExecutionNode{ 846 GenericNode: node, 847 FollowerState: followerState, 848 IngestionEngine: ingestionEngine, 849 FollowerCore: followerCore, 850 FollowerEngine: followerEng, 851 SyncEngine: syncEngine, 852 ExecutionEngine: computationEngine, 853 RequestEngine: requestEngine, 854 ReceiptsEngine: pusherEngine, 855 BadgerDB: node.PublicDB, 856 VM: computationEngine.VM(), 857 ExecutionState: execState, 858 Ledger: ls, 859 LevelDbDir: dbDir, 860 Collections: collectionsStorage, 861 Finalizer: finalizer, 862 MyExecutionReceipts: myReceipts, 863 Compactor: compactor, 864 StorehouseEnabled: storehouseEnabled, 865 } 866 } 867 868 func getRoot(t *testing.T, node *testmock.GenericNode) (*flow.Header, *flow.QuorumCertificate) { 869 rootHead, err := node.State.Params().FinalizedRoot() 870 require.NoError(t, err) 871 872 signers, err := node.State.AtHeight(0).Identities(filter.HasRole(flow.RoleConsensus)) 873 require.NoError(t, err) 874 875 signerIDs := signers.NodeIDs() 876 signerIndices, err := signature.EncodeSignersToIndices(signerIDs, signerIDs) 877 require.NoError(t, err) 878 879 rootQC := &flow.QuorumCertificate{ 880 View: rootHead.View, 881 BlockID: rootHead.ID(), 882 SignerIndices: signerIndices, 883 SigData: unittest.SignatureFixture(), 884 } 885 886 return rootHead, rootQC 887 } 888 889 type RoundRobinLeaderSelection struct { 890 identities flow.IdentityList 891 me flow.Identifier 892 } 893 894 var _ hotstuff.Replicas = (*RoundRobinLeaderSelection)(nil) 895 var _ hotstuff.DynamicCommittee = (*RoundRobinLeaderSelection)(nil) 896 897 func (s *RoundRobinLeaderSelection) IdentitiesByBlock(_ flow.Identifier) (flow.IdentityList, error) { 898 return s.identities, nil 899 } 900 901 func (s *RoundRobinLeaderSelection) IdentityByBlock(_ flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) { 902 id, found := s.identities.ByNodeID(participantID) 903 if !found { 904 return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) 905 } 906 907 return id, nil 908 } 909 910 func (s *RoundRobinLeaderSelection) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { 911 return s.identities, nil 912 } 913 914 func (s *RoundRobinLeaderSelection) IdentityByEpoch(_ uint64, participantID flow.Identifier) (*flow.Identity, error) { 915 id, found := s.identities.ByNodeID(participantID) 916 if !found { 917 return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) 918 } 919 return id, nil 920 } 921 922 func (s *RoundRobinLeaderSelection) LeaderForView(view uint64) (flow.Identifier, error) { 923 return s.identities[int(view)%len(s.identities)].NodeID, nil 924 } 925 926 func (s *RoundRobinLeaderSelection) QuorumThresholdForView(_ uint64) (uint64, error) { 927 return committees.WeightThresholdToBuildQC(s.identities.TotalWeight()), nil 928 } 929 930 func (s *RoundRobinLeaderSelection) TimeoutThresholdForView(_ uint64) (uint64, error) { 931 return committees.WeightThresholdToTimeout(s.identities.TotalWeight()), nil 932 } 933 934 func (s *RoundRobinLeaderSelection) Self() flow.Identifier { 935 return s.me 936 } 937 938 func (s *RoundRobinLeaderSelection) DKG(_ uint64) (hotstuff.DKG, error) { 939 return nil, fmt.Errorf("error") 940 } 941 942 func createFollowerCore( 943 t *testing.T, 944 node *testmock.GenericNode, 945 followerState *badgerstate.FollowerState, 946 notifier hotstuff.FollowerConsumer, 947 rootHead *flow.Header, 948 rootQC *flow.QuorumCertificate, 949 ) (module.HotStuffFollower, *confinalizer.Finalizer) { 950 finalizer := confinalizer.NewFinalizer(node.PublicDB, node.Headers, followerState, trace.NewNoopTracer()) 951 952 pending := make([]*flow.Header, 0) 953 954 // creates a consensus follower with noop consumer as the notifier 955 followerCore, err := consensus.NewFollower( 956 node.Log, 957 node.Metrics, 958 node.Headers, 959 finalizer, 960 notifier, 961 rootHead, 962 rootQC, 963 rootHead, 964 pending, 965 ) 966 require.NoError(t, err) 967 return followerCore, finalizer 968 } 969 970 type VerificationOpt func(*testmock.VerificationNode) 971 972 func WithChunkConsumer(chunkConsumer *chunkconsumer.ChunkConsumer) VerificationOpt { 973 return func(node *testmock.VerificationNode) { 974 node.ChunkConsumer = chunkConsumer 975 } 976 } 977 978 func WithGenericNode(genericNode *testmock.GenericNode) VerificationOpt { 979 return func(node *testmock.VerificationNode) { 980 node.GenericNode = genericNode 981 } 982 } 983 984 // VerificationNode creates a verification node with all functional engines and actual modules for purpose of 985 // (integration) testing. 986 func VerificationNode(t testing.TB, 987 hub *stub.Hub, 988 verIdentity *flow.Identity, // identity of this verification node. 989 participants flow.IdentityList, // identity of all nodes in system including this verification node. 990 assigner module.ChunkAssigner, 991 chunksLimit uint, 992 chainID flow.ChainID, 993 collector module.VerificationMetrics, // used to enable collecting metrics on happy path integration 994 mempoolCollector module.MempoolMetrics, // used to enable collecting metrics on happy path integration 995 opts ...VerificationOpt) testmock.VerificationNode { 996 997 var err error 998 var node testmock.VerificationNode 999 1000 for _, apply := range opts { 1001 apply(&node) 1002 } 1003 1004 if node.GenericNode == nil { 1005 gn := GenericNodeFromParticipants(t, hub, verIdentity, participants, chainID) 1006 node.GenericNode = &gn 1007 } 1008 1009 if node.ChunkStatuses == nil { 1010 node.ChunkStatuses = stdmap.NewChunkStatuses(chunksLimit) 1011 err = mempoolCollector.Register(metrics.ResourceChunkStatus, node.ChunkStatuses.Size) 1012 require.Nil(t, err) 1013 } 1014 1015 if node.ChunkRequests == nil { 1016 node.ChunkRequests = stdmap.NewChunkRequests(chunksLimit) 1017 err = mempoolCollector.Register(metrics.ResourceChunkRequest, node.ChunkRequests.Size) 1018 require.NoError(t, err) 1019 } 1020 1021 if node.Results == nil { 1022 results := storage.NewExecutionResults(node.Metrics, node.PublicDB) 1023 node.Results = results 1024 node.Receipts = storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize) 1025 } 1026 1027 if node.ProcessedChunkIndex == nil { 1028 node.ProcessedChunkIndex = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationChunkIndex) 1029 } 1030 1031 if node.ChunksQueue == nil { 1032 node.ChunksQueue = storage.NewChunkQueue(node.PublicDB) 1033 ok, err := node.ChunksQueue.Init(chunkconsumer.DefaultJobIndex) 1034 require.NoError(t, err) 1035 require.True(t, ok) 1036 } 1037 1038 if node.ProcessedBlockHeight == nil { 1039 node.ProcessedBlockHeight = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationBlockHeight) 1040 } 1041 1042 if node.VerifierEngine == nil { 1043 vm := fvm.NewVirtualMachine() 1044 1045 blockFinder := environment.NewBlockFinder(node.Headers) 1046 1047 vmCtx := fvm.NewContext( 1048 fvm.WithLogger(node.Log), 1049 fvm.WithChain(node.ChainID.Chain()), 1050 fvm.WithBlocks(blockFinder), 1051 ) 1052 1053 chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, node.Log) 1054 1055 approvalStorage := storage.NewResultApprovals(node.Metrics, node.PublicDB) 1056 1057 node.VerifierEngine, err = verifier.New(node.Log, 1058 collector, 1059 node.Tracer, 1060 node.Net, 1061 node.State, 1062 node.Me, 1063 chunkVerifier, 1064 approvalStorage) 1065 require.Nil(t, err) 1066 } 1067 1068 if node.RequesterEngine == nil { 1069 node.RequesterEngine, err = vereq.New(node.Log, 1070 node.State, 1071 node.Net, 1072 node.Tracer, 1073 collector, 1074 node.ChunkRequests, 1075 vereq.DefaultRequestInterval, 1076 // requests are only qualified if their retryAfter is elapsed. 1077 vereq.RetryAfterQualifier, 1078 // exponential backoff with multiplier of 2, minimum interval of a second, and 1079 // maximum interval of an hour. 1080 mempool.ExponentialUpdater( 1081 vereq.DefaultBackoffMultiplier, 1082 vereq.DefaultBackoffMaxInterval, 1083 vereq.DefaultBackoffMinInterval), 1084 vereq.DefaultRequestTargets) 1085 1086 require.NoError(t, err) 1087 } 1088 1089 if node.FetcherEngine == nil { 1090 node.FetcherEngine = fetcher.New(node.Log, 1091 collector, 1092 node.Tracer, 1093 node.VerifierEngine, 1094 node.State, 1095 node.ChunkStatuses, 1096 node.Headers, 1097 node.Blocks, 1098 node.Results, 1099 node.Receipts, 1100 node.RequesterEngine, 1101 0, 1102 ) 1103 } 1104 1105 if node.ChunkConsumer == nil { 1106 node.ChunkConsumer, err = chunkconsumer.NewChunkConsumer(node.Log, 1107 collector, 1108 node.ProcessedChunkIndex, 1109 node.ChunksQueue, 1110 node.FetcherEngine, 1111 chunkconsumer.DefaultChunkWorkers) // defaults number of workers to 3. 1112 require.NoError(t, err) 1113 err = mempoolCollector.Register(metrics.ResourceChunkConsumer, node.ChunkConsumer.Size) 1114 require.NoError(t, err) 1115 } 1116 1117 if node.AssignerEngine == nil { 1118 node.AssignerEngine = verificationassigner.New(node.Log, 1119 collector, 1120 node.Tracer, 1121 node.Me, 1122 node.State, 1123 assigner, 1124 node.ChunksQueue, 1125 node.ChunkConsumer, 1126 0) 1127 } 1128 1129 if node.BlockConsumer == nil { 1130 node.BlockConsumer, _, err = blockconsumer.NewBlockConsumer(node.Log, 1131 collector, 1132 node.ProcessedBlockHeight, 1133 node.Blocks, 1134 node.State, 1135 node.AssignerEngine, 1136 blockconsumer.DefaultBlockWorkers) 1137 require.NoError(t, err) 1138 1139 err = mempoolCollector.Register(metrics.ResourceBlockConsumer, node.BlockConsumer.Size) 1140 require.NoError(t, err) 1141 } 1142 1143 return node 1144 }