github.com/MetalBlockchain/metalgo@v1.11.9/snow/engine/snowman/syncer/state_syncer_test.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package syncer

import (
	"bytes"
	"context"
	"errors"
	"math"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"

	"github.com/MetalBlockchain/metalgo/database"
	"github.com/MetalBlockchain/metalgo/ids"
	"github.com/MetalBlockchain/metalgo/snow/engine/common"
	"github.com/MetalBlockchain/metalgo/snow/engine/common/tracker"
	"github.com/MetalBlockchain/metalgo/snow/engine/snowman/block"
	"github.com/MetalBlockchain/metalgo/snow/engine/snowman/getter"
	"github.com/MetalBlockchain/metalgo/snow/snowtest"
	"github.com/MetalBlockchain/metalgo/utils/logging"
	"github.com/MetalBlockchain/metalgo/utils/set"
	"github.com/MetalBlockchain/metalgo/version"
)

var (
	errInvalidSummary = errors.New("invalid summary")
	errEmptySummary   = errors.New("empty summary")
	errUnknownSummary = errors.New("unknown summary")
)

func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) {
	require := require.New(t)

	// Build state syncer
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	sender := &common.SenderTest{T: t}

	// Non-state-syncable VM case
	nonStateSyncableVM := &block.TestVM{
		TestVM: common.TestVM{T: t},
	}
	dummyGetter, err := getter.New(
		nonStateSyncableVM,
		sender,
		logging.NoLog{},
		time.Second,
		2000,
		prometheus.NewRegistry(),
	)
	require.NoError(err)

	cfg, err := NewConfig(dummyGetter, ctx, nil, sender, nil, 0, 0, nil, nonStateSyncableVM)
	require.NoError(err)
	syncer := New(cfg, func(context.Context, uint32) error {
		return nil
	})

	enabled, err := syncer.IsEnabled(context.Background())
	require.NoError(err)
	require.False(enabled)

	// State-syncable VM case
	fullVM := &fullVM{
		TestVM: &block.TestVM{
			TestVM: common.TestVM{T: t},
		},
		TestStateSyncableVM: &block.TestStateSyncableVM{
			T: t,
		},
	}
	dummyGetter, err = getter.New(
		fullVM,
		sender,
		logging.NoLog{},
		time.Second,
		2000,
		prometheus.NewRegistry())
	require.NoError(err)

	cfg, err = NewConfig(dummyGetter, ctx, nil, sender, nil, 0, 0, nil, fullVM)
	require.NoError(err)
	syncer = New(cfg, func(context.Context, uint32) error {
		return nil
	})

	// test: VM does not support state syncing
	fullVM.StateSyncEnabledF = func(context.Context) (bool, error) {
		return false, nil
	}
	enabled, err = syncer.IsEnabled(context.Background())
	require.NoError(err)
	require.False(enabled)

	// test: VM does support state syncing
	fullVM.StateSyncEnabledF = func(context.Context) (bool, error) {
		return true, nil
	}
	enabled, err = syncer.IsEnabled(context.Background())
	require.NoError(err)
	require.True(enabled)
}

func TestStateSyncingStartsOnlyIfEnoughStakeIsConnected(t *testing.T) {
	require := require.New(t)
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	alpha, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := alpha

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)
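
	// Note: the startup tracker reports ShouldStart only once the connected
	// stake reaches startupAlpha. Here startupAlpha is the beacons' entire
	// weight, so syncing stalls until that much stake is connected.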

	syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, alpha)

	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(context.Context, set.Set[ids.NodeID], uint32) {}
	startReqID := uint32(0)

	// attempt starting the state syncer with no stake connected. It should stall.
	require.False(startup.ShouldStart())
	require.NoError(syncer.Start(context.Background(), startReqID))
	require.False(syncer.started)

	// attempt starting the state syncer with not enough stake connected. It should still stall.
	vdr0 := ids.GenerateTestNodeID()
	require.NoError(beacons.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2))
	require.NoError(syncer.Connected(context.Background(), vdr0, version.CurrentApp))

	require.False(startup.ShouldStart())
	require.NoError(syncer.Start(context.Background(), startReqID))
	require.False(syncer.started)

	// finally attempt starting the state syncer with enough stake connected. Frontiers should be requested.
	vdr := ids.GenerateTestNodeID()
	require.NoError(beacons.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha))
	require.NoError(syncer.Connected(context.Background(), vdr, version.CurrentApp))

	require.True(startup.ShouldStart())
	require.NoError(syncer.Start(context.Background(), startReqID))
	require.True(syncer.started)
}

func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) {
	require := require.New(t)
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// mock VM to return a valid ongoing summary
	localSummary := &block.TestStateSummary{
		HeightV: 2000,
		IDV:     summaryID,
		BytesV:  summaryBytes,
	}
	fullVM.CantStateSyncGetOngoingSummary = true
	fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) {
		return localSummary, nil
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}

	require.Equal(localSummary, syncer.locallyAvailableSummary)
	ws, ok := syncer.weightedSummaries[summaryID]
	require.True(ok)
	require.Equal(summaryBytes, ws.summary.Bytes())
	require.Zero(ws.weight)
}

func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T) {
	require := require.New(t)
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)
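
	// database.ErrNotFound is how the VM signals that it has no ongoing
	// summary to resume from; the syncer should then start with no locally
	// available frontier, as asserted below.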

	// mock VM to return no ongoing summary
	fullVM.CantStateSyncGetOngoingSummary = true
	fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) {
		return nil, database.ErrNotFound
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}

	require.Nil(syncer.locallyAvailableSummary)
	require.Empty(syncer.weightedSummaries)
}

func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := set.NewSet[ids.NodeID](3)
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], _ uint32) {
		contactedFrontiersProviders.Union(ss)
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}

	// check that vdrs are reached for frontiers
	require.Len(contactedFrontiersProviders, min(beacons.Count(ctx.SubnetID), maxOutstandingBroadcastRequests))
	for beaconID := range contactedFrontiersProviders {
		// check that the beacon is duly marked as reached
		require.Contains(syncer.pendingSeeders, beaconID)
	}

	// check that no summary is registered yet
	require.Empty(syncer.weightedSummaries)
}

func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}

	initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders)
	require.Positive(initiallyReachedOutBeaconsSize)
	require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests)
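
	// Responses are accepted only when both the node ID and the request ID
	// match an outstanding request; anything else below must be dropped.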

	// mock VM to return a valid summary
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) {
		return &block.TestStateSummary{
			HeightV: key,
			IDV:     summaryID,
			BytesV:  summaryBytes,
		}, nil
	}

	// pick one of the vdrs that were reached
	responsiveBeaconID := pickRandomFrom(contactedFrontiersProviders)
	responsiveBeaconReqID := contactedFrontiersProviders[responsiveBeaconID]

	// check that a response with a wrong request ID is dropped
	require.NoError(syncer.StateSummaryFrontier(
		context.Background(),
		responsiveBeaconID,
		math.MaxInt32,
		summaryBytes,
	))
	require.Contains(syncer.pendingSeeders, responsiveBeaconID) // responsiveBeacon still pending
	require.Empty(syncer.weightedSummaries)

	// check that a response from an unsolicited node is dropped
	unsolicitedNodeID := ids.GenerateTestNodeID()
	require.NoError(syncer.StateSummaryFrontier(
		context.Background(),
		unsolicitedNodeID,
		responsiveBeaconReqID,
		summaryBytes,
	))
	require.Empty(syncer.weightedSummaries)

	// check that a valid response is duly recorded
	require.NoError(syncer.StateSummaryFrontier(
		context.Background(),
		responsiveBeaconID,
		responsiveBeaconReqID,
		summaryBytes,
	))

	// responsiveBeacon is not pending anymore
	require.NotContains(syncer.pendingSeeders, responsiveBeaconID)

	// valid summary is recorded
	ws, ok := syncer.weightedSummaries[summaryID]
	require.True(ok)
	require.True(bytes.Equal(ws.summary.Bytes(), summaryBytes))

	// other listed vdrs are reached for data
	require.True(
		len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize ||
			len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID))
}

func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}

	initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders)
	require.Positive(initiallyReachedOutBeaconsSize)
	require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests)
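
	// a frontier that fails ParseStateSummary is discarded, but the sender
	// still counts as responded: the syncer drops the bytes rather than
	// re-requesting them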

	// mock VM to return an invalid summary
	summary := []byte{'s', 'u', 'm', 'm', 'a', 'r', 'y'}
	isSummaryDecoded := false
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(context.Context, []byte) (block.StateSummary, error) {
		isSummaryDecoded = true
		return nil, errInvalidSummary
	}

	// pick one of the vdrs that were reached
	responsiveBeaconID := pickRandomFrom(contactedFrontiersProviders)
	responsiveBeaconReqID := contactedFrontiersProviders[responsiveBeaconID]

	// response is valid, but the invalid summary is not recorded
	require.NoError(syncer.StateSummaryFrontier(
		context.Background(),
		responsiveBeaconID,
		responsiveBeaconReqID,
		summary,
	))

	// responsiveBeacon is not pending anymore
	require.NotContains(syncer.pendingSeeders, responsiveBeaconID)

	// invalid summary is not recorded
	require.True(isSummaryDecoded)
	require.Empty(syncer.weightedSummaries)

	// even in case of invalid summaries, other listed vdrs
	// are reached for data
	require.True(
		len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize ||
			len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID))
}

func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}

	initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders)
	require.Positive(initiallyReachedOutBeaconsSize)
	require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests)

	// pick one of the vdrs that were reached
	unresponsiveBeaconID := pickRandomFrom(contactedFrontiersProviders)
	unresponsiveBeaconReqID := contactedFrontiersProviders[unresponsiveBeaconID]

	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) {
		require.Empty(summaryBytes)
		return nil, errEmptySummary
	}

	// assume the timeout is reached and the vdr is marked as unresponsive
	require.NoError(syncer.GetStateSummaryFrontierFailed(
		context.Background(),
		unresponsiveBeaconID,
		unresponsiveBeaconReqID,
	))

	// unresponsiveBeacon is not pending anymore
	require.NotContains(syncer.pendingSeeders, unresponsiveBeaconID)
	require.Contains(syncer.failedSeeders, unresponsiveBeaconID)

	// even in case of timeouts, other listed vdrs
	// are reached for data
	require.True(
		len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize ||
			len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID))
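
	// unresponsiveBeaconID now sits in failedSeeders, so even a well-formed
	// summary arriving from it later must be ignored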

	// mock VM to return a valid but late summary
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) {
		return &block.TestStateSummary{
			HeightV: key,
			IDV:     summaryID,
			BytesV:  summaryBytes,
		}, nil
	}

	// check that a valid but late response is not recorded
	require.NoError(syncer.StateSummaryFrontier(
		context.Background(),
		unresponsiveBeaconID,
		unresponsiveBeaconReqID,
		summaryBytes,
	))

	// late summary is not recorded
	require.Empty(syncer.weightedSummaries)
}

func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// mock VM to return a valid summary
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) {
		switch {
		case bytes.Equal(b, summaryBytes):
			return &block.TestStateSummary{
				HeightV: key,
				IDV:     summaryID,
				BytesV:  summaryBytes,
			}, nil
		case bytes.Equal(b, nil):
			return nil, errEmptySummary
		default:
			return nil, errUnknownSummary
		}
	}

	contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetAcceptedStateSummary = true
	sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) {
		for nodeID := range ss {
			contactedVoters[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}
	require.NotEmpty(syncer.pendingSeeders)

	// let just one node respond and all the others time out
	maxResponses := 1
	reachedSeedersCount := syncer.Config.SampleK
	for reachedSeedersCount >= 0 {
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		reqID := contactedFrontiersProviders[beaconID]

		if maxResponses > 0 {
			require.NoError(syncer.StateSummaryFrontier(
				context.Background(),
				beaconID,
				reqID,
				summaryBytes,
			))
		} else {
			require.NoError(syncer.GetStateSummaryFrontierFailed(
				context.Background(),
				beaconID,
				reqID,
			))
		}
		maxResponses--
		reachedSeedersCount--
	}

	// check that some frontier seeders are reached again for the frontier
	require.NotEmpty(syncer.pendingSeeders)

	// check that no vote requests are issued
	require.Empty(contactedVoters)
}
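
// Once every pending seeder has responded or timed out, the syncer leaves the
// frontier-collection phase and broadcasts GetAcceptedStateSummary requests so
// that beacons can vote on the collected summaries. The tests below exercise
// that voting phase.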

func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// mock VM to return a valid summary
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) {
		require.Equal(summaryBytes, b)
		return &block.TestStateSummary{
			HeightV: key,
			IDV:     summaryID,
			BytesV:  summaryBytes,
		}, nil
	}

	contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetAcceptedStateSummary = true
	sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) {
		for nodeID := range ss {
			contactedVoters[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}
	require.NotEmpty(syncer.pendingSeeders)

	// let all contacted vdrs respond
	for syncer.pendingSeeders.Len() != 0 {
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		reqID := contactedFrontiersProviders[beaconID]

		require.NoError(syncer.StateSummaryFrontier(
			context.Background(),
			beaconID,
			reqID,
			summaryBytes,
		))
	}
	require.Empty(syncer.pendingSeeders)

	// check that vote requests are issued
	initiallyContactedVotersSize := len(contactedVoters)
	require.Positive(initiallyContactedVotersSize)
	require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests)
}
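
// respondToAllSeeders is a hypothetical helper, sketched here only to name the
// pattern that the surrounding tests inline: answer every pending frontier
// request with the same summary bytes until no seeder is left pending. It
// assumes, as the tests do, that buildTestsObjects returns a *stateSyncer and
// that contacted maps each contacted node to its request ID.
func respondToAllSeeders(
	t *testing.T,
	syncer *stateSyncer,
	contacted map[ids.NodeID]uint32,
	summary []byte,
) {
	require := require.New(t)
	for syncer.pendingSeeders.Len() != 0 {
		// Peek returns some node with an outstanding frontier request
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		require.NoError(syncer.StateSummaryFrontier(
			context.Background(),
			beaconID,
			contacted[beaconID],
			summary,
		))
	}
}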

func TestUnRequestedVotesAreDropped(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// mock VM to return a valid summary
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) {
		return &block.TestStateSummary{
			HeightV: key,
			IDV:     summaryID,
			BytesV:  summaryBytes,
		}, nil
	}

	contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetAcceptedStateSummary = true
	sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) {
		for nodeID := range ss {
			contactedVoters[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}
	require.NotEmpty(syncer.pendingSeeders)

	// let all contacted vdrs respond
	for syncer.pendingSeeders.Len() != 0 {
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		reqID := contactedFrontiersProviders[beaconID]

		require.NoError(syncer.StateSummaryFrontier(
			context.Background(),
			beaconID,
			reqID,
			summaryBytes,
		))
	}
	require.Empty(syncer.pendingSeeders)

	// check that vote requests are issued
	initiallyContactedVotersSize := len(contactedVoters)
	require.Positive(initiallyContactedVotersSize)
	require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests)

	_, found := syncer.weightedSummaries[summaryID]
	require.True(found)

	// pick one of the voters that were reached
	responsiveVoterID := pickRandomFrom(contactedVoters)
	responsiveVoterReqID := contactedVoters[responsiveVoterID]

	// check that a response with a wrong request ID is dropped
	require.NoError(syncer.AcceptedStateSummary(
		context.Background(),
		responsiveVoterID,
		math.MaxInt32,
		set.Of(summaryID),
	))

	// responsiveVoter still pending
	require.Contains(syncer.pendingVoters, responsiveVoterID)
	require.Zero(syncer.weightedSummaries[summaryID].weight)

	// check that a response from an unsolicited node is dropped
	unsolicitedVoterID := ids.GenerateTestNodeID()
	require.NoError(syncer.AcceptedStateSummary(
		context.Background(),
		unsolicitedVoterID,
		responsiveVoterReqID,
		set.Of(summaryID),
	))
	require.Zero(syncer.weightedSummaries[summaryID].weight)

	// check that a valid response is duly recorded
	require.NoError(syncer.AcceptedStateSummary(
		context.Background(),
		responsiveVoterID,
		responsiveVoterReqID,
		set.Of(summaryID),
	))

	// responsiveVoter is not pending anymore and its weight is recorded
	require.NotContains(syncer.pendingSeeders, responsiveVoterID)
	voterWeight := beacons.GetWeight(ctx.SubnetID, responsiveVoterID)
	require.Equal(voterWeight, syncer.weightedSummaries[summaryID].weight)

	// other listed voters are reached out
	require.True(
		len(contactedVoters) > initiallyContactedVotersSize ||
			len(contactedVoters) == beacons.Count(ctx.SubnetID))
}

func TestVotesForUnknownSummariesAreDropped(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
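	// (3*totalWeight + 3) / 4 is ceil(3/4 * totalWeight) in integer arithmetic
	// (ceil(a/b) == (a + b - 1) / b); likewise (totalWeight + 1) / 2 below is
	// ceil(totalWeight / 2), a simple majority of stake.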
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// mock VM to return a valid summary
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) {
		return &block.TestStateSummary{
			HeightV: key,
			IDV:     summaryID,
			BytesV:  summaryBytes,
		}, nil
	}

	contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetAcceptedStateSummary = true
	sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) {
		for nodeID := range ss {
			contactedVoters[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}
	require.NotEmpty(syncer.pendingSeeders)

	// let all contacted vdrs respond
	for syncer.pendingSeeders.Len() != 0 {
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		reqID := contactedFrontiersProviders[beaconID]

		require.NoError(syncer.StateSummaryFrontier(
			context.Background(),
			beaconID,
			reqID,
			summaryBytes,
		))
	}
	require.Empty(syncer.pendingSeeders)

	// check that vote requests are issued
	initiallyContactedVotersSize := len(contactedVoters)
	require.Positive(initiallyContactedVotersSize)
	require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests)

	_, found := syncer.weightedSummaries[summaryID]
	require.True(found)

	// pick one of the voters that were reached
	responsiveVoterID := pickRandomFrom(contactedVoters)
	responsiveVoterReqID := contactedVoters[responsiveVoterID]

	// check that a vote for an unrequested summary is dropped
	require.NoError(syncer.AcceptedStateSummary(
		context.Background(),
		responsiveVoterID,
		responsiveVoterReqID,
		set.Of(unknownSummaryID),
	))
	_, found = syncer.weightedSummaries[unknownSummaryID]
	require.False(found)

	// check that responsiveVoter cannot cast another vote
	require.NotContains(syncer.pendingSeeders, responsiveVoterID)
	require.NoError(syncer.AcceptedStateSummary(
		context.Background(),
		responsiveVoterID,
		responsiveVoterReqID,
		set.Of(summaryID),
	))
	require.Zero(syncer.weightedSummaries[summaryID].weight)

	// other listed voters are reached out, even in the face of a vote
	// for an unknown summary
	require.True(
		len(contactedVoters) > initiallyContactedVotersSize ||
			len(contactedVoters) == beacons.Count(ctx.SubnetID))
}

func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) {
	require := require.New(t)
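
	// Scenario: seeders report two competing summaries; voters then cast
	// enough stake behind one of them to pass alpha. Only that summary's
	// Accept should be invoked on the VM.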

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4
	alpha := (totalWeight + 1) / 2

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// mock VM to return the majority and minority summaries
	summary := &block.TestStateSummary{
		HeightV: key,
		IDV:     summaryID,
		BytesV:  summaryBytes,
		T:       t,
	}
	minoritySummary := &block.TestStateSummary{
		HeightV: minorityKey,
		IDV:     minoritySummaryID,
		BytesV:  minoritySummaryBytes,
		T:       t,
	}

	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) {
		switch {
		case bytes.Equal(b, summaryBytes):
			return summary, nil
		case bytes.Equal(b, minoritySummaryBytes):
			return minoritySummary, nil
		default:
			return nil, errUnknownSummary
		}
	}

	contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetAcceptedStateSummary = true
	sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) {
		for nodeID := range ss {
			contactedVoters[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}
	require.NotEmpty(syncer.pendingSeeders)

	// let all contacted vdrs respond with the majority or the minority summary
	for {
		reachedSeeders := syncer.pendingSeeders.Len()
		if reachedSeeders == 0 {
			break
		}
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		reqID := contactedFrontiersProviders[beaconID]

		if reachedSeeders%2 == 0 {
			require.NoError(syncer.StateSummaryFrontier(
				context.Background(),
				beaconID,
				reqID,
				summaryBytes,
			))
		} else {
			require.NoError(syncer.StateSummaryFrontier(
				context.Background(),
				beaconID,
				reqID,
				minoritySummaryBytes,
			))
		}
	}
	require.Empty(syncer.pendingSeeders)

	majoritySummaryCalled := false
	minoritySummaryCalled := false
	summary.AcceptF = func(context.Context) (block.StateSyncMode, error) {
		majoritySummaryCalled = true
		return block.StateSyncStatic, nil
	}
	minoritySummary.AcceptF = func(context.Context) (block.StateSyncMode, error) {
		minoritySummaryCalled = true
		return block.StateSyncStatic, nil
	}

	// let a majority of voters return summaryID and a minority return minoritySummaryID; the rest time out
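	// voters below alpha/2 cumulated weight endorse both summaries, voters up
	// to alpha endorse only the majority summary, and the remainder time out,
	// so only summaryID can cross the alpha threshold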
	cumulatedWeight := uint64(0)
	for syncer.pendingVoters.Len() != 0 {
		voterID, found := syncer.pendingVoters.Peek()
		require.True(found)
		reqID := contactedVoters[voterID]

		switch {
		case cumulatedWeight < alpha/2:
			require.NoError(syncer.AcceptedStateSummary(
				context.Background(),
				voterID,
				reqID,
				set.Of(summaryID, minoritySummaryID),
			))
			cumulatedWeight += beacons.GetWeight(ctx.SubnetID, voterID)

		case cumulatedWeight < alpha:
			require.NoError(syncer.AcceptedStateSummary(
				context.Background(),
				voterID,
				reqID,
				set.Of(summaryID),
			))
			cumulatedWeight += beacons.GetWeight(ctx.SubnetID, voterID)

		default:
			require.NoError(syncer.GetAcceptedStateSummaryFailed(
				context.Background(),
				voterID,
				reqID,
			))
		}
	}

	// check that the majority summary is eventually passed to the VM
	require.True(majoritySummaryCalled)
	require.False(minoritySummaryCalled)
}

func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4
	alpha := (totalWeight + 1) / 2

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// mock VM to return a valid summary
	minoritySummary := &block.TestStateSummary{
		HeightV: minorityKey,
		IDV:     minoritySummaryID,
		BytesV:  minoritySummaryBytes,
		T:       t,
	}
	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(context.Context, []byte) (block.StateSummary, error) {
		return minoritySummary, nil
	}

	contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetAcceptedStateSummary = true
	sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) {
		for nodeID := range ss {
			contactedVoters[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}
	require.NotEmpty(syncer.pendingSeeders)

	// let all contacted vdrs respond
	for syncer.pendingSeeders.Len() != 0 {
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		reqID := contactedFrontiersProviders[beaconID]

		require.NoError(syncer.StateSummaryFrontier(
			context.Background(),
			beaconID,
			reqID,
			summaryBytes,
		))
	}
	require.Empty(syncer.pendingSeeders)
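
	// If the stake lost to timeouts makes alpha unreachable, no summary can
	// win this round: the syncer is expected to restart frontier collection
	// rather than accept anything.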

	minoritySummaryCalled := false
	minoritySummary.AcceptF = func(context.Context) (block.StateSyncMode, error) {
		minoritySummaryCalled = true
		return block.StateSyncStatic, nil
	}

	// Let a majority of voters time out.
	timedOutWeight := uint64(0)
	for syncer.pendingVoters.Len() != 0 {
		voterID, found := syncer.pendingVoters.Peek()
		require.True(found)
		reqID := contactedVoters[voterID]

		// make sure enough weight times out that alpha becomes unreachable
		if timedOutWeight <= alpha {
			require.NoError(syncer.GetAcceptedStateSummaryFailed(
				context.Background(),
				voterID,
				reqID,
			))
			timedOutWeight += beacons.GetWeight(ctx.SubnetID, voterID)
		} else {
			require.NoError(syncer.AcceptedStateSummary(
				context.Background(),
				voterID,
				reqID,
				set.Of(summaryID),
			))
		}
	}

	// no state summary is passed to the VM
	require.False(minoritySummaryCalled)

	// instead the whole process is restarted
	require.Empty(syncer.pendingVoters)     // no voters reached
	require.NotEmpty(syncer.pendingSeeders) // frontier providers reached again
}

func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4
	alpha := (totalWeight + 1) / 2

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha)

	// set sender to track contacted nodes
	contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetStateSummaryFrontier = true
	sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) {
		for nodeID := range ss {
			contactedFrontiersProviders[nodeID] = reqID
		}
	}

	// mock VM to return two valid summaries, neither of which will reach a majority
	minoritySummary1 := &block.TestStateSummary{
		HeightV: key,
		IDV:     summaryID,
		BytesV:  summaryBytes,
		T:       t,
	}
	minoritySummary2 := &block.TestStateSummary{
		HeightV: minorityKey,
		IDV:     minoritySummaryID,
		BytesV:  minoritySummaryBytes,
		T:       t,
	}

	fullVM.CantParseStateSummary = true
	fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) {
		switch {
		case bytes.Equal(b, summaryBytes):
			return minoritySummary1, nil
		case bytes.Equal(b, minoritySummaryBytes):
			return minoritySummary2, nil
		default:
			return nil, errUnknownSummary
		}
	}

	contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map
	sender.CantSendGetAcceptedStateSummary = true
	sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) {
		for nodeID := range ss {
			contactedVoters[nodeID] = reqID
		}
	}

	// Connect enough stake to start syncer
	for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) {
		require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp))
	}
	require.NotEmpty(syncer.pendingSeeders)
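
	// Unlike the timeout case above, every voter answers here; the votes are
	// just split so that no summary reaches alpha. With all votes in and no
	// clear majority, state sync should stop rather than restart.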

	// let all contacted vdrs respond with the majority or the minority summary
	for {
		reachedSeeders := syncer.pendingSeeders.Len()
		if reachedSeeders == 0 {
			break
		}
		beaconID, found := syncer.pendingSeeders.Peek()
		require.True(found)
		reqID := contactedFrontiersProviders[beaconID]

		if reachedSeeders%2 == 0 {
			require.NoError(syncer.StateSummaryFrontier(
				context.Background(),
				beaconID,
				reqID,
				summaryBytes,
			))
		} else {
			require.NoError(syncer.StateSummaryFrontier(
				context.Background(),
				beaconID,
				reqID,
				minoritySummaryBytes,
			))
		}
	}
	require.Empty(syncer.pendingSeeders)

	majoritySummaryCalled := false
	minoritySummaryCalled := false
	minoritySummary1.AcceptF = func(context.Context) (block.StateSyncMode, error) {
		majoritySummaryCalled = true
		return block.StateSyncStatic, nil
	}
	minoritySummary2.AcceptF = func(context.Context) (block.StateSyncMode, error) {
		minoritySummaryCalled = true
		return block.StateSyncStatic, nil
	}

	stateSyncFullyDone := false
	syncer.onDoneStateSyncing = func(context.Context, uint32) error {
		stateSyncFullyDone = true
		return nil
	}

	// let all voters respond in time without any summary reaching a majority.
	// We achieve this by making most nodes vote for an invalid summaryID.
	votingWeightStake := uint64(0)
	for syncer.pendingVoters.Len() != 0 {
		voterID, found := syncer.pendingVoters.Peek()
		require.True(found)
		reqID := contactedVoters[voterID]

		switch {
		case votingWeightStake < alpha/2:
			require.NoError(syncer.AcceptedStateSummary(
				context.Background(),
				voterID,
				reqID,
				set.Of(minoritySummary1.ID(), minoritySummary2.ID()),
			))
			votingWeightStake += beacons.GetWeight(ctx.SubnetID, voterID)

		default:
			require.NoError(syncer.AcceptedStateSummary(
				context.Background(),
				voterID,
				reqID,
				set.Of(ids.ID{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}),
			))
			votingWeightStake += beacons.GetWeight(ctx.SubnetID, voterID)
		}
	}

	// check that no summary is passed to the VM; with all votes in and no
	// clear majority, state sync stops and we move on to bootstrapping
	require.False(majoritySummaryCalled)
	require.False(minoritySummaryCalled)
	require.True(stateSyncFullyDone)
}

func TestStateSyncIsDoneOnceVMNotifies(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	beacons := buildTestPeers(t, ctx.SubnetID)
	totalWeight, err := beacons.TotalWeight(ctx.SubnetID)
	require.NoError(err)
	startupAlpha := (3*totalWeight + 3) / 4

	peers := tracker.NewPeers()
	startup := tracker.NewStartup(peers, startupAlpha)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startup)

	syncer, _, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2)

	stateSyncFullyDone := false
	syncer.onDoneStateSyncing = func(context.Context, uint32) error {
		stateSyncFullyDone = true
		return nil
	}

	// once the VM notifies StateSyncDone, the syncer invokes onDoneStateSyncing
	require.NoError(syncer.Notify(context.Background(), common.StateSyncDone))
	require.True(stateSyncFullyDone)
}