package pool

import (
	"context"
	"crypto/ecdsa"
	"errors"
	"math/rand"
	"testing"
	"time"

	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"go.uber.org/zap/zaptest/observer"
)

// TestBuildPoolClientFailed checks that Dial fails when the underlying
// client cannot establish a connection (mock configured to error on dial).
func TestBuildPoolClientFailed(t *testing.T) {
	mockClientBuilder := func(addr string) client {
		mockCli := newMockClient(addr, *newPrivateKey(t))
		mockCli.errOnDial()
		return mockCli
	}

	opts := InitParameters{
		key:        newPrivateKey(t),
		nodeParams: []NodeParam{{1, "peer0", 1}},
	}
	opts.setClientBuilder(mockClientBuilder)

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(context.Background())
	require.Error(t, err)
}

// TestBuildPoolCreateSessionFailed checks that Dial fails when session
// creation with the node errors out.
func TestBuildPoolCreateSessionFailed(t *testing.T) {
	clientMockBuilder := func(addr string) client {
		mockCli := newMockClient(addr, *newPrivateKey(t))
		mockCli.errOnCreateSession()
		return mockCli
	}

	opts := InitParameters{
		key:        newPrivateKey(t),
		nodeParams: []NodeParam{{1, "peer0", 1}},
	}
	opts.setClientBuilder(clientMockBuilder)

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(context.Background())
	require.Error(t, err)
}

// newPrivateKey generates a fresh ECDSA private key for tests,
// failing the test on generation error.
func newPrivateKey(t *testing.T) *ecdsa.PrivateKey {
	p, err := keys.NewPrivateKey()
	require.NoError(t, err)
	return &p.PrivateKey
}

// TestBuildPoolOneNodeFailed verifies rebalancing: with the first
// (higher-priority) node failing healthchecks, the pool must never use it
// within the first rebalance window, then settle on the second node.
// NOTE(review): timing-sensitive — depends on clientRebalanceInterval vs
// the Never/Eventually windows below.
func TestBuildPoolOneNodeFailed(t *testing.T) {
	nodes := []NodeParam{
		{1, "peer0", 1},
		{2, "peer1", 1},
	}

	var clientKeys []*ecdsa.PrivateKey
	mockClientBuilder := func(addr string) client {
		key := newPrivateKey(t)
		clientKeys = append(clientKeys, key)

		// First node is configured to fail endpoint-info (healthcheck).
		if addr == nodes[0].address {
			mockCli := newMockClient(addr, *key)
			mockCli.errOnEndpointInfo()
			return mockCli
		}

		return newMockClient(addr, *key)
	}

	log, err := zap.NewProduction()
	require.NoError(t, err)
	opts := InitParameters{
		key:                     newPrivateKey(t),
		clientRebalanceInterval: 1000 * time.Millisecond,
		logger:                  log,
		nodeParams:              nodes,
	}
	opts.setClientBuilder(mockClientBuilder)

	clientPool, err := NewPool(opts)
	require.NoError(t, err)
	err = clientPool.Dial(context.Background())
	require.NoError(t, err)
	t.Cleanup(clientPool.Close)

	// The session in the cache must eventually belong to the second node's key.
	expectedAuthKey := frostfsecdsa.PublicKey(clientKeys[1].PublicKey)
	condition := func() bool {
		cp, err := clientPool.connection()
		if err != nil {
			return false
		}
		st, _ := clientPool.cache.Get(formCacheKey(cp.address(), clientPool.key, false))
		return st.AssertAuthKey(&expectedAuthKey)
	}
	require.Never(t, condition, 900*time.Millisecond, 100*time.Millisecond)
	require.Eventually(t, condition, 3*time.Second, 300*time.Millisecond)
}

// TestBuildPoolZeroNodes checks that NewPool rejects a configuration
// with no nodes.
func TestBuildPoolZeroNodes(t *testing.T) {
	opts := InitParameters{
		key: newPrivateKey(t),
	}
	_, err := NewPool(opts)
	require.Error(t, err)
}

// TestOneNode checks the happy path with a single node: after Dial the
// cache holds a session token authorized by that node's key.
func TestOneNode(t *testing.T) {
	key1 := newPrivateKey(t)
	mockClientBuilder := func(addr string) client {
		return newMockClient(addr, *key1)
	}

	opts := InitParameters{
		key:        newPrivateKey(t),
		nodeParams: []NodeParam{{1, "peer0", 1}},
	}
	opts.setClientBuilder(mockClientBuilder)

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(context.Background())
	require.NoError(t, err)
	t.Cleanup(pool.Close)

	cp, err := pool.connection()
	require.NoError(t, err)
	st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
	expectedAuthKey := frostfsecdsa.PublicKey(key1.PublicKey)
	require.True(t, st.AssertAuthKey(&expectedAuthKey))
}

// TestTwoNodes checks that with two healthy same-priority nodes the cached
// session token belongs to one of the two generated client keys.
func TestTwoNodes(t *testing.T) {
	var clientKeys []*ecdsa.PrivateKey
	mockClientBuilder := func(addr string) client {
		key := newPrivateKey(t)
		clientKeys = append(clientKeys, key)
		return newMockClient(addr, *key)
	}

	opts := InitParameters{
		key: newPrivateKey(t),
		nodeParams: []NodeParam{
			{1, "peer0", 1},
			{1, "peer1", 1},
		},
	}
	opts.setClientBuilder(mockClientBuilder)

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(context.Background())
	require.NoError(t, err)
	t.Cleanup(pool.Close)

	cp, err := pool.connection()
	require.NoError(t, err)
	st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
	require.True(t, assertAuthKeyForAny(st, clientKeys))
}

// assertAuthKeyForAny reports whether the session token st is authorized
// by the public key of any of the given private keys.
func assertAuthKeyForAny(st session.Object, clientKeys []*ecdsa.PrivateKey) bool {
	for _, key := range clientKeys {
		expectedAuthKey := frostfsecdsa.PublicKey(key.PublicKey)
		if st.AssertAuthKey(&expectedAuthKey) {
			return true
		}
	}
	return false
}

// TestOneOfTwoFailed checks that when the second node fails health and
// network-info checks, connections keep succeeding via a healthy node
// after the rebalance period.
func TestOneOfTwoFailed(t *testing.T) {
	nodes := []NodeParam{
		{1, "peer0", 1},
		{9, "peer1", 1},
	}

	var clientKeys []*ecdsa.PrivateKey
	mockClientBuilder := func(addr string) client {
		key := newPrivateKey(t)
		clientKeys = append(clientKeys, key)

		if addr == nodes[0].address {
			return newMockClient(addr, *key)
		}

		// Second node fails both endpoint-info and network-info.
		mockCli := newMockClient(addr, *key)
		mockCli.errOnEndpointInfo()
		mockCli.errOnNetworkInfo()
		return mockCli
	}

	opts := InitParameters{
		key:                     newPrivateKey(t),
		nodeParams:              nodes,
		clientRebalanceInterval: 200 * time.Millisecond,
	}
	opts.setClientBuilder(mockClientBuilder)

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(context.Background())
	require.NoError(t, err)

	// NOTE(review): duplicated assertion below (same err already checked);
	// harmless, could be removed.
	require.NoError(t, err)
	t.Cleanup(pool.Close)

	// Give the rebalancer time to run several cycles.
	time.Sleep(2 * time.Second)

	for range 5 {
		cp, err := pool.connection()
		require.NoError(t, err)
		st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
		require.True(t, assertAuthKeyForAny(st, clientKeys))
	}
}

// TestUpdateNodesHealth is a table-driven test of Pool.updateNodesHealth:
// for each (initial health, mock behavior) pair it checks the resulting
// health flag and that a log entry is emitted iff the status changed.
func TestUpdateNodesHealth(t *testing.T) {
	ctx := context.Background()
	key := newPrivateKey(t)

	for _, tc := range []struct {
		name        string
		wasHealthy  bool
		willHealthy bool
		prepareCli  func(*mockClient)
	}{
		{
			name:        "healthy, maintenance, unhealthy",
			wasHealthy:  true,
			willHealthy: false,
			prepareCli:  func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Maintenance) },
		},
		{
			name:        "unhealthy, maintenance, unhealthy",
			wasHealthy:  false,
			willHealthy: false,
			prepareCli:  func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Maintenance) },
		},
		{
			name:        "healthy, no error, healthy",
			wasHealthy:  true,
			willHealthy: true,
			prepareCli:  func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Online) },
		},
		{
			name:        "unhealthy, no error, healthy",
			wasHealthy:  false,
			willHealthy: true,
			prepareCli:  func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Online) },
		},
		{
			name:        "healthy, error, failed restart, unhealthy",
			wasHealthy:  true,
			willHealthy: false,
			prepareCli: func(c *mockClient) {
				c.errOnEndpointInfo()
				c.errorOnDial = true
			},
		},
		{
			name:        "unhealthy, error, failed restart, unhealthy",
			wasHealthy:  false,
			willHealthy: false,
			prepareCli: func(c *mockClient) {
				c.errOnEndpointInfo()
				c.errorOnDial = true
			},
		},
		{
			name:        "healthy, error, restart, error, unhealthy",
			wasHealthy:  true,
			willHealthy: false,
			prepareCli:  func(c *mockClient) { c.errOnEndpointInfo() },
		},
		{
			name:        "unhealthy, error, restart, error, unhealthy",
			wasHealthy:  false,
			willHealthy: false,
			prepareCli:  func(c *mockClient) { c.errOnEndpointInfo() },
		},
		{
			// healthcheckFn toggles: first call errors, second call succeeds
			// but reports Maintenance — node must end up unhealthy.
			name:        "healthy, error, restart, maintenance, unhealthy",
			wasHealthy:  true,
			willHealthy: false,
			prepareCli: func(c *mockClient) {
				healthError := true
				c.healthcheckFn = func() {
					if healthError {
						c.errorOnEndpointInfo = errors.New("error")
					} else {
						c.errorOnEndpointInfo = nil
						c.resOnEndpointInfo.SetStatus(netmap.Maintenance)
					}
					healthError = !healthError
				}
			},
		},
		{
			name:        "unhealthy, error, restart, maintenance, unhealthy",
			wasHealthy:  false,
			willHealthy: false,
			prepareCli: func(c *mockClient) {
				healthError := true
				c.healthcheckFn = func() {
					if healthError {
						c.errorOnEndpointInfo = errors.New("error")
					} else {
						c.errorOnEndpointInfo = nil
						c.resOnEndpointInfo.SetStatus(netmap.Maintenance)
					}
					healthError = !healthError
				}
			},
		},
		{
			// First healthcheck errors, the retry succeeds — node recovers.
			name:        "healthy, error, restart, healthy",
			wasHealthy:  true,
			willHealthy: true,
			prepareCli: func(c *mockClient) {
				healthError := true
				c.healthcheckFn = func() {
					if healthError {
						c.errorOnEndpointInfo = errors.New("error")
					} else {
						c.errorOnEndpointInfo = nil
					}
					healthError = !healthError
				}
			},
		},
		{
			name:        "unhealthy, error, restart, healthy",
			wasHealthy:  false,
			willHealthy: true,
			prepareCli: func(c *mockClient) {
				healthError := true
				c.healthcheckFn = func() {
					if healthError {
						c.errorOnEndpointInfo = errors.New("error")
					} else {
						c.errorOnEndpointInfo = nil
					}
					healthError = !healthError
				}
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			cli := newMockClientHealthy("peer0", *key, tc.wasHealthy)
			tc.prepareCli(cli)
			p, log := newPool(t, cli)

			p.updateNodesHealth(ctx, [][]float64{{1}})

			// Exactly one log record is expected iff the health flag flipped.
			changed := tc.wasHealthy != tc.willHealthy
			require.Equalf(t, tc.willHealthy, cli.isHealthy(), "healthy status should be: %v", tc.willHealthy)
			require.Equalf(t, changed, 1 == log.Len(), "healthy status should be changed: %v", changed)
		})
	}
}

// newPool builds a minimal single-node Pool around the given mock client,
// wired to an observed logger so tests can count emitted log records.
func newPool(t *testing.T, cli *mockClient) (*Pool, *observer.ObservedLogs) {
	log, observedLog := getObservedLogger()

	cache, err := newCache(0)
	require.NoError(t, err)

	return &Pool{
		innerPools: []*innerPool{{
			sampler: newSampler([]float64{1}, rand.NewSource(0)),
			clients: []client{cli},
		}},
		cache:    cache,
		key:      newPrivateKey(t),
		closedCh: make(chan struct{}),
		rebalanceParams: rebalanceParameters{
			nodesParams:             []*nodesParam{{1, []string{"peer0"}, []float64{1}}},
			nodeRequestTimeout:      time.Second,
			clientRebalanceInterval: 200 * time.Millisecond,
		},
		logger: log,
	}, observedLog
}

// getObservedLogger returns a debug-level zap logger together with the
// observer that records every entry written to it.
func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
	loggerCore, observedLog := observer.New(zap.DebugLevel)
	return zap.New(loggerCore), observedLog
}

// TestTwoFailed checks that when every node fails healthchecks, connection()
// returns a "no healthy" error after the rebalancer has run.
func TestTwoFailed(t *testing.T) {
	var clientKeys []*ecdsa.PrivateKey
	mockClientBuilder := func(addr string) client {
		key := newPrivateKey(t)
		clientKeys = append(clientKeys, key)
		mockCli := newMockClient(addr, *key)
		mockCli.errOnEndpointInfo()
		return mockCli
	}

	opts := InitParameters{
		key: newPrivateKey(t),
		nodeParams: []NodeParam{
			{1, "peer0", 1},
			{1, "peer1", 1},
		},
		clientRebalanceInterval: 200 * time.Millisecond,
	}
	opts.setClientBuilder(mockClientBuilder)

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(context.Background())
	require.NoError(t, err)

	t.Cleanup(pool.Close)

	// Let the rebalancer mark both nodes unhealthy.
	time.Sleep(2 * time.Second)

	_, err = pool.connection()
	require.Error(t, err)
	require.Contains(t, err.Error(), "no healthy")
}

// TestSessionCache checks session-token cache invalidation: a GetObject
// that fails with SessionTokenNotFound must evict the cached token, and a
// subsequent successful PutObject must repopulate it.
func TestSessionCache(t *testing.T) {
	key := newPrivateKey(t)
	expectedAuthKey := frostfsecdsa.PublicKey(key.PublicKey)

	mockClientBuilder := func(addr string) client {
		mockCli := newMockClient(addr, *key)
		mockCli.statusOnGetObject(new(apistatus.SessionTokenNotFound))
		return mockCli
	}

	opts := InitParameters{
		key: newPrivateKey(t),
		nodeParams: []NodeParam{
			{1, "peer0", 1},
		},
		// Long interval so rebalancing does not interfere with the test.
		clientRebalanceInterval: 30 * time.Second,
	}
	opts.setClientBuilder(mockClientBuilder)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(ctx)
	require.NoError(t, err)
	t.Cleanup(pool.Close)

	// cache must contain session token
	cp, err := pool.connection()
	require.NoError(t, err)
	st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
	require.True(t, st.AssertAuthKey(&expectedAuthKey))

	var prm PrmObjectGet
	prm.SetAddress(oid.Address{})
	prm.UseSession(session.Object{})

	_, err = pool.GetObject(ctx, prm)
	require.Error(t, err)

	// cache must not contain session token
	cp, err = pool.connection()
	require.NoError(t, err)
	_, ok := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
	require.False(t, ok)

	var prm2 PrmObjectPut
	prm2.SetHeader(object.Object{})

	_, err = pool.PutObject(ctx, prm2)
	require.NoError(t, err)

	// cache must contain session token
	cp, err = pool.connection()
	require.NoError(t, err)
	st, _ = pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
	require.True(t, st.AssertAuthKey(&expectedAuthKey))
}

// TestPriority checks priority-based failover: while the priority-1 node
// fails healthchecks, the pool must switch to the priority-2 node after a
// rebalance cycle and then never go back to the failing one.
// NOTE(review): timing-sensitive (1500ms rebalance vs 1s observation windows).
func TestPriority(t *testing.T) {
	nodes := []NodeParam{
		{1, "peer0", 1},
		{2, "peer1", 100},
	}

	var clientKeys []*ecdsa.PrivateKey
	mockClientBuilder := func(addr string) client {
		key := newPrivateKey(t)
		clientKeys = append(clientKeys, key)

		if addr == nodes[0].address {
			mockCli := newMockClient(addr, *key)
			mockCli.errOnEndpointInfo()
			return mockCli
		}

		return newMockClient(addr, *key)
	}

	opts := InitParameters{
		key:                     newPrivateKey(t),
		nodeParams:              nodes,
		clientRebalanceInterval: 1500 * time.Millisecond,
	}
	opts.setClientBuilder(mockClientBuilder)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(ctx)
	require.NoError(t, err)
	t.Cleanup(pool.Close)

	expectedAuthKey1 := frostfsecdsa.PublicKey(clientKeys[0].PublicKey)
	firstNode := func() bool {
		cp, err := pool.connection()
		require.NoError(t, err)
		st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
		return st.AssertAuthKey(&expectedAuthKey1)
	}

	expectedAuthKey2 := frostfsecdsa.PublicKey(clientKeys[1].PublicKey)
	secondNode := func() bool {
		cp, err := pool.connection()
		require.NoError(t, err)
		st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
		return st.AssertAuthKey(&expectedAuthKey2)
	}
	// Before the first rebalance the pool must not use the second node.
	require.Never(t, secondNode, time.Second, 200*time.Millisecond)

	require.Eventually(t, secondNode, time.Second, 200*time.Millisecond)
	require.Never(t, firstNode, time.Second, 200*time.Millisecond)
}

// TestSessionCacheWithKey checks that an operation performed with an
// explicit per-request key (UseKey) gets its own cache entry keyed by
// that key. NOTE(review): no t.Cleanup(pool.Close) here — pool leaks
// until test binary exit; consider adding one.
func TestSessionCacheWithKey(t *testing.T) {
	key := newPrivateKey(t)
	expectedAuthKey := frostfsecdsa.PublicKey(key.PublicKey)

	mockClientBuilder := func(addr string) client {
		return newMockClient(addr, *key)
	}

	opts := InitParameters{
		key: newPrivateKey(t),
		nodeParams: []NodeParam{
			{1, "peer0", 1},
		},
		clientRebalanceInterval: 30 * time.Second,
	}
	opts.setClientBuilder(mockClientBuilder)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(ctx)
	require.NoError(t, err)

	// cache must contain session token
	cp, err := pool.connection()
	require.NoError(t, err)
	st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
	require.True(t, st.AssertAuthKey(&expectedAuthKey))

	var prm PrmObjectDelete
	prm.SetAddress(oid.Address{})
	anonKey := newPrivateKey(t)
	prm.UseKey(anonKey)

	err = pool.DeleteObject(ctx, prm)
	require.NoError(t, err)
	// A separate cache entry must exist for the anonymous key.
	st, _ = pool.cache.Get(formCacheKey(cp.address(), anonKey, false))
	require.True(t, st.AssertAuthKey(&expectedAuthKey))
}

// TestSessionTokenOwner checks that a default session opened with a
// caller-provided key is issued (and signed) by that key's owner.
func TestSessionTokenOwner(t *testing.T) {
	mockClientBuilder := func(addr string) client {
		key := newPrivateKey(t)
		return newMockClient(addr, *key)
	}

	opts := InitParameters{
		key: newPrivateKey(t),
		nodeParams: []NodeParam{
			{1, "peer0", 1},
		},
	}
	opts.setClientBuilder(mockClientBuilder)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	p, err := NewPool(opts)
	require.NoError(t, err)
	err = p.Dial(ctx)
	require.NoError(t, err)
	t.Cleanup(p.Close)

	anonKey := newPrivateKey(t)
	var anonOwner user.ID
	user.IDFromKey(&anonOwner, anonKey.PublicKey)

	var prm prmCommon
	prm.UseKey(anonKey)
	var prmCtx prmContext
	prmCtx.useDefaultSession()

	// Capture the token handed to the call context.
	var tkn session.Object
	var cc callContext
	cc.sessionTarget = func(tok session.Object) {
		tkn = tok
	}
	err = p.initCallContext(&cc, prm, prmCtx)
	require.NoError(t, err)

	err = p.openDefaultSession(ctx, &cc)
	require.NoError(t, err)
	require.True(t, tkn.VerifySignature())
	require.True(t, tkn.Issuer().Equals(anonOwner))
}

// TestWaitPresence exercises waitForContainerPresence under cancellation,
// timeout, and the successful polling path.
func TestWaitPresence(t *testing.T) {
	mockCli := newMockClient("", *newPrivateKey(t))

	t.Run("context canceled", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			time.Sleep(500 * time.Millisecond)
			cancel()
		}()

		err := waitForContainerPresence(ctx, mockCli, PrmContainerGet{}, &WaitParams{
			Timeout:      120 * time.Second,
			PollInterval: 5 * time.Second,
		})
		require.Error(t, err)
		require.Contains(t, err.Error(), "context canceled")
	})

	t.Run("context deadline exceeded", func(t *testing.T) {
		ctx := context.Background()

		// Timeout shorter than the poll interval forces a deadline error.
		err := waitForContainerPresence(ctx, mockCli, PrmContainerGet{}, &WaitParams{
			Timeout:      500 * time.Millisecond,
			PollInterval: 5 * time.Second,
		})
		require.Error(t, err)
		require.Contains(t, err.Error(), "context deadline exceeded")
	})

	t.Run("ok", func(t *testing.T) {
		ctx := context.Background()

		err := waitForContainerPresence(ctx, mockCli, PrmContainerGet{}, &WaitParams{
			Timeout:      10 * time.Second,
			PollInterval: 500 * time.Millisecond,
		})
		require.NoError(t, err)
	})
}

// TestStatusMonitor checks error-rate accounting of clientStatusMonitor
// and the healthy/unhealthy state transitions.
func TestStatusMonitor(t *testing.T) {
	monitor := newClientStatusMonitor(zap.NewExample(), "", 10)
	monitor.errorThreshold = 3

	count := 10
	for range count {
		monitor.incErrorRate()
	}

	require.Equal(t, uint64(count), monitor.overallErrorRate())
	require.Equal(t, uint32(1), monitor.currentErrorRate())

	t.Run("healthy status", func(t *testing.T) {
		// NOTE(review): isDialed and description fields are never read by
		// the assertion loop below — candidates for removal.
		cases := []struct {
			action      func(*clientStatusMonitor)
			status      uint32
			isDialed    bool
			isHealthy   bool
			description string
		}{
			{
				action:      func(m *clientStatusMonitor) { m.setUnhealthy() },
				status:      statusUnhealthyOnRequest,
				isDialed:    true,
				isHealthy:   false,
				description: "set unhealthy on request",
			},
			{
				action:      func(m *clientStatusMonitor) { m.setHealthy() },
				status:      statusHealthy,
				isDialed:    true,
				isHealthy:   true,
				description: "set healthy",
			},
		}
		for _, tc := range cases {
			tc.action(&monitor)
			require.Equal(t, tc.status, monitor.healthy.Load())
			require.Equal(t, tc.isHealthy, monitor.isHealthy())
		}
	})
}

// TestHandleError is a table-driven test of clientStatusMonitor.handleError:
// for each (context, status, error) combination it checks whether an error
// is returned, whether it is counted toward the error rate, and whether the
// node is marked unhealthy.
func TestHandleError(t *testing.T) {
	ctx := context.Background()
	log := zaptest.NewLogger(t)

	canceledCtx, cancel := context.WithCancel(context.Background())
	cancel()

	for _, tc := range []struct {
		name            string
		ctx             context.Context
		status          apistatus.Status
		err             error
		expectedError   bool
		countError      bool
		markedUnhealthy bool
	}{
		{
			name:          "no error, no status",
			ctx:           ctx,
			status:        nil,
			err:           nil,
			expectedError: false,
			countError:    false,
		},
		{
			name:          "no error, success status",
			ctx:           ctx,
			status:        new(apistatus.SuccessDefaultV2),
			err:           nil,
			expectedError: false,
			countError:    false,
		},
		{
			name:          "error, success status",
			ctx:           ctx,
			status:        new(apistatus.SuccessDefaultV2),
			err:           errors.New("error"),
			expectedError: true,
			countError:    true,
		},
		{
			name:          "error, no status",
			ctx:           ctx,
			status:        nil,
			err:           errors.New("error"),
			expectedError: true,
			countError:    true,
		},
		{
			name:          "no error, object not found status",
			ctx:           ctx,
			status:        new(apistatus.ObjectNotFound),
			err:           nil,
			expectedError: true,
			countError:    false,
		},
		{
			name:          "object not found error, object not found status",
			ctx:           ctx,
			status:        new(apistatus.ObjectNotFound),
			err:           &apistatus.ObjectNotFound{},
			expectedError: true,
			countError:    false,
		},
		{
			name:          "eacl not found error, no status",
			ctx:           ctx,
			status:        nil,
			err:           &apistatus.EACLNotFound{},
			expectedError: true,
			// we expect error be counted because status is nil
			// currently we assume that DisableFrostFSErrorResolution be always false for pool
			// and status be checked first in handleError
			countError: true,
		},
		{
			name:          "no error, internal status",
			ctx:           ctx,
			status:        new(apistatus.ServerInternal),
			err:           nil,
			expectedError: true,
			countError:    true,
		},
		{
			name:          "no error, wrong magic status",
			ctx:           ctx,
			status:        new(apistatus.WrongMagicNumber),
			err:           nil,
			expectedError: true,
			countError:    true,
		},
		{
			name:          "no error, signature verification status",
			ctx:           ctx,
			status:        new(apistatus.SignatureVerification),
			err:           nil,
			expectedError: true,
			countError:    true,
		},
		{
			name:            "no error, maintenance status",
			ctx:             ctx,
			status:          new(apistatus.NodeUnderMaintenance),
			err:             nil,
			expectedError:   true,
			countError:      true,
			markedUnhealthy: true,
		},
		{
			name:            "maintenance error, no status",
			ctx:             ctx,
			status:          nil,
			err:             &apistatus.NodeUnderMaintenance{},
			expectedError:   true,
			countError:      true,
			markedUnhealthy: true,
		},
		{
			name:          "no error, invalid argument status",
			ctx:           ctx,
			status:        new(apistatus.InvalidArgument),
			err:           nil,
			expectedError: true,
			countError:    false,
		},
		{
			name:          "context canceled error, no status",
			ctx:           canceledCtx,
			status:        nil,
			err:           errors.New("error"),
			expectedError: true,
			countError:    false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			monitor := newClientStatusMonitor(log, "", 10)
			errCount := monitor.overallErrorRate()
			err := monitor.handleError(tc.ctx, tc.status, tc.err)
			if tc.expectedError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			if tc.countError {
				errCount++
			}
			require.Equal(t, errCount, monitor.overallErrorRate())
			if tc.markedUnhealthy {
				require.False(t, monitor.isHealthy())
			}
		})
	}
}

// TestSwitchAfterErrorThreshold checks that after errorThreshold failed
// requests against the first node, the pool switches connections to the
// second node without waiting for a rebalance cycle.
func TestSwitchAfterErrorThreshold(t *testing.T) {
	nodes := []NodeParam{
		{1, "peer0", 1},
		{2, "peer1", 100},
	}

	errorThreshold := 5

	var clientKeys []*ecdsa.PrivateKey
	mockClientBuilder := func(addr string) client {
		key := newPrivateKey(t)
		clientKeys = append(clientKeys, key)

		// First node returns an internal server error on every object get.
		if addr == nodes[0].address {
			mockCli := newMockClient(addr, *key)
			mockCli.setThreshold(uint32(errorThreshold))
			mockCli.statusOnGetObject(new(apistatus.ServerInternal))
			return mockCli
		}

		return newMockClient(addr, *key)
	}

	opts := InitParameters{
		key:        newPrivateKey(t),
		nodeParams: nodes,
		// Long interval: the switch must happen via the error threshold,
		// not via rebalancing.
		clientRebalanceInterval: 30 * time.Second,
	}
	opts.setClientBuilder(mockClientBuilder)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pool, err := NewPool(opts)
	require.NoError(t, err)
	err = pool.Dial(ctx)
	require.NoError(t, err)
	t.Cleanup(pool.Close)

	for range errorThreshold {
		conn, err := pool.connection()
		require.NoError(t, err)
		require.Equal(t, nodes[0].address, conn.address())
		_, err = conn.objectGet(ctx, PrmObjectGet{})
		require.Error(t, err)
	}

	conn, err := pool.connection()
	require.NoError(t, err)
	require.Equal(t, nodes[1].address, conn.address())
	_, err = conn.objectGet(ctx, PrmObjectGet{})
	require.NoError(t, err)
}