google.golang.org/grpc@v1.62.1/xds/internal/balancer/clustermanager/clustermanager_test.go

/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package clustermanager

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal/balancer/stub"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/internal/hierarchy"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/status"
)

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

const (
	defaultTestTimeout      = 5 * time.Second
	defaultTestShortTimeout = 10 * time.Millisecond
	testBackendAddrsCount   = 12
)

var testBackendAddrStrs []string

func init() {
	for i := 0; i < testBackendAddrsCount; i++ {
		testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i))
	}
}

// testPick makes several picks with the given info and verifies that each one
// returns the wanted SubConn and error.
func testPick(t *testing.T, p balancer.Picker, info balancer.PickInfo, wantSC balancer.SubConn, wantErr error) {
	t.Helper()
	for i := 0; i < 5; i++ {
		gotSCSt, err := p.Pick(info)
		if fmt.Sprint(err) != fmt.Sprint(wantErr) {
			t.Fatalf("picker.Pick(%+v), got error %v, want %v", info, err, wantErr)
		}
		if gotSCSt.SubConn != wantSC {
			t.Fatalf("picker.Pick(%+v), got %v, want SubConn=%v", info, gotSCSt, wantSC)
		}
	}
}

// TestClusterPicks verifies that picks are routed to the child policy for the
// cluster set on the RPC's context, and that picking an unknown cluster fails.
func (s) TestClusterPicks(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and addresses with hierarchy paths ["cds:cluster_1"]
	// and ["cds:cluster_2"].
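	// Each address carries its target cluster as a hierarchy-path attribute;
	// the cluster manager routes every address to the child policy named by
	// the first path element and strips that element before passing the
	// address down. The assertions below check for the stripped path.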
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
			hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	m1 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
		addrs[0].BalancerAttributes = nil
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p1 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "notacluster"`),
		},
	} {
		testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
	}
}

// TestConfigUpdateAddCluster covers the case where the balancer receives a
// config update with extra clusters.
func (s) TestConfigUpdateAddCluster(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and addresses with hierarchy paths ["cds:cluster_1"]
	// and ["cds:cluster_2"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
			hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	m1 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
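	// Drain one NewSubConn event per address; driving each SubConn to READY
	// prompts the balancer to publish a picker covering both clusters.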
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
		addrs[0].BalancerAttributes = nil
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p1 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
	}

	// A config update adding a third cluster. Expect a new subconn and a
	// picker update.
	configJSON2 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_3":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config2, err := parser.ParseConfig([]byte(configJSON2))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
	wantAddrs = append(wantAddrs, resolver.Address{Addr: testBackendAddrStrs[2], BalancerAttributes: nil})
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
			hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}),
			hierarchy.Set(wantAddrs[2], []string{"cds:cluster_3"}),
		}},
		BalancerConfig: config2,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Expect exactly one new subconn.
	addrs := <-cc.NewSubConnAddrsCh
	if len(hierarchy.Get(addrs[0])) != 0 {
		t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
	}
	sc := <-cc.NewSubConnCh
	// Clear the attributes before adding to map.
	addrs[0].BalancerAttributes = nil
	m1[addrs[0]] = sc
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})

	// Should have no more newSubConn.
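	// This is a negative assertion, so the absence of a NewSubConn event can
	// only be verified by waiting out a short timeout.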
	select {
	case <-time.After(time.Millisecond * 500):
	case <-cc.NewSubConnCh:
		addrs := <-cc.NewSubConnAddrsCh
		t.Fatalf("unexpected NewSubConn with address %v", addrs)
	}

	p2 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_3"),
			},
			wantSC: m1[wantAddrs[2]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p2, tt.pickInfo, tt.wantSC, tt.wantErr)
	}
}

// TestRoutingConfigUpdateDeleteAll covers the case where the balancer receives
// a config update with no clusters. Picks should then fail with details in the
// error.
func (s) TestRoutingConfigUpdateDeleteAll(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and addresses with hierarchy paths ["cds:cluster_1"]
	// and ["cds:cluster_2"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
			hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	m1 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
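		// (The wantAddrs entries used as map keys in the pick table below
		// carry nil BalancerAttributes, so the attributes must be nil here
		// for the lookups to match.)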
		addrs[0].BalancerAttributes = nil
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p1 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
	}

	// A config update with no clusters.
	configJSON2 := `{}`
	config2, err := parser.ParseConfig([]byte(configJSON2))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		BalancerConfig: config2,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Expect two removed subconns.
	for range wantAddrs {
		select {
		case <-time.After(time.Millisecond * 500):
			t.Fatalf("timeout waiting for remove subconn")
		case <-cc.ShutdownSubConnCh:
		}
	}

	p2 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, err := p2.Pick(balancer.PickInfo{Ctx: SetPickedCluster(context.Background(), "cds:notacluster")})
		if fmt.Sprint(err) != status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`).Error() {
			t.Fatalf("picker.Pick, got %v, %v, want error %v", gotSCSt, err, `unknown cluster selected for RPC: "cds:notacluster"`)
		}
	}

	// Resend the previous config with clusters.
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
			hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	m2 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
		addrs[0].BalancerAttributes = nil
		m2[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p3 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_1"),
			},
			wantSC: m2[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:cluster_2"),
			},
			wantSC: m2[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(context.Background(), "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p3, tt.pickInfo, tt.wantSC, tt.wantErr)
	}
}

// TestClusterManagerForwardsBalancerBuildOptions verifies that the build
// options used to create the cluster manager are forwarded to its children.
func (s) TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) {
	const (
		userAgent          = "ua"
		defaultTestTimeout = 1 * time.Second
	)

	// Set up the stub balancer such that we can read the build options passed
	// to it in the UpdateClientConnState method.
	ccsCh := testutils.NewChannel()
	bOpts := balancer.BuildOptions{
		DialCreds:        insecure.NewCredentials(),
		ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, 1234, nil),
		CustomUserAgent:  userAgent,
	}
	stub.Register(t.Name(), stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
			if !cmp.Equal(bd.BuildOptions, bOpts) {
				err := fmt.Errorf("buildOptions in child balancer: %v, want %v", bd, bOpts)
				ccsCh.Send(err)
				return err
			}
			ccsCh.Send(nil)
			return nil
		},
	})

	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, bOpts)

	configJSON1 := fmt.Sprintf(`{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"%s":""}] }
}
}`, t.Name())
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	if err := bal.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: config1}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	v, err := ccsCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timed out waiting for UpdateClientConnState result: %v", err)
	}
	if v != nil {
		t.Fatal(v)
	}
}

const initIdleBalancerName = "test-init-Idle-balancer"

var errTestInitIdle = fmt.Errorf("init Idle balancer error 0")

func init() {
	stub.Register(initIdleBalancerName, stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error {
			sc, err := bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{
				StateListener: func(state balancer.SubConnState) {
					err := fmt.Errorf("wrong picker error")
					if state.ConnectivityState == connectivity.Idle {
						err = errTestInitIdle
					}
					bd.ClientConn.UpdateState(balancer.State{
						ConnectivityState: state.ConnectivityState,
						Picker:            &testutils.TestConstPicker{Err: err},
					})
				},
			})
			if err != nil {
				return err
			}
			sc.Connect()
			return nil
		},
	})
}

// TestInitialIdle covers the case where the child reports Idle; the overall
// state should then be Idle.
func (s) TestInitialIdle(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"test-init-Idle-balancer":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and an address with hierarchy path ["cds:cluster_1"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Verify that a subconn is created for the address, and report Idle on it.
	for range wantAddrs {
		sc := <-cc.NewSubConnCh
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
	}

	if state1 := <-cc.NewStateCh; state1 != connectivity.Idle {
		t.Fatalf("Received aggregated state: %v, want Idle", state1)
	}
}

// TestClusterGracefulSwitch tests the graceful switch functionality for a
// child of the cluster manager. At first, the child is configured as a round
// robin load balancer, and thus should behave accordingly. The test then
// gracefully switches this child to a pick first load balancer. Once that
// balancer updates its state and completes the graceful switch process, the
// new picker should reflect this change.
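// The switch target is a stub policy that delegates to pick_first, so the
// post-switch behavior is distinguishable from the original round robin child.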
func (s) TestClusterGracefulSwitch(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"csp:cluster":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"csp:cluster"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	sc1 := <-cc.NewSubConnCh
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	p1 := <-cc.NewPickerCh
	pi := balancer.PickInfo{
		Ctx: SetPickedCluster(context.Background(), "csp:cluster"),
	}
	testPick(t, p1, pi, sc1, nil)

	childPolicyName := t.Name()
	stub.Register(childPolicyName, stub.BalancerFuncs{
		Init: func(bd *stub.BalancerData) {
			bd.Data = balancer.Get(grpc.PickFirstBalancerName).Build(bd.ClientConn, bd.BuildOptions)
		},
		UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error {
			bal := bd.Data.(balancer.Balancer)
			return bal.UpdateClientConnState(ccs)
		},
	})
	// Same cluster, different balancer type.
	configJSON2 := fmt.Sprintf(`{
"children": {
	"csp:cluster":{ "childPolicy": [{"%s":""}] }
}
}`, childPolicyName)
	config2, err := parser.ParseConfig([]byte(configJSON2))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[1], []string{"csp:cluster"}),
		}},
		BalancerConfig: config2,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}
	sc2 := <-cc.NewSubConnCh
	// Update the pick first balancer's SubConn to CONNECTING. This will cause
	// the pick first balancer to call UpdateState() with CONNECTING, which
	// shouldn't send a Picker update back, as the Graceful Switch process is
	// not complete.
	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer cancel()
	select {
	case <-cc.NewPickerCh:
		t.Fatalf("No new picker should have been sent due to the Graceful Switch process not completing")
	case <-ctx.Done():
	}

	// Update the pick first balancer's SubConn to READY. This will cause
	// the pick first balancer to call UpdateState() with READY, which should
	// send a Picker update back, as the Graceful Switch process is complete.
	// This Picker should always pick the pick first's created SubConn.
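	// Completing the switch promotes the pending child and tears down the
	// old child's SubConn, which the assertions after the pick verify.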
	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	p2 := <-cc.NewPickerCh
	testPick(t, p2, pi, sc2, nil)
	// The Graceful Switch process completing for the child should cause the
	// SubConns for the balancer being gracefully switched from to get deleted.
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	select {
	case <-ctx.Done():
		t.Fatalf("error waiting for sc.Shutdown()")
	case rsc := <-cc.ShutdownSubConnCh:
		// The SubConn shut down should have been the one created by the child
		// before switching.
		if rsc != sc1 {
			t.Fatalf("Shutdown() got: %v, want %v", rsc, sc1)
		}
	}
}

// tcc wraps a testutils.BalancerClientConn but stores all state transitions in
// a slice.
type tcc struct {
	*testutils.BalancerClientConn
	states []balancer.State
}

func (t *tcc) UpdateState(bs balancer.State) {
	t.states = append(t.states, bs)
	t.BalancerClientConn.UpdateState(bs)
}

// TestUpdateStatePauses verifies that state updates from a child made during
// UpdateClientConnState are paused, and only the most recent one reaches the
// parent ClientConn once the config update completes.
func (s) TestUpdateStatePauses(t *testing.T) {
	cc := &tcc{BalancerClientConn: testutils.NewBalancerClientConn(t)}

	balFuncs := stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error {
			bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: nil})
			bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil})
			return nil
		},
	}
	stub.Register("update_state_balancer", balFuncs)

	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"update_state_balancer":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and an address with hierarchy path ["cds:cluster_1"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Addresses: []resolver.Address{
			hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Verify that the only state update is the second one called by the child.
	if len(cc.states) != 1 || cc.states[0].ConnectivityState != connectivity.Ready {
		t.Fatalf("cc.states = %v; want [connectivity.Ready]", cc.states)
	}
}