github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/balancer/weightedtarget/weightedtarget_test.go (about)

     1  /*
     2   *
     3   * Copyright 2020 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package weightedtarget
    20  
    21  import (
    22  	"encoding/json"
    23  	"fmt"
    24  	"testing"
    25  	"time"
    26  
    27  	"github.com/google/go-cmp/cmp"
    28  	"github.com/hxx258456/ccgo/grpc/attributes"
    29  	"github.com/hxx258456/ccgo/grpc/balancer"
    30  	"github.com/hxx258456/ccgo/grpc/balancer/roundrobin"
    31  	"github.com/hxx258456/ccgo/grpc/connectivity"
    32  	"github.com/hxx258456/ccgo/grpc/internal/balancer/stub"
    33  	"github.com/hxx258456/ccgo/grpc/internal/balancergroup"
    34  	"github.com/hxx258456/ccgo/grpc/internal/grpctest"
    35  	"github.com/hxx258456/ccgo/grpc/internal/hierarchy"
    36  	"github.com/hxx258456/ccgo/grpc/internal/testutils"
    37  	"github.com/hxx258456/ccgo/grpc/resolver"
    38  	"github.com/hxx258456/ccgo/grpc/serviceconfig"
    39  )
    40  
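        // s wraps grpctest.Tester so that the tests in this file can be written as
        // methods on s and run as subtests through grpctest.RunSubTests, which
        // provides common per-test setup and teardown (such as leak checking).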
    41  type s struct {
    42  	grpctest.Tester
    43  }
    44  
    45  func Test(t *testing.T) {
    46  	grpctest.RunSubTests(t, s{})
    47  }
    48  
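        // testConfigBalancerBuilder builds testConfigBalancer instances, delegating
        // the actual balancing logic to the embedded round_robin builder.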
    49  type testConfigBalancerBuilder struct {
    50  	balancer.Builder
    51  }
    52  
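        // newTestConfigBalancerBuilder returns a testConfigBalancerBuilder whose
        // embedded Builder is the round_robin builder from the global registry.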
    53  func newTestConfigBalancerBuilder() *testConfigBalancerBuilder {
    54  	return &testConfigBalancerBuilder{
    55  		Builder: balancer.Get(roundrobin.Name),
    56  	}
    57  }
    58  
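        // Build constructs a round_robin balancer and wraps it in a
        // testConfigBalancer, which rewrites addresses before delegating to it.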
    59  func (t *testConfigBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
    60  	rr := t.Builder.Build(cc, opts)
    61  	return &testConfigBalancer{
    62  		Balancer: rr,
    63  	}
    64  }
    65  
    66  const testConfigBalancerName = "test_config_balancer"
    67  
    68  func (t *testConfigBalancerBuilder) Name() string {
    69  	return testConfigBalancerName
    70  }
    71  
    72  type stringBalancerConfig struct {
    73  	serviceconfig.LoadBalancingConfig
    74  	configStr string
    75  }
    76  
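        // ParseConfig expects the child policy config to be a single JSON string,
        // which is carried through stringBalancerConfig and later attached to every
        // address as an attribute.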
    77  func (t *testConfigBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
    78  	var cfg string
    79  	if err := json.Unmarshal(c, &cfg); err != nil {
    80  		return nil, fmt.Errorf("failed to unmarshal config in %q: %v", testConfigBalancerName, err)
    81  	}
    82  	return stringBalancerConfig{configStr: cfg}, nil
    83  }
    84  
    85  // testConfigBalancer is a roundrobin balancer, but it takes the balancer config
    86  // string and adds it as an address attribute to the backend addresses.
    87  type testConfigBalancer struct {
    88  	balancer.Balancer
    89  }
    90  
    91  // configKey is the type used as the key to store balancer config in the
    92  // Attributes field of resolver.Address.
    93  type configKey struct{}
    94  
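        // setConfigKey returns a copy of addr with config attached as an attribute
        // under configKey.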
    95  func setConfigKey(addr resolver.Address, config string) resolver.Address {
    96  	addr.Attributes = addr.Attributes.WithValue(configKey{}, config)
    97  	return addr
    98  }
    99  
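        // getConfigKey extracts the config string attached by setConfigKey,
        // reporting whether the attribute was present.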
   100  func getConfigKey(attr *attributes.Attributes) (string, bool) {
   101  	v := attr.Value(configKey{})
   102  	name, ok := v.(string)
   103  	return name, ok
   104  }
   105  
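        // UpdateClientConnState stamps the received config string onto every
        // resolved address, clears the balancer config, and forwards the update to
        // the embedded round_robin balancer.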
   106  func (b *testConfigBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
   107  	c, ok := s.BalancerConfig.(stringBalancerConfig)
   108  	if !ok {
   109  		return fmt.Errorf("unexpected balancer config with type %T", s.BalancerConfig)
   110  	}
   111  
   112  	addrsWithAttr := make([]resolver.Address, len(s.ResolverState.Addresses))
   113  	for i, addr := range s.ResolverState.Addresses {
   114  		addrsWithAttr[i] = setConfigKey(addr, c.configStr)
   115  	}
   116  	s.BalancerConfig = nil
   117  	s.ResolverState.Addresses = addrsWithAttr
   118  	return b.Balancer.UpdateClientConnState(s)
   119  }
   120  
   121  func (b *testConfigBalancer) Close() {
   122  	b.Balancer.Close()
   123  }
   124  
   125  var (
   126  	wtbBuilder          balancer.Builder
   127  	wtbParser           balancer.ConfigParser
   128  	testBackendAddrStrs []string
   129  )
   130  
   131  const testBackendAddrsCount = 12
   132  
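        // init registers the test_config_balancer, generates fake backend
        // addresses, grabs the weighted_target builder and its config parser, and
        // makes the tests deterministic by shortening the sub-balancer close
        // timeout and substituting a predictable WRR implementation.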
   133  func init() {
   134  	balancer.Register(newTestConfigBalancerBuilder())
   135  	for i := 0; i < testBackendAddrsCount; i++ {
   136  		testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i))
   137  	}
   138  	wtbBuilder = balancer.Get(Name)
   139  	wtbParser = wtbBuilder.(balancer.ConfigParser)
   140  
   141  	balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond
   142  	NewRandomWRR = testutils.NewTestWRR
   143  }
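
        // For context, outside of tests this LB policy is normally selected through
        // a service config rather than built by hand. A minimal sketch, assuming the
        // policy is registered under the upstream name "weighted_target_experimental"
        // and using a placeholder target URI:
        //
        //	cc, err := grpc.Dial("dns:///example.test:443",
        //		grpc.WithDefaultServiceConfig(`{
        //		  "loadBalancingConfig": [{
        //		    "weighted_target_experimental": {
        //		      "targets": {
        //		        "cluster_1": {"weight": 1, "childPolicy": [{"round_robin": ""}]}
        //		      }
        //		    }
        //		  }]
        //		}`))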
   144  
   145  // TestWeightedTarget covers the cases that a sub-balancer is added and a
   146  // sub-balancer is removed. It verifies that the addresses and balancer configs
   147  // are forwarded to the right sub-balancer. This test is intended to test the
   148  // glue code in weighted_target.
   149  func (s) TestWeightedTarget(t *testing.T) {
   150  	cc := testutils.NewTestClientConn(t)
   151  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   152  	defer wtb.Close()
   153  
   154  	// Start with "cluster_1: round_robin".
   155  	config1, err := wtbParser.ParseConfig([]byte(`
   156  {
   157    "targets": {
   158      "cluster_1": {
   159        "weight":1,
   160        "childPolicy": [{"round_robin": ""}]
   161      }
   162    }
   163  }`))
   164  	if err != nil {
   165  		t.Fatalf("failed to parse balancer config: %v", err)
   166  	}
   167  
   168  	// Send the config, and an address with hierarchy path ["cluster_1"].
   169  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1], Attributes: nil}
   170  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   171  		ResolverState:  resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}},
   172  		BalancerConfig: config1,
   173  	}); err != nil {
   174  		t.Fatalf("failed to update ClientConn state: %v", err)
   175  	}
   176  	verifyAddressInNewSubConn(t, cc, addr1)
   177  
   178  	// Send subconn state change.
   179  	sc1 := <-cc.NewSubConnCh
   180  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   181  	<-cc.NewPickerCh
   182  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   183  	p := <-cc.NewPickerCh
   184  
   185  	// Test pick with one backend.
   186  	for i := 0; i < 5; i++ {
   187  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   188  		if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
   189  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1)
   190  		}
   191  	}
   192  
   193  	// Remove cluster_1, and add "cluster_2: test_config_balancer". The
   194  	// test_config_balancer adds an address attribute whose value is set to the
   195  	// config that is passed to it.
   196  	config2, err := wtbParser.ParseConfig([]byte(`
   197  {
   198    "targets": {
   199      "cluster_2": {
   200         "weight":1,
   201         "childPolicy": [{"test_config_balancer": "cluster_2"}]
   202      }
   203    }
   204  }`))
   205  	if err != nil {
   206  		t.Fatalf("failed to parse balancer config: %v", err)
   207  	}
   208  
   209  	// Send the config, and one address with hierarchy path ["cluster_2"].
   210  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2], Attributes: nil}
   211  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   212  		ResolverState:  resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_2"})}},
   213  		BalancerConfig: config2,
   214  	}); err != nil {
   215  		t.Fatalf("failed to update ClientConn state: %v", err)
   216  	}
   217  
   218  	// Expect a new subConn from the test_config_balancer which has an address
   219  	// attribute set to the config that was passed to it.
   220  	verifyAddressInNewSubConn(t, cc, setConfigKey(addr2, "cluster_2"))
   221  
   222  	// The subconn for cluster_1 should be removed.
   223  	scRemoved := <-cc.RemoveSubConnCh
   224  	if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
   225  		t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved)
   226  	}
   227  	wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   228  
   229  	sc2 := <-cc.NewSubConnCh
   230  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   231  	<-cc.NewPickerCh
   232  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   233  	p = <-cc.NewPickerCh
   234  
   235  	// Test pick with one backend.
   236  	for i := 0; i < 5; i++ {
   237  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   238  		if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) {
   239  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2)
   240  		}
   241  	}
   242  
   243  	// Replace the child policy of "cluster_2" with "round_robin".
   244  	config3, err := wtbParser.ParseConfig([]byte(`
   245  {
   246    "targets": {
   247      "cluster_2": {
   248        "weight":1,
   249        "childPolicy": [{"round_robin": ""}]
   250      }
   251    }
   252  }`))
   253  	if err != nil {
   254  		t.Fatalf("failed to parse balancer config: %v", err)
   255  	}
   256  
   257  	// Send the config, and an address with hierarchy path ["cluster_2"].
   258  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3], Attributes: nil}
   259  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   260  		ResolverState:  resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr3, []string{"cluster_2"})}},
   261  		BalancerConfig: config3,
   262  	}); err != nil {
   263  		t.Fatalf("failed to update ClientConn state: %v", err)
   264  	}
   265  	verifyAddressInNewSubConn(t, cc, addr3)
   266  
   267  	// The subconn from the test_config_balancer should be removed.
   268  	scRemoved = <-cc.RemoveSubConnCh
   269  	if !cmp.Equal(scRemoved, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) {
   270  		t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scRemoved)
   271  	}
   272  	wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   273  
   274  	// Send subconn state change.
   275  	sc3 := <-cc.NewSubConnCh
   276  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   277  	<-cc.NewPickerCh
   278  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   279  	p = <-cc.NewPickerCh
   280  
   281  	// Test pick with one backend.
   282  	for i := 0; i < 5; i++ {
   283  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   284  		if !cmp.Equal(gotSCSt.SubConn, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) {
   285  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3)
   286  		}
   287  	}
   288  }
   289  
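        // subConnFromPicker adapts a picker into a function returning the SubConn
        // chosen for an empty PickInfo; testutils.IsRoundRobin consumes such a
        // function to verify the distribution of picks.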
   290  func subConnFromPicker(p balancer.Picker) func() balancer.SubConn {
   291  	return func() balancer.SubConn {
   292  		scst, _ := p.Pick(balancer.PickInfo{})
   293  		return scst.SubConn
   294  	}
   295  }
   296  
   297  // TestWeightedTarget_OneSubBalancer_AddRemoveBackend tests the case where we
   298  // have a weighted target balancer with one sub-balancer, and we add and remove
   299  // backends from the subBalancer.
   300  func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) {
   301  	cc := testutils.NewTestClientConn(t)
   302  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   303  	defer wtb.Close()
   304  
   305  	// Start with "cluster_1: round_robin".
   306  	config, err := wtbParser.ParseConfig([]byte(`
   307  {
   308    "targets": {
   309      "cluster_1": {
   310        "weight":1,
   311        "childPolicy": [{"round_robin": ""}]
   312      }
   313    }
   314  }`))
   315  	if err != nil {
   316  		t.Fatalf("failed to parse balancer config: %v", err)
   317  	}
   318  
   319  	// Send the config, and an address with hierarchy path ["cluster_1"].
   320  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   321  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   322  		ResolverState:  resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}},
   323  		BalancerConfig: config,
   324  	}); err != nil {
   325  		t.Fatalf("failed to update ClientConn state: %v", err)
   326  	}
   327  	verifyAddressInNewSubConn(t, cc, addr1)
   328  
   329  	// Expect one SubConn, and move it to READY.
   330  	sc1 := <-cc.NewSubConnCh
   331  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   332  	<-cc.NewPickerCh
   333  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   334  	p := <-cc.NewPickerCh
   335  
   336  	// Test pick with one backend.
   337  	for i := 0; i < 5; i++ {
   338  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   339  		if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
   340  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1)
   341  		}
   342  	}
   343  
   344  	// Send two addresses.
   345  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   346  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   347  		ResolverState: resolver.State{Addresses: []resolver.Address{
   348  			hierarchy.Set(addr1, []string{"cluster_1"}),
   349  			hierarchy.Set(addr2, []string{"cluster_1"}),
   350  		}},
   351  		BalancerConfig: config,
   352  	}); err != nil {
   353  		t.Fatalf("failed to update ClientConn state: %v", err)
   354  	}
   355  	verifyAddressInNewSubConn(t, cc, addr2)
   356  
   357  	// Expect one new SubConn, and move it to READY.
   358  	sc2 := <-cc.NewSubConnCh
   359  	// Update the SubConn to become READY.
   360  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   361  	<-cc.NewPickerCh
   362  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   363  	p = <-cc.NewPickerCh
   364  
   365  	// Test round robin pick.
   366  	want := []balancer.SubConn{sc1, sc2}
   367  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   368  		t.Fatalf("want %v, got %v", want, err)
   369  	}
   370  
   371  	// Remove the first address.
   372  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   373  		ResolverState:  resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_1"})}},
   374  		BalancerConfig: config,
   375  	}); err != nil {
   376  		t.Fatalf("failed to update ClientConn state: %v", err)
   377  	}
   378  
   379  	// Expect one SubConn to be removed.
   380  	scRemoved := <-cc.RemoveSubConnCh
   381  	if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
   382  		t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved)
   383  	}
   384  	wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   385  	p = <-cc.NewPickerCh
   386  
   387  	// Test pick with only the second SubConn.
   388  	for i := 0; i < 5; i++ {
   389  		gotSC, _ := p.Pick(balancer.PickInfo{})
   390  		if !cmp.Equal(gotSC.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) {
   391  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSC, sc2)
   392  		}
   393  	}
   394  }
   395  
   396  // TestWeightedTarget_TwoSubBalancers_OneBackend tests the case where we have a
   397  // weighted target balancer with two sub-balancers, each with one backend.
   398  func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) {
   399  	cc := testutils.NewTestClientConn(t)
   400  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   401  	defer wtb.Close()
   402  
   403  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
   404  	config, err := wtbParser.ParseConfig([]byte(`
   405  {
   406    "targets": {
   407      "cluster_1": {
   408        "weight":1,
   409        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   410      },
   411      "cluster_2": {
   412        "weight":1,
   413        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   414      }
   415    }
   416  }`))
   417  	if err != nil {
   418  		t.Fatalf("failed to parse balancer config: %v", err)
   419  	}
   420  
   421  	// Send the config with one address for each cluster.
   422  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   423  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   424  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   425  		ResolverState: resolver.State{Addresses: []resolver.Address{
   426  			hierarchy.Set(addr1, []string{"cluster_1"}),
   427  			hierarchy.Set(addr2, []string{"cluster_2"}),
   428  		}},
   429  		BalancerConfig: config,
   430  	}); err != nil {
   431  		t.Fatalf("failed to update ClientConn state: %v", err)
   432  	}
   433  
   434  	scs := waitForNewSubConns(t, cc, 2)
   435  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   436  		"cluster_1": {addr1},
   437  		"cluster_2": {addr2},
   438  	})
   439  
   440  	// We expect a single subConn on each subBalancer.
   441  	sc1 := scs["cluster_1"][0].sc
   442  	sc2 := scs["cluster_2"][0].sc
   443  
   444  	// Send state changes for both SubConns, and wait for the picker.
   445  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   446  	<-cc.NewPickerCh
   447  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   448  	<-cc.NewPickerCh
   449  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   450  	<-cc.NewPickerCh
   451  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   452  	p := <-cc.NewPickerCh
   453  
   454  	// Test roundrobin on the last picker.
   455  	want := []balancer.SubConn{sc1, sc2}
   456  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   457  		t.Fatalf("want %v, got %v", want, err)
   458  	}
   459  }
   460  
   461  // TestWeightedTarget_TwoSubBalancers_MoreBackends tests the case where we have
   462  // a weighted target balancer with two sub-balancers, each with more than one
   463  // backend.
   464  func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) {
   465  	cc := testutils.NewTestClientConn(t)
   466  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   467  	defer wtb.Close()
   468  
   469  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
   470  	config, err := wtbParser.ParseConfig([]byte(`
   471  {
   472    "targets": {
   473      "cluster_1": {
   474        "weight":1,
   475        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   476      },
   477      "cluster_2": {
   478        "weight":1,
   479        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   480      }
   481    }
   482  }`))
   483  	if err != nil {
   484  		t.Fatalf("failed to parse balancer config: %v", err)
   485  	}
   486  
   487  	// Send the config with two backends for each cluster.
   488  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   489  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   490  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
   491  	addr4 := resolver.Address{Addr: testBackendAddrStrs[4]}
   492  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   493  		ResolverState: resolver.State{Addresses: []resolver.Address{
   494  			hierarchy.Set(addr1, []string{"cluster_1"}),
   495  			hierarchy.Set(addr2, []string{"cluster_1"}),
   496  			hierarchy.Set(addr3, []string{"cluster_2"}),
   497  			hierarchy.Set(addr4, []string{"cluster_2"}),
   498  		}},
   499  		BalancerConfig: config,
   500  	}); err != nil {
   501  		t.Fatalf("failed to update ClientConn state: %v", err)
   502  	}
   503  
   504  	scs := waitForNewSubConns(t, cc, 4)
   505  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   506  		"cluster_1": {addr1, addr2},
   507  		"cluster_2": {addr3, addr4},
   508  	})
   509  
   510  	// We expect two subConns on each subBalancer.
   511  	sc1 := scs["cluster_1"][0].sc
   512  	sc2 := scs["cluster_1"][1].sc
   513  	sc3 := scs["cluster_2"][0].sc
   514  	sc4 := scs["cluster_2"][1].sc
   515  
   516  	// Send state changes for all SubConns, and wait for the picker.
   517  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   518  	<-cc.NewPickerCh
   519  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   520  	<-cc.NewPickerCh
   521  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   522  	<-cc.NewPickerCh
   523  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   524  	<-cc.NewPickerCh
   525  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   526  	<-cc.NewPickerCh
   527  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   528  	<-cc.NewPickerCh
   529  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   530  	<-cc.NewPickerCh
   531  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   532  	p := <-cc.NewPickerCh
   533  
   534  	// Test roundrobin on the last picker. RPCs should be sent equally to all
   535  	// backends.
   536  	want := []balancer.SubConn{sc1, sc2, sc3, sc4}
   537  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   538  		t.Fatalf("want %v, got %v", want, err)
   539  	}
   540  
   541  	// Turn sc2's connection down; picks should still be weighted RR, with cluster_1 now served only by sc1.
   542  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   543  	p = <-cc.NewPickerCh
   544  	want = []balancer.SubConn{sc1, sc1, sc3, sc4}
   545  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   546  		t.Fatalf("want %v, got %v", want, err)
   547  	}
   548  
   549  	// Remove subConn corresponding to addr3.
   550  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   551  		ResolverState: resolver.State{Addresses: []resolver.Address{
   552  			hierarchy.Set(addr1, []string{"cluster_1"}),
   553  			hierarchy.Set(addr2, []string{"cluster_1"}),
   554  			hierarchy.Set(addr4, []string{"cluster_2"}),
   555  		}},
   556  		BalancerConfig: config,
   557  	}); err != nil {
   558  		t.Fatalf("failed to update ClientConn state: %v", err)
   559  	}
   560  	scRemoved := <-cc.RemoveSubConnCh
   561  	if !cmp.Equal(scRemoved, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) {
   562  		t.Fatalf("RemoveSubConn, want %v, got %v", sc3, scRemoved)
   563  	}
   564  	wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   565  	p = <-cc.NewPickerCh
   566  	want = []balancer.SubConn{sc1, sc4}
   567  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   568  		t.Fatalf("want %v, got %v", want, err)
   569  	}
   570  
   571  	// Turn sc1's connection down.
   572  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   573  	p = <-cc.NewPickerCh
   574  	want = []balancer.SubConn{sc4}
   575  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   576  		t.Fatalf("want %v, got %v", want, err)
   577  	}
   578  
   579  	// Move the only remaining READY connection to CONNECTING; picks should queue.
   580  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   581  	p = <-cc.NewPickerCh
   582  	for i := 0; i < 5; i++ {
   583  		if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable {
   584  			t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err)
   585  		}
   586  	}
   587  
   588  	// Turn the last connection down, so every connection is in TransientFailure.
   589  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   590  	p = <-cc.NewPickerCh
   591  	for i := 0; i < 5; i++ {
   592  		if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure {
   593  			t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err)
   594  		}
   595  	}
   596  }
   597  
   598  // TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends tests the
   599  // case where we have a weighted target balancer with two sub-balancers of
   600  // differing weights.
   601  func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *testing.T) {
   602  	cc := testutils.NewTestClientConn(t)
   603  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   604  	defer wtb.Close()
   605  
   606  	// Start with two subBalancers, one with twice the weight of the other.
   607  	config, err := wtbParser.ParseConfig([]byte(`
   608  {
   609    "targets": {
   610      "cluster_1": {
   611        "weight": 2,
   612        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   613      },
   614      "cluster_2": {
   615        "weight": 1,
   616        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   617      }
   618    }
   619  }`))
   620  	if err != nil {
   621  		t.Fatalf("failed to parse balancer config: %v", err)
   622  	}
   623  
   624  	// Send the config with two backends for each cluster.
   625  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   626  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   627  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
   628  	addr4 := resolver.Address{Addr: testBackendAddrStrs[4]}
   629  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   630  		ResolverState: resolver.State{Addresses: []resolver.Address{
   631  			hierarchy.Set(addr1, []string{"cluster_1"}),
   632  			hierarchy.Set(addr2, []string{"cluster_1"}),
   633  			hierarchy.Set(addr3, []string{"cluster_2"}),
   634  			hierarchy.Set(addr4, []string{"cluster_2"}),
   635  		}},
   636  		BalancerConfig: config,
   637  	}); err != nil {
   638  		t.Fatalf("failed to update ClientConn state: %v", err)
   639  	}
   640  
   641  	scs := waitForNewSubConns(t, cc, 4)
   642  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   643  		"cluster_1": {addr1, addr2},
   644  		"cluster_2": {addr3, addr4},
   645  	})
   646  
   647  	// We expect two subConns on each subBalancer.
   648  	sc1 := scs["cluster_1"][0].sc
   649  	sc2 := scs["cluster_1"][1].sc
   650  	sc3 := scs["cluster_2"][0].sc
   651  	sc4 := scs["cluster_2"][1].sc
   652  
   653  	// Send state changes for all SubConns, and wait for the picker.
   654  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   655  	<-cc.NewPickerCh
   656  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   657  	<-cc.NewPickerCh
   658  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   659  	<-cc.NewPickerCh
   660  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   661  	<-cc.NewPickerCh
   662  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   663  	<-cc.NewPickerCh
   664  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   665  	<-cc.NewPickerCh
   666  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   667  	<-cc.NewPickerCh
   668  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   669  	p := <-cc.NewPickerCh
   670  
   671  	// Test roundrobin on the last picker. Twice the number of RPCs should be
   672  	// sent to cluster_1 when compared to cluster_2.
   673  	want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4}
   674  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   675  		t.Fatalf("want %v, got %v", want, err)
   676  	}
   677  }
   678  
   679  // TestWeightedTarget_ThreeSubBalancers_RemoveBalancer tests the case where we
   680  // have a weighted target balancer with three sub-balancers and we remove one of
   681  // the subBalancers.
   682  func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) {
   683  	cc := testutils.NewTestClientConn(t)
   684  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   685  	defer wtb.Close()
   686  
   687  	// Start with three subBalancers, each with weight 1.
   688  	config, err := wtbParser.ParseConfig([]byte(`
   689  {
   690    "targets": {
   691      "cluster_1": {
   692        "weight": 1,
   693        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   694      },
   695      "cluster_2": {
   696        "weight": 1,
   697        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   698      },
   699      "cluster_3": {
   700        "weight": 1,
   701        "childPolicy": [{"test_config_balancer": "cluster_3"}]
   702      }
   703    }
   704  }`))
   705  	if err != nil {
   706  		t.Fatalf("failed to parse balancer config: %v", err)
   707  	}
   708  
   709  	// Send the config with one backend for each cluster.
   710  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   711  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   712  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
   713  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   714  		ResolverState: resolver.State{Addresses: []resolver.Address{
   715  			hierarchy.Set(addr1, []string{"cluster_1"}),
   716  			hierarchy.Set(addr2, []string{"cluster_2"}),
   717  			hierarchy.Set(addr3, []string{"cluster_3"}),
   718  		}},
   719  		BalancerConfig: config,
   720  	}); err != nil {
   721  		t.Fatalf("failed to update ClientConn state: %v", err)
   722  	}
   723  
   724  	scs := waitForNewSubConns(t, cc, 3)
   725  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   726  		"cluster_1": {addr1},
   727  		"cluster_2": {addr2},
   728  		"cluster_3": {addr3},
   729  	})
   730  
   731  	// We expect one subConn on each subBalancer.
   732  	sc1 := scs["cluster_1"][0].sc
   733  	sc2 := scs["cluster_2"][0].sc
   734  	sc3 := scs["cluster_3"][0].sc
   735  
   736  	// Send state changes for all SubConns, and wait for the picker.
   737  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   738  	<-cc.NewPickerCh
   739  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   740  	<-cc.NewPickerCh
   741  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   742  	<-cc.NewPickerCh
   743  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   744  	<-cc.NewPickerCh
   745  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   746  	<-cc.NewPickerCh
   747  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   748  	p := <-cc.NewPickerCh
   749  
   750  	want := []balancer.SubConn{sc1, sc2, sc3}
   751  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   752  		t.Fatalf("want %v, got %v", want, err)
   753  	}
   754  
   755  	// Remove the second balancer, while the other two are ready.
   756  	config, err = wtbParser.ParseConfig([]byte(`
   757  {
   758    "targets": {
   759      "cluster_1": {
   760        "weight": 1,
   761        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   762      },
   763      "cluster_3": {
   764        "weight": 1,
   765        "childPolicy": [{"test_config_balancer": "cluster_3"}]
   766      }
   767    }
   768  }`))
   769  	if err != nil {
   770  		t.Fatalf("failed to parse balancer config: %v", err)
   771  	}
   772  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   773  		ResolverState: resolver.State{Addresses: []resolver.Address{
   774  			hierarchy.Set(addr1, []string{"cluster_1"}),
   775  			hierarchy.Set(addr3, []string{"cluster_3"}),
   776  		}},
   777  		BalancerConfig: config,
   778  	}); err != nil {
   779  		t.Fatalf("failed to update ClientConn state: %v", err)
   780  	}
   781  
   782  	// Removing a subBalancer causes the weighted target LB policy to push a new
   783  	// picker which ensures that the removed subBalancer is not picked for RPCs.
   784  	p = <-cc.NewPickerCh
   785  
   786  	scRemoved := <-cc.RemoveSubConnCh
   787  	if !cmp.Equal(scRemoved, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) {
   788  		t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scRemoved)
   789  	}
   790  	want = []balancer.SubConn{sc1, sc3}
   791  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   792  		t.Fatalf("want %v, got %v", want, err)
   793  	}
   794  
   795  	// Move balancer 3 into transient failure.
   796  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   797  	<-cc.NewPickerCh
   798  
   799  	// Remove the first balancer, while the third is transient failure.
   800  	config, err = wtbParser.ParseConfig([]byte(`
   801  {
   802    "targets": {
   803      "cluster_3": {
   804        "weight": 1,
   805        "childPolicy": [{"test_config_balancer": "cluster_3"}]
   806      }
   807    }
   808  }`))
   809  	if err != nil {
   810  		t.Fatalf("failed to parse balancer config: %v", err)
   811  	}
   812  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   813  		ResolverState: resolver.State{Addresses: []resolver.Address{
   814  			hierarchy.Set(addr3, []string{"cluster_3"}),
   815  		}},
   816  		BalancerConfig: config,
   817  	}); err != nil {
   818  		t.Fatalf("failed to update ClientConn state: %v", err)
   819  	}
   820  
   821  	// Removing a subBalancer causes the weighted target LB policy to push a new
   822  	// picker which ensures that the removed subBalancer is not picked for RPCs.
   823  	p = <-cc.NewPickerCh
   824  
   825  	scRemoved = <-cc.RemoveSubConnCh
   826  	if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
   827  		t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved)
   828  	}
   829  	for i := 0; i < 5; i++ {
   830  		if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure {
   831  			t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err)
   832  		}
   833  	}
   834  }
   835  
   836  // TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends tests the case
   837  // where we have a weighted target balancer with two sub-balancers, and we
   838  // change the weight of these subBalancers.
   839  func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing.T) {
   840  	cc := testutils.NewTestClientConn(t)
   841  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   842  	defer wtb.Close()
   843  
   844  	// Start with two subBalancers, one with twice the weight of the other.
   845  	config, err := wtbParser.ParseConfig([]byte(`
   846  {
   847    "targets": {
   848      "cluster_1": {
   849        "weight": 2,
   850        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   851      },
   852      "cluster_2": {
   853        "weight": 1,
   854        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   855      }
   856    }
   857  }`))
   858  	if err != nil {
   859  		t.Fatalf("failed to parse balancer config: %v", err)
   860  	}
   861  
   862  	// Send the config with two backends for each cluster.
   863  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   864  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   865  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
   866  	addr4 := resolver.Address{Addr: testBackendAddrStrs[4]}
   867  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   868  		ResolverState: resolver.State{Addresses: []resolver.Address{
   869  			hierarchy.Set(addr1, []string{"cluster_1"}),
   870  			hierarchy.Set(addr2, []string{"cluster_1"}),
   871  			hierarchy.Set(addr3, []string{"cluster_2"}),
   872  			hierarchy.Set(addr4, []string{"cluster_2"}),
   873  		}},
   874  		BalancerConfig: config,
   875  	}); err != nil {
   876  		t.Fatalf("failed to update ClientConn state: %v", err)
   877  	}
   878  
   879  	scs := waitForNewSubConns(t, cc, 4)
   880  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   881  		"cluster_1": {addr1, addr2},
   882  		"cluster_2": {addr3, addr4},
   883  	})
   884  
   885  	// We expect two subConns on each subBalancer.
   886  	sc1 := scs["cluster_1"][0].sc
   887  	sc2 := scs["cluster_1"][1].sc
   888  	sc3 := scs["cluster_2"][0].sc
   889  	sc4 := scs["cluster_2"][1].sc
   890  
   891  	// Send state changes for all SubConns, and wait for the picker.
   892  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   893  	<-cc.NewPickerCh
   894  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   895  	<-cc.NewPickerCh
   896  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   897  	<-cc.NewPickerCh
   898  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   899  	<-cc.NewPickerCh
   900  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   901  	<-cc.NewPickerCh
   902  	wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   903  	<-cc.NewPickerCh
   904  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   905  	<-cc.NewPickerCh
   906  	wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready})
   907  	p := <-cc.NewPickerCh
   908  
   909  	// Test roundrobin on the last picker. Twice the number of RPCs should be
   910  	// sent to cluster_1 when compared to cluster_2.
   911  	want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4}
   912  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   913  		t.Fatalf("want %v, got %v", want, err)
   914  	}
   915  
   916  	// Change the weight of cluster_1.
   917  	config, err = wtbParser.ParseConfig([]byte(`
   918  {
   919    "targets": {
   920      "cluster_1": {
   921        "weight": 3,
   922        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   923      },
   924      "cluster_2": {
   925        "weight": 1,
   926        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   927      }
   928    }
   929  }`))
   930  	if err != nil {
   931  		t.Fatalf("failed to parse balancer config: %v", err)
   932  	}
   933  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   934  		ResolverState: resolver.State{Addresses: []resolver.Address{
   935  			hierarchy.Set(addr1, []string{"cluster_1"}),
   936  			hierarchy.Set(addr2, []string{"cluster_1"}),
   937  			hierarchy.Set(addr3, []string{"cluster_2"}),
   938  			hierarchy.Set(addr4, []string{"cluster_2"}),
   939  		}},
   940  		BalancerConfig: config,
   941  	}); err != nil {
   942  		t.Fatalf("failed to update ClientConn state: %v", err)
   943  	}
   944  
   945  	// Weight change causes a new picker to be pushed to the channel.
   946  	p = <-cc.NewPickerCh
   947  	want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4}
   948  	if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil {
   949  		t.Fatalf("want %v, got %v", want, err)
   950  	}
   951  }
   952  
   953  // TestWeightedTarget_InitOneSubBalancerTransientFailure tests that, at init
   954  // time with two sub-balancers, if one sub-balancer reports transient_failure,
   955  // picks do not fail with transient_failure but instead queue, waiting for the
   956  // other sub-balancer to become ready.
   957  func (s) TestWeightedTarget_InitOneSubBalancerTransientFailure(t *testing.T) {
   958  	cc := testutils.NewTestClientConn(t)
   959  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   960  	defer wtb.Close()
   961  
   962  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
   963  	config, err := wtbParser.ParseConfig([]byte(`
   964  {
   965    "targets": {
   966      "cluster_1": {
   967        "weight":1,
   968        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   969      },
   970      "cluster_2": {
   971        "weight":1,
   972        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   973      }
   974    }
   975  }`))
   976  	if err != nil {
   977  		t.Fatalf("failed to parse balancer config: %v", err)
   978  	}
   979  
   980  	// Send the config with one address for each cluster.
   981  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   982  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   983  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   984  		ResolverState: resolver.State{Addresses: []resolver.Address{
   985  			hierarchy.Set(addr1, []string{"cluster_1"}),
   986  			hierarchy.Set(addr2, []string{"cluster_2"}),
   987  		}},
   988  		BalancerConfig: config,
   989  	}); err != nil {
   990  		t.Fatalf("failed to update ClientConn state: %v", err)
   991  	}
   992  
   993  	scs := waitForNewSubConns(t, cc, 2)
   994  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   995  		"cluster_1": {addr1},
   996  		"cluster_2": {addr2},
   997  	})
   998  
   999  	// We expect a single subConn on each subBalancer.
  1000  	sc1 := scs["cluster_1"][0].sc
  1001  	_ = scs["cluster_2"][0].sc
  1002  
  1003  	// Set one subconn to TransientFailure; this will trigger one sub-balancer
  1004  	// to report transient failure.
  1005  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1006  
  1007  	p := <-cc.NewPickerCh
  1008  	for i := 0; i < 5; i++ {
  1009  		r, err := p.Pick(balancer.PickInfo{})
  1010  		if err != balancer.ErrNoSubConnAvailable {
  1011  			t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrNoSubConnAvailable, r, err)
  1012  		}
  1013  	}
  1014  }
  1015  
  1016  // Test that with two sub-balancers, both in transient_failure, if one turns
  1017  // connecting, the overall state stays in transient_failure, and all picks
  1018  // return a transient failure error.
  1019  func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) {
  1020  	cc := testutils.NewTestClientConn(t)
  1021  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
  1022  	defer wtb.Close()
  1023  
  1024  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
  1025  	config, err := wtbParser.ParseConfig([]byte(`
  1026  {
  1027    "targets": {
  1028      "cluster_1": {
  1029        "weight":1,
  1030        "childPolicy": [{"test_config_balancer": "cluster_1"}]
  1031      },
  1032      "cluster_2": {
  1033        "weight":1,
  1034        "childPolicy": [{"test_config_balancer": "cluster_2"}]
  1035      }
  1036    }
  1037  }`))
  1038  	if err != nil {
  1039  		t.Fatalf("failed to parse balancer config: %v", err)
  1040  	}
  1041  
  1042  	// Send the config with one address for each cluster.
  1043  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
  1044  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
  1045  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1046  		ResolverState: resolver.State{Addresses: []resolver.Address{
  1047  			hierarchy.Set(addr1, []string{"cluster_1"}),
  1048  			hierarchy.Set(addr2, []string{"cluster_2"}),
  1049  		}},
  1050  		BalancerConfig: config,
  1051  	}); err != nil {
  1052  		t.Fatalf("failed to update ClientConn state: %v", err)
  1053  	}
  1054  
  1055  	scs := waitForNewSubConns(t, cc, 2)
  1056  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
  1057  		"cluster_1": {addr1},
  1058  		"cluster_2": {addr2},
  1059  	})
  1060  
  1061  	// We expect a single subConn on each subBalancer.
  1062  	sc1 := scs["cluster_1"][0].sc
  1063  	sc2 := scs["cluster_2"][0].sc
  1064  
  1065  	// Set both subconns to TransientFailure; this puts both sub-balancers in
  1066  	// transient failure.
  1067  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1068  	<-cc.NewPickerCh
  1069  	wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1070  	p := <-cc.NewPickerCh
  1071  
  1072  	for i := 0; i < 5; i++ {
  1073  		r, err := p.Pick(balancer.PickInfo{})
  1074  		if err != balancer.ErrTransientFailure {
  1075  			t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err)
  1076  		}
  1077  	}
  1078  
  1079  	// Set one subconn to Connecting; it shouldn't change the overall state.
  1080  	wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1081  	select {
  1082  	case <-time.After(100 * time.Millisecond):
  1083  	case <-cc.NewPickerCh:
  1084  		t.Fatal("received new picker from the LB policy when expecting none")
  1085  	}
  1086  
  1087  	for i := 0; i < 5; i++ {
  1088  		r, err := p.Pick(balancer.PickInfo{})
  1089  		if err != balancer.ErrTransientFailure {
  1090  			t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err)
  1091  		}
  1092  	}
  1093  }
  1094  
  1095  // verifyAddressInNewSubConn verifies that a SubConn is created with the
  1096  // expected address and with the hierarchy path in the address cleared.
  1097  func verifyAddressInNewSubConn(t *testing.T, cc *testutils.TestClientConn, addr resolver.Address) {
  1098  	t.Helper()
  1099  
  1100  	gotAddr := <-cc.NewSubConnAddrsCh
  1101  	wantAddr := []resolver.Address{hierarchy.Set(addr, []string{})}
  1102  	if diff := cmp.Diff(gotAddr, wantAddr, cmp.AllowUnexported(attributes.Attributes{})); diff != "" {
  1103  		t.Fatalf("got unexpected new subconn addrs: %v", diff)
  1104  	}
  1105  }
  1106  
  1107  // subConnWithAddr wraps a subConn and the address for which it was created.
  1108  type subConnWithAddr struct {
  1109  	sc   balancer.SubConn
  1110  	addr resolver.Address
  1111  }
  1112  
  1113  // waitForNewSubConns waits for `num` subConns to be created. This is
  1114  // expected to be used from tests using the "test_config_balancer" LB policy,
  1115  // which adds an address attribute with value set to the balancer config.
  1116  //
  1117  // Returned value is a map from subBalancer (identified by its config) to
  1118  // subConns created by it.
  1119  func waitForNewSubConns(t *testing.T, cc *testutils.TestClientConn, num int) map[string][]subConnWithAddr {
  1120  	t.Helper()
  1121  
  1122  	scs := make(map[string][]subConnWithAddr)
  1123  	for i := 0; i < num; i++ {
  1124  		addrs := <-cc.NewSubConnAddrsCh
  1125  		if len(addrs) != 1 {
  1126  			t.Fatalf("received subConns with %d addresses, want 1", len(addrs))
  1127  		}
  1128  		cfg, ok := getConfigKey(addrs[0].Attributes)
  1129  		if !ok {
  1130  			t.Fatalf("received subConn address %v contains no attribute for balancer config", addrs[0])
  1131  		}
  1132  		sc := <-cc.NewSubConnCh
  1133  		scWithAddr := subConnWithAddr{sc: sc, addr: addrs[0]}
  1134  		scs[cfg] = append(scs[cfg], scWithAddr)
  1135  	}
  1136  	return scs
  1137  }
  1138  
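        // verifySubConnAddrs checks that each subBalancer, keyed by its config
        // string, created SubConns for exactly the expected addresses in order.
        // Only the Addr field of each address is compared.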
  1139  func verifySubConnAddrs(t *testing.T, scs map[string][]subConnWithAddr, wantSubConnAddrs map[string][]resolver.Address) {
  1140  	t.Helper()
  1141  
  1142  	if len(scs) != len(wantSubConnAddrs) {
  1143  		t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs)
  1144  	}
  1145  	for cfg, scsWithAddr := range scs {
  1146  		if len(scsWithAddr) != len(wantSubConnAddrs[cfg]) {
  1147  			t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs)
  1148  		}
  1149  		wantAddrs := wantSubConnAddrs[cfg]
  1150  		for i, scWithAddr := range scsWithAddr {
  1151  			if diff := cmp.Diff(wantAddrs[i].Addr, scWithAddr.addr.Addr); diff != "" {
  1152  				t.Fatalf("got unexpected new subconn addrs: %v", diff)
  1153  			}
  1154  		}
  1155  	}
  1156  }
  1157  
  1158  const initIdleBalancerName = "test-init-Idle-balancer"
  1159  
  1160  var errTestInitIdle = fmt.Errorf("init Idle balancer error 0")
  1161  
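        // This init registers a stub LB policy that creates one SubConn for the
        // resolved addresses and mirrors every SubConn state change to the
        // ClientConn with a constant picker, whose error is errTestInitIdle when
        // the reported state is Idle. TestInitialIdle uses it to verify that an
        // Idle child makes the overall state Idle.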
  1162  func init() {
  1163  	stub.Register(initIdleBalancerName, stub.BalancerFuncs{
  1164  		UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error {
  1165  			bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{})
  1166  			return nil
  1167  		},
  1168  		UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) {
  1169  			err := fmt.Errorf("wrong picker error")
  1170  			if state.ConnectivityState == connectivity.Idle {
  1171  				err = errTestInitIdle
  1172  			}
  1173  			bd.ClientConn.UpdateState(balancer.State{
  1174  				ConnectivityState: state.ConnectivityState,
  1175  				Picker:            &testutils.TestConstPicker{Err: err},
  1176  			})
  1177  		},
  1178  	})
  1179  }
  1180  
  1181  // TestInitialIdle covers the case that if the child reports Idle, the overall
  1182  // state will be Idle.
  1183  func (s) TestInitialIdle(t *testing.T) {
  1184  	cc := testutils.NewTestClientConn(t)
  1185  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
  1186  	defer wtb.Close()
  1187  
  1188  	config, err := wtbParser.ParseConfig([]byte(`
  1189  {
  1190    "targets": {
  1191      "cluster_1": {
  1192        "weight":1,
  1193        "childPolicy": [{"test-init-Idle-balancer": ""}]
  1194      }
  1195    }
  1196  }`))
  1197  	if err != nil {
  1198  		t.Fatalf("failed to parse balancer config: %v", err)
  1199  	}
  1200  
  1201  	// Send the config, and an address with hierarchy path ["cluster_1"].
  1202  	addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}}
  1203  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1204  		ResolverState:  resolver.State{Addresses: []resolver.Address{hierarchy.Set(addrs[0], []string{"cluster_1"})}},
  1205  		BalancerConfig: config,
  1206  	}); err != nil {
  1207  		t.Fatalf("failed to update ClientConn state: %v", err)
  1208  	}
  1209  
  1210  	// Verify that a subconn is created with the address, and the hierarchy path
  1211  	// in the address is cleared.
  1212  	for range addrs {
  1213  		sc := <-cc.NewSubConnCh
  1214  		wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Idle})
  1215  	}
  1216  
  1217  	if state := <-cc.NewStateCh; state != connectivity.Idle {
  1218  		t.Fatalf("Received aggregated state: %v, want Idle", state)
  1219  	}
  1220  }