google.golang.org/grpc@v1.72.2/balancer/weightedtarget/weightedtarget_test.go

     1  /*
     2   *
     3   * Copyright 2020 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package weightedtarget
    20  
    21  import (
    22  	"context"
    23  	"encoding/json"
    24  	"errors"
    25  	"fmt"
    26  	"strings"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/google/go-cmp/cmp"
    31  	"google.golang.org/grpc"
    32  	"google.golang.org/grpc/attributes"
    33  	"google.golang.org/grpc/balancer"
    34  	"google.golang.org/grpc/balancer/roundrobin"
    35  	"google.golang.org/grpc/codes"
    36  	"google.golang.org/grpc/connectivity"
    37  	"google.golang.org/grpc/credentials/insecure"
    38  	"google.golang.org/grpc/internal/balancer/stub"
    39  	"google.golang.org/grpc/internal/grpctest"
    40  	"google.golang.org/grpc/internal/hierarchy"
    41  	"google.golang.org/grpc/internal/testutils"
    42  	"google.golang.org/grpc/resolver"
    43  	"google.golang.org/grpc/serviceconfig"
    44  	"google.golang.org/grpc/status"
    45  
    46  	testgrpc "google.golang.org/grpc/interop/grpc_testing"
    47  	testpb "google.golang.org/grpc/interop/grpc_testing"
    48  )
    49  
    50  const (
    51  	defaultTestTimeout = 5 * time.Second
    52  )
    53  
    54  type s struct {
    55  	grpctest.Tester
    56  }
    57  
    58  func Test(t *testing.T) {
    59  	grpctest.RunSubTests(t, s{})
    60  }
    61  
    62  type testConfigBalancerBuilder struct {
    63  	balancer.Builder
    64  }
    65  
    66  func newTestConfigBalancerBuilder() *testConfigBalancerBuilder {
    67  	return &testConfigBalancerBuilder{
    68  		Builder: balancer.Get(roundrobin.Name),
    69  	}
    70  }
    71  
    72  // pickAndCheckError returns a function which takes a picker, invokes the Pick() method
    73  // multiple times and ensures that the error returned by the picker matches the provided error.
    74  func pickAndCheckError(want error) func(balancer.Picker) error {
    75  	const rpcCount = 5
    76  	return func(p balancer.Picker) error {
    77  		for i := 0; i < rpcCount; i++ {
    78  			if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), want.Error()) {
    79  				return fmt.Errorf("picker.Pick() returned error: %v, want: %v", err, want)
    80  			}
    81  		}
    82  		return nil
    83  	}
    84  }
    85  
    86  func (t *testConfigBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
    87  	rr := t.Builder.Build(cc, opts)
    88  	return &testConfigBalancer{
    89  		Balancer: rr,
    90  	}
    91  }
    92  
    93  const testConfigBalancerName = "test_config_balancer"
    94  
    95  func (t *testConfigBalancerBuilder) Name() string {
    96  	return testConfigBalancerName
    97  }
    98  
    99  type stringBalancerConfig struct {
   100  	serviceconfig.LoadBalancingConfig
   101  	configStr string
   102  }
   103  
   104  func (t *testConfigBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
   105  	var cfg string
   106  	if err := json.Unmarshal(c, &cfg); err != nil {
   107  		return nil, fmt.Errorf("failed to unmarshal config in %q: %v", testConfigBalancerName, err)
   108  	}
   109  	return stringBalancerConfig{configStr: cfg}, nil
   110  }
   111  
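
// parseTestConfigExample is an illustrative sketch (hypothetical helper, not
// used by the tests in this file). It shows that the child config consumed by
// the test_config_balancer is a bare JSON string, which ParseConfig stores
// verbatim in stringBalancerConfig.configStr.
func parseTestConfigExample() (string, error) {
	lbCfg, err := newTestConfigBalancerBuilder().ParseConfig(json.RawMessage(`"cluster_1"`))
	if err != nil {
		return "", err
	}
	// lbCfg carries the string "cluster_1", which the balancer later attaches
	// to each backend address as an attribute.
	return lbCfg.(stringBalancerConfig).configStr, nil
}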
   112  // testConfigBalancer is a roundrobin balancer, but it takes the balancer config
   113  // string and adds it as an address attribute to the backend addresses.
   114  type testConfigBalancer struct {
   115  	balancer.Balancer
   116  }
   117  
   118  // configKey is the type used as the key to store balancer config in the
   119  // Attributes field of resolver.Address.
   120  type configKey struct{}
   121  
   122  func setConfigKey(addr resolver.Address, config string) resolver.Address {
   123  	addr.Attributes = addr.Attributes.WithValue(configKey{}, config)
   124  	return addr
   125  }
   126  
   127  func getConfigKey(attr *attributes.Attributes) (string, bool) {
   128  	v := attr.Value(configKey{})
   129  	name, ok := v.(string)
   130  	return name, ok
   131  }
   132  
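// configKeyRoundTripExample is an illustrative sketch (hypothetical helper,
// not used by the tests in this file). It shows how a backend address is
// tagged with the balancer config string and how that tag is read back when
// verifying which sub-balancer created a SubConn.
func configKeyRoundTripExample() (string, bool) {
	addr := setConfigKey(resolver.Address{Addr: "1.1.1.1:1"}, "cluster_1")
	// Returns "cluster_1", true.
	return getConfigKey(addr.Attributes)
}
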
   133  func (b *testConfigBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
   134  	c, ok := s.BalancerConfig.(stringBalancerConfig)
   135  	if !ok {
   136  		return fmt.Errorf("unexpected balancer config with type %T", s.BalancerConfig)
   137  	}
   138  
   139  	for i, ep := range s.ResolverState.Endpoints {
   140  		addrsWithAttr := make([]resolver.Address, len(ep.Addresses))
   141  		for j, addr := range ep.Addresses {
   142  			addrsWithAttr[j] = setConfigKey(addr, c.configStr)
   143  		}
   144  		s.ResolverState.Endpoints[i].Addresses = addrsWithAttr
   145  	}
   146  	s.BalancerConfig = nil
   147  	return b.Balancer.UpdateClientConnState(s)
   148  }
   149  
   150  func (b *testConfigBalancer) Close() {
   151  	b.Balancer.Close()
   152  }
   153  
   154  var (
   155  	wtbBuilder          balancer.Builder
   156  	wtbParser           balancer.ConfigParser
   157  	testBackendAddrStrs []string
   158  )
   159  
   160  const testBackendAddrsCount = 12
   161  
   162  func init() {
   163  	balancer.Register(newTestConfigBalancerBuilder())
   164  	for i := 0; i < testBackendAddrsCount; i++ {
   165  		testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i))
   166  	}
   167  	wtbBuilder = balancer.Get(Name)
   168  	wtbParser = wtbBuilder.(balancer.ConfigParser)
   169  
   170  	NewRandomWRR = testutils.NewTestWRR
   171  }
   172  
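// illustrativeWRR is a minimal sketch of a deterministic weighted round
// robin, similar in spirit to the deterministic WRR installed above via
// testutils.NewTestWRR (the real test implementation may differ in detail).
// It is hypothetical and not used by any test; it only illustrates why the
// weighted pick distributions asserted below are deterministic: with entries
// a (weight 2) and b (weight 1), Next() yields a, a, b, a, a, b, ...
type illustrativeWRR struct {
	items   []any
	weights []int64
	idx     int   // index of the item to return next
	count   int64 // picks already returned for items[idx]
}

// Add registers an item with its weight.
func (w *illustrativeWRR) Add(item any, weight int64) {
	w.items = append(w.items, item)
	w.weights = append(w.weights, weight)
}

// Next returns the current item, advancing to the next item once the current
// one has been returned `weight` times. It panics if no items have been
// added, which is acceptable for a sketch.
func (w *illustrativeWRR) Next() any {
	it := w.items[w.idx]
	w.count++
	if w.count >= w.weights[w.idx] {
		w.idx = (w.idx + 1) % len(w.items)
		w.count = 0
	}
	return it
}
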
   173  // Tests the behavior of the weighted_target LB policy when there are no targets
   174  // configured. It verifies that the LB policy sets the overall channel state to
   175  // TRANSIENT_FAILURE and fails RPCs with an expected status code and message.
   176  func (s) TestWeightedTarget_NoTargets(t *testing.T) {
   177  	dopts := []grpc.DialOption{
   178  		grpc.WithTransportCredentials(insecure.NewCredentials()),
   179  		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"weighted_target_experimental":{}}]}`),
   180  	}
   181  	cc, err := grpc.NewClient("passthrough:///test.server", dopts...)
   182  	if err != nil {
   183  		t.Fatalf("grpc.NewClient() failed: %v", err)
   184  	}
   185  	defer cc.Close()
   186  	cc.Connect()
   187  
   188  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   189  	defer cancel()
   190  	client := testgrpc.NewTestServiceClient(cc)
   191  	_, err = client.EmptyCall(ctx, &testpb.Empty{})
   192  	if err == nil {
   193  		t.Error("EmptyCall() succeeded, want failure")
   194  	}
   195  	if gotCode, wantCode := status.Code(err), codes.Unavailable; gotCode != wantCode {
   196  		t.Errorf("EmptyCall() failed with code = %v, want %s", gotCode, wantCode)
   197  	}
   198  	if gotMsg, wantMsg := err.Error(), "no targets to pick from"; !strings.Contains(gotMsg, wantMsg) {
   199  		t.Errorf("EmptyCall() failed with message = %q, want to contain %q", gotMsg, wantMsg)
   200  	}
   201  	if gotState, wantState := cc.GetState(), connectivity.TransientFailure; gotState != wantState {
   202  		t.Errorf("cc.GetState() = %v, want %v", gotState, wantState)
   203  	}
   204  }
   205  
   206  // TestWeightedTarget covers the cases where a sub-balancer is added and where
   207  // a sub-balancer is removed. It verifies that the addresses and balancer
   208  // configs are forwarded to the right sub-balancer. This test is intended to
   209  // exercise the glue code in weighted_target. It also tests an empty target
   210  // config update, which should trigger a TRANSIENT_FAILURE state update.
   211  func (s) TestWeightedTarget(t *testing.T) {
   212  	cc := testutils.NewBalancerClientConn(t)
   213  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   214  	defer wtb.Close()
   215  
   216  	// Start with "cluster_1: round_robin".
   217  	config1, err := wtbParser.ParseConfig([]byte(`
   218  {
   219    "targets": {
   220      "cluster_1": {
   221        "weight":1,
   222        "childPolicy": [{"round_robin": ""}]
   223      }
   224    }
   225  }`))
   226  	if err != nil {
   227  		t.Fatalf("failed to parse balancer config: %v", err)
   228  	}
   229  
   230  	// Send the config, and an address with hierarchy path ["cluster_1"].
   231  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1], Attributes: nil}
   232  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   233  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   234  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   235  		}},
   236  		BalancerConfig: config1,
   237  	}); err != nil {
   238  		t.Fatalf("failed to update ClientConn state: %v", err)
   239  	}
   240  	verifyAddressInNewSubConn(t, cc, addr1)
   241  
   242  	// Send subconn state change.
   243  	sc1 := <-cc.NewSubConnCh
   244  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   245  	<-cc.NewPickerCh
   246  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   247  	p := <-cc.NewPickerCh
   248  
   249  	// Test pick with one backend.
   250  	for i := 0; i < 5; i++ {
   251  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   252  		if gotSCSt.SubConn != sc1 {
   253  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1)
   254  		}
   255  	}
   256  
   257  	// Remove cluster_1, and add "cluster_2: test_config_balancer". The
   258  	// test_config_balancer adds an address attribute whose value is set to the
   259  	// config that is passed to it.
   260  	config2, err := wtbParser.ParseConfig([]byte(`
   261  {
   262    "targets": {
   263      "cluster_2": {
   264         "weight":1,
   265         "childPolicy": [{"test_config_balancer": "cluster_2"}]
   266      }
   267    }
   268  }`))
   269  	if err != nil {
   270  		t.Fatalf("failed to parse balancer config: %v", err)
   271  	}
   272  
   273  	// Send the config, and one address with hierarchy path "cluster_2".
   274  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2], Attributes: nil}
   275  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   276  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   277  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}),
   278  		}},
   279  		BalancerConfig: config2,
   280  	}); err != nil {
   281  		t.Fatalf("failed to update ClientConn state: %v", err)
   282  	}
   283  
   284  	// Expect a new subConn from the test_config_balancer which has an address
   285  	// attribute set to the config that was passed to it.
   286  	verifyAddressInNewSubConn(t, cc, setConfigKey(addr2, "cluster_2"))
   287  
   288  	// The subconn for cluster_1 should be shut down.
   289  	scShutdown := <-cc.ShutdownSubConnCh
   290  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   291  	// are closed. Remove duplicate events.
   292  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   293  	// workaround once pickfirst is the only leaf policy and responsible for
   294  	// shutting down SubConns.
   295  	<-cc.ShutdownSubConnCh
   296  	if scShutdown != sc1 {
   297  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scShutdown)
   298  	}
   299  	scShutdown.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   300  
   301  	sc2 := <-cc.NewSubConnCh
   302  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   303  	<-cc.NewPickerCh
   304  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   305  	p = <-cc.NewPickerCh
   306  
   307  	// Test pick with one backend.
   308  	for i := 0; i < 5; i++ {
   309  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   310  		if gotSCSt.SubConn != sc2 {
   311  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2)
   312  		}
   313  	}
   314  
   315  	// Replace the child policy of "cluster_2" with "round_robin".
   316  	config3, err := wtbParser.ParseConfig([]byte(`
   317  {
   318    "targets": {
   319      "cluster_2": {
   320        "weight":1,
   321        "childPolicy": [{"round_robin": ""}]
   322      }
   323    }
   324  }`))
   325  	if err != nil {
   326  		t.Fatalf("failed to parse balancer config: %v", err)
   327  	}
   328  
   329  	// Send the config, and an address with hierarchy path ["cluster_2"].
   330  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3], Attributes: nil}
   331  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   332  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   333  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}),
   334  		}},
   335  		BalancerConfig: config3,
   336  	}); err != nil {
   337  		t.Fatalf("failed to update ClientConn state: %v", err)
   338  	}
   339  	verifyAddressInNewSubConn(t, cc, addr3)
   340  
   341  	// The subconn from the test_config_balancer should be shut down.
   342  	scShutdown = <-cc.ShutdownSubConnCh
   343  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   344  	// are closed. Remove duplicate events.
   345  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   346  	// workaround once pickfirst is the only leaf policy and responsible for
   347  	// shutting down SubConns.
   348  	<-cc.ShutdownSubConnCh
   349  
   350  	if scShutdown != sc2 {
   351  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc2, scShutdown)
   352  	}
   353  	scShutdown.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   354  
   355  	// Send subconn state change.
   356  	sc3 := <-cc.NewSubConnCh
   357  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   358  	<-cc.NewPickerCh
   359  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   360  	p = <-cc.NewPickerCh
   361  
   362  	// Test pick with one backend.
   363  	for i := 0; i < 5; i++ {
   364  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   365  		if gotSCSt.SubConn != sc3 {
   366  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3)
   367  		}
   368  	}
   369  
   370  	// Update the weighted target balancer with an empty address list and no
   371  	// targets. This should cause a TRANSIENT_FAILURE state update on the
   372  	// ClientConn.
   373  	emptyConfig, err := wtbParser.ParseConfig([]byte(`{}`))
   374  	if err != nil {
   375  		t.Fatalf("Failed to parse balancer config: %v", err)
   376  	}
   377  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   378  		ResolverState:  resolver.State{},
   379  		BalancerConfig: emptyConfig,
   380  	}); err != nil {
   381  		t.Fatalf("Failed to update ClientConn state: %v", err)
   382  	}
   383  
   384  	state := <-cc.NewStateCh
   385  	if state != connectivity.TransientFailure {
   386  		t.Fatalf("Empty target update should have triggered a TF state update, got: %v", state)
   387  	}
   388  	p = <-cc.NewPickerCh
   389  	const wantErr = "no targets to pick from"
   390  	if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), wantErr) {
   391  		t.Fatalf("Pick() returned error: %v, want: %v", err, wantErr)
   392  	}
   393  }
   394  
   395  // TestWeightedTarget_OneSubBalancer_AddRemoveBackend tests the case where we
   396  // have a weighted target balancer with one sub-balancer, and we add and remove
   397  // backends from the subBalancer.
   398  func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) {
   399  	cc := testutils.NewBalancerClientConn(t)
   400  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   401  	defer wtb.Close()
   402  
   403  	// Start with "cluster_1: round_robin".
   404  	config, err := wtbParser.ParseConfig([]byte(`
   405  {
   406    "targets": {
   407      "cluster_1": {
   408        "weight":1,
   409        "childPolicy": [{"round_robin": ""}]
   410      }
   411    }
   412  }`))
   413  	if err != nil {
   414  		t.Fatalf("failed to parse balancer config: %v", err)
   415  	}
   416  
   417  	// Send the config, and an address with hierarchy path ["cluster_1"].
   418  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   419  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   420  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   421  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   422  		}},
   423  		BalancerConfig: config,
   424  	}); err != nil {
   425  		t.Fatalf("failed to update ClientConn state: %v", err)
   426  	}
   427  	verifyAddressInNewSubConn(t, cc, addr1)
   428  
   429  	// Expect one SubConn, and move it to READY.
   430  	sc1 := <-cc.NewSubConnCh
   431  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   432  	<-cc.NewPickerCh
   433  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   434  	p := <-cc.NewPickerCh
   435  
   436  	// Test pick with one backend.
   437  	for i := 0; i < 5; i++ {
   438  		gotSCSt, _ := p.Pick(balancer.PickInfo{})
   439  		if gotSCSt.SubConn != sc1 {
   440  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1)
   441  		}
   442  	}
   443  
   444  	// Send two addresses.
   445  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   446  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   447  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   448  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   449  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}),
   450  		}},
   451  		BalancerConfig: config,
   452  	}); err != nil {
   453  		t.Fatalf("failed to update ClientConn state: %v", err)
   454  	}
   455  	verifyAddressInNewSubConn(t, cc, addr2)
   456  
   457  	// Expect one new SubConn, and move it to READY.
   458  	sc2 := <-cc.NewSubConnCh
   459  	// Update the SubConn to become READY.
   460  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   461  	<-cc.NewPickerCh
   462  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   463  	p = <-cc.NewPickerCh
   464  
   465  	// Test round robin pick.
   466  	want := []balancer.SubConn{sc1, sc2}
   467  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   468  		t.Fatalf("want %v, got %v", want, err)
   469  	}
   470  
   471  	// Remove the first address.
   472  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   473  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   474  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}),
   475  		}},
   476  		BalancerConfig: config,
   477  	}); err != nil {
   478  		t.Fatalf("failed to update ClientConn state: %v", err)
   479  	}
   480  
   481  	// Expect one SubConn to be shut down.
   482  	scShutdown := <-cc.ShutdownSubConnCh
   483  	if scShutdown != sc1 {
   484  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scShutdown)
   485  	}
   486  	scShutdown.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   487  	p = <-cc.NewPickerCh
   488  
   489  	// Test pick with only the second SubConn.
   490  	for i := 0; i < 5; i++ {
   491  		gotSC, _ := p.Pick(balancer.PickInfo{})
   492  		if gotSC.SubConn != sc2 {
   493  			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSC, sc2)
   494  		}
   495  	}
   496  }
   497  
   498  // TestWeightedTarget_TwoSubBalancers_OneBackend tests the case where we have a
   499  // weighted target balancer with two sub-balancers, each with one backend.
   500  func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) {
   501  	cc := testutils.NewBalancerClientConn(t)
   502  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   503  	defer wtb.Close()
   504  
   505  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
   506  	config, err := wtbParser.ParseConfig([]byte(`
   507  {
   508    "targets": {
   509      "cluster_1": {
   510        "weight":1,
   511        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   512      },
   513      "cluster_2": {
   514        "weight":1,
   515        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   516      }
   517    }
   518  }`))
   519  	if err != nil {
   520  		t.Fatalf("failed to parse balancer config: %v", err)
   521  	}
   522  
   523  	// Send the config with one address for each cluster.
   524  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   525  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   526  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   527  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   528  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   529  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}),
   530  		}},
   531  		BalancerConfig: config,
   532  	}); err != nil {
   533  		t.Fatalf("failed to update ClientConn state: %v", err)
   534  	}
   535  
   536  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   537  	defer cancel()
   538  	scs := waitForNewSubConns(ctx, t, cc, 2)
   539  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   540  		"cluster_1": {addr1},
   541  		"cluster_2": {addr2},
   542  	})
   543  
   544  	// We expect a single subConn on each subBalancer.
   545  	sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn)
   546  	sc2 := scs["cluster_2"][0].sc.(*testutils.TestSubConn)
   547  
   548  	// The CONNECTING picker should be sent by all leaf pickfirst policies on
   549  	// receiving the first resolver update.
   550  	<-cc.NewPickerCh
   551  	// Send state changes for both SubConns, and wait for the picker.
   552  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   553  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   554  	<-cc.NewPickerCh
   555  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   556  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   557  	p := <-cc.NewPickerCh
   558  
   559  	// Test roundrobin on the last picker.
   560  	want := []balancer.SubConn{sc1, sc2}
   561  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   562  		t.Fatalf("want %v, got %v", want, err)
   563  	}
   564  }
   565  
   566  // TestWeightedTarget_TwoSubBalancers_MoreBackends tests the case where we have
   567  // a weighted target balancer with two sub-balancers, each with more than one
   568  // backend.
   569  func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) {
   570  	cc := testutils.NewBalancerClientConn(t)
   571  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   572  	defer wtb.Close()
   573  
   574  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
   575  	config, err := wtbParser.ParseConfig([]byte(`
   576  {
   577    "targets": {
   578      "cluster_1": {
   579        "weight":1,
   580        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   581      },
   582      "cluster_2": {
   583        "weight":1,
   584        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   585      }
   586    }
   587  }`))
   588  	if err != nil {
   589  		t.Fatalf("failed to parse balancer config: %v", err)
   590  	}
   591  
   592  	// Send the config with two backends for each cluster.
   593  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   594  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   595  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
   596  	addr4 := resolver.Address{Addr: testBackendAddrStrs[4]}
   597  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   598  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   599  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   600  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}),
   601  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}),
   602  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}),
   603  		}},
   604  		BalancerConfig: config,
   605  	}); err != nil {
   606  		t.Fatalf("failed to update ClientConn state: %v", err)
   607  	}
   608  
   609  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   610  	defer cancel()
   611  	scs := waitForNewSubConns(ctx, t, cc, 4)
   612  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   613  		"cluster_1": {addr1, addr2},
   614  		"cluster_2": {addr3, addr4},
   615  	})
   616  
   617  	// We expect two subConns on each subBalancer.
   618  	sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn)
   619  	sc2 := scs["cluster_1"][1].sc.(*testutils.TestSubConn)
   620  	sc3 := scs["cluster_2"][0].sc.(*testutils.TestSubConn)
   621  	sc4 := scs["cluster_2"][1].sc.(*testutils.TestSubConn)
   622  
   623  	// The CONNECTING picker should be sent by all leaf pickfirst policies on
   624  	// receiving the first resolver update.
   625  	<-cc.NewPickerCh
   626  
   627  	// Send state changes for all SubConns, and wait for the picker.
   628  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   629  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   630  	<-cc.NewPickerCh
   631  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   632  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   633  	<-cc.NewPickerCh
   634  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   635  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   636  	<-cc.NewPickerCh
   637  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   638  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   639  	p := <-cc.NewPickerCh
   640  
   641  	// Test roundrobin on the last picker. RPCs should be sent equally to all
   642  	// backends.
   643  	want := []balancer.SubConn{sc1, sc2, sc3, sc4}
   644  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   645  		t.Fatalf("want %v, got %v", want, err)
   646  	}
   647  
   648  	// Turn sc2's connection down; picks should still be round robin between the two balancers.
   649  	wantSubConnErr := errors.New("subConn connection error")
   650  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
   651  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   652  	sc2.UpdateState(balancer.SubConnState{
   653  		ConnectivityState: connectivity.TransientFailure,
   654  		ConnectionError:   wantSubConnErr,
   655  	})
   656  	p = <-cc.NewPickerCh
   657  	want = []balancer.SubConn{sc1, sc1, sc3, sc4}
   658  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   659  		t.Fatalf("want %v, got %v", want, err)
   660  	}
   661  
   662  	// Shut down subConn corresponding to addr3.
   663  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   664  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   665  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   666  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}),
   667  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}),
   668  		}},
   669  		BalancerConfig: config,
   670  	}); err != nil {
   671  		t.Fatalf("failed to update ClientConn state: %v", err)
   672  	}
   673  	scShutdown := <-cc.ShutdownSubConnCh
   674  	if scShutdown != sc3 {
   675  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc3, scShutdown)
   676  	}
   677  	scShutdown.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   678  	p = <-cc.NewPickerCh
   679  	want = []balancer.SubConn{sc1, sc4}
   680  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   681  		t.Fatalf("want %v, got %v", want, err)
   682  	}
   683  
   684  	// Turn sc1's connection down.
   685  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
   686  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   687  	sc1.UpdateState(balancer.SubConnState{
   688  		ConnectivityState: connectivity.TransientFailure,
   689  		ConnectionError:   wantSubConnErr,
   690  	})
   691  	p = <-cc.NewPickerCh
   692  	want = []balancer.SubConn{sc4}
   693  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   694  		t.Fatalf("want %v, got %v", want, err)
   695  	}
   696  
   697  	// Turn the last connection to CONNECTING.
   698  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
   699  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   700  	p = <-cc.NewPickerCh
   701  	for i := 0; i < 5; i++ {
   702  		if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable {
   703  			t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err)
   704  		}
   705  	}
   706  
   707  	// Turn the last connection down as well, so that all connections are down.
   708  	sc4.UpdateState(balancer.SubConnState{
   709  		ConnectivityState: connectivity.TransientFailure,
   710  		ConnectionError:   wantSubConnErr,
   711  	})
   712  
   713  	if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil {
   714  		t.Fatal(err)
   715  	}
   716  }
   717  
   718  // TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends tests the
   719  // case where we have a weighted target balancer with two sub-balancers of
   720  // differing weights.
   721  func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *testing.T) {
   722  	cc := testutils.NewBalancerClientConn(t)
   723  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   724  	defer wtb.Close()
   725  
   726  	// Start with two subBalancers, one with twice the weight of the other.
   727  	config, err := wtbParser.ParseConfig([]byte(`
   728  {
   729    "targets": {
   730      "cluster_1": {
   731        "weight": 2,
   732        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   733      },
   734      "cluster_2": {
   735        "weight": 1,
   736        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   737      }
   738    }
   739  }`))
   740  	if err != nil {
   741  		t.Fatalf("failed to parse balancer config: %v", err)
   742  	}
   743  
   744  	// Send the config with two backends for each cluster.
   745  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   746  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   747  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
   748  	addr4 := resolver.Address{Addr: testBackendAddrStrs[4]}
   749  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   750  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   751  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   752  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}),
   753  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}),
   754  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}),
   755  		}},
   756  		BalancerConfig: config,
   757  	}); err != nil {
   758  		t.Fatalf("failed to update ClientConn state: %v", err)
   759  	}
   760  
   761  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   762  	defer cancel()
   763  	scs := waitForNewSubConns(ctx, t, cc, 4)
   764  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   765  		"cluster_1": {addr1, addr2},
   766  		"cluster_2": {addr3, addr4},
   767  	})
   768  
   769  	// We expect two subConns on each subBalancer.
   770  	sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn)
   771  	sc2 := scs["cluster_1"][1].sc.(*testutils.TestSubConn)
   772  	sc3 := scs["cluster_2"][0].sc.(*testutils.TestSubConn)
   773  	sc4 := scs["cluster_2"][1].sc.(*testutils.TestSubConn)
   774  
   775  	// The CONNECTING picker should be sent by all leaf pickfirst policies on
   776  	// receiving the first resolver update.
   777  	<-cc.NewPickerCh
   778  
   779  	// Send state changes for all SubConns, and wait for the picker.
   780  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   781  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   782  	<-cc.NewPickerCh
   783  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   784  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   785  	<-cc.NewPickerCh
   786  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   787  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   788  	<-cc.NewPickerCh
   789  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   790  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   791  	p := <-cc.NewPickerCh
   792  
   793  	// Test roundrobin on the last picker. Twice the number of RPCs should be
   794  	// sent to cluster_1 when compared to cluster_2.
   795  	want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4}
   796  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   797  		t.Fatalf("want %v, got %v", want, err)
   798  	}
   799  }
   800  
   801  // TestWeightedTarget_ThreeSubBalancers_RemoveBalancer tests the case where we
   802  // have a weighted target balancer with three sub-balancers and we remove one of
   803  // the subBalancers.
   804  func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) {
   805  	cc := testutils.NewBalancerClientConn(t)
   806  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   807  	defer wtb.Close()
   808  
   809  	// Start with three subBalancers, each with the same weight.
   810  	config, err := wtbParser.ParseConfig([]byte(`
   811  {
   812    "targets": {
   813      "cluster_1": {
   814        "weight": 1,
   815        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   816      },
   817      "cluster_2": {
   818        "weight": 1,
   819        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   820      },
   821      "cluster_3": {
   822        "weight": 1,
   823        "childPolicy": [{"test_config_balancer": "cluster_3"}]
   824      }
   825    }
   826  }`))
   827  	if err != nil {
   828  		t.Fatalf("failed to parse balancer config: %v", err)
   829  	}
   830  
   831  	// Send the config with one backend for each cluster.
   832  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
   833  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
   834  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
   835  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   836  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   837  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   838  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}),
   839  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_3"}),
   840  		}},
   841  		BalancerConfig: config,
   842  	}); err != nil {
   843  		t.Fatalf("failed to update ClientConn state: %v", err)
   844  	}
   845  
   846  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   847  	defer cancel()
   848  	scs := waitForNewSubConns(ctx, t, cc, 3)
   849  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
   850  		"cluster_1": {addr1},
   851  		"cluster_2": {addr2},
   852  		"cluster_3": {addr3},
   853  	})
   854  
   855  	// We expect one subConn on each subBalancer.
   856  	sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn)
   857  	sc2 := scs["cluster_2"][0].sc.(*testutils.TestSubConn)
   858  	sc3 := scs["cluster_3"][0].sc.(*testutils.TestSubConn)
   859  
   860  	// Send state changes for all SubConns, and wait for the picker.
   861  	// The CONNECTING picker should be sent by all leaf pickfirst policies on
   862  	// receiving the first resolver update.
   863  	<-cc.NewPickerCh
   864  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   865  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   866  	<-cc.NewPickerCh
   867  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   868  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   869  	<-cc.NewPickerCh
   870  	<-sc3.ConnectCh
   871  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   872  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   873  	p := <-cc.NewPickerCh
   874  
   875  	want := []balancer.SubConn{sc1, sc2, sc3}
   876  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   877  		t.Fatalf("want %v, got %v", want, err)
   878  	}
   879  
   880  	// Remove the second balancer, while the other two are READY.
   881  	config, err = wtbParser.ParseConfig([]byte(`
   882  {
   883    "targets": {
   884      "cluster_1": {
   885        "weight": 1,
   886        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   887      },
   888      "cluster_3": {
   889        "weight": 1,
   890        "childPolicy": [{"test_config_balancer": "cluster_3"}]
   891      }
   892    }
   893  }`))
   894  	if err != nil {
   895  		t.Fatalf("failed to parse balancer config: %v", err)
   896  	}
   897  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   898  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   899  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
   900  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_3"}),
   901  		}},
   902  		BalancerConfig: config,
   903  	}); err != nil {
   904  		t.Fatalf("failed to update ClientConn state: %v", err)
   905  	}
   906  
   907  	// Removing a subBalancer causes the weighted target LB policy to push a new
   908  	// picker which ensures that the removed subBalancer is not picked for RPCs.
   909  	p = <-cc.NewPickerCh
   910  
   911  	scShutdown := <-cc.ShutdownSubConnCh
   912  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   913  	// are closed. Remove duplicate events.
   914  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   915  	// workaround once pickfirst is the only leaf policy and responsible for
   916  	// shutting down SubConns.
   917  	<-cc.ShutdownSubConnCh
   918  	if scShutdown != sc2 {
   919  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc2, scShutdown)
   920  	}
   921  	want = []balancer.SubConn{sc1, sc3}
   922  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
   923  		t.Fatalf("want %v, got %v", want, err)
   924  	}
   925  
   926  	// Move balancer 3 into transient failure.
   927  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
   928  	<-sc3.ConnectCh
   929  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   930  	wantSubConnErr := errors.New("subConn connection error")
   931  	sc3.UpdateState(balancer.SubConnState{
   932  		ConnectivityState: connectivity.TransientFailure,
   933  		ConnectionError:   wantSubConnErr,
   934  	})
   935  	<-cc.NewPickerCh
   936  
   937  	// Remove the first balancer, while the third is in TRANSIENT_FAILURE.
   938  	config, err = wtbParser.ParseConfig([]byte(`
   939  {
   940    "targets": {
   941      "cluster_3": {
   942        "weight": 1,
   943        "childPolicy": [{"test_config_balancer": "cluster_3"}]
   944      }
   945    }
   946  }`))
   947  	if err != nil {
   948  		t.Fatalf("failed to parse balancer config: %v", err)
   949  	}
   950  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
   951  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
   952  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_3"}),
   953  		}},
   954  		BalancerConfig: config,
   955  	}); err != nil {
   956  		t.Fatalf("failed to update ClientConn state: %v", err)
   957  	}
   958  
   959  	// Removing a subBalancer causes the weighted target LB policy to push a new
   960  	// picker which ensures that the removed subBalancer is not picked for RPCs.
   961  	scShutdown = <-cc.ShutdownSubConnCh
   962  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   963  	// are closed. Remove duplicate events.
   964  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   965  	// workaround once pickfirst is the only leaf policy and responsible for
   966  	// shutting down SubConns.
   967  	<-cc.ShutdownSubConnCh
   968  	if scShutdown != sc1 {
   969  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scShutdown)
   970  	}
   971  
   972  	if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil {
   973  		t.Fatal(err)
   974  	}
   975  }
   976  
   977  // TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends tests the case
   978  // where we have a weighted target balancer with two sub-balancers, and we
   979  // change the weight of these subBalancers.
   980  func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing.T) {
   981  	cc := testutils.NewBalancerClientConn(t)
   982  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
   983  	defer wtb.Close()
   984  
   985  	// Start with two subBalancers, one with twice the weight of the other.
   986  	config, err := wtbParser.ParseConfig([]byte(`
   987  {
   988    "targets": {
   989      "cluster_1": {
   990        "weight": 2,
   991        "childPolicy": [{"test_config_balancer": "cluster_1"}]
   992      },
   993      "cluster_2": {
   994        "weight": 1,
   995        "childPolicy": [{"test_config_balancer": "cluster_2"}]
   996      }
   997    }
   998  }`))
   999  	if err != nil {
  1000  		t.Fatalf("failed to parse balancer config: %v", err)
  1001  	}
  1002  
  1003  	// Send the config with two backends for each cluster.
  1004  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
  1005  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
  1006  	addr3 := resolver.Address{Addr: testBackendAddrStrs[3]}
  1007  	addr4 := resolver.Address{Addr: testBackendAddrStrs[4]}
  1008  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1009  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
  1010  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
  1011  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}),
  1012  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}),
  1013  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}),
  1014  		}},
  1015  		BalancerConfig: config,
  1016  	}); err != nil {
  1017  		t.Fatalf("failed to update ClientConn state: %v", err)
  1018  	}
  1019  
  1020  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1021  	defer cancel()
  1022  	scs := waitForNewSubConns(ctx, t, cc, 4)
  1023  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
  1024  		"cluster_1": {addr1, addr2},
  1025  		"cluster_2": {addr3, addr4},
  1026  	})
  1027  
  1028  	// We expect two subConns on each subBalancer.
  1029  	sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn)
  1030  	sc2 := scs["cluster_1"][1].sc.(*testutils.TestSubConn)
  1031  	sc3 := scs["cluster_2"][0].sc.(*testutils.TestSubConn)
  1032  	sc4 := scs["cluster_2"][1].sc.(*testutils.TestSubConn)
  1033  
  1034  	// The CONNECTING picker should be sent by all leaf pickfirst policies on
  1035  	// receiving the first resolver update.
  1036  	<-cc.NewPickerCh
  1037  
  1038  	// Send state changes for all SubConns, and wait for the picker.
  1039  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1040  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1041  	<-cc.NewPickerCh
  1042  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1043  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1044  	<-cc.NewPickerCh
  1045  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1046  	sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1047  	<-cc.NewPickerCh
  1048  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1049  	sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1050  	p := <-cc.NewPickerCh
  1051  
  1052  	// Test roundrobin on the last picker. Twice the number of RPCs should be
  1053  	// sent to cluster_1 when compared to cluster_2.
  1054  	want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4}
  1055  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
  1056  		t.Fatalf("want %v, got %v", want, err)
  1057  	}
  1058  
  1059  	// Change the weight of cluster_1.
  1060  	config, err = wtbParser.ParseConfig([]byte(`
  1061  {
  1062    "targets": {
  1063      "cluster_1": {
  1064        "weight": 3,
  1065        "childPolicy": [{"test_config_balancer": "cluster_1"}]
  1066      },
  1067      "cluster_2": {
  1068        "weight": 1,
  1069        "childPolicy": [{"test_config_balancer": "cluster_2"}]
  1070      }
  1071    }
  1072  }`))
  1073  	if err != nil {
  1074  		t.Fatalf("failed to parse balancer config: %v", err)
  1075  	}
  1076  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1077  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
  1078  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
  1079  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}),
  1080  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}),
  1081  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}),
  1082  		}},
  1083  		BalancerConfig: config,
  1084  	}); err != nil {
  1085  		t.Fatalf("failed to update ClientConn state: %v", err)
  1086  	}
  1087  
  1088  	// Weight change causes a new picker to be pushed to the channel.
  1089  	p = <-cc.NewPickerCh
  1090  	want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4}
  1091  	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil {
  1092  		t.Fatalf("want %v, got %v", want, err)
  1093  	}
  1094  }
  1095  
  1096  // TestWeightedTarget_InitOneSubBalancerTransientFailure tests that at init
  1097  // time, with two sub-balancers, if one sub-balancer reports transient_failure,
  1098  // picks do not fail with transient_failure and instead wait for the other
  1099  // sub-balancer.
  1100  func (s) TestWeightedTarget_InitOneSubBalancerTransientFailure(t *testing.T) {
  1101  	cc := testutils.NewBalancerClientConn(t)
  1102  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
  1103  	defer wtb.Close()
  1104  
  1105  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
  1106  	config, err := wtbParser.ParseConfig([]byte(`
  1107  {
  1108    "targets": {
  1109      "cluster_1": {
  1110        "weight":1,
  1111        "childPolicy": [{"test_config_balancer": "cluster_1"}]
  1112      },
  1113      "cluster_2": {
  1114        "weight":1,
  1115        "childPolicy": [{"test_config_balancer": "cluster_2"}]
  1116      }
  1117    }
  1118  }`))
  1119  	if err != nil {
  1120  		t.Fatalf("failed to parse balancer config: %v", err)
  1121  	}
  1122  
  1123  	// Send the config with one address for each cluster.
  1124  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
  1125  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
  1126  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1127  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
  1128  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}),
  1129  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}),
  1130  		}},
  1131  		BalancerConfig: config,
  1132  	}); err != nil {
  1133  		t.Fatalf("failed to update ClientConn state: %v", err)
  1134  	}
  1135  
  1136  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1137  	defer cancel()
  1138  	scs := waitForNewSubConns(ctx, t, cc, 2)
  1139  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
  1140  		"cluster_1": {addr1},
  1141  		"cluster_2": {addr2},
  1142  	})
  1143  
  1144  	// We expect a single subConn on each subBalancer.
  1145  	sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn)
  1146  	_ = scs["cluster_2"][0].sc
  1147  
  1148  	// Set one SubConn to TRANSIENT_FAILURE. This causes its sub-balancer to
  1149  	// report transient failure.
  1150  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1151  
  1152  	p := <-cc.NewPickerCh
  1153  	for i := 0; i < 5; i++ {
  1154  		r, err := p.Pick(balancer.PickInfo{})
  1155  		if err != balancer.ErrNoSubConnAvailable {
  1156  			t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrNoSubConnAvailable, r, err)
  1157  		}
  1158  	}
  1159  }
  1160  
  1161  // Test that with two sub-balancers, both in transient_failure, if one turns
  1162  // connecting, the overall state stays in transient_failure, and all picks
  1163  // return a transient failure error.
  1164  func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) {
  1165  	cc := testutils.NewBalancerClientConn(t)
  1166  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
  1167  	defer wtb.Close()
  1168  
  1169  	// Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer".
  1170  	config, err := wtbParser.ParseConfig([]byte(`
  1171  {
  1172    "targets": {
  1173      "cluster_1": {
  1174        "weight":1,
  1175        "childPolicy": [{"test_config_balancer": "cluster_1"}]
  1176      },
  1177      "cluster_2": {
  1178        "weight":1,
  1179        "childPolicy": [{"test_config_balancer": "cluster_2"}]
  1180      }
  1181    }
  1182  }`))
  1183  	if err != nil {
  1184  		t.Fatalf("failed to parse balancer config: %v", err)
  1185  	}
  1186  
  1187  	// Send the config with one address for each cluster.
  1188  	addr1 := resolver.Address{Addr: testBackendAddrStrs[1]}
  1189  	addr2 := resolver.Address{Addr: testBackendAddrStrs[2]}
  1190  	ep1 := resolver.Endpoint{Addresses: []resolver.Address{addr1}}
  1191  	ep2 := resolver.Endpoint{Addresses: []resolver.Address{addr2}}
  1192  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1193  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
  1194  			hierarchy.SetInEndpoint(ep1, []string{"cluster_1"}),
  1195  			hierarchy.SetInEndpoint(ep2, []string{"cluster_2"}),
  1196  		}},
  1197  		BalancerConfig: config,
  1198  	}); err != nil {
  1199  		t.Fatalf("failed to update ClientConn state: %v", err)
  1200  	}
  1201  
  1202  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1203  	defer cancel()
  1204  	scs := waitForNewSubConns(ctx, t, cc, 2)
  1205  	verifySubConnAddrs(t, scs, map[string][]resolver.Address{
  1206  		"cluster_1": {addr1},
  1207  		"cluster_2": {addr2},
  1208  	})
  1209  
  1210  	// We expect a single subConn on each subBalancer.
  1211  	sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn)
  1212  	sc2 := scs["cluster_2"][0].sc.(*testutils.TestSubConn)
  1213  
  1214  	// Set both SubConns to TRANSIENT_FAILURE. This puts both sub-balancers in
  1215  	// transient failure.
  1216  	wantSubConnErr := errors.New("subConn connection error")
  1217  	sc1.UpdateState(balancer.SubConnState{
  1218  		ConnectivityState: connectivity.TransientFailure,
  1219  		ConnectionError:   wantSubConnErr,
  1220  	})
  1221  	<-cc.NewPickerCh
  1222  	sc2.UpdateState(balancer.SubConnState{
  1223  		ConnectivityState: connectivity.TransientFailure,
  1224  		ConnectionError:   wantSubConnErr,
  1225  	})
  1226  	p := <-cc.NewPickerCh
  1227  
  1228  	for i := 0; i < 5; i++ {
  1229  		if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), wantSubConnErr.Error()) {
  1230  			t.Fatalf("picker.Pick() returned error: %v, want: %v", err, wantSubConnErr)
  1231  		}
  1232  	}
  1233  
  1234  	// Set one SubConn to CONNECTING; this should not change the overall state.
  1235  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1236  	select {
  1237  	case <-time.After(100 * time.Millisecond):
  1238  	case <-cc.NewPickerCh:
  1239  		t.Fatal("received new picker from the LB policy when expecting none")
  1240  	}
  1241  
  1242  	for i := 0; i < 5; i++ {
  1243  		if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), wantSubConnErr.Error()) {
  1244  			t.Fatalf("picker.Pick() returned error: %v, want: %v", err, wantSubConnErr)
  1245  		}
  1246  	}
  1247  }
  1248  
  1249  // verifyAddressInNewSubConn verifies that a SubConn is created with the expected address.
  1250  func verifyAddressInNewSubConn(t *testing.T, cc *testutils.BalancerClientConn, addr resolver.Address) {
  1251  	t.Helper()
  1252  
  1253  	gotAddr := <-cc.NewSubConnAddrsCh
  1254  	wantAddr := []resolver.Address{addr}
  1255  	gotAddr[0].BalancerAttributes = nil
  1256  	if diff := cmp.Diff(gotAddr, wantAddr, cmp.AllowUnexported(attributes.Attributes{})); diff != "" {
  1257  		t.Fatalf("got unexpected new subconn addrs: %v", diff)
  1258  	}
  1259  }
  1260  
  1261  // subConnWithAddr wraps a subConn and the address for which it was created.
  1262  type subConnWithAddr struct {
  1263  	sc   balancer.SubConn
  1264  	addr resolver.Address
  1265  }
  1266  
  1267  // waitForNewSubConns waits for `num` subConns to be created. It is expected to
  1268  // be used by tests using the "test_config_balancer" LB policy, which adds an
  1269  // address attribute with value set to the balancer config.
  1270  //
  1271  // The returned value is a map from sub-balancer (identified by its config) to
  1272  // the subConns created by it.
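        //
        // For example, when called with num set to 2 in a test configuring
        // sub-balancers "cluster_1" and "cluster_2" with one address each, the
        // returned map would have the form:
        //
        //	{"cluster_1": [{sc1, addr1}], "cluster_2": [{sc2, addr2}]}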
  1273  func waitForNewSubConns(ctx context.Context, t *testing.T, cc *testutils.BalancerClientConn, num int) map[string][]subConnWithAddr {
  1274  	t.Helper()
  1275  
  1276  	scs := make(map[string][]subConnWithAddr)
  1277  	for i := 0; i < num; i++ {
  1278  		var addrs []resolver.Address
  1279  		select {
  1280  		case <-ctx.Done():
  1281  			t.Fatalf("Timed out waiting for addresses for new SubConn.")
  1282  		case addrs = <-cc.NewSubConnAddrsCh:
  1283  		}
  1284  		if len(addrs) != 1 {
  1285  			t.Fatalf("received SubConn with %d addresses, want 1", len(addrs))
  1286  		}
  1287  		cfg, ok := getConfigKey(addrs[0].Attributes)
  1288  		if !ok {
  1289  			t.Fatalf("received subConn address %v with no attribute for balancer config", addrs[0])
  1290  		}
  1291  		var sc balancer.SubConn
  1292  		select {
  1293  		case <-ctx.Done():
  1294  			t.Fatalf("Timed out waiting for new SubConn.")
  1295  		case sc = <-cc.NewSubConnCh:
  1296  		}
  1297  		scWithAddr := subConnWithAddr{sc: sc, addr: addrs[0]}
  1298  		scs[cfg] = append(scs[cfg], scWithAddr)
  1299  	}
  1300  	return scs
  1301  }
  1302  
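        // verifySubConnAddrs verifies that the subConns returned by
        // waitForNewSubConns were created for the expected addresses, keyed by
        // sub-balancer config.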
  1303  func verifySubConnAddrs(t *testing.T, scs map[string][]subConnWithAddr, wantSubConnAddrs map[string][]resolver.Address) {
  1304  	t.Helper()
  1305  
  1306  	if len(scs) != len(wantSubConnAddrs) {
  1307  		t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs)
  1308  	}
  1309  	for cfg, scsWithAddr := range scs {
  1310  		if len(scsWithAddr) != len(wantSubConnAddrs[cfg]) {
  1311  			t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs)
  1312  		}
  1313  		wantAddrs := wantSubConnAddrs[cfg]
  1314  		for i, scWithAddr := range scsWithAddr {
  1315  			if diff := cmp.Diff(wantAddrs[i].Addr, scWithAddr.addr.Addr); diff != "" {
  1316  				t.Fatalf("got unexpected new subconn addrs: %v", diff)
  1317  			}
  1318  		}
  1319  	}
  1320  }
  1321  
  1322  const initIdleBalancerName = "test-init-Idle-balancer"
  1323  
  1324  var errTestInitIdle = fmt.Errorf("init Idle balancer error 0")
  1325  
  1326  func init() {
  1327  	stub.Register(initIdleBalancerName, stub.BalancerFuncs{
  1328  		UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error {
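        			// Create a SubConn whose state listener mirrors the SubConn's
        			// connectivity state to the parent, with a picker that returns
        			// errTestInitIdle only when the SubConn reports Idle.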
  1329  			sc, err := bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{
  1330  				StateListener: func(state balancer.SubConnState) {
  1331  					err := fmt.Errorf("wrong picker error")
  1332  					if state.ConnectivityState == connectivity.Idle {
  1333  						err = errTestInitIdle
  1334  					}
  1335  					bd.ClientConn.UpdateState(balancer.State{
  1336  						ConnectivityState: state.ConnectivityState,
  1337  						Picker:            &testutils.TestConstPicker{Err: err},
  1338  					})
  1339  				},
  1340  			})
  1341  			if err != nil {
  1342  				return err
  1343  			}
  1344  			sc.Connect()
  1345  			return nil
  1346  		},
  1347  	})
  1348  }
  1349  
  1350  // TestInitialIdle verifies that if the child balancer reports Idle, the overall
  1351  // state will be Idle.
  1352  func (s) TestInitialIdle(t *testing.T) {
  1353  	cc := testutils.NewBalancerClientConn(t)
  1354  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
  1355  	defer wtb.Close()
  1356  
  1357  	config, err := wtbParser.ParseConfig([]byte(`
  1358  {
  1359    "targets": {
  1360      "cluster_1": {
  1361        "weight":1,
  1362        "childPolicy": [{"test-init-Idle-balancer": ""}]
  1363      }
  1364    }
  1365  }`))
  1366  	if err != nil {
  1367  		t.Fatalf("failed to parse balancer config: %v", err)
  1368  	}
  1369  
  1370  	// Send the config, and an address with hierarchy path ["cds:cluster_1"].
  1371  	addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}}
  1372  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1373  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
  1374  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addrs[0]}}, []string{"cds:cluster_1"}),
  1375  		}},
  1376  		BalancerConfig: config,
  1377  	}); err != nil {
  1378  		t.Fatalf("failed to update ClientConn state: %v", err)
  1379  	}
  1380  
  1381  	// Wait for a SubConn to be created by the child balancer, and report Idle
  1382  	// on it.
  1383  	for range addrs {
  1384  		sc := <-cc.NewSubConnCh
  1385  		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
  1386  	}
  1387  
  1388  	if state := <-cc.NewStateCh; state != connectivity.Idle {
  1389  		t.Fatalf("Received aggregated state: %v, want Idle", state)
  1390  	}
  1391  }
  1392  
  1393  // TestIgnoreSubBalancerStateTransitions verifies that if the child reports a
  1394  // transition from TransientFailure to Connecting, the overall state remains TransientFailure.
  1395  func (s) TestIgnoreSubBalancerStateTransitions(t *testing.T) {
  1396  	cc := &tcc{BalancerClientConn: testutils.NewBalancerClientConn(t)}
  1397  
  1398  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
  1399  	defer wtb.Close()
  1400  
  1401  	config, err := wtbParser.ParseConfig([]byte(`
  1402  {
  1403    "targets": {
  1404      "cluster_1": {
  1405        "weight":1,
  1406        "childPolicy": [{"round_robin": ""}]
  1407      }
  1408    }
  1409  }`))
  1410  	if err != nil {
  1411  		t.Fatalf("failed to parse balancer config: %v", err)
  1412  	}
  1413  
  1414  	// Send the config, and an address with hierarchy path ["cluster_1"].
  1415  	addr := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil}
  1416  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1417  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
  1418  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr}}, []string{"cluster_1"}),
  1419  		}},
  1420  		BalancerConfig: config,
  1421  	}); err != nil {
  1422  		t.Fatalf("failed to update ClientConn state: %v", err)
  1423  	}
  1424  
  1425  	sc := <-cc.NewSubConnCh
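        	// Report TransientFailure followed by Connecting on the SubConn.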
  1426  	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1427  	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1428  
  1429  	// Verify that the SubConnState update from TF to Connecting is ignored.
  1430  	if len(cc.states) != 2 || cc.states[0].ConnectivityState != connectivity.Connecting || cc.states[1].ConnectivityState != connectivity.TransientFailure {
  1431  		t.Fatalf("cc.states = %v; want [Connecting, TransientFailure]", cc.states)
  1432  	}
  1433  }
  1434  
  1435  // tcc wraps a testutils.BalancerClientConn and stores all state transitions in
  1436  // a slice.
  1437  type tcc struct {
  1438  	*testutils.BalancerClientConn
  1439  	states []balancer.State
  1440  }
  1441  
  1442  func (t *tcc) UpdateState(bs balancer.State) {
  1443  	t.states = append(t.states, bs)
  1444  	t.BalancerClientConn.UpdateState(bs)
  1445  }
  1446  
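        // TestUpdateStatePauses verifies that when the child balancer reports multiple
        // states from within UpdateClientConnState, only the most recent state (Ready
        // in this test) is forwarded to the parent ClientConn.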
  1447  func (s) TestUpdateStatePauses(t *testing.T) {
  1448  	cc := &tcc{BalancerClientConn: testutils.NewBalancerClientConn(t)}
  1449  
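        	// Define and register a stub balancer that reports TransientFailure and
        	// then Ready from within UpdateClientConnState.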
  1450  	balFuncs := stub.BalancerFuncs{
  1451  		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
  1452  			bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: nil})
  1453  			bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil})
  1454  			return nil
  1455  		},
  1456  	}
  1457  	stub.Register("update_state_balancer", balFuncs)
  1458  
  1459  	wtb := wtbBuilder.Build(cc, balancer.BuildOptions{})
  1460  	defer wtb.Close()
  1461  
  1462  	config, err := wtbParser.ParseConfig([]byte(`
  1463  {
  1464    "targets": {
  1465      "cluster_1": {
  1466        "weight":1,
  1467        "childPolicy": [{"update_state_balancer": ""}]
  1468      }
  1469    }
  1470  }`))
  1471  	if err != nil {
  1472  		t.Fatalf("failed to parse balancer config: %v", err)
  1473  	}
  1474  
  1475  	// Send the config, and an address with hierarchy path ["cds:cluster_1"].
  1476  	addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}}
  1477  	if err := wtb.UpdateClientConnState(balancer.ClientConnState{
  1478  		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
  1479  			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addrs[0]}}, []string{"cds:cluster_1"}),
  1480  		}},
  1481  		BalancerConfig: config,
  1482  	}); err != nil {
  1483  		t.Fatalf("failed to update ClientConn state: %v", err)
  1484  	}
  1485  
  1486  	// Verify that the only state update is the second one sent by the child.
  1487  	if len(cc.states) != 1 || cc.states[0].ConnectivityState != connectivity.Ready {
  1488  		t.Fatalf("cc.states = %v; want [Ready]", cc.states)
  1489  	}
  1490  }