google.golang.org/grpc@v1.72.2/xds/internal/balancer/clustermanager/clustermanager_test.go

/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package clustermanager

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/pickfirst"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal/balancer/stub"
	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/internal/hierarchy"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/status"
)

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

const (
	defaultTestTimeout      = 5 * time.Second
	defaultTestShortTimeout = 10 * time.Millisecond
	testBackendAddrsCount   = 12
)

var testBackendAddrStrs []string

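// init populates testBackendAddrStrs with synthetic addresses of the form
// "i.i.i.i:i" (e.g. "0.0.0.0:0", "1.1.1.1:1"). These are never dialed; they
// only flow through the balancer as resolver addresses.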
func init() {
	for i := 0; i < testBackendAddrsCount; i++ {
		testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i))
	}
}

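// testPick makes five picks on p with the given PickInfo and asserts that
// every pick returns the wanted SubConn and error, i.e. that the result is
// stable across repeated picks.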
func testPick(t *testing.T, p balancer.Picker, info balancer.PickInfo, wantSC balancer.SubConn, wantErr error) {
	t.Helper()
	for i := 0; i < 5; i++ {
		gotSCSt, err := p.Pick(info)
		if fmt.Sprint(err) != fmt.Sprint(wantErr) {
			t.Fatalf("picker.Pick(%+v), got error %v, want %v", info, err, wantErr)
		}
		if gotSCSt.SubConn != wantSC {
			t.Fatalf("picker.Pick(%+v), got %v, want SubConn=%v", info, gotSCSt, wantSC)
		}
	}
}

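// TestClusterPicks verifies that picks are routed to the child policy whose
// cluster name is attached to the RPC's context (in production, the xDS
// config selector attaches it). A minimal sketch of the lookup, for some
// picker p:
//
//	ctx := SetPickedCluster(context.Background(), "cds:cluster_1")
//	res, err := p.Pick(balancer.PickInfo{Ctx: ctx})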
func (s) TestClusterPicks(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and endpoints with hierarchy paths ["cds:cluster_1"]
	// and ["cds:cluster_2"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}),
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

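	// The hierarchy attribute decides which child policy receives each
	// endpoint: the cluster manager consumes the first element of the path
	// and hands the endpoint, with that element stripped, to the matching
	// child.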
	m1 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
		addrs[0].BalancerAttributes = nil
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	p1 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "notacluster"`),
		},
	} {
		testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
	}
}

// TestConfigUpdateAddCluster covers the case where the balancer receives a
// config update with additional clusters.
func (s) TestConfigUpdateAddCluster(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and endpoints with hierarchy paths ["cds:cluster_1"]
	// and ["cds:cluster_2"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}),
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	m1 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
		addrs[0].BalancerAttributes = nil
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p1 := <-cc.NewPickerCh
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
	}

	// Send a config update that adds a third cluster. Expect exactly one new
	// subconn and a picker update.
	configJSON2 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_3":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config2, err := parser.ParseConfig([]byte(configJSON2))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
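	// cluster_1 and cluster_2 are unchanged, so their children (and SubConns)
	// should be reused; only cluster_3 should produce a new SubConn.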
	wantAddrs = append(wantAddrs, resolver.Address{Addr: testBackendAddrStrs[2], BalancerAttributes: nil})
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}),
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}),
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[2]}}, []string{"cds:cluster_3"}),
		}},
		BalancerConfig: config2,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Expect exactly one new subconn.
	addrs := <-cc.NewSubConnAddrsCh
	if len(hierarchy.Get(addrs[0])) != 0 {
		t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
	}
	sc := <-cc.NewSubConnCh
	// Clear the attributes before adding to map.
	addrs[0].BalancerAttributes = nil
	m1[addrs[0]] = sc
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})

	// There should be no further new SubConns.
	select {
	case <-time.After(time.Millisecond * 500):
	case <-cc.NewSubConnCh:
		addrs := <-cc.NewSubConnAddrsCh
		t.Fatalf("unexpected NewSubConn with address %v", addrs)
	}

	p2 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_3"),
			},
			wantSC: m1[wantAddrs[2]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p2, tt.pickInfo, tt.wantSC, tt.wantErr)
	}
}

// TestRoutingConfigUpdateDeleteAll covers the case where the balancer receives
// a config update with no clusters. Picks should then fail with an error that
// names the selected cluster.
func (s) TestRoutingConfigUpdateDeleteAll(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"round_robin":""}] },
	"cds:cluster_2":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and endpoints with hierarchy paths ["cds:cluster_1"]
	// and ["cds:cluster_2"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}),
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	m1 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
		addrs[0].BalancerAttributes = nil
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p1 := <-cc.NewPickerCh
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_1"),
			},
			wantSC: m1[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_2"),
			},
			wantSC: m1[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p1, tt.pickInfo, tt.wantSC, tt.wantErr)
	}

	// A config update with no clusters.
	configJSON2 := `{}`
	config2, err := parser.ParseConfig([]byte(configJSON2))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		BalancerConfig: config2,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Expect two removed subconns.
	for range wantAddrs {
		select {
		case <-time.After(time.Millisecond * 500):
			t.Fatalf("timeout waiting for subconns to be shut down")
		case <-cc.ShutdownSubConnCh:
		}
	}

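	// With every cluster removed, any pick should fail with an "unknown
	// cluster" error, no matter which cluster the context names.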
	p2 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, err := p2.Pick(balancer.PickInfo{Ctx: SetPickedCluster(ctx, "cds:notacluster")})
		if fmt.Sprint(err) != status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`).Error() {
			t.Fatalf("picker.Pick, got %v, %v, want error %v", gotSCSt, err, `unknown cluster selected for RPC: "cds:notacluster"`)
		}
	}

	// Resend the previous config with clusters.
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}),
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	m2 := make(map[resolver.Address]balancer.SubConn)
	// Verify that a subconn is created with the address, and the hierarchy path
	// in the address is cleared.
	for range wantAddrs {
		addrs := <-cc.NewSubConnAddrsCh
		if len(hierarchy.Get(addrs[0])) != 0 {
			t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes)
		}
		sc := <-cc.NewSubConnCh
		// Clear the attributes before adding to map.
		addrs[0].BalancerAttributes = nil
		m2[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p3 := <-cc.NewPickerCh
	for _, tt := range []struct {
		pickInfo balancer.PickInfo
		wantSC   balancer.SubConn
		wantErr  error
	}{
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_1"),
			},
			wantSC: m2[wantAddrs[0]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:cluster_2"),
			},
			wantSC: m2[wantAddrs[1]],
		},
		{
			pickInfo: balancer.PickInfo{
				Ctx: SetPickedCluster(ctx, "cds:notacluster"),
			},
			wantErr: status.Errorf(codes.Unavailable, `unknown cluster selected for RPC: "cds:notacluster"`),
		},
	} {
		testPick(t, p3, tt.pickInfo, tt.wantSC, tt.wantErr)
	}
}

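// TestClusterManagerForwardsBalancerBuildOptions verifies that the cluster
// manager passes the BuildOptions it was built with, unmodified, to the child
// policies it creates.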
func (s) TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) {
	const (
		userAgent          = "ua"
		defaultTestTimeout = 1 * time.Second
	)

	// Set up the stub balancer such that we can read the build options passed
	// to it in the UpdateClientConnState method.
	ccsCh := testutils.NewChannel()
	bOpts := balancer.BuildOptions{
		DialCreds:       insecure.NewCredentials(),
		CustomUserAgent: userAgent,
	}
	stub.Register(t.Name(), stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
			if !cmp.Equal(bd.BuildOptions, bOpts) {
				err := fmt.Errorf("buildOptions in child balancer: %v, want %v", bd.BuildOptions, bOpts)
				ccsCh.Send(err)
				return err
			}
			ccsCh.Send(nil)
			return nil
		},
	})

	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, bOpts)

	configJSON1 := fmt.Sprintf(`{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"%s":""}] }
}
}`, t.Name())
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	if err := bal.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: config1}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	v, err := ccsCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timed out waiting for UpdateClientConnState result: %v", err)
	}
	if v != nil {
		t.Fatal(v)
	}
}

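// The stub registered below ("test-init-Idle-balancer") creates a SubConn for
// the resolver addresses and mirrors every SubConn state transition upward,
// attaching a picker that returns errTestInitIdle only when the reported
// state is Idle.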
const initIdleBalancerName = "test-init-Idle-balancer"

var errTestInitIdle = fmt.Errorf("init Idle balancer error 0")

func init() {
	stub.Register(initIdleBalancerName, stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error {
			sc, err := bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{
				StateListener: func(state balancer.SubConnState) {
					err := fmt.Errorf("wrong picker error")
					if state.ConnectivityState == connectivity.Idle {
						err = errTestInitIdle
					}
					bd.ClientConn.UpdateState(balancer.State{
						ConnectivityState: state.ConnectivityState,
						Picker:            &testutils.TestConstPicker{Err: err},
					})
				},
			})
			if err != nil {
				return err
			}
			sc.Connect()
			return nil
		},
	})
}

// TestInitialIdle covers the case where the child policy reports Idle: the
// cluster manager's aggregated state should then also be Idle.
func (s) TestInitialIdle(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"test-init-Idle-balancer":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and an endpoint with hierarchy path ["cds:cluster_1"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Verify that a subconn is created for the address, and report it Idle.
	for range wantAddrs {
		sc := <-cc.NewSubConnCh
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
	}

	if state1 := <-cc.NewStateCh; state1 != connectivity.Idle {
		t.Fatalf("Received aggregated state: %v, want Idle", state1)
	}
}

// TestClusterGracefulSwitch tests the graceful switch functionality for a
// child of the cluster manager. At first, the child is configured as a round
// robin load balancer, and thus should behave accordingly. The test then
// gracefully switches this child to a pick first load balancer. Once that
// balancer updates its state and completes the graceful switch, the new
// picker should reflect the change.
func (s) TestClusterGracefulSwitch(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})
	defer bal.Close()

	configJSON1 := `{
"children": {
	"csp:cluster":{ "childPolicy": [{"round_robin":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
		{Addr: testBackendAddrStrs[1], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"csp:cluster"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	sc1 := <-cc.NewSubConnCh
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	p1 := <-cc.NewPickerCh
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	pi := balancer.PickInfo{
		Ctx: SetPickedCluster(ctx, "csp:cluster"),
	}
	testPick(t, p1, pi, sc1, nil)

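	// Register a stub policy that simply delegates to pick_first. Giving it a
	// distinct name (the test's name) makes the next config a policy change
	// for the same cluster, which triggers the child's graceful switch path.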
	childPolicyName := t.Name()
	stub.Register(childPolicyName, stub.BalancerFuncs{
		Init: func(bd *stub.BalancerData) {
			bd.Data = balancer.Get(pickfirst.Name).Build(bd.ClientConn, bd.BuildOptions)
		},
		Close: func(bd *stub.BalancerData) {
			bd.Data.(balancer.Balancer).Close()
		},
		UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error {
			bal := bd.Data.(balancer.Balancer)
			return bal.UpdateClientConnState(ccs)
		},
	})
	// Same cluster, different balancer type.
	configJSON2 := fmt.Sprintf(`{
"children": {
	"csp:cluster":{ "childPolicy": [{"%s":""}] }
}
}`, childPolicyName)
	config2, err := parser.ParseConfig([]byte(configJSON2))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"csp:cluster"}),
		}},
		BalancerConfig: config2,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}
	sc2 := <-cc.NewSubConnCh
	// Update the pick first balancer's SubConn to CONNECTING. This will cause
	// the pick first balancer to UpdateState() with CONNECTING, which shouldn't
	// send a picker update back, as the graceful switch is not yet complete.
	// Wait only a short time for the (unexpected) picker, rather than burning
	// the test's full timeout.
	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	select {
	case <-cc.NewPickerCh:
		t.Fatalf("No new picker should have been sent due to the Graceful Switch process not completing")
	case <-sCtx.Done():
	}

	// Update the pick first balancer's SubConn to READY. This will cause the
	// pick first balancer to UpdateState() with READY, which should send a
	// picker update back, as the graceful switch is now complete. This picker
	// should always pick the SubConn created by the pick first balancer.
	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	p2 := <-cc.NewPickerCh
	testPick(t, p2, pi, sc2, nil)
	// Completion of the graceful switch should cause the SubConns of the
	// balancer being switched away from to be shut down.
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	select {
	case <-ctx.Done():
		t.Fatalf("error waiting for sc.Shutdown()")
	case rsc := <-cc.ShutdownSubConnCh:
		// The SubConn shut down should be the one created by the child before
		// the switch.
		if rsc != sc1 {
			t.Fatalf("Shutdown() got: %v, want %v", rsc, sc1)
		}
	}
}

// tcc wraps a testutils.BalancerClientConn but records all state transitions
// in a slice.
type tcc struct {
	*testutils.BalancerClientConn
	states []balancer.State
}

func (t *tcc) UpdateState(bs balancer.State) {
	t.states = append(t.states, bs)
	t.BalancerClientConn.UpdateState(bs)
}

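// TestUpdateStatePauses verifies that state updates from a child are paused
// while the cluster manager processes a config update, and that only the
// child's final state is delivered upward afterwards.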
func (s) TestUpdateStatePauses(t *testing.T) {
	cc := &tcc{BalancerClientConn: testutils.NewBalancerClientConn(t)}

	balFuncs := stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
			bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: nil})
			bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil})
			return nil
		},
	}
	stub.Register("update_state_balancer", balFuncs)
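	// The stub sends two updates (TRANSIENT_FAILURE, then READY) from inside
	// UpdateClientConnState; only the latest should reach the ClientConn once
	// the cluster manager finishes processing the config update.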

	builder := balancer.Get(balancerName)
	parser := builder.(balancer.ConfigParser)
	bal := builder.Build(cc, balancer.BuildOptions{})
	defer bal.Close()

	configJSON1 := `{
"children": {
	"cds:cluster_1":{ "childPolicy": [{"update_state_balancer":""}] }
}
}`
	config1, err := parser.ParseConfig([]byte(configJSON1))
	if err != nil {
		t.Fatalf("failed to parse balancer config: %v", err)
	}

	// Send the config, and an endpoint with hierarchy path ["cds:cluster_1"].
	wantAddrs := []resolver.Address{
		{Addr: testBackendAddrStrs[0], BalancerAttributes: nil},
	}
	if err := bal.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}),
		}},
		BalancerConfig: config1,
	}); err != nil {
		t.Fatalf("failed to update ClientConn state: %v", err)
	}

	// Verify that the only state update delivered is the second one sent by
	// the child; the first was absorbed while the config update was being
	// processed.
	if len(cc.states) != 1 || cc.states[0].ConnectivityState != connectivity.Ready {
		t.Fatalf("cc.states = %v; want [connectivity.Ready]", cc.states)
	}
}