google.golang.org/grpc@v1.72.2/xds/internal/balancer/priority/balancer_test.go

     1  /*
     2   *
     3   * Copyright 2021 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package priority
    20  
    21  import (
    22  	"context"
    23  	"errors"
    24  	"fmt"
    25  	"testing"
    26  	"time"
    27  
    28  	"google.golang.org/grpc/balancer"
    29  	"google.golang.org/grpc/balancer/roundrobin"
    30  	"google.golang.org/grpc/connectivity"
    31  	"google.golang.org/grpc/internal/balancer/stub"
    32  	"google.golang.org/grpc/internal/grpctest"
    33  	"google.golang.org/grpc/internal/hierarchy"
    34  	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
    35  	"google.golang.org/grpc/internal/testutils"
    36  	"google.golang.org/grpc/resolver"
    37  )
    38  
    39  const (
    40  	defaultTestTimeout      = 5 * time.Second
    41  	defaultTestShortTimeout = 100 * time.Millisecond
    42  )
    43  
    44  type s struct {
    45  	grpctest.Tester
    46  }
    47  
    48  func Test(t *testing.T) {
    49  	grpctest.RunSubTests(t, s{})
    50  }
    51  
    52  var testBackendAddrStrs []string
    53  
    54  const (
    55  	testBackendAddrsCount = 12
    56  	testRRBalancerName    = "another-round-robin"
    57  )
    58  
    59  type anotherRR struct {
    60  	balancer.Builder
    61  }
    62  
    63  func (*anotherRR) Name() string {
    64  	return testRRBalancerName
    65  }
    66  
    67  func init() {
    68  	for i := 0; i < testBackendAddrsCount; i++ {
    69  		testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i))
    70  	}
    71  	// Disable sub-balancer caching for all but the tests which exercise the
    72  	// caching behavior.
    73  	DefaultSubBalancerCloseTimeout = time.Duration(0)
    74  	balancer.Register(&anotherRR{Builder: balancer.Get(roundrobin.Name)})
    75  }
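
// Illustrative sketch, not part of the upstream file: every test below builds
// a balancer.ClientConnState by hand, with one single-address endpoint per
// child and a round_robin config for each child. A hypothetical helper like
// the one below (the name ccStateWithPriorities is made up) could factor out
// that boilerplate using only APIs already exercised in this file.
func ccStateWithPriorities(addrs ...string) balancer.ClientConnState {
	endpoints := make([]resolver.Endpoint, 0, len(addrs))
	children := make(map[string]*Child, len(addrs))
	priorities := make([]string, 0, len(addrs))
	for i, addr := range addrs {
		name := fmt.Sprintf("child-%d", i)
		// Tag each endpoint with its child's hierarchy path so the priority
		// balancer routes it to the right child.
		endpoints = append(endpoints, hierarchy.SetInEndpoint(
			resolver.Endpoint{Addresses: []resolver.Address{{Addr: addr}}},
			[]string{name},
		))
		children[name] = &Child{Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}
		priorities = append(priorities, name)
	}
	return balancer.ClientConnState{
		ResolverState:  resolver.State{Endpoints: endpoints},
		BalancerConfig: &LBConfig{Children: children, Priorities: priorities},
	}
}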
    76  
    77  // When a high priority is ready, adding or removing a lower-priority
    78  // locality doesn't cause any changes.
    79  //
    80  // Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0.
    81  func (s) TestPriority_HighPriorityReady(t *testing.T) {
    82  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
    83  	defer cancel()
    84  
    85  	cc := testutils.NewBalancerClientConn(t)
    86  	bb := balancer.Get(Name)
    87  	pb := bb.Build(cc, balancer.BuildOptions{})
    88  	defer pb.Close()
    89  
    90  	// Two children, with priorities [0, 1], each with one backend.
    91  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
    92  		ResolverState: resolver.State{
    93  			Endpoints: []resolver.Endpoint{
    94  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
    95  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
    96  			},
    97  		},
    98  		BalancerConfig: &LBConfig{
    99  			Children: map[string]*Child{
   100  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   101  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   102  			},
   103  			Priorities: []string{"child-0", "child-1"},
   104  		},
   105  	}); err != nil {
   106  		t.Fatalf("failed to update ClientConn state: %v", err)
   107  	}
   108  
   109  	addrs1 := <-cc.NewSubConnAddrsCh
   110  	if got, want := addrs1[0].Addr, testBackendAddrStrs[0]; got != want {
   111  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   112  	}
   113  	sc1 := <-cc.NewSubConnCh
   114  
   115  	// p0 is ready.
   116  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   117  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   118  
   119  	// Test roundrobin with only p0 subconns.
   120  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   121  		t.Fatal(err.Error())
   122  	}
   123  
   124  	// Add p2, it shouldn't cause any updates.
   125  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   126  		ResolverState: resolver.State{
   127  			Endpoints: []resolver.Endpoint{
   128  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   129  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   130  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}),
   131  			},
   132  		},
   133  		BalancerConfig: &LBConfig{
   134  			Children: map[string]*Child{
   135  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   136  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   137  				"child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   138  			},
   139  			Priorities: []string{"child-0", "child-1", "child-2"},
   140  		},
   141  	}); err != nil {
   142  		t.Fatalf("failed to update ClientConn state: %v", err)
   143  	}
   144  
   145  	select {
   146  	case sc := <-cc.NewSubConnCh:
   147  		t.Fatalf("got unexpected new SubConn: %s", sc)
   148  	case sc := <-cc.ShutdownSubConnCh:
   149  		t.Fatalf("got unexpected shutdown SubConn: %v", sc)
   150  	case <-time.After(time.Millisecond * 100):
   151  	}
   152  
   153  	// Test roundrobin with only p0 subconns.
   154  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   155  		t.Fatal(err.Error())
   156  	}
   157  
   158  	// Remove p2, no updates.
   159  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   160  		ResolverState: resolver.State{
   161  			Endpoints: []resolver.Endpoint{
   162  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   163  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   164  			},
   165  		},
   166  		BalancerConfig: &LBConfig{
   167  			Children: map[string]*Child{
   168  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   169  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   170  			},
   171  			Priorities: []string{"child-0", "child-1"},
   172  		},
   173  	}); err != nil {
   174  		t.Fatalf("failed to update ClientConn state: %v", err)
   175  	}
   176  
   177  	select {
   178  	case <-cc.NewSubConnCh:
   179  		t.Fatalf("got unexpected new SubConn")
   180  	case <-cc.ShutdownSubConnCh:
   181  		t.Fatalf("got unexpected shutdown SubConn")
   182  	case <-time.After(time.Millisecond * 100):
   183  	}
   184  
   185  	// Test roundrobin with only p0 subconns.
   186  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   187  		t.Fatal(err.Error())
   188  	}
   189  }
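
// Illustrative sketch, not part of the upstream file: the selects above that
// assert "no balancer activity" poll NewSubConnCh and ShutdownSubConnCh for a
// hard-coded 100ms. A hypothetical helper could reuse the
// defaultTestShortTimeout constant for that window; it assumes the
// *testutils.BalancerClientConn type, inferred from the NewBalancerClientConn
// constructor used throughout these tests.
func verifyNoSubConnChanges(t *testing.T, cc *testutils.BalancerClientConn) {
	t.Helper()
	select {
	case sc := <-cc.NewSubConnCh:
		t.Fatalf("got unexpected new SubConn: %v", sc)
	case sc := <-cc.ShutdownSubConnCh:
		t.Fatalf("got unexpected shutdown SubConn: %v", sc)
	case <-time.After(defaultTestShortTimeout):
		// No SubConn churn within the short window; treat as success.
	}
}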
   190  
   191  // Lower priority is used when higher priority is not ready.
   192  //
   193  // Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is
   194  // down, use 2; remove 2, use 1.
   195  func (s) TestPriority_SwitchPriority(t *testing.T) {
   196  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   197  	defer cancel()
   198  
   199  	cc := testutils.NewBalancerClientConn(t)
   200  	bb := balancer.Get(Name)
   201  	pb := bb.Build(cc, balancer.BuildOptions{})
   202  	defer pb.Close()
   203  
   204  	t.Log("Two localities, with priorities [0, 1], each with one backend.")
   205  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   206  		ResolverState: resolver.State{
   207  			Endpoints: []resolver.Endpoint{
   208  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   209  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   210  			},
   211  		},
   212  		BalancerConfig: &LBConfig{
   213  			Children: map[string]*Child{
   214  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   215  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   216  			},
   217  			Priorities: []string{"child-0", "child-1"},
   218  		},
   219  	}); err != nil {
   220  		t.Fatalf("failed to update ClientConn state: %v", err)
   221  	}
   222  
   223  	addrs0 := <-cc.NewSubConnAddrsCh
   224  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
   225  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   226  	}
   227  	sc0 := <-cc.NewSubConnCh
   228  
   229  	t.Log("Make p0 ready.")
   230  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   231  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   232  
   233  	// Test roundrobin with only p0 subconns.
   234  	if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil {
   235  		t.Fatal(err.Error())
   236  	}
   237  
   238  	t.Log("Turn down 0, will start and use 1.")
   239  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   240  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
   241  	// will retry.
   242  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   243  		t.Fatal(err.Error())
   244  	}
   245  
   246  	t.Log("Handle SubConn creation from 1.")
   247  	addrs1 := <-cc.NewSubConnAddrsCh
   248  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
   249  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   250  	}
   251  	sc1 := <-cc.NewSubConnCh
   252  	<-sc1.ConnectCh
   253  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   254  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   255  
   256  	// Test pick with 1.
   257  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   258  		t.Fatal(err.Error())
   259  	}
   260  
   261  	t.Log("Add p2, it shouldn't cause any updates.")
   262  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   263  		ResolverState: resolver.State{
   264  			Endpoints: []resolver.Endpoint{
   265  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   266  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   267  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}),
   268  			},
   269  		},
   270  		BalancerConfig: &LBConfig{
   271  			Children: map[string]*Child{
   272  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   273  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   274  				"child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   275  			},
   276  			Priorities: []string{"child-0", "child-1", "child-2"},
   277  		},
   278  	}); err != nil {
   279  		t.Fatalf("failed to update ClientConn state: %v", err)
   280  	}
   281  
   282  	select {
   283  	case sc := <-cc.NewSubConnCh:
   284  		t.Fatalf("got unexpected new SubConn, %s", sc)
   285  	case <-cc.ShutdownSubConnCh:
   286  		t.Fatalf("got unexpected shutdown SubConn")
   287  	case <-time.After(time.Millisecond * 100):
   288  	}
   289  
   290  	t.Log("Turn down 1, use 2.")
   291  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
   292  	<-sc1.ConnectCh
   293  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   294  	sc1.UpdateState(balancer.SubConnState{
   295  		ConnectivityState: connectivity.TransientFailure,
   296  		ConnectionError:   errors.New("test error"),
   297  	})
   298  
   299  	// Before 2 gets READY, picker should return NoSubConnAvailable, so RPCs
   300  	// will retry.
   301  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   302  		t.Fatal(err.Error())
   303  	}
   304  
   305  	addrs2 := <-cc.NewSubConnAddrsCh
   306  	if got, want := addrs2[0].Addr, testBackendAddrStrs[2]; got != want {
   307  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   308  	}
   309  	sc2 := <-cc.NewSubConnCh
   310  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   311  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   312  
   313  	// Test pick with 2.
   314  	if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil {
   315  		t.Fatal(err.Error())
   316  	}
   317  
   318  	t.Log("Remove 2, use 1.")
   319  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   320  		ResolverState: resolver.State{
   321  			Endpoints: []resolver.Endpoint{
   322  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   323  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   324  			},
   325  		},
   326  		BalancerConfig: &LBConfig{
   327  			Children: map[string]*Child{
   328  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   329  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   330  			},
   331  			Priorities: []string{"child-0", "child-1"},
   332  		},
   333  	}); err != nil {
   334  		t.Fatalf("failed to update ClientConn state: %v", err)
   335  	}
   336  
   337  	// p2 SubConns are shut down.
   338  	scToShutdown := <-cc.ShutdownSubConnCh
   339  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   340  	// are closed. Remove duplicate events.
   341  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   342  	// workaround once pickfirst is the only leaf policy and responsible for
   343  	// shutting down SubConns.
   344  	<-cc.ShutdownSubConnCh
   345  	if scToShutdown != sc2 {
   346  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc2, scToShutdown)
   347  	}
   348  
   349  	// Should get an update with 1's old transient failure picker, to override
   350  	// 2's old picker.
   351  	if err := cc.WaitForErrPicker(ctx); err != nil {
   352  		t.Fatal(err.Error())
   353  	}
   354  	<-cc.NewStateCh // Drain to match picker
   355  
   356  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   357  	// Does not change the aggregate state, because round robin does not leave
   358  	// TRANSIENT_FAILURE if a subconn goes CONNECTING.
   359  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   360  
   361  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   362  		t.Fatal(err.Error())
   363  	}
   364  }
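
// Illustrative sketch, not part of the upstream file: the tests above and
// below read ShutdownSubConnCh twice per shutdown because gracefulswitch and
// pickfirstleaf both report closing the same SubConn (see the TODO referencing
// gRPC issue #6472). A hypothetical helper, again assuming the
// *testutils.BalancerClientConn type, could centralize that draining:
func nextShutdownSubConn(cc *testutils.BalancerClientConn) balancer.SubConn {
	sc := <-cc.ShutdownSubConnCh
	// Drain the duplicate shutdown event for the same SubConn.
	<-cc.ShutdownSubConnCh
	return sc
}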
   365  
   366  // Lower priority is used when the higher priority turns Connecting from
   367  // Ready, because changing from Ready to Connecting is treated as a failure.
   368  //
   369  // Init 0 and 1; 0 is up, use 0; 0 is connecting, 1 is up, use 1; 0 is ready,
   370  // use 0.
   371  func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) {
   372  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   373  	defer cancel()
   374  
   375  	cc := testutils.NewBalancerClientConn(t)
   376  	bb := balancer.Get(Name)
   377  	pb := bb.Build(cc, balancer.BuildOptions{})
   378  	defer pb.Close()
   379  
   380  	// Two localities, with priorities [0, 1], each with one backend.
   381  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   382  		ResolverState: resolver.State{
   383  			Endpoints: []resolver.Endpoint{
   384  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   385  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   386  			},
   387  		},
   388  		BalancerConfig: &LBConfig{
   389  			Children: map[string]*Child{
   390  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   391  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   392  			},
   393  			Priorities: []string{"child-0", "child-1"},
   394  		},
   395  	}); err != nil {
   396  		t.Fatalf("failed to update ClientConn state: %v", err)
   397  	}
   398  
   399  	addrs0 := <-cc.NewSubConnAddrsCh
   400  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
   401  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   402  	}
   403  	sc0 := <-cc.NewSubConnCh
   404  
   405  	// p0 is ready.
   406  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   407  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   408  
   409  	// Test roundrobin with only p0 subconns.
   410  	if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil {
   411  		t.Fatal(err.Error())
   412  	}
   413  
   414  	// Turn 0 to TransientFailure, will start and use 1.
   415  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   416  
   417  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
   418  	// will retry.
   419  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   420  		t.Fatal(err.Error())
   421  	}
   422  
   423  	// Handle SubConn creation from 1.
   424  	addrs1 := <-cc.NewSubConnAddrsCh
   425  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
   426  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   427  	}
   428  	sc1 := <-cc.NewSubConnCh
   429  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   430  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   431  
   432  	// Test pick with 1.
   433  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   434  		t.Fatal(err.Error())
   435  	}
   436  
   437  	// Turn 0 back to Ready.
   438  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   439  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   440  
   441  	// p1 subconn should be shut down.
   442  	scToShutdown := <-cc.ShutdownSubConnCh
   443  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   444  	// are closed. Remove duplicate events.
   445  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   446  	// workaround once pickfirst is the only leaf policy and responsible for
   447  	// shutting down SubConns.
   448  	<-cc.ShutdownSubConnCh
   449  	if scToShutdown != sc1 {
   450  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scToShutdown)
   451  	}
   452  
   453  	if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil {
   454  		t.Fatal(err.Error())
   455  	}
   456  }
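
// Illustrative sketch, not part of the upstream file: these tests drive each
// newly created SubConn from CONNECTING to READY with two back-to-back
// UpdateState calls. A hypothetical helper, assuming the *testutils.TestSubConn
// type delivered on NewSubConnCh, could name that transition once:
func makeSubConnReady(sc *testutils.TestSubConn) {
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
}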
   457  
   458  // Add a lower priority while the higher priorities are down.
   459  //
   460  // Init 0 and 1; 0 and 1 both down; add 2, use 2.
   461  func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) {
   462  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   463  	defer cancel()
   464  
   465  	cc := testutils.NewBalancerClientConn(t)
   466  	bb := balancer.Get(Name)
   467  	pb := bb.Build(cc, balancer.BuildOptions{})
   468  	defer pb.Close()
   469  
   470  	// Two localities, with different priorities, each with one backend.
   471  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   472  		ResolverState: resolver.State{
   473  			Endpoints: []resolver.Endpoint{
   474  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   475  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   476  			},
   477  		},
   478  		BalancerConfig: &LBConfig{
   479  			Children: map[string]*Child{
   480  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   481  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   482  			},
   483  			Priorities: []string{"child-0", "child-1"},
   484  		},
   485  	}); err != nil {
   486  		t.Fatalf("failed to update ClientConn state: %v", err)
   487  	}
   488  
   489  	addrs0 := <-cc.NewSubConnAddrsCh
   490  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
   491  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   492  	}
   493  	sc0 := <-cc.NewSubConnCh
   494  
   495  	t.Log("Turn down 0, 1 is used.")
   496  	testErr := errors.New("test error")
   497  	sc0.UpdateState(balancer.SubConnState{
   498  		ConnectivityState: connectivity.TransientFailure,
   499  		ConnectionError:   testErr,
   500  	})
   501  
   502  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
   503  	// will retry.
   504  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   505  		t.Fatal(err.Error())
   506  	}
   507  
   508  	addrs1 := <-cc.NewSubConnAddrsCh
   509  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
   510  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   511  	}
   512  	sc1 := <-cc.NewSubConnCh
   513  
   514  	t.Log("Turn down 1, pick should error.")
   515  	sc1.UpdateState(balancer.SubConnState{
   516  		ConnectivityState: connectivity.TransientFailure,
   517  		ConnectionError:   testErr,
   518  	})
   519  
   520  	// Test pick failure.
   521  	if err := cc.WaitForPickerWithErr(ctx, testErr); err != nil {
   522  		t.Fatal(err.Error())
   523  	}
   524  	<-cc.NewStateCh // Drain to match picker
   525  
   526  	t.Log("Add p2, it should create a new SubConn.")
   527  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   528  		ResolverState: resolver.State{
   529  			Endpoints: []resolver.Endpoint{
   530  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   531  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   532  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}),
   533  			},
   534  		},
   535  		BalancerConfig: &LBConfig{
   536  			Children: map[string]*Child{
   537  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   538  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   539  				"child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   540  			},
   541  			Priorities: []string{"child-0", "child-1", "child-2"},
   542  		},
   543  	}); err != nil {
   544  		t.Fatalf("failed to update ClientConn state: %v", err)
   545  	}
   546  
   547  	// A new connecting picker should be updated for the new priority.
   548  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   549  		t.Fatal(err.Error())
   550  	}
   551  
   552  	addrs2 := <-cc.NewSubConnAddrsCh
   553  	if got, want := addrs2[0].Addr, testBackendAddrStrs[2]; got != want {
   554  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   555  	}
   556  	sc2 := <-cc.NewSubConnCh
   557  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   558  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   559  
   560  	// Test pick with 2.
   561  	if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil {
   562  		t.Fatal(err.Error())
   563  	}
   564  }
   565  
   566  // When a higher priority becomes available, all lower priorities are closed.
   567  //
   568  // Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2.
   569  func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) {
   570  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   571  	defer cancel()
   572  
   573  	cc := testutils.NewBalancerClientConn(t)
   574  	bb := balancer.Get(Name)
   575  	pb := bb.Build(cc, balancer.BuildOptions{})
   576  	defer pb.Close()
   577  
   578  	// Three localities, with priorities [0,1,2], each with one backend.
   579  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   580  		ResolverState: resolver.State{
   581  			Endpoints: []resolver.Endpoint{
   582  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   583  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   584  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}),
   585  			},
   586  		},
   587  		BalancerConfig: &LBConfig{
   588  			Children: map[string]*Child{
   589  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   590  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   591  				"child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   592  			},
   593  			Priorities: []string{"child-0", "child-1", "child-2"},
   594  		},
   595  	}); err != nil {
   596  		t.Fatalf("failed to update ClientConn state: %v", err)
   597  	}
   598  
   599  	addrs0 := <-cc.NewSubConnAddrsCh
   600  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
   601  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   602  	}
   603  	sc0 := <-cc.NewSubConnCh
   604  
   605  	// Turn down 0, 1 is used.
   606  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   607  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
   608  	// will retry.
   609  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   610  		t.Fatal(err.Error())
   611  	}
   612  
   613  	addrs1 := <-cc.NewSubConnAddrsCh
   614  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
   615  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   616  	}
   617  	sc1 := <-cc.NewSubConnCh
   618  
   619  	// Turn down 1, 2 is used.
   620  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
   621  	// Before 2 gets READY, picker should return NoSubConnAvailable, so RPCs
   622  	// will retry.
   623  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   624  		t.Fatal(err.Error())
   625  	}
   626  
   627  	addrs2 := <-cc.NewSubConnAddrsCh
   628  	if got, want := addrs2[0].Addr, testBackendAddrStrs[2]; got != want {
   629  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   630  	}
   631  	sc2 := <-cc.NewSubConnCh
   632  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   633  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   634  
   635  	// Test pick with 2.
   636  	if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil {
   637  		t.Fatal(err.Error())
   638  	}
   639  
   640  	// When 0 becomes ready, 0 should be used, and 1 and 2 should both be closed.
   641  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   642  
   643  	// sc1 and sc2 should be shut down.
   644  	//
   645  	// With localities caching, the lower priorities are closed after a timeout,
   646  	// in goroutines. The order is no longer guaranteed.
   647  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   648  	// are closed. Remove duplicate events.
   649  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   650  	// workaround once pickfirst is the only leaf policy and responsible for
   651  	// shutting down SubConns.
   652  	scToShutdown := [2]balancer.SubConn{}
   653  	scToShutdown[0] = <-cc.ShutdownSubConnCh
   654  	<-cc.ShutdownSubConnCh
   655  	scToShutdown[1] = <-cc.ShutdownSubConnCh
   656  	<-cc.ShutdownSubConnCh
   657  
   658  	if !(scToShutdown[0] == sc1 && scToShutdown[1] == sc2) && !(scToShutdown[0] == sc2 && scToShutdown[1] == sc1) {
   659  		t.Errorf("ShutdownSubConn, want [%v, %v], got %v", sc1, sc2, scToShutdown)
   660  	}
   661  
   662  	// Test pick with 0.
   663  	if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil {
   664  		t.Fatal(err.Error())
   665  	}
   666  }
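
// Illustrative sketch, not part of the upstream file: when several lower
// priorities are closed at once (as above), the shutdown events may arrive in
// any order and each is duplicated (issue #6472 workaround). A hypothetical
// helper, assuming the *testutils.BalancerClientConn type, could collect them
// into a set for order-insensitive comparison:
func collectShutdownSubConns(cc *testutils.BalancerClientConn, n int) map[balancer.SubConn]bool {
	got := make(map[balancer.SubConn]bool, n)
	for i := 0; i < n; i++ {
		got[<-cc.ShutdownSubConnCh] = true
		// Drain the duplicate event for the same SubConn.
		<-cc.ShutdownSubConnCh
	}
	return got
}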
   667  
   668  // At init, start the next lower priority after timeout if the higher priority
   669  // doesn't get ready.
   670  //
   671  // Init 0,1; 0 is not ready (in connecting), after timeout, use 1.
   672  func (s) TestPriority_InitTimeout(t *testing.T) {
   673  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   674  	defer cancel()
   675  
   676  	const testPriorityInitTimeout = 200 * time.Millisecond
   677  	defer func() func() {
   678  		old := DefaultPriorityInitTimeout
   679  		DefaultPriorityInitTimeout = testPriorityInitTimeout
   680  		return func() {
   681  			DefaultPriorityInitTimeout = old
   682  		}
   683  	}()()
   684  
   685  	cc := testutils.NewBalancerClientConn(t)
   686  	bb := balancer.Get(Name)
   687  	pb := bb.Build(cc, balancer.BuildOptions{})
   688  	defer pb.Close()
   689  
   690  	// Two localities, with different priorities, each with one backend.
   691  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   692  		ResolverState: resolver.State{
   693  			Endpoints: []resolver.Endpoint{
   694  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   695  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   696  			},
   697  		},
   698  		BalancerConfig: &LBConfig{
   699  			Children: map[string]*Child{
   700  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   701  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   702  			},
   703  			Priorities: []string{"child-0", "child-1"},
   704  		},
   705  	}); err != nil {
   706  		t.Fatalf("failed to update ClientConn state: %v", err)
   707  	}
   708  
   709  	addrs0 := <-cc.NewSubConnAddrsCh
   710  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
   711  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   712  	}
   713  	sc0 := <-cc.NewSubConnCh
   714  
   715  	// Keep 0 in connecting, 1 will be used after init timeout.
   716  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   717  
   718  	// Make sure no new SubConn is created before the init timeout fires.
   719  	select {
   720  	case <-time.After(testPriorityInitTimeout * 3 / 4):
   721  	case <-cc.NewSubConnAddrsCh:
   722  		t.Fatalf("got a new SubConn before the init timeout; want a new SubConn only after the timeout")
   723  	}
   724  
   725  	addrs1 := <-cc.NewSubConnAddrsCh
   726  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
   727  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   728  	}
   729  	sc1 := <-cc.NewSubConnCh
   730  
   731  	// After the init timer of p0, when switching to p1, a connecting picker
   732  	// will be sent to the parent. Clear it here.
   733  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   734  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   735  
   736  	// Test pick with 1.
   737  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   738  		t.Fatal(err.Error())
   739  	}
   740  }
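
// Illustrative sketch, not part of the upstream file: this test and the ones
// below override DefaultPriorityInitTimeout and restore it with a deferred
// closure. A hypothetical t.Cleanup-based equivalent would read:
func overrideInitTimeout(t *testing.T, d time.Duration) {
	t.Helper()
	old := DefaultPriorityInitTimeout
	DefaultPriorityInitTimeout = d
	// Restore the package-level default when the test finishes.
	t.Cleanup(func() { DefaultPriorityInitTimeout = old })
}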
   741  
   742  // EDS removes all priorities, and re-adds them.
   743  func (s) TestPriority_RemovesAllPriorities(t *testing.T) {
   744  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   745  	defer cancel()
   746  
   747  	const testPriorityInitTimeout = 200 * time.Millisecond
   748  	defer func() func() {
   749  		old := DefaultPriorityInitTimeout
   750  		DefaultPriorityInitTimeout = testPriorityInitTimeout
   751  		return func() {
   752  			DefaultPriorityInitTimeout = old
   753  		}
   754  	}()()
   755  
   756  	cc := testutils.NewBalancerClientConn(t)
   757  	bb := balancer.Get(Name)
   758  	pb := bb.Build(cc, balancer.BuildOptions{})
   759  	defer pb.Close()
   760  
   761  	// Two localities, with different priorities, each with one backend.
   762  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   763  		ResolverState: resolver.State{
   764  			Endpoints: []resolver.Endpoint{
   765  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   766  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   767  			},
   768  		},
   769  		BalancerConfig: &LBConfig{
   770  			Children: map[string]*Child{
   771  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   772  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   773  			},
   774  			Priorities: []string{"child-0", "child-1"},
   775  		},
   776  	}); err != nil {
   777  		t.Fatalf("failed to update ClientConn state: %v", err)
   778  	}
   779  
   780  	addrs0 := <-cc.NewSubConnAddrsCh
   781  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
   782  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   783  	}
   784  	sc0 := <-cc.NewSubConnCh
   785  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   786  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   787  
   788  	// Test roundrobin with only p0 subconns.
   789  	if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil {
   790  		t.Fatal(err.Error())
   791  	}
   792  
   793  	// Remove all priorities.
   794  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   795  		ResolverState: resolver.State{
   796  			Addresses: nil,
   797  		},
   798  		BalancerConfig: &LBConfig{
   799  			Children:   nil,
   800  			Priorities: nil,
   801  		},
   802  	}); err != nil {
   803  		t.Fatalf("failed to update ClientConn state: %v", err)
   804  	}
   805  
   806  	// p0 subconn should be shut down.
   807  	scToShutdown := <-cc.ShutdownSubConnCh
   808  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   809  	// are closed. Remove duplicate events.
   810  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   811  	// workaround once pickfirst is the only leaf policy and responsible for
   812  	// shutting down SubConns.
   813  	<-cc.ShutdownSubConnCh
   814  	if scToShutdown != sc0 {
   815  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc0, scToShutdown)
   816  	}
   817  
   818  	// Picks should now fail with ErrAllPrioritiesRemoved.
   819  	if err := cc.WaitForPickerWithErr(ctx, ErrAllPrioritiesRemoved); err != nil {
   820  		t.Fatal(err.Error())
   821  	}
   822  
   823  	// Re-add two localities, with previous priorities, but different backends.
   824  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   825  		ResolverState: resolver.State{
   826  			Endpoints: []resolver.Endpoint{
   827  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-0"}),
   828  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[3]}}}, []string{"child-1"}),
   829  			},
   830  		},
   831  		BalancerConfig: &LBConfig{
   832  			Children: map[string]*Child{
   833  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   834  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   835  			},
   836  			Priorities: []string{"child-0", "child-1"},
   837  		},
   838  	}); err != nil {
   839  		t.Fatalf("failed to update ClientConn state: %v", err)
   840  	}
   841  
   842  	addrs01 := <-cc.NewSubConnAddrsCh
   843  	if got, want := addrs01[0].Addr, testBackendAddrStrs[2]; got != want {
   844  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   845  	}
   846  	sc01 := <-cc.NewSubConnCh
   847  
   848  	// Don't send any update to p0, so as not to override p0's old state.
   849  	// Later, connect to p1 and then remove p1. This will fall back to p0, and
   850  	// would send p0's old picker if that state was not correctly removed.
   851  
   852  	// p1 will be used after priority init timeout.
   853  	addrs11 := <-cc.NewSubConnAddrsCh
   854  	if got, want := addrs11[0].Addr, testBackendAddrStrs[3]; got != want {
   855  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   856  	}
   857  	sc11 := <-cc.NewSubConnCh
   858  	sc11.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   859  	sc11.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   860  
   861  	// Test roundrobin with only p1 subconns.
   862  	if err := cc.WaitForRoundRobinPicker(ctx, sc11); err != nil {
   863  		t.Fatal(err.Error())
   864  	}
   865  
   866  	// Remove p1, to fallback to p0.
   867  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   868  		ResolverState: resolver.State{
   869  			Endpoints: []resolver.Endpoint{
   870  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-0"}),
   871  			},
   872  		},
   873  		BalancerConfig: &LBConfig{
   874  			Children: map[string]*Child{
   875  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   876  			},
   877  			Priorities: []string{"child-0"},
   878  		},
   879  	}); err != nil {
   880  		t.Fatalf("failed to update ClientConn state: %v", err)
   881  	}
   882  
   883  	// p1 subconn should be shut down.
   884  	scToShutdown1 := <-cc.ShutdownSubConnCh
   885  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
   886  	// are closed. Remove duplicate events.
   887  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
   888  	// workaround once pickfirst is the only leaf policy and responsible for
   889  	// shutting down SubConns.
   890  	<-cc.ShutdownSubConnCh
   891  	if scToShutdown1 != sc11 {
   892  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc11, scToShutdown1)
   893  	}
   894  
   895  	// Picks should now fail with ErrNoSubConnAvailable.
   896  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   897  		t.Fatal(err.Error())
   898  	}
   899  
   900  	// Send a ready update for the p0 sc that was received when re-adding
   901  	// priorities.
   902  	sc01.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   903  	sc01.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   904  
   905  	// Test roundrobin with only p0 subconns.
   906  	if err := cc.WaitForRoundRobinPicker(ctx, sc01); err != nil {
   907  		t.Fatal(err.Error())
   908  	}
   909  
   910  	select {
   911  	case <-cc.NewPickerCh:
   912  		t.Fatalf("got unexpected new picker")
   913  	case <-cc.NewSubConnCh:
   914  		t.Fatalf("got unexpected new SubConn")
   915  	case <-cc.ShutdownSubConnCh:
   916  		t.Fatalf("got unexpected shutdown SubConn")
   917  	case <-time.After(time.Millisecond * 100):
   918  	}
   919  }
   920  
   921  // Test the case where the high priority contains no backends. The low priority
   922  // will be used.
   923  func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) {
   924  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   925  	defer cancel()
   926  
   927  	cc := testutils.NewBalancerClientConn(t)
   928  	bb := balancer.Get(Name)
   929  	pb := bb.Build(cc, balancer.BuildOptions{})
   930  	defer pb.Close()
   931  
   932  	// Two localities, with priorities [0, 1], each with one backend.
   933  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   934  		ResolverState: resolver.State{
   935  			Endpoints: []resolver.Endpoint{
   936  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
   937  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   938  			},
   939  		},
   940  		BalancerConfig: &LBConfig{
   941  			Children: map[string]*Child{
   942  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   943  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   944  			},
   945  			Priorities: []string{"child-0", "child-1"},
   946  		},
   947  	}); err != nil {
   948  		t.Fatalf("failed to update ClientConn state: %v", err)
   949  	}
   950  
   951  	addrs1 := <-cc.NewSubConnAddrsCh
   952  	if got, want := addrs1[0].Addr, testBackendAddrStrs[0]; got != want {
   953  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   954  	}
   955  	sc1 := <-cc.NewSubConnCh
   956  
   957  	// p0 is ready.
   958  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
   959  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
   960  
   961  	// Test roundrobin with only p0 subconns.
   962  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
   963  		t.Fatal(err.Error())
   964  	}
   965  
   966  	// Remove addresses from priority 0, should use p1.
   967  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
   968  		ResolverState: resolver.State{
   969  			Endpoints: []resolver.Endpoint{
   970  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
   971  			},
   972  		},
   973  		BalancerConfig: &LBConfig{
   974  			Children: map[string]*Child{
   975  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   976  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
   977  			},
   978  			Priorities: []string{"child-0", "child-1"},
   979  		},
   980  	}); err != nil {
   981  		t.Fatalf("failed to update ClientConn state: %v", err)
   982  	}
   983  
   984  	// p0 will shut down its SubConn, and the test then delivers the Shutdown
   985  	// state update that the real ClientConn would send back.
   986  	scToShutdown := <-cc.ShutdownSubConnCh
   987  	scToShutdown.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
   988  
   989  	addrs2 := <-cc.NewSubConnAddrsCh
   990  	if got, want := addrs2[0].Addr, testBackendAddrStrs[1]; got != want {
   991  		t.Fatalf("sc is created with addr %v, want %v", got, want)
   992  	}
   993  	sc2 := <-cc.NewSubConnCh
   994  
   995  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
   996  	// will retry.
   997  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
   998  		t.Fatal(err.Error())
   999  	}
  1000  
  1001  	// p1 is ready.
  1002  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1003  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1004  
  1005  	// Test roundrobin with only p1 subconns.
  1006  	if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil {
  1007  		t.Fatal(err.Error())
  1008  	}
  1009  }
  1010  
  1011  // Test the case where the first and only priority is removed.
  1012  func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) {
  1013  	const testPriorityInitTimeout = 200 * time.Millisecond
  1014  	defer func(t time.Duration) {
  1015  		DefaultPriorityInitTimeout = t
  1016  	}(DefaultPriorityInitTimeout)
  1017  	DefaultPriorityInitTimeout = testPriorityInitTimeout
  1018  
  1019  	cc := testutils.NewBalancerClientConn(t)
  1020  	bb := balancer.Get(Name)
  1021  	pb := bb.Build(cc, balancer.BuildOptions{})
  1022  	defer pb.Close()
  1023  
  1024  	// One locality, with priority 0, with one backend.
  1025  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1026  		ResolverState: resolver.State{
  1027  			Endpoints: []resolver.Endpoint{
  1028  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1029  			},
  1030  		},
  1031  		BalancerConfig: &LBConfig{
  1032  			Children: map[string]*Child{
  1033  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1034  			},
  1035  			Priorities: []string{"child-0"},
  1036  		},
  1037  	}); err != nil {
  1038  		t.Fatalf("failed to update ClientConn state: %v", err)
  1039  	}
  1040  
  1041  	// Remove the only locality.
  1042  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1043  		ResolverState: resolver.State{
  1044  			Addresses: nil,
  1045  		},
  1046  		BalancerConfig: &LBConfig{
  1047  			Children:   nil,
  1048  			Priorities: nil,
  1049  		},
  1050  	}); err != nil {
  1051  		t.Fatalf("failed to update ClientConn state: %v", err)
  1052  	}
  1053  
  1054  	// Wait for double the init timer timeout to ensure it doesn't panic.
  1055  	time.Sleep(testPriorityInitTimeout * 2)
  1056  }
  1057  
  1058  // When a child is moved from low priority to high.
  1059  //
  1060  // Init a(p0) and b(p1); a(p0) is up, use a; move b to p0, a to p1, use b.
  1061  func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) {
  1062  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1063  	defer cancel()
  1064  
  1065  	cc := testutils.NewBalancerClientConn(t)
  1066  	bb := balancer.Get(Name)
  1067  	pb := bb.Build(cc, balancer.BuildOptions{})
  1068  	defer pb.Close()
  1069  
  1070  	// Two children, with priorities [0, 1], each with one backend.
  1071  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1072  		ResolverState: resolver.State{
  1073  			Endpoints: []resolver.Endpoint{
  1074  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1075  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1076  			},
  1077  		},
  1078  		BalancerConfig: &LBConfig{
  1079  			Children: map[string]*Child{
  1080  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1081  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1082  			},
  1083  			Priorities: []string{"child-0", "child-1"},
  1084  		},
  1085  	}); err != nil {
  1086  		t.Fatalf("failed to update ClientConn state: %v", err)
  1087  	}
  1088  
  1089  	addrs1 := <-cc.NewSubConnAddrsCh
  1090  	if got, want := addrs1[0].Addr, testBackendAddrStrs[0]; got != want {
  1091  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1092  	}
  1093  	sc1 := <-cc.NewSubConnCh
  1094  
  1095  	// p0 is ready.
  1096  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1097  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1098  
  1099  	// Test roundrobin with only p0 subconns.
  1100  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  1101  		t.Fatal(err.Error())
  1102  	}
  1103  
  1104  	// Swap the two children's priorities. The child previously at the lower
  1105  	// priority is now the higher one and should be used; the old SubConn should be closed.
  1106  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1107  		ResolverState: resolver.State{
  1108  			Endpoints: []resolver.Endpoint{
  1109  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1110  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1111  			},
  1112  		},
  1113  		BalancerConfig: &LBConfig{
  1114  			Children: map[string]*Child{
  1115  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1116  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1117  			},
  1118  			Priorities: []string{"child-1", "child-0"},
  1119  		},
  1120  	}); err != nil {
  1121  		t.Fatalf("failed to update ClientConn state: %v", err)
  1122  	}
  1123  
  1124  	// When the new child for p0 is different from the previous child, the
  1125  	// balancer should immediately update the picker so that the old child's
  1126  	// picker is not used. In this case, the picker becomes a
  1127  	// no-subconn-available picker because the new child has just started.
  1128  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
  1129  		t.Fatal(err.Error())
  1130  	}
  1131  
  1132  	// Old subconn should be shut down.
  1133  	scToShutdown := <-cc.ShutdownSubConnCh
  1134  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
  1135  	// are closed. Remove duplicate events.
  1136  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
  1137  	// workaround once pickfirst is the only leaf policy and responsible for
  1138  	// shutting down SubConns.
  1139  	<-cc.ShutdownSubConnCh
  1140  	if scToShutdown != sc1 {
  1141  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scToShutdown)
  1142  	}
  1143  
  1144  	addrs2 := <-cc.NewSubConnAddrsCh
  1145  	if got, want := addrs2[0].Addr, testBackendAddrStrs[1]; got != want {
  1146  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1147  	}
  1148  	sc2 := <-cc.NewSubConnCh
  1149  
  1150  	// New p0 child is ready.
  1151  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1152  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1153  
  1154  	// Test roundrobin with only new subconns.
  1155  	if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil {
  1156  		t.Fatal(err.Error())
  1157  	}
  1158  }
  1159  
  1160  // When a child at a lower priority is in use (because the higher priority is
  1161  // down), move it to a higher priority.
  1162  //
  1163  // Init a(p0) and b(p1); a(p0) is down, use b; move b to p0, a to p1, use b.
  1164  func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) {
  1165  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1166  	defer cancel()
  1167  
  1168  	cc := testutils.NewBalancerClientConn(t)
  1169  	bb := balancer.Get(Name)
  1170  	pb := bb.Build(cc, balancer.BuildOptions{})
  1171  	defer pb.Close()
  1172  
  1173  	// Two children, with priorities [0, 1], each with one backend.
  1174  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1175  		ResolverState: resolver.State{
  1176  			Endpoints: []resolver.Endpoint{
  1177  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1178  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1179  			},
  1180  		},
  1181  		BalancerConfig: &LBConfig{
  1182  			Children: map[string]*Child{
  1183  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1184  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1185  			},
  1186  			Priorities: []string{"child-0", "child-1"},
  1187  		},
  1188  	}); err != nil {
  1189  		t.Fatalf("failed to update ClientConn state: %v", err)
  1190  	}
  1191  
  1192  	addrs0 := <-cc.NewSubConnAddrsCh
  1193  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
  1194  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1195  	}
  1196  	sc0 := <-cc.NewSubConnCh
  1197  
  1198  	// p0 is down.
  1199  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1200  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
  1201  	// will retry.
  1202  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
  1203  		t.Fatal(err.Error())
  1204  	}
  1205  
  1206  	addrs1 := <-cc.NewSubConnAddrsCh
  1207  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
  1208  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1209  	}
  1210  	sc1 := <-cc.NewSubConnCh
  1211  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1212  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1213  
  1214  	// Test roundrobin with only p1 subconns.
  1215  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  1216  		t.Fatal(err.Error())
  1217  	}
  1218  
  1219  	// Swap the two children's priorities. The child previously at the lower
  1220  	// priority is now the higher one and should be used; the old SubConn should be closed.
  1221  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1222  		ResolverState: resolver.State{
  1223  			Endpoints: []resolver.Endpoint{
  1224  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1225  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1226  			},
  1227  		},
  1228  		BalancerConfig: &LBConfig{
  1229  			Children: map[string]*Child{
  1230  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1231  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1232  			},
  1233  			Priorities: []string{"child-1", "child-0"},
  1234  		},
  1235  	}); err != nil {
  1236  		t.Fatalf("failed to update ClientConn state: %v", err)
  1237  	}
  1238  
  1239  	// Old subconn from child-0 should be removed.
  1240  	scToShutdown := <-cc.ShutdownSubConnCh
  1241  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
  1242  	// are closed. Remove duplicate events.
  1243  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
  1244  	// workaround once pickfirst is the only leaf policy and responsible for
  1245  	// shutting down SubConns.
  1246  	<-cc.ShutdownSubConnCh
  1247  	if scToShutdown != sc0 {
  1248  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc0, scToShutdown)
  1249  	}
  1250  
  1251  	// Because this was a ready child moved to a higher priority, no SubConn
  1252  	// should be created or shut down.
  1253  	select {
  1254  	case <-cc.NewSubConnCh:
  1255  		t.Fatalf("got unexpected new SubConn")
  1256  	case <-cc.ShutdownSubConnCh:
  1257  		t.Fatalf("got unexpected shutdown SubConn")
  1258  	case <-time.After(time.Millisecond * 100):
  1259  	}
  1260  }
  1261  
  1262  // When the lowest child is in use and is removed, the higher priority child
  1263  // should be used even though it's not ready.
  1264  //
  1265  // Init 0 and 1; 0 is down, 1 is up, use 1; remove 1, fall back to 0.
  1266  func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) {
  1267  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1268  	defer cancel()
  1269  
  1270  	cc := testutils.NewBalancerClientConn(t)
  1271  	bb := balancer.Get(Name)
  1272  	pb := bb.Build(cc, balancer.BuildOptions{})
  1273  	defer pb.Close()
  1274  
  1275  	// Two children, with priorities [0, 1], each with one backend.
  1276  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1277  		ResolverState: resolver.State{
  1278  			Endpoints: []resolver.Endpoint{
  1279  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1280  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1281  			},
  1282  		},
  1283  		BalancerConfig: &LBConfig{
  1284  			Children: map[string]*Child{
  1285  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1286  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1287  			},
  1288  			Priorities: []string{"child-0", "child-1"},
  1289  		},
  1290  	}); err != nil {
  1291  		t.Fatalf("failed to update ClientConn state: %v", err)
  1292  	}
  1293  
  1294  	addrs0 := <-cc.NewSubConnAddrsCh
  1295  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
  1296  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1297  	}
  1298  	sc0 := <-cc.NewSubConnCh
  1299  
  1300  	// p0 is down.
  1301  	sc0.UpdateState(balancer.SubConnState{
  1302  		ConnectivityState: connectivity.TransientFailure,
  1303  		ConnectionError:   errors.New("test error"),
  1304  	})
  1305  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
  1306  	// will retry.
  1307  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
  1308  		t.Fatal(err.Error())
  1309  	}
  1310  
  1311  	addrs1 := <-cc.NewSubConnAddrsCh
  1312  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
  1313  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1314  	}
  1315  	sc1 := <-cc.NewSubConnCh
  1316  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1317  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1318  
  1319  	// Test roundrobin with only p1 subconns.
  1320  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  1321  		t.Fatal(err.Error())
  1322  	}
  1323  
  1324  	// Remove the child at p1; the child at the higher priority should now be used.
  1325  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1326  		ResolverState: resolver.State{
  1327  			Endpoints: []resolver.Endpoint{
  1328  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1329  			},
  1330  		},
  1331  		BalancerConfig: &LBConfig{
  1332  			Children: map[string]*Child{
  1333  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1334  			},
  1335  			Priorities: []string{"child-0"},
  1336  		},
  1337  	}); err != nil {
  1338  		t.Fatalf("failed to update ClientConn state: %v", err)
  1339  	}
  1340  
  1341  	// Old subconn from child-1 should be shut down.
  1342  	scToShutdown := <-cc.ShutdownSubConnCh
  1343  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
  1344  	// are closed. Remove duplicate events.
  1345  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
  1346  	// workaround once pickfirst is the only leaf policy and responsible for
  1347  	// shutting down SubConns.
  1348  	<-cc.ShutdownSubConnCh
  1349  	if scToShutdown != sc1 {
  1350  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scToShutdown)
  1351  	}
  1352  
  1353  	if err := cc.WaitForErrPicker(ctx); err != nil {
  1354  		t.Fatal(err.Error())
  1355  	}
  1356  	<-cc.NewStateCh // Drain the connectivity state update that accompanies the picker above.
  1357  
  1358  	// Because there was no new child, no new subconn should be created.
  1359  	select {
  1360  	case <-cc.NewSubConnCh:
  1361  		t.Fatalf("got unexpected new SubConn")
  1362  	case <-time.After(time.Millisecond * 100):
  1363  	}
  1364  }
  1365  
  1366  // When a ready child is removed, it's kept in the cache. Re-adding it doesn't create new SubConns.
  1367  //
  1368  // Init 0; 0 is up, use 0; remove 0, only picker is updated, no subconn is
  1369  // removed; re-add 0, picker is updated.
  1370  func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) {
  1371  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1372  	defer cancel()
  1373  
  1374  	const testChildCacheTimeout = time.Second
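        	// Override the sub-balancer cache timeout for this test so that the
        	// child removed below stays in the cache; the deferred call below
        	// restores the original value on exit.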
  1375  	defer func() func() {
  1376  		old := DefaultSubBalancerCloseTimeout
  1377  		DefaultSubBalancerCloseTimeout = testChildCacheTimeout
  1378  		return func() {
  1379  			DefaultSubBalancerCloseTimeout = old
  1380  		}
  1381  	}()()
  1382  
  1383  	cc := testutils.NewBalancerClientConn(t)
  1384  	bb := balancer.Get(Name)
  1385  	pb := bb.Build(cc, balancer.BuildOptions{})
  1386  	defer pb.Close()
  1387  
  1388  	// One child, with priorities [0], with one backend.
  1389  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1390  		ResolverState: resolver.State{
  1391  			Endpoints: []resolver.Endpoint{
  1392  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1393  			},
  1394  		},
  1395  		BalancerConfig: &LBConfig{
  1396  			Children: map[string]*Child{
  1397  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1398  			},
  1399  			Priorities: []string{"child-0"},
  1400  		},
  1401  	}); err != nil {
  1402  		t.Fatalf("failed to update ClientConn state: %v", err)
  1403  	}
  1404  
  1405  	addrs1 := <-cc.NewSubConnAddrsCh
  1406  	if got, want := addrs1[0].Addr, testBackendAddrStrs[0]; got != want {
  1407  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1408  	}
  1409  	sc1 := <-cc.NewSubConnCh
  1410  
  1411  	// p0 is ready.
  1412  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1413  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1414  
  1415  	// Test roundrobin with only p0 subconns.
  1416  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  1417  		t.Fatal(err.Error())
  1418  	}
  1419  
  1420  	// Remove the child. It shouldn't cause any SubConn changes, but the picker
  1421  	// should be updated.
  1422  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1423  		ResolverState:  resolver.State{},
  1424  		BalancerConfig: &LBConfig{},
  1425  	}); err != nil {
  1426  		t.Fatalf("failed to update ClientConn state: %v", err)
  1427  	}
  1428  
  1429  	if err := cc.WaitForPickerWithErr(ctx, ErrAllPrioritiesRemoved); err != nil {
  1430  		t.Fatal(err.Error())
  1431  	}
  1432  
  1433  	// But no SubConn changes should happen. The child balancer is in the cache.
  1434  	select {
  1435  	case sc := <-cc.NewSubConnCh:
  1436  		t.Fatalf("got unexpected new SubConn: %s", sc)
  1437  	case sc := <-cc.ShutdownSubConnCh:
  1438  		t.Fatalf("got unexpected shutdown SubConn: %v", sc)
  1439  	case <-time.After(time.Millisecond * 100):
  1440  	}
  1441  
  1442  	// Re-add the child, shouldn't create new connections.
  1443  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1444  		ResolverState: resolver.State{
  1445  			Endpoints: []resolver.Endpoint{
  1446  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1447  			},
  1448  		},
  1449  		BalancerConfig: &LBConfig{
  1450  			Children: map[string]*Child{
  1451  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1452  			},
  1453  			Priorities: []string{"child-0"},
  1454  		},
  1455  	}); err != nil {
  1456  		t.Fatalf("failed to update ClientConn state: %v", err)
  1457  	}
  1458  
  1459  	// Test roundrobin with only p0 subconns.
  1460  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  1461  		t.Fatal(err.Error())
  1462  	}
  1463  
  1464  	// But no SubConn changes should happen. The child balancer is just taken
  1465  	// out of the cache.
  1466  	select {
  1467  	case sc := <-cc.NewSubConnCh:
  1468  		t.Fatalf("got unexpected new SubConn: %s", sc)
  1469  	case sc := <-cc.ShutdownSubConnCh:
  1470  		t.Fatalf("got unexpected shutdown SubConn: %v", sc)
  1471  	case <-time.After(time.Millisecond * 100):
  1472  	}
  1473  }
  1474  
  1475  // When the policy of a child is changed, the child is rebuilt with the new policy.
  1476  //
  1477  // Init 0; 0 is up, use 0; change 0's policy, 0 is used.
  1478  func (s) TestPriority_ChildPolicyChange(t *testing.T) {
  1479  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1480  	defer cancel()
  1481  
  1482  	cc := testutils.NewBalancerClientConn(t)
  1483  	bb := balancer.Get(Name)
  1484  	pb := bb.Build(cc, balancer.BuildOptions{})
  1485  	defer pb.Close()
  1486  
  1487  	// One child, with priorities [0], with one backend.
  1488  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1489  		ResolverState: resolver.State{
  1490  			Endpoints: []resolver.Endpoint{
  1491  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1492  			},
  1493  		},
  1494  		BalancerConfig: &LBConfig{
  1495  			Children: map[string]*Child{
  1496  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  1497  			},
  1498  			Priorities: []string{"child-0"},
  1499  		},
  1500  	}); err != nil {
  1501  		t.Fatalf("failed to update ClientConn state: %v", err)
  1502  	}
  1503  
  1504  	addrs1 := <-cc.NewSubConnAddrsCh
  1505  	if got, want := addrs1[0].Addr, testBackendAddrStrs[0]; got != want {
  1506  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1507  	}
  1508  	sc1 := <-cc.NewSubConnCh
  1509  
  1510  	// p0 is ready.
  1511  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1512  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1513  
  1514  	// Test roundrobin with only p0 subconns.
  1515  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  1516  		t.Fatal(err.Error())
  1517  	}
  1518  
  1519  	// Change the policy for the child (still roundrobin, but with a different
  1520  	// name).
  1521  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1522  		ResolverState: resolver.State{
  1523  			Endpoints: []resolver.Endpoint{
  1524  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1525  			},
  1526  		},
  1527  		BalancerConfig: &LBConfig{
  1528  			Children: map[string]*Child{
  1529  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: testRRBalancerName}},
  1530  			},
  1531  			Priorities: []string{"child-0"},
  1532  		},
  1533  	}); err != nil {
  1534  		t.Fatalf("failed to update ClientConn state: %v", err)
  1535  	}
  1536  
  1537  	// Old subconn should be shut down.
  1538  	scToShutdown := <-cc.ShutdownSubConnCh
  1539  	// The same SubConn is closed by gracefulswitch and pickfirstleaf when they
  1540  	// are closed. Remove duplicate events.
  1541  	// TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this
  1542  	// workaround once pickfirst is the only leaf policy and responsible for
  1543  	// shutting down SubConns.
  1544  	<-cc.ShutdownSubConnCh
  1545  	if scToShutdown != sc1 {
  1546  		t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scToShutdown)
  1547  	}
  1548  
  1549  	// A new subconn should be created.
  1550  	addrs2 := <-cc.NewSubConnAddrsCh
  1551  	if got, want := addrs2[0].Addr, testBackendAddrStrs[0]; got != want {
  1552  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1553  	}
  1554  	sc2 := <-cc.NewSubConnCh
  1555  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  1556  	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  1557  
  1558  	// Test roundrobin with the new subconn.
  1559  	if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil {
  1560  		t.Fatal(err.Error())
  1561  	}
  1562  }
  1563  
  1564  const inlineUpdateBalancerName = "test-inline-update-balancer"
  1565  
  1566  var errTestInlineStateUpdate = fmt.Errorf("don't like addresses, empty or not")
  1567  
  1568  func init() {
  1569  	stub.Register(inlineUpdateBalancerName, stub.BalancerFuncs{
  1570  		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
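        			// Report READY inline, with a picker that always returns
        			// errTestInlineStateUpdate, before this call returns.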
  1571  			bd.ClientConn.UpdateState(balancer.State{
  1572  				ConnectivityState: connectivity.Ready,
  1573  				Picker:            &testutils.TestConstPicker{Err: errTestInlineStateUpdate},
  1574  			})
  1575  			return nil
  1576  		},
  1577  	})
  1578  }
  1579  
  1580  // When the child policy updates the picker inline while handling a ClientConn
  1581  // state update (e.g., roundrobin handling empty addresses), there could be a
  1582  // deadlock caused by acquiring a mutex that is already locked.
  1583  func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) {
  1584  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1585  	defer cancel()
  1586  
  1587  	cc := testutils.NewBalancerClientConn(t)
  1588  	bb := balancer.Get(Name)
  1589  	pb := bb.Build(cc, balancer.BuildOptions{})
  1590  	defer pb.Close()
  1591  
  1592  	// One child, with priorities [0], with one backend.
  1593  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1594  		ResolverState: resolver.State{
  1595  			Endpoints: []resolver.Endpoint{
  1596  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1597  			},
  1598  		},
  1599  		BalancerConfig: &LBConfig{
  1600  			Children: map[string]*Child{
  1601  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: inlineUpdateBalancerName}},
  1602  			},
  1603  			Priorities: []string{"child-0"},
  1604  		},
  1605  	}); err != nil {
  1606  		t.Fatalf("failed to update ClientConn state: %v", err)
  1607  	}
  1608  
  1609  	if err := cc.WaitForPickerWithErr(ctx, errTestInlineStateUpdate); err != nil {
  1610  		t.Fatal(err.Error())
  1611  	}
  1612  }
  1613  
  1614  // TestPriority_IgnoreReresolutionRequest tests the case where the priority
  1615  // policy has a single child policy. The test verifies that ResolveNow() calls
  1616  // from the child policy are ignored based on the value of the
  1617  // IgnoreReresolutionRequests field in the configuration.
  1618  func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) {
  1619  	// Register a stub balancer to act as the child policy of the priority policy.
  1620  	// Provide an init function to the stub balancer to capture the ClientConn
  1621  	// passed to the child policy.
  1622  	ccCh := testutils.NewChannel()
  1623  	childPolicyName := t.Name()
  1624  	stub.Register(childPolicyName, stub.BalancerFuncs{
  1625  		Init: func(data *stub.BalancerData) {
  1626  			ccCh.Send(data.ClientConn)
  1627  		},
  1628  	})
  1629  
  1630  	cc := testutils.NewBalancerClientConn(t)
  1631  	bb := balancer.Get(Name)
  1632  	pb := bb.Build(cc, balancer.BuildOptions{})
  1633  	defer pb.Close()
  1634  
  1635  	// One child, with priorities [0], with one backend; reresolution is
  1636  	// ignored.
  1637  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1638  		ResolverState: resolver.State{
  1639  			Endpoints: []resolver.Endpoint{
  1640  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1641  			},
  1642  		},
  1643  		BalancerConfig: &LBConfig{
  1644  			Children: map[string]*Child{
  1645  				"child-0": {
  1646  					Config:                     &internalserviceconfig.BalancerConfig{Name: childPolicyName},
  1647  					IgnoreReresolutionRequests: true,
  1648  				},
  1649  			},
  1650  			Priorities: []string{"child-0"},
  1651  		},
  1652  	}); err != nil {
  1653  		t.Fatalf("failed to update ClientConn state: %v", err)
  1654  	}
  1655  
  1656  	// Retrieve the ClientConn passed to the child policy.
  1657  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1658  	defer cancel()
  1659  	val, err := ccCh.Receive(ctx)
  1660  	if err != nil {
  1661  		t.Fatalf("timeout waiting for ClientConn from the child policy")
  1662  	}
  1663  	balancerCC := val.(balancer.ClientConn)
  1664  
  1665  	// Since IgnoreReresolutionRequests was set to true, all ResolveNow() calls
  1666  	// should be ignored.
  1667  	for i := 0; i < 5; i++ {
  1668  		balancerCC.ResolveNow(resolver.ResolveNowOptions{})
  1669  	}
  1670  	select {
  1671  	case <-cc.ResolveNowCh:
  1672  		t.Fatalf("got unexpected ResolveNow() call")
  1673  	case <-time.After(defaultTestShortTimeout):
  1674  	}
  1675  
  1676  	// Send another update to set IgnoreReresolutionRequests to false.
  1677  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1678  		ResolverState: resolver.State{
  1679  			Endpoints: []resolver.Endpoint{
  1680  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1681  			},
  1682  		},
  1683  		BalancerConfig: &LBConfig{
  1684  			Children: map[string]*Child{
  1685  				"child-0": {
  1686  					Config:                     &internalserviceconfig.BalancerConfig{Name: childPolicyName},
  1687  					IgnoreReresolutionRequests: false,
  1688  				},
  1689  			},
  1690  			Priorities: []string{"child-0"},
  1691  		},
  1692  	}); err != nil {
  1693  		t.Fatalf("failed to update ClientConn state: %v", err)
  1694  	}
  1695  
  1696  	// Call ResolveNow() on the CC; it should be forwarded.
  1697  	balancerCC.ResolveNow(resolver.ResolveNowOptions{})
  1698  	select {
  1699  	case <-cc.ResolveNowCh:
  1700  	case <-time.After(time.Second):
  1701  		t.Fatalf("timeout waiting for ResolveNow()")
  1702  	}
  1704  }
  1705  
  1706  // TestPriority_IgnoreReresolutionRequestTwoChildren tests the case where the
  1707  // priority policy has two child policies, one of them has the
  1708  // IgnoreReresolutionRequests field set to true while the other one has it set
  1709  // to false. The test verifies that ResolveNow() calls from the child which is
  1710  // set to ignore reresolution requests are ignored, while calls from the other
  1711  // child are processed.
  1712  func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) {
  1713  	// Register a stub balancer to act as the child policy of the priority policy.
  1714  	// Provide an init function to the stub balancer to capture the ClientConn
  1715  	// passed to the child policy.
  1716  	ccCh := testutils.NewChannel()
  1717  	childPolicyName := t.Name()
  1718  	stub.Register(childPolicyName, stub.BalancerFuncs{
  1719  		Init: func(bd *stub.BalancerData) {
  1720  			ccCh.Send(bd.ClientConn)
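        			// Delegate to a real roundrobin balancer so that SubConns are
        			// created for this child's endpoints.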
  1721  			bd.Data = balancer.Get(roundrobin.Name).Build(bd.ClientConn, bd.BuildOptions)
  1722  		},
  1723  		UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error {
  1724  			bal := bd.Data.(balancer.Balancer)
  1725  			return bal.UpdateClientConnState(ccs)
  1726  		},
  1727  	})
  1728  
  1729  	cc := testutils.NewBalancerClientConn(t)
  1730  	bb := balancer.Get(Name)
  1731  	pb := bb.Build(cc, balancer.BuildOptions{})
  1732  	defer pb.Close()
  1733  
  1734  	// Two children, with priorities [0, 1], each with one backend.
  1735  	// Reresolution is ignored for p0.
  1736  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1737  		ResolverState: resolver.State{
  1738  			Endpoints: []resolver.Endpoint{
  1739  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1740  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1741  			},
  1742  		},
  1743  		BalancerConfig: &LBConfig{
  1744  			Children: map[string]*Child{
  1745  				"child-0": {
  1746  					Config:                     &internalserviceconfig.BalancerConfig{Name: childPolicyName},
  1747  					IgnoreReresolutionRequests: true,
  1748  				},
  1749  				"child-1": {
  1750  					Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName},
  1751  				},
  1752  			},
  1753  			Priorities: []string{"child-0", "child-1"},
  1754  		},
  1755  	}); err != nil {
  1756  		t.Fatalf("failed to update ClientConn state: %v", err)
  1757  	}
  1758  
  1759  	// Retrieve the ClientConn passed to the child policy from p0.
  1760  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1761  	defer cancel()
  1762  	val, err := ccCh.Receive(ctx)
  1763  	if err != nil {
  1764  		t.Fatalf("timeout waiting for ClientConn from the child policy")
  1765  	}
  1766  	balancerCC0 := val.(balancer.ClientConn)
  1767  
  1768  	// Set p0 to transient failure, p1 will be started.
  1769  	addrs0 := <-cc.NewSubConnAddrsCh
  1770  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
  1771  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1772  	}
  1773  	sc0 := <-cc.NewSubConnCh
  1774  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1775  
  1776  	// Retrieve the ClientConn passed to the child policy from p1.
  1777  	val, err = ccCh.Receive(ctx)
  1778  	if err != nil {
  1779  		t.Fatalf("timeout waiting for ClientConn from the child policy")
  1780  	}
  1781  	balancerCC1 := val.(balancer.ClientConn)
  1782  
  1783  	// Since IgnoreReresolutionRequests was set to true for p0, all ResolveNow()
  1784  	// calls from p0 should be ignored.
  1785  	for i := 0; i < 5; i++ {
  1786  		balancerCC0.ResolveNow(resolver.ResolveNowOptions{})
  1787  	}
  1788  	select {
  1789  	case <-cc.ResolveNowCh:
  1790  		t.Fatalf("got unexpected ResolveNow() call")
  1791  	case <-time.After(defaultTestShortTimeout):
  1792  	}
  1793  
  1794  	// But since IgnoreReresolutionRequests was false for p1, ResolveNow() calls
  1795  	// from p1 should be forwarded.
  1796  	balancerCC1.ResolveNow(resolver.ResolveNowOptions{})
  1797  	select {
  1798  	case <-cc.ResolveNowCh:
  1799  	case <-time.After(defaultTestShortTimeout):
  1800  		t.Fatalf("timeout waiting for ResolveNow()")
  1801  	}
  1802  }
  1803  
  1804  const initIdleBalancerName = "test-init-Idle-balancer"
  1805  
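        // Errors returned by the init-Idle stub balancers' pickers. The tests use
        // them to tell which child's Idle picker is currently in use.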
  1806  var errsTestInitIdle = []error{
  1807  	fmt.Errorf("init Idle balancer error 0"),
  1808  	fmt.Errorf("init Idle balancer error 1"),
  1809  }
  1810  
  1811  func init() {
  1812  	for i := 0; i < 2; i++ {
  1813  		ii := i
  1814  		stub.Register(fmt.Sprintf("%s-%d", initIdleBalancerName, ii), stub.BalancerFuncs{
  1815  			UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error {
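        				// Forward every SubConn state change as the balancer state,
        				// with a picker whose error identifies this child once the
        				// SubConn reports Idle.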
  1816  				lis := func(state balancer.SubConnState) {
  1817  					err := fmt.Errorf("wrong picker error")
  1818  					if state.ConnectivityState == connectivity.Idle {
  1819  						err = errsTestInitIdle[ii]
  1820  					}
  1821  					bd.ClientConn.UpdateState(balancer.State{
  1822  						ConnectivityState: state.ConnectivityState,
  1823  						Picker:            &testutils.TestConstPicker{Err: err},
  1824  					})
  1825  				}
  1826  
  1827  				sc, err := bd.ClientConn.NewSubConn(opts.ResolverState.Endpoints[0].Addresses, balancer.NewSubConnOptions{StateListener: lis})
  1828  				if err != nil {
  1829  					return err
  1830  				}
  1831  				sc.Connect()
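        				// Report CONNECTING with a queuing picker until the state
        				// listener above takes over.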
  1832  				bd.ClientConn.UpdateState(balancer.State{
  1833  					ConnectivityState: connectivity.Connecting,
  1834  					Picker:            &testutils.TestConstPicker{Err: balancer.ErrNoSubConnAvailable},
  1835  				})
  1836  				return nil
  1837  			},
  1838  		})
  1839  	}
  1840  }
  1841  
  1842  // If a high priority reports Idle with its initial picker, that picker should
  1843  // be used for picks, because policies like ringhash start in Idle and don't
  1844  // connect on their own.
  1845  //
  1846  // Init 0, 1; 0 is Idle, use 0; 0 is down, start 1; 1 is Idle, use 1.
  1847  func (s) TestPriority_HighPriorityInitIdle(t *testing.T) {
  1848  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1849  	defer cancel()
  1850  
  1851  	cc := testutils.NewBalancerClientConn(t)
  1852  	bb := balancer.Get(Name)
  1853  	pb := bb.Build(cc, balancer.BuildOptions{})
  1854  	defer pb.Close()
  1855  
  1856  	// Two children, with priorities [0, 1], each with one backend.
  1857  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1858  		ResolverState: resolver.State{
  1859  			Endpoints: []resolver.Endpoint{
  1860  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1861  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1862  			},
  1863  		},
  1864  		BalancerConfig: &LBConfig{
  1865  			Children: map[string]*Child{
  1866  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}},
  1867  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 1)}},
  1868  			},
  1869  			Priorities: []string{"child-0", "child-1"},
  1870  		},
  1871  	}); err != nil {
  1872  		t.Fatalf("failed to update ClientConn state: %v", err)
  1873  	}
  1874  
  1875  	addrs0 := <-cc.NewSubConnAddrsCh
  1876  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
  1877  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1878  	}
  1879  	sc0 := <-cc.NewSubConnCh
  1880  
  1881  	// Send an Idle state update to trigger an Idle picker update.
  1882  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
  1883  	if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[0]); err != nil {
  1884  		t.Fatal(err.Error())
  1885  	}
  1886  
  1887  	// Turn p0 down, to start p1.
  1888  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  1889  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
  1890  	// will retry.
  1891  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
  1892  		t.Fatal(err.Error())
  1893  	}
  1894  
  1895  	addrs1 := <-cc.NewSubConnAddrsCh
  1896  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
  1897  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1898  	}
  1899  	sc1 := <-cc.NewSubConnCh
  1900  	// Idle picker from p1 should also be forwarded.
  1901  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
  1902  	if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[1]); err != nil {
  1903  		t.Fatal(err.Error())
  1904  	}
  1905  }
  1906  
  1907  // If a high priority reports Idle with its initial picker, that picker should
  1908  // be used for picks, because policies like ringhash start in Idle and don't
  1909  // connect on their own. In this case, if a lower priority is added, the policy
  1910  // shouldn't switch to the lower priority.
  1911  //
  1912  // Init 0; 0 is Idle, use 0; add 1, use 0.
  1913  func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) {
  1914  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1915  	defer cancel()
  1916  
  1917  	cc := testutils.NewBalancerClientConn(t)
  1918  	bb := balancer.Get(Name)
  1919  	pb := bb.Build(cc, balancer.BuildOptions{})
  1920  	defer pb.Close()
  1921  
  1922  	// One child, with priorities [0], one backend.
  1923  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1924  		ResolverState: resolver.State{
  1925  			Endpoints: []resolver.Endpoint{
  1926  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1927  			},
  1928  		},
  1929  		BalancerConfig: &LBConfig{
  1930  			Children: map[string]*Child{
  1931  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}},
  1932  			},
  1933  			Priorities: []string{"child-0"},
  1934  		},
  1935  	}); err != nil {
  1936  		t.Fatalf("failed to update ClientConn state: %v", err)
  1937  	}
  1938  
  1939  	addrs0 := <-cc.NewSubConnAddrsCh
  1940  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
  1941  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  1942  	}
  1943  	sc0 := <-cc.NewSubConnCh
  1944  
  1945  	// Send an Idle state update to trigger an Idle picker update.
  1946  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
  1947  	if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[0]); err != nil {
  1948  		t.Fatal(err.Error())
  1949  	}
  1950  
  1951  	// Add 1, should keep using 0.
  1952  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  1953  		ResolverState: resolver.State{
  1954  			Endpoints: []resolver.Endpoint{
  1955  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  1956  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  1957  			},
  1958  		},
  1959  		BalancerConfig: &LBConfig{
  1960  			Children: map[string]*Child{
  1961  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}},
  1962  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 1)}},
  1963  			},
  1964  			Priorities: []string{"child-0", "child-1"},
  1965  		},
  1966  	}); err != nil {
  1967  		t.Fatalf("failed to update ClientConn state: %v", err)
  1968  	}
  1969  
  1970  	// The ClientConn state update triggers a priority switch, from p0 -> p0
  1971  	// (since p0 is still in use). Along with this update, p0 also gets a
  1972  	// ClientConn state update, with the addresses, which didn't change in this
  1973  	// test (this update to the child is necessary in case the addresses are
  1974  	// different).
  1975  	//
  1976  	// The test child policy, initIdleBalancer, blindly calls NewSubConn with
  1977  	// all the addresses it receives, so this will trigger a NewSubConn with the
  1978  	// old p0 addresses. (Note that in a real balancer, like roundrobin, no new
  1979  	// SubConn will be created because the addresses didn't change).
  1980  	//
  1981  	// The check below makes sure that the addresses are still from p0, and not
  1982  	// from p1. This is good enough for the purpose of this test.
  1983  	addrsNew := <-cc.NewSubConnAddrsCh
  1984  	if got, want := addrsNew[0].Addr, testBackendAddrStrs[0]; got != want {
  1985  		// Fail if p1 is started and creates a SubConn.
  1986  		t.Fatalf("got unexpected call to NewSubConn with addr: %v, want %v", addrsNew, want)
  1987  	}
  1988  }
  1989  
  1990  // Lower priority is used when higher priority is not ready; higher priority
  1991  // still gets updates.
  1992  //
  1993  // Init 0 and 1; 0 is down, 1 is up, use 1; update 0; 0 is up, use 0
  1994  func (s) TestPriority_HighPriorityUpdatesWhenLowInUse(t *testing.T) {
  1995  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1996  	defer cancel()
  1997  
  1998  	cc := testutils.NewBalancerClientConn(t)
  1999  	bb := balancer.Get(Name)
  2000  	pb := bb.Build(cc, balancer.BuildOptions{})
  2001  	defer pb.Close()
  2002  
  2003  	t.Log("Two localities, with priorities [0, 1], each with one backend.")
  2004  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  2005  		ResolverState: resolver.State{
  2006  			Endpoints: []resolver.Endpoint{
  2007  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}),
  2008  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}),
  2009  			},
  2010  		},
  2011  		BalancerConfig: &LBConfig{
  2012  			Children: map[string]*Child{
  2013  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  2014  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  2015  			},
  2016  			Priorities: []string{"child-0", "child-1"},
  2017  		},
  2018  	}); err != nil {
  2019  		t.Fatalf("failed to update ClientConn state: %v", err)
  2020  	}
  2021  
  2022  	addrs0 := <-cc.NewSubConnAddrsCh
  2023  	if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want {
  2024  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  2025  	}
  2026  	sc0 := <-cc.NewSubConnCh
  2027  
  2028  	t.Log("Make p0 fail.")
  2029  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  2030  	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
  2031  
  2032  	// Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs
  2033  	// will retry.
  2034  	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
  2035  		t.Fatal(err.Error())
  2036  	}
  2037  
  2038  	t.Log("Make p1 ready.")
  2039  	addrs1 := <-cc.NewSubConnAddrsCh
  2040  	if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want {
  2041  		t.Fatalf("sc is created with addr %v, want %v", got, want)
  2042  	}
  2043  	sc1 := <-cc.NewSubConnCh
  2044  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  2045  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  2046  
  2047  	// Test pick with 1.
  2048  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  2049  		t.Fatal(err.Error())
  2050  	}
  2051  
  2052  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  2053  	// Does not change the aggregate state, because round robin does not leave
  2054  	// TRANSIENT_FAILURE if a subconn goes CONNECTING.
  2055  	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  2056  
  2057  	if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil {
  2058  		t.Fatal(err.Error())
  2059  	}
  2060  
  2061  	t.Log("Change p0 to use new address.")
  2062  	if err := pb.UpdateClientConnState(balancer.ClientConnState{
  2063  		ResolverState: resolver.State{
  2064  			Endpoints: []resolver.Endpoint{
  2065  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-0"}),
  2066  				hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[3]}}}, []string{"child-1"}),
  2067  			},
  2068  		},
  2069  		BalancerConfig: &LBConfig{
  2070  			Children: map[string]*Child{
  2071  				"child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  2072  				"child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}},
  2073  			},
  2074  			Priorities: []string{"child-0", "child-1"},
  2075  		},
  2076  	}); err != nil {
  2077  		t.Fatalf("failed to update ClientConn state: %v", err)
  2078  	}
  2079  
  2080  	// Two new subconns are created by the previous update; one by p0 and one
  2081  	// by p1. The creations don't happen concurrently, but they could come in
  2082  	// either order.
  2083  	t.Log("Make p0 and p1 both ready; p0 should be used.")
  2084  	var sc2, sc3 balancer.SubConn
  2085  	for i := 0; i < 2; i++ {
  2086  		addr := <-cc.NewSubConnAddrsCh
  2087  		sc := <-cc.NewSubConnCh
  2088  		switch addr[0].Addr {
  2089  		case testBackendAddrStrs[2]:
  2090  			sc2 = sc
  2091  		case testBackendAddrStrs[3]:
  2092  			sc3 = sc
  2093  		default:
  2094  			t.Fatalf("sc is created with addr %v, want %v or %v", addr[0].Addr, testBackendAddrStrs[2], testBackendAddrStrs[3])
  2095  		}
  2096  		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
  2097  		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
  2098  	}
  2099  	if sc2 == nil {
  2100  		t.Fatalf("sc not created with addr %v", testBackendAddrStrs[2])
  2101  	}
  2102  	if sc3 == nil {
  2103  		t.Fatalf("sc not created with addr %v", testBackendAddrStrs[3])
  2104  	}
  2105  
  2106  	// Test pick with 0.
  2107  	if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil {
  2108  		t.Fatal(err.Error())
  2109  	}
  2110  }