// google.golang.org/grpc@v1.62.1/internal/balancergroup/balancergroup_test.go

/*
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package balancergroup

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/roundrobin"
	"google.golang.org/grpc/balancer/weightedtarget/weightedaggregator"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal/balancer/stub"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/resolver"
)

const (
	defaultTestTimeout      = 5 * time.Second
	defaultTestShortTimeout = 10 * time.Millisecond
)

var (
	rrBuilder        = balancer.Get(roundrobin.Name)
	testBalancerIDs  = []string{"b1", "b2", "b3"}
	testBackendAddrs []resolver.Address
)

const testBackendAddrsCount = 12

func init() {
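	// Populate testBackendAddrs with synthetic addresses of the form i.i.i.i:i;
	// the tests only compare these values and never dial them.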
	for i := 0; i < testBackendAddrsCount; i++ {
		testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)})
	}
}

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

// Create a new balancer group, add balancers and backends, but do not start it.
// - b1, weight 2, backends [0,1]
// - b2, weight 1, backends [2,3]
// Start the balancer group and check its behavior.
//
// Close the balancer group; then add/remove balancers, change weights, and change addresses.
// - b2, weight 3, backends [0,3]
// - b3, weight 1, backends [1,2]
// Start the balancer group again and check its behavior.
func (s) TestBalancerGroup_start_close(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(Options{
		CC:                      cc,
		BuildOpts:               balancer.BuildOptions{},
		StateAggregator:         gator,
		Logger:                  nil,
		SubBalancerCloseTimeout: time.Duration(0),
	})

	// Add two balancers to the group and send two resolved addresses to both
	// balancers.
	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], rrBuilder)
	bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)
	bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()

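	// Each sub-balancer creates one SubConn per address; drive all four to
	// READY so the aggregated picker sees every backend.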
	m1 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[0]],
		m1[testBackendAddrs[1]], m1[testBackendAddrs[1]],
		m1[testBackendAddrs[2]], m1[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	gator.Stop()
	bg.Close()
	for i := 0; i < 4; i++ {
		(<-cc.ShutdownSubConnCh).UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
	}

	// Add b3, weight 1, backends [1,2].
	gator.Add(testBalancerIDs[2], 1)
	bg.Add(testBalancerIDs[2], rrBuilder)
	bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}})

	// Remove b1.
	gator.Remove(testBalancerIDs[0])
	bg.Remove(testBalancerIDs[0])

	// Update b2 to weight 3, backends [0,3].
	gator.UpdateWeight(testBalancerIDs[1], 3)
	bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])}})

	gator.Start()
	bg.Start()

	m2 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m2[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p2 := <-cc.NewPickerCh
	want = []balancer.SubConn{
		m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]],
		m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]],
		m2[testBackendAddrs[1]], m2[testBackendAddrs[2]],
	}
	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}
}

// Test that balancer group start() doesn't deadlock if the balancer calls back
// into the balancer group inline when it gets an update.
//
// The potential deadlock can happen if we
//   - hold a lock and send updates to the balancer (e.g. update resolved addresses)
//   - the balancer calls back (NewSubConn or update picker) inline
//
// The callback will try to hold the same lock again, which will cause a
// deadlock.
//
// This test starts the balancer group with a test balancer that updates the
// picker whenever it gets an address update. It's expected that start() doesn't
// block because of a deadlock.
func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) {
	const balancerName = "stub-TestBalancerGroup_start_close_deadlock"
	stub.Register(balancerName, stub.BalancerFuncs{})
	builder := balancer.Get(balancerName)

	cc := testutils.NewBalancerClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(Options{
		CC:                      cc,
		BuildOpts:               balancer.BuildOptions{},
		StateAggregator:         gator,
		Logger:                  nil,
		SubBalancerCloseTimeout: time.Duration(0),
	})

	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], builder)
	bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], builder)
	bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

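	// Start() must return without blocking; a deadlock between the group and
	// the child balancer would hang the test here.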
	bg.Start()
}

// initBalancerGroupForCachingTest creates a balancer group and initializes it
// to be ready for caching tests.
//
// Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer
// is removed later, so the returned balancer group has one sub-balancer in its
// own map, and one sub-balancer in the cache.
func initBalancerGroupForCachingTest(t *testing.T, idleCacheTimeout time.Duration) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.BalancerClientConn, map[resolver.Address]*testutils.TestSubConn) {
	cc := testutils.NewBalancerClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(Options{
		CC:                      cc,
		BuildOpts:               balancer.BuildOptions{},
		StateAggregator:         gator,
		Logger:                  nil,
		SubBalancerCloseTimeout: idleCacheTimeout,
	})

	// Add two balancers to the group and send two resolved addresses to both
	// balancers.
	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], rrBuilder)
	bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)
	bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()

	m1 := make(map[resolver.Address]*testutils.TestSubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m1[addrs[0]] = sc
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[0]],
		m1[testBackendAddrs[1]], m1[testBackendAddrs[1]],
		m1[testBackendAddrs[2]], m1[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	gator.Remove(testBalancerIDs[1])
	bg.Remove(testBalancerIDs[1])
	// Don't wait for SubConns to be removed after close, because they are only
	// removed after the close timeout.
	for i := 0; i < 10; i++ {
		select {
		case sc := <-cc.ShutdownSubConnCh:
			t.Fatalf("Got request to shut down subconn %v, want no shut down subconn (because subconns were still in cache)", sc)
		default:
		}
		time.Sleep(time.Millisecond)
	}
	// Test roundrobin on the picker with only sub-balancer0.
	p2 := <-cc.NewPickerCh
	want = []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[1]],
	}
	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	return gator, bg, cc, m1
}

// Test that if a sub-balancer is removed, and re-added within close timeout,
// the subConns won't be re-created.
func (s) TestBalancerGroup_locality_caching(t *testing.T) {
	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)

	// Turn down subconn for addr2, shouldn't get picker update because
	// sub-balancer1 was removed.
	addrToSC[testBackendAddrs[2]].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
	for i := 0; i < 10; i++ {
		select {
		case <-cc.NewPickerCh:
			t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)")
		default:
		}
		time.Sleep(defaultTestShortTimeout)
	}

	// Re-add sub-balancer-1. Because its subconns were in the cache, no new
	// subconns should be created. But a new picker will still be generated, with
	// subconn states up to date.
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)

	p3 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]],
		addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]],
		// addr2 is down, b2 only has addr3 in READY state.
		addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	for i := 0; i < 10; i++ {
		select {
		case <-cc.NewSubConnAddrsCh:
			t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)")
		default:
		}
		time.Sleep(defaultTestShortTimeout)
	}
}

// Sub-balancers are put in the cache when they are shut down. If the balancer
// group is closed within the close timeout, all subconns should still be
// removed immediately.
func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {
	_, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)

	bg.Close()
	// The balancer group is closed. The subconns should be shut down immediately.
	shutdownTimeout := time.After(time.Millisecond * 500)
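	// Each of the four SubConns, including the two owned by the cached
	// sub-balancer, is expected to be shut down exactly once.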
	scToShutdown := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[0]]: 1,
		addrToSC[testBackendAddrs[1]]: 1,
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToShutdown); i++ {
		select {
		case sc := <-cc.ShutdownSubConnCh:
			c := scToShutdown[sc]
			if c == 0 {
				t.Fatalf("Got Shutdown for %v when there's %d shutdown expected", sc, c)
			}
			scToShutdown[sc] = c - 1
		case <-shutdownTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be shut down")
		}
	}
}

// Sub-balancers in cache will be closed if not re-added within timeout, and
// subConns will be shut down.
func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) {
	_, _, cc, addrToSC := initBalancerGroupForCachingTest(t, time.Second)

	// The sub-balancer is not re-added within timeout. The subconns should be
	// shut down.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
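	// Only the SubConns owned by the removed (cached) sub-balancer, i.e. those
	// for addresses 2 and 3, should be shut down.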
	scToShutdown := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToShutdown); i++ {
		select {
		case sc := <-cc.ShutdownSubConnCh:
			c := scToShutdown[sc]
			if c == 0 {
				t.Fatalf("Got Shutdown for %v when there's %d shutdown expected", sc, c)
			}
			scToShutdown[sc] = c - 1
		case <-ctx.Done():
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be shut down")
		}
	}
}

// Wrap the rr builder, so it behaves the same, but has a different name.
type noopBalancerBuilderWrapper struct {
	balancer.Builder
}

func init() {
	balancer.Register(&noopBalancerBuilderWrapper{Builder: rrBuilder})
}

func (*noopBalancerBuilderWrapper) Name() string {
	return "noopBalancerBuilderWrapper"
}

// After removing a sub-balancer, re-add it with the same ID but a different
// balancer builder. Old subconns should be shut down, and new subconns should
// be created.
func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) {
	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t, defaultTestTimeout)

	// Re-add sub-balancer-1, but with a different balancer builder. The
	// sub-balancer is still in the cache, but can't be reused. This should cause
	// the old sub-balancer's subconns to be shut down immediately, and new
	// subconns to be created.
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], &noopBalancerBuilderWrapper{rrBuilder})

	// The cached sub-balancer should be closed, and the subconns should be
	// shut down immediately.
	shutdownTimeout := time.After(time.Millisecond * 500)
	scToShutdown := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToShutdown); i++ {
		select {
		case sc := <-cc.ShutdownSubConnCh:
			c := scToShutdown[sc]
			if c == 0 {
				t.Fatalf("Got Shutdown for %v when there's %d shutdown expected", sc, c)
			}
			scToShutdown[sc] = c - 1
		case <-shutdownTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be shut down")
		}
	}

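	// Send a new address list to the re-added sub-balancer. Since it was built
	// from a different builder, it should create brand-new SubConns for these
	// addresses.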
	bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}})

	newSCTimeout := time.After(time.Millisecond * 500)
	scToAdd := map[resolver.Address]int{
		testBackendAddrs[4]: 1,
		testBackendAddrs[5]: 1,
	}
	for i := 0; i < len(scToAdd); i++ {
		select {
		case addr := <-cc.NewSubConnAddrsCh:
			c := scToAdd[addr[0]]
			if c == 0 {
				t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c)
			}
			scToAdd[addr[0]] = c - 1
			sc := <-cc.NewSubConnCh
			addrToSC[addr[0]] = sc
			sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
			sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
		case <-newSCTimeout:
			t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be created")
		}
	}

	// Test roundrobin on the new picker.
	p3 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]],
		addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]],
		addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]],
	}
	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}
}

// After removing a sub-balancer, it will be kept in cache. Make sure that this
// sub-balancer's Close is called when the balancer group is closed.
func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) {
	const balancerName = "stub-TestBalancerGroup_check_close"
	closed := make(chan struct{})
	stub.Register(balancerName, stub.BalancerFuncs{Close: func(_ *stub.BalancerData) {
		close(closed)
	}})
	builder := balancer.Get(balancerName)

	gator, bg, _, _ := initBalancerGroupForCachingTest(t, time.Second)

	// Add a balancer, and then remove it.
	gator.Add(testBalancerIDs[2], 1)
	bg.Add(testBalancerIDs[2], builder)
	gator.Remove(testBalancerIDs[2])
	bg.Remove(testBalancerIDs[2])

	// Immediately close balancergroup, before the cache timeout.
	bg.Close()

	// Make sure the removed child balancer is closed eventually.
	select {
	case <-closed:
	case <-time.After(time.Second * 2):
		t.Fatalf("timeout waiting for the child balancer in cache to be closed")
	}
}

// TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed
// to the balancergroup at creation time is passed to child policies.
func (s) TestBalancerGroupBuildOptions(t *testing.T) {
	const (
		balancerName = "stubBalancer-TestBalancerGroupBuildOptions"
		userAgent    = "ua"
	)

	// Set up the stub balancer such that we can read the build options passed to
	// it in the UpdateClientConnState method.
	bOpts := balancer.BuildOptions{
		DialCreds:        insecure.NewCredentials(),
		ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, 1234, nil),
		CustomUserAgent:  userAgent,
	}
	stub.Register(balancerName, stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
			if !cmp.Equal(bd.BuildOptions, bOpts) {
				return fmt.Errorf("buildOptions in child balancer: %v, want %v", bd, bOpts)
			}
			return nil
		},
	})
	cc := testutils.NewBalancerClientConn(t)
	bg := New(Options{
		CC:              cc,
		BuildOpts:       bOpts,
		StateAggregator: nil,
		Logger:          nil,
	})
	bg.Start()

	// Add the stub balancer built above as a child policy.
	balancerBuilder := balancer.Get(balancerName)
	bg.Add(testBalancerIDs[0], balancerBuilder)

	// Send an empty clientConn state change. This should trigger the
	// verification of the buildOptions being passed to the child policy.
	if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestBalancerExitIdleOne(t *testing.T) {
	const balancerName = "stub-balancer-test-balancergroup-exit-idle-one"
	exitIdleCh := make(chan struct{}, 1)
	stub.Register(balancerName, stub.BalancerFuncs{
		ExitIdle: func(*stub.BalancerData) {
			exitIdleCh <- struct{}{}
		},
	})
	cc := testutils.NewBalancerClientConn(t)
	bg := New(Options{
		CC:              cc,
		BuildOpts:       balancer.BuildOptions{},
		StateAggregator: nil,
		Logger:          nil,
	})
	bg.Start()
	defer bg.Close()

	// Add the stub balancer built above as a child policy.
	builder := balancer.Get(balancerName)
	bg.Add(testBalancerIDs[0], builder)

	// Call ExitIdle on the child policy.
	bg.ExitIdleOne(testBalancerIDs[0])
	select {
	case <-time.After(time.Second):
		t.Fatal("Timeout when waiting for ExitIdle to be invoked on child policy")
	case <-exitIdleCh:
	}
}

// TestBalancerGracefulSwitch tests the graceful switch functionality for a
// child of the balancer group. At first, the child is configured as a round
// robin load balancer, and thus should behave accordingly. The test then
// gracefully switches this child to a custom type which only creates a SubConn
// for the second address passed in, and only picks that created SubConn.
// The new aggregated picker should reflect this change for the child.
func (s) TestBalancerGracefulSwitch(t *testing.T) {
	cc := testutils.NewBalancerClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(Options{
		CC:              cc,
		BuildOpts:       balancer.BuildOptions{},
		StateAggregator: gator,
		Logger:          nil,
	})
	gator.Add(testBalancerIDs[0], 1)
	bg.Add(testBalancerIDs[0], rrBuilder)
	bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})

	bg.Start()

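	// Connect the two round robin SubConns so that the first picker round
	// robins across both backends.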
	m1 := make(map[resolver.Address]balancer.SubConn)
	scs := make(map[balancer.SubConn]bool)
	for i := 0; i < 2; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m1[addrs[0]] = sc
		scs[sc] = true
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[1]],
	}
	if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil {
		t.Fatal(err)
	}

	// The balancer type for testBalancerIDs[0] is currently round robin. Now,
	// change it to a balancer with logically different behavior (it creates a
	// SubConn only for the second address in the address list and always picks
	// that SubConn), and see if the downstream behavior reflects that change.
	childPolicyName := t.Name()
	stub.Register(childPolicyName, stub.BalancerFuncs{
		Init: func(bd *stub.BalancerData) {
			bd.Data = balancer.Get(grpc.PickFirstBalancerName).Build(bd.ClientConn, bd.BuildOptions)
		},
		UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error {
			ccs.ResolverState.Addresses = ccs.ResolverState.Addresses[1:]
			bal := bd.Data.(balancer.Balancer)
			return bal.UpdateClientConnState(ccs)
		},
	})
	builder := balancer.Get(childPolicyName)
	bg.UpdateBuilder(testBalancerIDs[0], builder)
	if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}); err != nil {
		t.Fatalf("error updating ClientConn state: %v", err)
	}

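	// The graceful switch target (the wrapped pick first policy) should create
	// exactly one SubConn, for the second address of the new list.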
	addrs := <-cc.NewSubConnAddrsCh
	if addrs[0].Addr != testBackendAddrs[3].Addr {
		// Verifies that the update was forwarded to the newly created balancer,
		// as the wrapped pick first balancer drops the first address.
		t.Fatalf("newSubConn called with wrong address, want: %v, got: %v", testBackendAddrs[3].Addr, addrs[0].Addr)
	}
	sc := <-cc.NewSubConnCh

	// Update the pick first balancer's SubConn to CONNECTING. This will cause
	// the pick first balancer to call UpdateState() with CONNECTING, which
	// shouldn't send a Picker update back, as the Graceful Switch process is not
	// complete.
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer cancel()
	select {
	case <-cc.NewPickerCh:
		t.Fatalf("No new picker should have been sent due to the Graceful Switch process not completing")
	case <-ctx.Done():
	}

	// Update the pick first balancer's SubConn to READY. This will cause
	// the pick first balancer to call UpdateState() with READY, which should
	// send a Picker update back, as the Graceful Switch process is complete. This
	// Picker should always pick the pick first balancer's created SubConn, which
	// corresponds to address 3.
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	p2 := <-cc.NewPickerCh
	pr, err := p2.Pick(balancer.PickInfo{})
	if err != nil {
		t.Fatalf("error picking: %v", err)
	}
	if pr.SubConn != sc {
		t.Fatalf("picker.Pick(), want %v, got %v", sc, pr.SubConn)
	}

	// Completion of the Graceful Switch process for the child should cause the
	// SubConns of the balancer being switched away from to be shut down.
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	for i := 0; i < 2; i++ {
		select {
		case <-ctx.Done():
			t.Fatalf("error waiting for Shutdown()")
		case sc := <-cc.ShutdownSubConnCh:
			// The SubConn shut down should have been one of the two created
			// SubConns, and both should be deleted.
			if ok := scs[sc]; ok {
				delete(scs, sc)
				continue
			} else {
				t.Fatalf("Shutdown called for wrong SubConn %v, want in %v", sc, scs)
			}
		}
	}
}