gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/internal/balancergroup/balancergroup_test.go

/*
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package balancergroup

import (
	"fmt"
	"testing"
	"time"

	"gitee.com/ks-custle/core-gm/grpc/balancer"
	"gitee.com/ks-custle/core-gm/grpc/balancer/roundrobin"
	"gitee.com/ks-custle/core-gm/grpc/balancer/weightedtarget/weightedaggregator"
	"gitee.com/ks-custle/core-gm/grpc/connectivity"
	"gitee.com/ks-custle/core-gm/grpc/credentials/insecure"
	"gitee.com/ks-custle/core-gm/grpc/internal/balancer/stub"
	"gitee.com/ks-custle/core-gm/grpc/internal/grpctest"
	"gitee.com/ks-custle/core-gm/grpc/internal/testutils"
	"gitee.com/ks-custle/core-gm/grpc/resolver"
	"github.com/google/go-cmp/cmp"
)

var (
	rrBuilder        = balancer.Get(roundrobin.Name)
	testBalancerIDs  = []string{"b1", "b2", "b3"}
	testBackendAddrs []resolver.Address
)

const testBackendAddrsCount = 12

func init() {
	for i := 0; i < testBackendAddrsCount; i++ {
		testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)})
	}

	// Disable caching for all tests. It will be re-enabled in caching-specific
	// tests.
	DefaultSubBalancerCloseTimeout = time.Millisecond
}

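// s implements grpctest.Tester; test methods defined on s are discovered and
// run as subtests by grpctest.RunSubTests below.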
type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

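// subConnFromPicker returns a function that, on each call, performs a pick on
// p and returns the picked SubConn. It is used to feed testutils.IsRoundRobin.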
func subConnFromPicker(p balancer.Picker) func() balancer.SubConn {
	return func() balancer.SubConn {
		scst, _ := p.Pick(balancer.PickInfo{})
		return scst.SubConn
	}
}

// Create a new balancer group, add balancers and backends, but do not start it.
// - b1, weight 2, backends [0,1]
// - b2, weight 1, backends [2,3]
// Start the balancer group and check the behavior.
//
// Close the balancer group, then add/remove balancers, change weights and
// change addresses.
// - b2, weight 3, backends [0,3]
// - b3, weight 1, backends [1,2]
// Start the balancer group again and check the behavior.
func (s) TestBalancerGroup_start_close(t *testing.T) {
	cc := testutils.NewTestClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(cc, balancer.BuildOptions{}, gator, nil)

	// Add two balancers to group and send two resolved addresses to both
	// balancers.
	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()

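	// Collect the SubConns created for the four backends and drive each one to
	// READY so that the aggregated picker includes all of them.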
	m1 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m1[addrs[0]] = sc
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[0]],
		m1[testBackendAddrs[1]], m1[testBackendAddrs[1]],
		m1[testBackendAddrs[2]], m1[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	gator.Stop()
	bg.Close()
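	// Closing the group removes all SubConns; acknowledge each removal with a
	// SHUTDOWN state update.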
	for i := 0; i < 4; i++ {
		bg.UpdateSubConnState(<-cc.RemoveSubConnCh, balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
	}

	// Add b3, weight 1, backends [1,2].
	gator.Add(testBalancerIDs[2], 1)
	bg.Add(testBalancerIDs[2], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}})

	// Remove b1.
	gator.Remove(testBalancerIDs[0])
	bg.Remove(testBalancerIDs[0])

	// Update b2 to weight 3, backends [0,3].
	gator.UpdateWeight(testBalancerIDs[1], 3)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])}})

	gator.Start()
	bg.Start()

	m2 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m2[addrs[0]] = sc
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p2 := <-cc.NewPickerCh
	want = []balancer.SubConn{
		m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]],
		m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]],
		m2[testBackendAddrs[1]], m2[testBackendAddrs[2]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}
}

// Test that balancer group start() doesn't deadlock if the balancer calls back
// into the balancer group inline when it gets an update.
//
// The potential deadlock can happen if we
//   - hold a lock and send updates to the balancer (e.g. update resolved addresses)
//   - the balancer calls back (NewSubConn or update picker) inline
//
// The callback will try to hold the same lock again, which will cause a
// deadlock.
//
// This test starts the balancer group with a test balancer that updates the
// picker whenever it gets an address update. start() is expected not to block
// due to a deadlock.
func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) {
	const balancerName = "stub-TestBalancerGroup_start_close_deadlock"
	stub.Register(balancerName, stub.BalancerFuncs{})
	builder := balancer.Get(balancerName)

	cc := testutils.NewTestClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(cc, balancer.BuildOptions{}, gator, nil)

	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], builder)
	_ = bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], builder)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()
}

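// replaceDefaultSubBalancerCloseTimeout overrides the sub-balancer cache
// timeout and returns a function that restores the previous value.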
func replaceDefaultSubBalancerCloseTimeout(n time.Duration) func() {
	old := DefaultSubBalancerCloseTimeout
	DefaultSubBalancerCloseTimeout = n
	return func() { DefaultSubBalancerCloseTimeout = old }
}

// initBalancerGroupForCachingTest creates a balancer group and initializes it
// so that it is ready for caching tests.
//
// Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer
// is then removed, so the returned balancer group has one sub-balancer in its
// own map and one sub-balancer in the cache.
func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.TestClientConn, map[resolver.Address]balancer.SubConn) {
	cc := testutils.NewTestClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(cc, balancer.BuildOptions{}, gator, nil)

	// Add two balancers to group and send two resolved addresses to both
	// balancers.
	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()

	m1 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m1[addrs[0]] = sc
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[0]],
		m1[testBackendAddrs[1]], m1[testBackendAddrs[1]],
		m1[testBackendAddrs[2]], m1[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

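	// Remove sub-balancer-1. With caching enabled, its SubConns are kept around
	// until the close timeout expires.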
	gator.Remove(testBalancerIDs[1])
	bg.Remove(testBalancerIDs[1])
	gator.BuildAndUpdate()
	// Don't wait for SubConns to be removed after close, because they are only
	// removed after the close timeout.
	for i := 0; i < 10; i++ {
		select {
		case <-cc.RemoveSubConnCh:
			t.Fatalf("Got request to remove subconn, want no remove subconn (because subconns were still in cache)")
		default:
		}
		time.Sleep(time.Millisecond)
	}
	// Test roundrobin on the picker with only sub-balancer0.
	p2 := <-cc.NewPickerCh
	want = []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[1]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	return gator, bg, cc, m1
}

// Test that if a sub-balancer is removed and re-added within the close
// timeout, its subConns won't be re-created.
func (s) TestBalancerGroup_locality_caching(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)

	// Turn down the subconn for addr2. We shouldn't get a picker update because
	// sub-balancer1 was removed.
	bg.UpdateSubConnState(addrToSC[testBackendAddrs[2]], balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
	for i := 0; i < 10; i++ {
		select {
		case <-cc.NewPickerCh:
			t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)")
		default:
		}
		time.Sleep(time.Millisecond)
	}

	// Sleep, but for less than the close timeout.
	time.Sleep(time.Millisecond * 100)

	// Re-add sub-balancer-1. Because the subconns were in the cache, no new
	// subconns should be created, but a new picker will still be generated,
	// with up-to-date subconn states.
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)

	p3 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]],
		addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]],
		// addr2 is down, b2 only has addr3 in READY state.
		addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	for i := 0; i < 10; i++ {
		select {
		case <-cc.NewSubConnAddrsCh:
			t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)")
		default:
		}
		time.Sleep(time.Millisecond * 10)
	}
}

// Sub-balancers are put in the cache when they are removed. If the balancer
// group is closed within the close timeout, all subconns should still be
// removed immediately.
func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
	_, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)

	bg.Close()
	// The balancer group is closed. The subconns should be removed immediately.
	removeTimeout := time.After(time.Millisecond * 500)
	scToRemove := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[0]]: 1,
		addrToSC[testBackendAddrs[1]]: 1,
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
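	// Each of the four SubConns is expected to be removed exactly once.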
	for i := 0; i < len(scToRemove); i++ {
		select {
		case sc := <-cc.RemoveSubConnCh:
			c := scToRemove[sc]
			if c == 0 {
				t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c)
			}
			scToRemove[sc] = c - 1
		case <-removeTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
		}
	}
}

// Sub-balancers in the cache will be closed if they are not re-added within
// the timeout, and their subConns will be removed.
func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(time.Second)()
	_, _, cc, addrToSC := initBalancerGroupForCachingTest(t)

	// The sub-balancer is not re-added within the timeout. The subconns should
	// be removed.
	removeTimeout := time.After(DefaultSubBalancerCloseTimeout)
	scToRemove := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToRemove); i++ {
		select {
		case sc := <-cc.RemoveSubConnCh:
			c := scToRemove[sc]
			if c == 0 {
				t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c)
			}
			scToRemove[sc] = c - 1
		case <-removeTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
		}
	}
}

// Wrap the rr builder, so it behaves the same, but has a different pointer.
type noopBalancerBuilderWrapper struct {
	balancer.Builder
}

// After removing a sub-balancer, re-add it with the same ID, but a different
// balancer builder. Old subconns should be removed, and new subconns should be
// created.
func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)

	// Re-add sub-balancer-1, but with a different balancer builder. The
	// sub-balancer is still in the cache, but can't be reused. This should cause
	// the old sub-balancer's subconns to be removed immediately, and new
	// subconns to be created.
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], &noopBalancerBuilderWrapper{rrBuilder})

	// The cached sub-balancer should be closed, and the subconns should be
	// removed immediately.
	removeTimeout := time.After(time.Millisecond * 500)
	scToRemove := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToRemove); i++ {
		select {
		case sc := <-cc.RemoveSubConnCh:
			c := scToRemove[sc]
			if c == 0 {
				t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c)
			}
			scToRemove[sc] = c - 1
		case <-removeTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
		}
	}

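	// Send new addresses to the re-added sub-balancer; since the cached
	// sub-balancer could not be reused, brand new SubConns should be created.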
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}})

	newSCTimeout := time.After(time.Millisecond * 500)
	scToAdd := map[resolver.Address]int{
		testBackendAddrs[4]: 1,
		testBackendAddrs[5]: 1,
	}
	for i := 0; i < len(scToAdd); i++ {
		select {
		case addr := <-cc.NewSubConnAddrsCh:
			c := scToAdd[addr[0]]
			if c == 0 {
				t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c)
			}
			scToAdd[addr[0]] = c - 1
			sc := <-cc.NewSubConnCh
			addrToSC[addr[0]] = sc
			bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
			bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
		case <-newSCTimeout:
			t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be created")
		}
	}

	// Test roundrobin on the new picker.
	p3 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]],
		addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]],
		addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}
}

// After removing a sub-balancer, it will be kept in cache. Make sure that this
// sub-balancer's Close is called when the balancer group is closed.
func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) {
	const balancerName = "stub-TestBalancerGroup_check_close"
	closed := make(chan struct{})
	stub.Register(balancerName, stub.BalancerFuncs{Close: func(_ *stub.BalancerData) {
		close(closed)
	}})
	builder := balancer.Get(balancerName)

	defer replaceDefaultSubBalancerCloseTimeout(time.Second)()
	gator, bg, _, _ := initBalancerGroupForCachingTest(t)

	// Add a balancer, and remove it.
	gator.Add(testBalancerIDs[2], 1)
	bg.Add(testBalancerIDs[2], builder)
	gator.Remove(testBalancerIDs[2])
	bg.Remove(testBalancerIDs[2])

	// Immediately close the balancer group, before the cache timeout.
	bg.Close()

	// Make sure the removed child balancer is closed eventually.
	select {
	case <-closed:
	case <-time.After(time.Second * 2):
		t.Fatalf("timeout waiting for the child balancer in cache to be closed")
	}
}

// TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed
// to the balancergroup at creation time is passed to child policies.
func (s) TestBalancerGroupBuildOptions(t *testing.T) {
	//goland:noinspection GoUnusedConst
	const (
		balancerName       = "stubBalancer-TestBalancerGroupBuildOptions"
		parent             = int64(1234)
		userAgent          = "ua"
		defaultTestTimeout = 1 * time.Second
	)

	// Set up the stub balancer such that we can read the build options passed to
	// it in the UpdateClientConnState method.
	bOpts := balancer.BuildOptions{
		DialCreds:        insecure.NewCredentials(),
		ChannelzParentID: parent,
		CustomUserAgent:  userAgent,
	}
	stub.Register(balancerName, stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
			if !cmp.Equal(bd.BuildOptions, bOpts) {
				return fmt.Errorf("buildOptions in child balancer: %v, want %v", bd.BuildOptions, bOpts)
			}
			return nil
		},
	})
	cc := testutils.NewTestClientConn(t)
	bg := New(cc, bOpts, nil, nil)
	bg.Start()

	// Add the stub balancer built above as a child policy.
	balancerBuilder := balancer.Get(balancerName)
	bg.Add(testBalancerIDs[0], balancerBuilder)

	// Send an empty ClientConn state change. This should trigger the
	// verification of the buildOptions being passed to the child policy.
	if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestBalancerExitIdleOne(t *testing.T) {
	const balancerName = "stub-balancer-test-balancergroup-exit-idle-one"
	exitIdleCh := make(chan struct{}, 1)
	stub.Register(balancerName, stub.BalancerFuncs{
		ExitIdle: func(*stub.BalancerData) {
			exitIdleCh <- struct{}{}
		},
	})
	cc := testutils.NewTestClientConn(t)
	bg := New(cc, balancer.BuildOptions{}, nil, nil)
	bg.Start()
	defer bg.Close()

	// Add the stub balancer built above as a child policy.
	builder := balancer.Get(balancerName)
	bg.Add(testBalancerIDs[0], builder)

	// Call ExitIdle on the child policy.
	bg.ExitIdleOne(testBalancerIDs[0])
	select {
	case <-time.After(time.Second):
		t.Fatal("Timeout when waiting for ExitIdle to be invoked on child policy")
	case <-exitIdleCh:
	}
}