gitee.com/zhaochuninhefei/gmgo@v0.0.31-0.20240209061119-069254a02979/grpc/internal/balancergroup/balancergroup_test.go

/*
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package balancergroup

import (
	"fmt"
	"testing"
	"time"

	"gitee.com/zhaochuninhefei/gmgo/grpc/balancer"
	"gitee.com/zhaochuninhefei/gmgo/grpc/balancer/roundrobin"
	"gitee.com/zhaochuninhefei/gmgo/grpc/balancer/weightedtarget/weightedaggregator"
	"gitee.com/zhaochuninhefei/gmgo/grpc/connectivity"
	"gitee.com/zhaochuninhefei/gmgo/grpc/credentials/insecure"
	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/balancer/stub"
	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpctest"
	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/testutils"
	"gitee.com/zhaochuninhefei/gmgo/grpc/resolver"
	"github.com/google/go-cmp/cmp"
)

var (
	rrBuilder        = balancer.Get(roundrobin.Name)
	testBalancerIDs  = []string{"b1", "b2", "b3"}
	testBackendAddrs []resolver.Address
)

const testBackendAddrsCount = 12

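// init populates testBackendAddrs with testBackendAddrsCount distinct dummy
// addresses of the form i.i.i.i:i, and lowers the default sub-balancer close
// timeout so that subconn caching is effectively off unless a test overrides
// it.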
func init() {
	for i := 0; i < testBackendAddrsCount; i++ {
		testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)})
	}

	// Disable caching for all tests. It will be re-enabled in caching-specific
	// tests.
	DefaultSubBalancerCloseTimeout = time.Millisecond
}

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

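// subConnFromPicker adapts a picker for testutils.IsRoundRobin: the returned
// function performs one pick with an empty PickInfo and returns the chosen
// SubConn.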
func subConnFromPicker(p balancer.Picker) func() balancer.SubConn {
	return func() balancer.SubConn {
		scst, _ := p.Pick(balancer.PickInfo{})
		return scst.SubConn
	}
}

// Create a new balancer group, add balancers and backends, but do not start
// it yet:
// - b1, weight 2, backends [0,1]
// - b2, weight 1, backends [2,3]
// Start the balancer group and check behavior.
//
// Close the balancer group, then add/remove balancers and change weights and
// addresses:
// - b2, weight 3, backends [0,3]
// - b3, weight 1, backends [1,2]
// Start the balancer group again and check behavior.
func (s) TestBalancerGroup_start_close(t *testing.T) {
	cc := testutils.NewTestClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(cc, balancer.BuildOptions{}, gator, nil)

	// Add two balancers to the group and send two resolved addresses to each.
	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()

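	// Collect the four new SubConns (two per sub-balancer), keyed by backend
	// address, and drive each through CONNECTING to READY so that a picker
	// covering all of them is generated.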
	m1 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m1[addrs[0]] = sc
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
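	// With weights 2:1, one round-robin cycle picks each of b1's backends
	// twice and each of b2's backends once.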
	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[0]],
		m1[testBackendAddrs[1]], m1[testBackendAddrs[1]],
		m1[testBackendAddrs[2]], m1[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	gator.Stop()
	bg.Close()
	for i := 0; i < 4; i++ {
		bg.UpdateSubConnState(<-cc.RemoveSubConnCh, balancer.SubConnState{ConnectivityState: connectivity.Shutdown})
	}

	// Add b3, weight 1, backends [1,2].
	gator.Add(testBalancerIDs[2], 1)
	bg.Add(testBalancerIDs[2], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}})

	// Remove b1.
	gator.Remove(testBalancerIDs[0])
	bg.Remove(testBalancerIDs[0])

	// Update b2 to weight 3, backends [0,3].
	gator.UpdateWeight(testBalancerIDs[1], 3)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: []resolver.Address{testBackendAddrs[0], testBackendAddrs[3]}}})

	gator.Start()
	bg.Start()

	m2 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m2[addrs[0]] = sc
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p2 := <-cc.NewPickerCh
	want = []balancer.SubConn{
		m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]],
		m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]],
		m2[testBackendAddrs[1]], m2[testBackendAddrs[2]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}
}

// Test that balancer group start() doesn't deadlock if the balancer calls back
// into the balancer group inline when it gets an update.
//
// The potential deadlock can happen if we
//  - hold a lock and send updates to the balancer (e.g. update resolved addresses)
//  - the balancer calls back (NewSubConn or update picker) inline
// The callback would try to hold the same lock again, which would cause a
// deadlock.
//
// This test starts the balancer group with a test balancer that updates the
// picker whenever it gets an address update. start() is expected to return
// without deadlocking.
func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) {
	const balancerName = "stub-TestBalancerGroup_start_close_deadlock"
	stub.Register(balancerName, stub.BalancerFuncs{})
	builder := balancer.Get(balancerName)

	cc := testutils.NewTestClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(cc, balancer.BuildOptions{}, gator, nil)

	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], builder)
	_ = bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], builder)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()
}

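// replaceDefaultSubBalancerCloseTimeout overrides the sub-balancer cache
// timeout and returns a function that restores the previous value. Tests
// defer the returned function so the override is undone when they finish.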
func replaceDefaultSubBalancerCloseTimeout(n time.Duration) func() {
	old := DefaultSubBalancerCloseTimeout
	DefaultSubBalancerCloseTimeout = n
	return func() { DefaultSubBalancerCloseTimeout = old }
}

// initBalancerGroupForCachingTest creates a balancer group and initializes it
// so that it is ready for caching tests.
//
// Two rr balancers are added to bg, each with 2 ready subConns. One
// sub-balancer is then removed, so the returned balancer group has one
// sub-balancer in its own map, and one sub-balancer in cache.
func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.TestClientConn, map[resolver.Address]balancer.SubConn) {
	cc := testutils.NewTestClientConn(t)
	gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR)
	gator.Start()
	bg := New(cc, balancer.BuildOptions{}, gator, nil)

	// Add two balancers to the group and send two resolved addresses to each.
	gator.Add(testBalancerIDs[0], 2)
	bg.Add(testBalancerIDs[0], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)
	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})

	bg.Start()

	m1 := make(map[resolver.Address]balancer.SubConn)
	for i := 0; i < 4; i++ {
		addrs := <-cc.NewSubConnAddrsCh
		sc := <-cc.NewSubConnCh
		m1[addrs[0]] = sc
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
	}

	// Test roundrobin on the last picker.
	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[0]],
		m1[testBackendAddrs[1]], m1[testBackendAddrs[1]],
		m1[testBackendAddrs[2]], m1[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	gator.Remove(testBalancerIDs[1])
	bg.Remove(testBalancerIDs[1])
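	// Force the aggregator to rebuild and publish a picker that reflects only
	// the remaining sub-balancer.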
	gator.BuildAndUpdate()
	// Don't wait for SubConns to be removed after close, because they are only
	// removed after the close timeout.
	for i := 0; i < 10; i++ {
		select {
		case <-cc.RemoveSubConnCh:
			t.Fatalf("Got request to remove subconn, want no remove subconn (because subconns were still in cache)")
		default:
		}
		time.Sleep(time.Millisecond)
	}
	// Test roundrobin with only sub-balancer 0.
	p2 := <-cc.NewPickerCh
	want = []balancer.SubConn{
		m1[testBackendAddrs[0]], m1[testBackendAddrs[1]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	return gator, bg, cc, m1
}

// Test that if a sub-balancer is removed and then re-added within the close
// timeout, its subConns won't be re-created.
func (s) TestBalancerGroup_locality_caching(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)

	// Turn down the subconn for addr2. We shouldn't get a picker update,
	// because sub-balancer 1 was removed.
	bg.UpdateSubConnState(addrToSC[testBackendAddrs[2]], balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
	for i := 0; i < 10; i++ {
		select {
		case <-cc.NewPickerCh:
			t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)")
		default:
		}
		time.Sleep(time.Millisecond)
	}

	// Sleep, but for less than the close timeout.
	time.Sleep(time.Millisecond * 100)

	// Re-add sub-balancer 1. Because its subconns were in cache, no new
	// subconns should be created. But a new picker should still be generated,
	// with up-to-date subconn states.
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], rrBuilder)

	p3 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]],
		addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]],
		// addr2 is down, b2 only has addr3 in READY state.
		addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	for i := 0; i < 10; i++ {
		select {
		case <-cc.NewSubConnAddrsCh:
			t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)")
		default:
		}
		time.Sleep(time.Millisecond * 10)
	}
}

// Sub-balancers are put in cache when they are removed. If the balancer group
// is closed within the close timeout, all subconns should still be removed
// immediately.
func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
	_, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)

	bg.Close()
	// The balancer group is closed. The subconns should be removed immediately.
	removeTimeout := time.After(time.Millisecond * 500)
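	// scToRemove maps each SubConn to the number of removals still expected
	// for it.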
	scToRemove := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[0]]: 1,
		addrToSC[testBackendAddrs[1]]: 1,
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToRemove); i++ {
		select {
		case sc := <-cc.RemoveSubConnCh:
			c := scToRemove[sc]
			if c == 0 {
				t.Fatalf("Got removeSubConn for %v when %d more removals were expected", sc, c)
			}
			scToRemove[sc] = c - 1
		case <-removeTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
		}
	}
}

// Sub-balancers in cache will be closed if they are not re-added within the
// timeout, and their subConns will be removed.
func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(time.Second)()
	_, _, cc, addrToSC := initBalancerGroupForCachingTest(t)

	// The sub-balancer is not re-added within the timeout. The subconns should
	// be removed.
	removeTimeout := time.After(DefaultSubBalancerCloseTimeout)
	scToRemove := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToRemove); i++ {
		select {
		case sc := <-cc.RemoveSubConnCh:
			c := scToRemove[sc]
			if c == 0 {
				t.Fatalf("Got removeSubConn for %v when %d more removals were expected", sc, c)
			}
			scToRemove[sc] = c - 1
		case <-removeTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
		}
	}
}

// noopBalancerBuilderWrapper wraps the rr builder, so it behaves the same but
// has a different pointer.
type noopBalancerBuilderWrapper struct {
	balancer.Builder
}
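
// The balancer group compares builder identities when a removed sub-balancer
// is re-added, so an equivalent builder with a different pointer prevents the
// cached sub-balancer from being reused.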

// After removing a sub-balancer, re-add it with the same ID but a different
// balancer builder. Old subconns should be removed, and new subconns should be
// created.
func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) {
	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)

	// Re-add sub-balancer 1, but with a different balancer builder. The
	// sub-balancer is still in cache, but can't be reused. This should cause
	// the old sub-balancer's subconns to be removed immediately, and new
	// subconns to be created.
	gator.Add(testBalancerIDs[1], 1)
	bg.Add(testBalancerIDs[1], &noopBalancerBuilderWrapper{rrBuilder})

	// The cached sub-balancer should be closed, and its subconns should be
	// removed immediately.
	removeTimeout := time.After(time.Millisecond * 500)
	scToRemove := map[balancer.SubConn]int{
		addrToSC[testBackendAddrs[2]]: 1,
		addrToSC[testBackendAddrs[3]]: 1,
	}
	for i := 0; i < len(scToRemove); i++ {
		select {
		case sc := <-cc.RemoveSubConnCh:
			c := scToRemove[sc]
			if c == 0 {
				t.Fatalf("Got removeSubConn for %v when %d more removals were expected", sc, c)
			}
			scToRemove[sc] = c - 1
		case <-removeTimeout:
			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
		}
	}

	_ = bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}})

	newSCTimeout := time.After(time.Millisecond * 500)
	scToAdd := map[resolver.Address]int{
		testBackendAddrs[4]: 1,
		testBackendAddrs[5]: 1,
	}
	for i := 0; i < len(scToAdd); i++ {
		select {
		case addr := <-cc.NewSubConnAddrsCh:
			c := scToAdd[addr[0]]
			if c == 0 {
				t.Fatalf("Got newSubConn for %v when %d more new subconns were expected", addr, c)
			}
			scToAdd[addr[0]] = c - 1
			sc := <-cc.NewSubConnCh
			addrToSC[addr[0]] = sc
			bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
			bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
		case <-newSCTimeout:
			t.Fatalf("timeout waiting for subConns (from the new sub-balancer) to be created")
		}
	}

	// Test roundrobin on the new picker.
	p3 := <-cc.NewPickerCh
	want := []balancer.SubConn{
		addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]],
		addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]],
		addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]],
	}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}
}

// After a sub-balancer is removed, it is kept in cache. Make sure that this
// sub-balancer's Close is called when the balancer group is closed.
func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) {
	const balancerName = "stub-TestBalancerGroup_check_close"
	closed := make(chan struct{})
	stub.Register(balancerName, stub.BalancerFuncs{Close: func(_ *stub.BalancerData) {
		close(closed)
	}})
	builder := balancer.Get(balancerName)

	defer replaceDefaultSubBalancerCloseTimeout(time.Second)()
	gator, bg, _, _ := initBalancerGroupForCachingTest(t)

	// Add a balancer, then remove it to put it in cache.
	gator.Add(testBalancerIDs[2], 1)
	bg.Add(testBalancerIDs[2], builder)
	gator.Remove(testBalancerIDs[2])
	bg.Remove(testBalancerIDs[2])

	// Immediately close the balancer group, before the cache timeout fires.
	bg.Close()

	// Make sure the removed child balancer is closed eventually.
	select {
	case <-closed:
	case <-time.After(time.Second * 2):
		t.Fatalf("timeout waiting for the child balancer in cache to be closed")
	}
}

// TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed
// to the balancergroup at creation time is passed to child policies.
func (s) TestBalancerGroupBuildOptions(t *testing.T) {
	//goland:noinspection GoUnusedConst
	const (
		balancerName       = "stubBalancer-TestBalancerGroupBuildOptions"
		parent             = int64(1234)
		userAgent          = "ua"
		defaultTestTimeout = 1 * time.Second
	)

	// Set up the stub balancer such that we can read the build options passed
	// to it in the UpdateClientConnState method.
	bOpts := balancer.BuildOptions{
		DialCreds:        insecure.NewCredentials(),
		ChannelzParentID: parent,
		CustomUserAgent:  userAgent,
	}
	stub.Register(balancerName, stub.BalancerFuncs{
		UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error {
			if !cmp.Equal(bd.BuildOptions, bOpts) {
				return fmt.Errorf("buildOptions in child balancer: %v, want %v", bd.BuildOptions, bOpts)
			}
			return nil
		},
	})
	cc := testutils.NewTestClientConn(t)
	bg := New(cc, bOpts, nil, nil)
	bg.Start()

	// Add the stub balancer built above as a child policy.
	balancerBuilder := balancer.Get(balancerName)
	bg.Add(testBalancerIDs[0], balancerBuilder)

	// Send an empty clientConn state change. This should trigger the
	// verification of the buildOptions being passed to the child policy.
	if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestBalancerExitIdleOne(t *testing.T) {
	const balancerName = "stub-balancer-test-balancergroup-exit-idle-one"
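	// Buffered so that the stub's ExitIdle callback can send without blocking.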
	exitIdleCh := make(chan struct{}, 1)
	stub.Register(balancerName, stub.BalancerFuncs{
		ExitIdle: func(*stub.BalancerData) {
			exitIdleCh <- struct{}{}
		},
	})
	cc := testutils.NewTestClientConn(t)
	bg := New(cc, balancer.BuildOptions{}, nil, nil)
	bg.Start()
	defer bg.Close()

	// Add the stub balancer built above as a child policy.
	builder := balancer.Get(balancerName)
	bg.Add(testBalancerIDs[0], builder)

	// Call ExitIdle on the child policy.
	bg.ExitIdleOne(testBalancerIDs[0])
	select {
	case <-time.After(time.Second):
		t.Fatal("Timeout when waiting for ExitIdle to be invoked on child policy")
	case <-exitIdleCh:
	}
}