google.golang.org/grpc@v1.74.2/internal/balancergroup/balancergroup.go (about)

     1  /*
     2   * Copyright 2019 gRPC authors.
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *
     8   *     http://www.apache.org/licenses/LICENSE-2.0
     9   *
    10   * Unless required by applicable law or agreed to in writing, software
    11   * distributed under the License is distributed on an "AS IS" BASIS,
    12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13   * See the License for the specific language governing permissions and
    14   * limitations under the License.
    15   */
    16  
    17  // Package balancergroup implements a utility struct to bind multiple balancers
    18  // into one balancer.
    19  package balancergroup
    20  
    21  import (
    22  	"encoding/json"
    23  	"fmt"
    24  	"sync"
    25  	"time"
    26  
    27  	"google.golang.org/grpc/balancer"
    28  	"google.golang.org/grpc/connectivity"
    29  	"google.golang.org/grpc/internal/balancer/gracefulswitch"
    30  	"google.golang.org/grpc/internal/cache"
    31  	"google.golang.org/grpc/internal/grpclog"
    32  	"google.golang.org/grpc/resolver"
    33  	"google.golang.org/grpc/serviceconfig"
    34  )
    35  
// subBalancerWrapper is used to keep the configurations that will be used to start
// the underlying balancer. It can be called to start/stop the underlying
// balancer.
//
// When the config changes, it will pass the update to the underlying balancer
// if it exists.
//
// TODO: move to a separate file?
type subBalancerWrapper struct {
	// subBalancerWrapper is passed to the sub-balancer as a ClientConn
	// wrapper, only to keep the state and picker.  When sub-balancer is
	// restarted while in cache, the picker needs to be resent.
	//
	// It also contains the sub-balancer ID, so the parent balancer group can
	// keep track of SubConn/pickers and the sub-balancers they belong to. Some
	// of the actions are forwarded to the parent ClientConn with no change.
	// Some are forwarded to the balancer group with the sub-balancer ID.
	balancer.ClientConn
	// id identifies this sub-balancer within its parent BalancerGroup.
	id string
	// group is the parent BalancerGroup that owns this wrapper.
	group *BalancerGroup

	// mu guards state, which is written by the sub-balancer via UpdateState
	// and re-read when re-sending the cached picker after a restart.
	mu    sync.Mutex
	state balancer.State

	// The static part of sub-balancer. Keeps balancerBuilders and addresses.
	// To be used when restarting sub-balancer.
	builder balancer.Builder
	// Options to be passed to sub-balancer at the time of creation.
	buildOpts balancer.BuildOptions
	// ccState is a cache of the addresses/balancer config, so when the balancer
	// is restarted after close, it will get the previous update. It's a pointer
	// and is set to nil at init, so when the balancer is built for the first
	// time (not a restart), it won't receive an empty update. Note that this
	// isn't reset to nil when the underlying balancer is closed.
	ccState *balancer.ClientConnState
	// The dynamic part of sub-balancer. Only used when balancer group is
	// started. Gets cleared when sub-balancer is closed.
	balancer *gracefulswitch.Balancer
}
    75  
    76  // UpdateState overrides balancer.ClientConn, to keep state and picker.
    77  func (sbc *subBalancerWrapper) UpdateState(state balancer.State) {
    78  	sbc.mu.Lock()
    79  	sbc.state = state
    80  	sbc.group.updateBalancerState(sbc.id, state)
    81  	sbc.mu.Unlock()
    82  }
    83  
    84  // NewSubConn overrides balancer.ClientConn, so balancer group can keep track of
    85  // the relation between subconns and sub-balancers.
    86  func (sbc *subBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
    87  	return sbc.group.newSubConn(sbc, addrs, opts)
    88  }
    89  
    90  func (sbc *subBalancerWrapper) updateBalancerStateWithCachedPicker() {
    91  	sbc.mu.Lock()
    92  	if sbc.state.Picker != nil {
    93  		sbc.group.updateBalancerState(sbc.id, sbc.state)
    94  	}
    95  	sbc.mu.Unlock()
    96  }
    97  
    98  func (sbc *subBalancerWrapper) startBalancer() {
    99  	if sbc.balancer == nil {
   100  		sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts)
   101  	}
   102  	sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id)
   103  	sbc.balancer.SwitchTo(sbc.builder)
   104  	if sbc.ccState != nil {
   105  		sbc.balancer.UpdateClientConnState(*sbc.ccState)
   106  	}
   107  }
   108  
   109  // exitIdle invokes the ExitIdle method on the sub-balancer, a gracefulswitch
   110  // balancer.
   111  func (sbc *subBalancerWrapper) exitIdle() {
   112  	b := sbc.balancer
   113  	if b == nil {
   114  		return
   115  	}
   116  	b.ExitIdle()
   117  }
   118  
   119  func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) error {
   120  	sbc.ccState = &s
   121  	b := sbc.balancer
   122  	if b == nil {
   123  		// A sub-balancer is closed when it is removed from the group or the
   124  		// group is closed as a whole, and is not expected to receive updates
   125  		// after that. But when used with the priority LB policy a sub-balancer
   126  		// (and the whole balancer group) could be closed because it's the lower
   127  		// priority, but it can still get address updates.
   128  		return nil
   129  	}
   130  	return b.UpdateClientConnState(s)
   131  }
   132  
   133  func (sbc *subBalancerWrapper) resolverError(err error) {
   134  	b := sbc.balancer
   135  	if b == nil {
   136  		// A sub-balancer is closed when it is removed from the group or the
   137  		// group is closed as a whole, and is not expected to receive updates
   138  		// after that. But when used with the priority LB policy a sub-balancer
   139  		// (and the whole balancer group) could be closed because it's the lower
   140  		// priority, but it can still get address updates.
   141  		return
   142  	}
   143  	b.ResolverError(err)
   144  }
   145  
   146  func (sbc *subBalancerWrapper) stopBalancer() {
   147  	if sbc.balancer == nil {
   148  		return
   149  	}
   150  	sbc.balancer.Close()
   151  	sbc.balancer = nil
   152  }
   153  
// BalancerGroup takes a list of balancers, each behind a gracefulswitch
// balancer, and makes them into one balancer.
//
// Note that this struct doesn't implement balancer.Balancer, because it's not
// intended to be used directly as a balancer. It's expected to be used as a
// sub-balancer manager by a high level balancer.
//
//	Updates from ClientConn are forwarded to sub-balancers
//	- service config update
//	- address update
//	- subConn state change
//	  - find the corresponding balancer and forward
//
//	Actions from sub-balancers are forwarded to parent ClientConn
//	- new/remove SubConn
//	- picker update and health states change
//	  - sub-pickers are sent to an aggregator provided by the parent, which
//	    will group them into a group-picker. The aggregated connectivity state is
//	    also handled by the aggregator.
//	- resolveNow
//
// Sub-balancers are only built when the balancer group is started. If the
// balancer group is closed, the sub-balancers are also closed. And it's
// guaranteed that no updates will be sent to parent ClientConn from a closed
// balancer group.
type BalancerGroup struct {
	cc        balancer.ClientConn
	buildOpts balancer.BuildOptions
	logger    *grpclog.PrefixLogger

	// stateAggregator is where the state/picker updates will be sent to. It's
	// provided by the parent balancer, to build a picker with all the
	// sub-pickers.
	stateAggregator BalancerStateAggregator

	// outgoingMu guards all operations in the direction:
	// ClientConn-->Sub-balancer. Including start, stop, resolver updates and
	// SubConn state changes.
	//
	// The corresponding boolean outgoingClosed is used to stop further updates
	// to sub-balancers after they are closed.
	outgoingMu         sync.Mutex
	outgoingClosed     bool
	idToBalancerConfig map[string]*subBalancerWrapper
	// Cache for sub-balancers when they are removed. This is `nil` if caching
	// is disabled by passing `0` for Options.SubBalancerCloseTimeout`.
	deletedBalancerCache *cache.TimeoutCache

	// incomingMu is to make sure this balancer group doesn't send updates to cc
	// after it's closed.
	//
	// We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer
	// may call back to balancer group inline. It causes deadlock if they
	// require the same mutex).
	//
	// We should never need to hold multiple locks at the same time in this
	// struct. The case where two locks are held can only happen when the
	// underlying balancer calls back into balancer group inline. So there's an
	// implicit lock acquisition order that outgoingMu is locked before
	// incomingMu.

	// incomingMu guards all operations in the direction:
	// Sub-balancer-->ClientConn. Including NewSubConn, RemoveSubConn. It also
	// guards the map from SubConn to balancer ID, so updateSubConnState needs
	// to hold it shortly to potentially delete from the map.
	//
	// UpdateState is called by the balancer state aggregator, and it will
	// decide when and whether to call.
	//
	// The corresponding boolean incomingClosed is used to stop further updates
	// from sub-balancers after they are closed.
	incomingMu      sync.Mutex
	incomingClosed  bool // This boolean only guards calls back to ClientConn.
	scToSubBalancer map[balancer.SubConn]*subBalancerWrapper
}
   229  
// Options wraps the arguments to be passed to the BalancerGroup ctor.
type Options struct {
	// CC is a reference to the parent balancer.ClientConn.
	CC balancer.ClientConn
	// BuildOpts contains build options to be used when creating sub-balancers.
	BuildOpts balancer.BuildOptions
	// StateAggregator is an implementation of the BalancerStateAggregator
	// interface to aggregate picker and connectivity states from sub-balancers.
	StateAggregator BalancerStateAggregator
	// Logger is a group specific prefix logger.
	Logger *grpclog.PrefixLogger
	// SubBalancerCloseTimeout is the amount of time deleted sub-balancers spend
	// in the idle cache before being closed for good. A value of zero here
	// disables caching of deleted sub-balancers entirely.
	SubBalancerCloseTimeout time.Duration
}
   246  
   247  // New creates a new BalancerGroup. Note that the BalancerGroup
   248  // needs to be started to work.
   249  func New(opts Options) *BalancerGroup {
   250  	var bc *cache.TimeoutCache
   251  	if opts.SubBalancerCloseTimeout != time.Duration(0) {
   252  		bc = cache.NewTimeoutCache(opts.SubBalancerCloseTimeout)
   253  	}
   254  
   255  	return &BalancerGroup{
   256  		cc:              opts.CC,
   257  		buildOpts:       opts.BuildOpts,
   258  		stateAggregator: opts.StateAggregator,
   259  		logger:          opts.Logger,
   260  
   261  		deletedBalancerCache: bc,
   262  		idToBalancerConfig:   make(map[string]*subBalancerWrapper),
   263  		scToSubBalancer:      make(map[balancer.SubConn]*subBalancerWrapper),
   264  	}
   265  }
   266  
// AddWithClientConn adds a balancer with the given id to the group. The
// balancer is built with a balancer builder registered with balancerName. The
// given ClientConn is passed to the newly built balancer instead of the
// one passed to balancergroup.New().
//
// It returns an error if balancerName is not registered, or if the balancer
// group has already been closed. If a compatible sub-balancer with the same
// id is found in the deleted-balancer cache, it is revived instead of being
// built from scratch.
//
// TODO: Get rid of the existing Add() API and replace it with this.
func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error {
	bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id)
	builder := balancer.Get(balancerName)
	if builder == nil {
		return fmt.Errorf("balancergroup: unregistered balancer name %q", balancerName)
	}

	// Store data in static map, and then check to see if bg is started.
	bg.outgoingMu.Lock()
	defer bg.outgoingMu.Unlock()
	if bg.outgoingClosed {
		return fmt.Errorf("balancergroup: already closed")
	}
	var sbc *subBalancerWrapper
	// Skip searching the cache if disabled.
	if bg.deletedBalancerCache != nil {
		if old, ok := bg.deletedBalancerCache.Remove(id); ok {
			if bg.logger.V(2) {
				bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id)
				bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
			}

			sbc, _ = old.(*subBalancerWrapper)
			if sbc != nil && sbc.builder != builder {
				// If the sub-balancer in cache was built with a different
				// balancer builder, don't use it, cleanup this old-balancer,
				// and behave as sub-balancer is not found in cache.
				//
				// NOTE that this will also drop the cached addresses for this
				// sub-balancer, which seems to be reasonable.
				sbc.stopBalancer()
				// cleanupSubConns must be done before the new balancer starts,
				// otherwise new SubConns created by the new balancer might be
				// removed by mistake.
				bg.cleanupSubConns(sbc)
				sbc = nil
			}
		}
	}
	if sbc == nil {
		sbc = &subBalancerWrapper{
			ClientConn: cc,
			id:         id,
			group:      bg,
			builder:    builder,
			buildOpts:  bg.buildOpts,
		}
		sbc.startBalancer()
	} else {
		// When bringing back a sub-balancer from cache, re-send the cached
		// picker and state.
		sbc.updateBalancerStateWithCachedPicker()
	}
	bg.idToBalancerConfig[id] = sbc
	return nil
}
   329  
// Add adds a balancer built by builder to the group, with given id.
func (bg *BalancerGroup) Add(id string, builder balancer.Builder) {
	// Any error (unregistered balancer name, or group already closed) is
	// intentionally dropped to keep this method's error-free signature;
	// AddWithClientConn logs the add attempt before failing.
	bg.AddWithClientConn(id, builder.Name(), bg.cc)
}
   334  
// Remove removes the balancer with id from the group.
//
// But doesn't close the balancer. The balancer is kept in a cache, and will be
// closed after timeout. Cleanup work (closing sub-balancer and removing
// subconns) will be done after timeout.
//
// If caching is disabled (no deletedBalancerCache), the sub-balancer is
// stopped and its SubConns are cleaned up immediately instead.
func (bg *BalancerGroup) Remove(id string) {
	bg.logger.Infof("Removing child policy for child %q", id)

	bg.outgoingMu.Lock()
	if bg.outgoingClosed {
		bg.outgoingMu.Unlock()
		return
	}

	sbToRemove, ok := bg.idToBalancerConfig[id]
	if !ok {
		bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id)
		bg.outgoingMu.Unlock()
		return
	}

	// Unconditionally remove the sub-balancer config from the map.
	delete(bg.idToBalancerConfig, id)

	if bg.deletedBalancerCache != nil {
		if bg.logger.V(2) {
			bg.logger.Infof("Adding child policy for child %q to the balancer cache", id)
			bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
		}

		bg.deletedBalancerCache.Add(id, sbToRemove, func() {
			if bg.logger.V(2) {
				bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id)
				bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
			}

			// A sub-balancer evicted from the timeout cache needs to be
			// closed and its subConns need to be removed, unconditionally.
			// There is a possibility that a sub-balancer might be removed
			// (thereby moving it to the cache) around the same time that the
			// balancergroup is closed, and by the time we get here the
			// balancergroup might be closed. Skipping the cleanup based on a
			// closed-state check at that point could leak the sub-balancer,
			// so no such check is done here.
			bg.outgoingMu.Lock()
			sbToRemove.stopBalancer()
			bg.outgoingMu.Unlock()
			bg.cleanupSubConns(sbToRemove)
		})
		bg.outgoingMu.Unlock()
		return
	}

	// Remove the sub-balancer with immediate effect if we are not caching.
	sbToRemove.stopBalancer()
	bg.outgoingMu.Unlock()
	bg.cleanupSubConns(sbToRemove)
}
   392  
   393  // bg.remove(id) doesn't do cleanup for the sub-balancer. This function does
   394  // cleanup after the timeout.
   395  func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) {
   396  	bg.incomingMu.Lock()
   397  	defer bg.incomingMu.Unlock()
   398  	// Remove SubConns. This is only done after the balancer is
   399  	// actually closed.
   400  	//
   401  	// NOTE: if NewSubConn is called by this (closed) balancer later, the
   402  	// SubConn will be leaked. This shouldn't happen if the balancer
   403  	// implementation is correct. To make sure this never happens, we need to
   404  	// add another layer (balancer manager) between balancer group and the
   405  	// sub-balancers.
   406  	for sc, b := range bg.scToSubBalancer {
   407  		if b == config {
   408  			delete(bg.scToSubBalancer, sc)
   409  		}
   410  	}
   411  }
   412  
   413  // Following are actions from the parent grpc.ClientConn, forward to sub-balancers.
   414  
   415  // updateSubConnState forwards the update to cb and updates scToSubBalancer if
   416  // needed.
   417  func (bg *BalancerGroup) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
   418  	bg.incomingMu.Lock()
   419  	if bg.incomingClosed {
   420  		bg.incomingMu.Unlock()
   421  		return
   422  	}
   423  	if _, ok := bg.scToSubBalancer[sc]; !ok {
   424  		bg.incomingMu.Unlock()
   425  		return
   426  	}
   427  	if state.ConnectivityState == connectivity.Shutdown {
   428  		// Only delete sc from the map when state changed to Shutdown.
   429  		delete(bg.scToSubBalancer, sc)
   430  	}
   431  	bg.incomingMu.Unlock()
   432  
   433  	bg.outgoingMu.Lock()
   434  	defer bg.outgoingMu.Unlock()
   435  	if bg.outgoingClosed {
   436  		return
   437  	}
   438  	if cb != nil {
   439  		cb(state)
   440  	}
   441  }
   442  
// UpdateSubConnState handles the state for the subconn. It finds the
// corresponding balancer and forwards the update.
//
// NOTE: SubConn state changes are expected to arrive through the
// StateListener installed in newSubConn, so this method should never be
// reached; it only logs an error to surface the unexpected call.
func (bg *BalancerGroup) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	bg.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
}
   448  
   449  // UpdateClientConnState handles ClientState (including balancer config and
   450  // addresses) from resolver. It finds the balancer and forwards the update.
   451  func (bg *BalancerGroup) UpdateClientConnState(id string, s balancer.ClientConnState) error {
   452  	bg.outgoingMu.Lock()
   453  	defer bg.outgoingMu.Unlock()
   454  	if bg.outgoingClosed {
   455  		return nil
   456  	}
   457  	if config, ok := bg.idToBalancerConfig[id]; ok {
   458  		return config.updateClientConnState(s)
   459  	}
   460  	return nil
   461  }
   462  
   463  // ResolverError forwards resolver errors to all sub-balancers.
   464  func (bg *BalancerGroup) ResolverError(err error) {
   465  	bg.outgoingMu.Lock()
   466  	defer bg.outgoingMu.Unlock()
   467  	if bg.outgoingClosed {
   468  		return
   469  	}
   470  	for _, config := range bg.idToBalancerConfig {
   471  		config.resolverError(err)
   472  	}
   473  }
   474  
   475  // Following are actions from sub-balancers, forward to ClientConn.
   476  
   477  // newSubConn: forward to ClientConn, and also create a map from sc to balancer,
   478  // so state update will find the right balancer.
   479  //
   480  // One note about removing SubConn: only forward to ClientConn, but not delete
   481  // from map. Delete sc from the map only when state changes to Shutdown. Since
   482  // it's just forwarding the action, there's no need for a removeSubConn()
   483  // wrapper function.
   484  func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
   485  	// NOTE: if balancer with id was already removed, this should also return
   486  	// error. But since we call balancer.stopBalancer when removing the balancer, this
   487  	// shouldn't happen.
   488  	bg.incomingMu.Lock()
   489  	if bg.incomingClosed {
   490  		bg.incomingMu.Unlock()
   491  		return nil, fmt.Errorf("balancergroup: NewSubConn is called after balancer group is closed")
   492  	}
   493  	var sc balancer.SubConn
   494  	oldListener := opts.StateListener
   495  	opts.StateListener = func(state balancer.SubConnState) { bg.updateSubConnState(sc, state, oldListener) }
   496  	sc, err := bg.cc.NewSubConn(addrs, opts)
   497  	if err != nil {
   498  		bg.incomingMu.Unlock()
   499  		return nil, err
   500  	}
   501  	bg.scToSubBalancer[sc] = config
   502  	bg.incomingMu.Unlock()
   503  	return sc, nil
   504  }
   505  
   506  // updateBalancerState: forward the new state to balancer state aggregator. The
   507  // aggregator will create an aggregated picker and an aggregated connectivity
   508  // state, then forward to ClientConn.
   509  func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) {
   510  	bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state)
   511  
   512  	// Send new state to the aggregator, without holding the incomingMu.
   513  	// incomingMu is to protect all calls to the parent ClientConn, this update
   514  	// doesn't necessary trigger a call to ClientConn, and should already be
   515  	// protected by aggregator's mutex if necessary.
   516  	if bg.stateAggregator != nil {
   517  		bg.stateAggregator.UpdateState(id, state)
   518  	}
   519  }
   520  
// Close closes the balancer. It stops sub-balancers, and removes the subconns.
// When a BalancerGroup is closed, it can not receive further address updates.
//
// The two mutexes are deliberately taken one at a time, never nested, and
// outgoingMu is released before clearing the cache: the cache's eviction
// callback (installed in Remove) acquires outgoingMu itself, so holding it
// across Clear would deadlock.
func (bg *BalancerGroup) Close() {
	bg.incomingMu.Lock()
	bg.incomingClosed = true
	// Also remove all SubConns.
	for sc := range bg.scToSubBalancer {
		sc.Shutdown()
		delete(bg.scToSubBalancer, sc)
	}
	bg.incomingMu.Unlock()

	bg.outgoingMu.Lock()
	// Setting `outgoingClosed` ensures that no entries are added to
	// `deletedBalancerCache` after this point.
	bg.outgoingClosed = true
	bg.outgoingMu.Unlock()

	// Clear(true) runs clear function to close sub-balancers in cache. It
	// must be called out of outgoing mutex.
	if bg.deletedBalancerCache != nil {
		bg.deletedBalancerCache.Clear(true)
	}

	// Finally, stop and drop all sub-balancers still in the group.
	bg.outgoingMu.Lock()
	for id, config := range bg.idToBalancerConfig {
		config.stopBalancer()
		delete(bg.idToBalancerConfig, id)
	}
	bg.outgoingMu.Unlock()
}
   552  
   553  // ExitIdle should be invoked when the parent LB policy's ExitIdle is invoked.
   554  // It will trigger this on all sub-balancers, or reconnect their subconns if
   555  // not supported.
   556  func (bg *BalancerGroup) ExitIdle() {
   557  	bg.outgoingMu.Lock()
   558  	defer bg.outgoingMu.Unlock()
   559  	if bg.outgoingClosed {
   560  		return
   561  	}
   562  	for _, config := range bg.idToBalancerConfig {
   563  		config.exitIdle()
   564  	}
   565  }
   566  
   567  // ExitIdleOne instructs the sub-balancer `id` to exit IDLE state, if
   568  // appropriate and possible.
   569  func (bg *BalancerGroup) ExitIdleOne(id string) {
   570  	bg.outgoingMu.Lock()
   571  	defer bg.outgoingMu.Unlock()
   572  	if bg.outgoingClosed {
   573  		return
   574  	}
   575  	if config := bg.idToBalancerConfig[id]; config != nil {
   576  		config.exitIdle()
   577  	}
   578  }
   579  
   580  // ParseConfig parses a child config list and returns a LB config for the
   581  // gracefulswitch Balancer.
   582  //
   583  // cfg is expected to be a json.RawMessage containing a JSON array of LB policy
   584  // names + configs as the format of the "loadBalancingConfig" field in
   585  // ServiceConfig.  It returns a type that should be passed to
   586  // UpdateClientConnState in the BalancerConfig field.
   587  func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
   588  	return gracefulswitch.ParseConfig(cfg)
   589  }