google.golang.org/grpc@v1.72.2/internal/balancergroup/balancergroup.go

/*
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package balancergroup implements a utility struct to bind multiple balancers
// into one balancer.
package balancergroup

import (
	"encoding/json"
	"fmt"
	"sync"
	"time"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
	"google.golang.org/grpc/internal/cache"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/serviceconfig"
)

// subBalancerWrapper is used to keep the configurations that will be used to
// start the underlying balancer. It can be called to start/stop the underlying
// balancer.
//
// When the config changes, it will pass the update to the underlying balancer
// if it exists.
//
// TODO: move to a separate file?
type subBalancerWrapper struct {
	// subBalancerWrapper is passed to the sub-balancer as a ClientConn
	// wrapper, only to keep the state and picker. When the sub-balancer is
	// restarted while in the cache, the picker needs to be resent.
	//
	// It also contains the sub-balancer ID, so the parent balancer group can
	// keep track of SubConn/pickers and the sub-balancers they belong to. Some
	// of the actions are forwarded to the parent ClientConn with no change.
	// Some are forwarded to the balancer group with the sub-balancer ID.
	balancer.ClientConn
	id    string
	group *BalancerGroup

	mu    sync.Mutex
	state balancer.State

	// The static part of the sub-balancer. Keeps the balancer builder and
	// addresses, to be used when restarting the sub-balancer.
	builder balancer.Builder
	// Options to be passed to the sub-balancer at the time of creation.
	buildOpts balancer.BuildOptions
	// ccState is a cache of the addresses/balancer config, so when the balancer
	// is restarted after close, it will get the previous update. It's a pointer
	// and is set to nil at init, so when the balancer is built for the first
	// time (not a restart), it won't receive an empty update. Note that this
	// isn't reset to nil when the underlying balancer is closed.
	ccState *balancer.ClientConnState
	// The dynamic part of the sub-balancer. Only used when the balancer group
	// is started. Gets cleared when the sub-balancer is closed.
	balancer *gracefulswitch.Balancer
}

// UpdateState overrides balancer.ClientConn, to keep state and picker.
func (sbc *subBalancerWrapper) UpdateState(state balancer.State) {
	sbc.mu.Lock()
	sbc.state = state
	sbc.group.updateBalancerState(sbc.id, state)
	sbc.mu.Unlock()
}

// NewSubConn overrides balancer.ClientConn, so the balancer group can keep
// track of the relation between subconns and sub-balancers.
func (sbc *subBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	return sbc.group.newSubConn(sbc, addrs, opts)
}

func (sbc *subBalancerWrapper) updateBalancerStateWithCachedPicker() {
	sbc.mu.Lock()
	if sbc.state.Picker != nil {
		sbc.group.updateBalancerState(sbc.id, sbc.state)
	}
	sbc.mu.Unlock()
}

func (sbc *subBalancerWrapper) startBalancer() {
	if sbc.balancer == nil {
		sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts)
	}
	sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id)
	sbc.balancer.SwitchTo(sbc.builder)
	if sbc.ccState != nil {
		sbc.balancer.UpdateClientConnState(*sbc.ccState)
	}
}

// exitIdle invokes the sub-balancer's ExitIdle method. Returns a boolean
// indicating whether or not the operation was completed.
func (sbc *subBalancerWrapper) exitIdle() (complete bool) {
	b := sbc.balancer
	if b == nil {
		return true
	}
	b.ExitIdle()
	return true
}

func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) error {
	sbc.ccState = &s
	b := sbc.balancer
	if b == nil {
		// A sub-balancer is closed when it is removed from the group or when
		// the group is closed as a whole, and is not expected to receive
		// updates after that. But when used with the priority LB policy, a
		// sub-balancer (and the whole balancer group) could be closed because
		// it is a lower priority, and yet still receive address updates.
		return nil
	}
	return b.UpdateClientConnState(s)
}

func (sbc *subBalancerWrapper) resolverError(err error) {
	b := sbc.balancer
	if b == nil {
		// A sub-balancer is closed when it is removed from the group or when
		// the group is closed as a whole, and is not expected to receive
		// updates after that. But when used with the priority LB policy, a
		// sub-balancer (and the whole balancer group) could be closed because
		// it is a lower priority, and yet still receive address updates.
		return
	}
	b.ResolverError(err)
}

func (sbc *subBalancerWrapper) stopBalancer() {
	if sbc.balancer == nil {
		return
	}
	sbc.balancer.Close()
	sbc.balancer = nil
}

// BalancerGroup takes a list of balancers, each behind a gracefulswitch
// balancer, and makes them into one balancer.
//
// Note that this struct doesn't implement balancer.Balancer, because it's not
// intended to be used directly as a balancer. It's expected to be used as a
// sub-balancer manager by a high level balancer.
//
//	Updates from ClientConn are forwarded to sub-balancers
//	- service config update
//	- address update
//	- subConn state change
//	  - find the corresponding balancer and forward
//
//	Actions from sub-balancers are forwarded to parent ClientConn
//	- new/remove SubConn
//	- picker update and health states change
//	  - sub-pickers are sent to an aggregator provided by the parent, which
//	    will group them into a group-picker. The aggregated connectivity state is
//	    also handled by the aggregator.
//	- resolveNow
//
// Sub-balancers are built when they are added to the group. If the balancer
// group is closed, the sub-balancers are also closed. And it's guaranteed that
// no updates will be sent to the parent ClientConn from a closed balancer
// group.
type BalancerGroup struct {
	cc        balancer.ClientConn
	buildOpts balancer.BuildOptions
	logger    *grpclog.PrefixLogger

	// stateAggregator is where the state/picker updates will be sent to. It's
	// provided by the parent balancer, to build a picker with all the
	// sub-pickers.
	stateAggregator BalancerStateAggregator

	// outgoingMu guards all operations in the direction:
	// ClientConn-->Sub-balancer. Including start, stop, resolver updates and
	// SubConn state changes.
	//
	// The corresponding boolean outgoingClosed is used to stop further updates
	// to sub-balancers after they are closed.
	outgoingMu         sync.Mutex
	outgoingClosed     bool
	idToBalancerConfig map[string]*subBalancerWrapper
	// Cache for sub-balancers when they are removed. This is `nil` if caching
	// is disabled by passing `0` for `Options.SubBalancerCloseTimeout`.
	deletedBalancerCache *cache.TimeoutCache

	// incomingMu is to make sure this balancer group doesn't send updates to cc
	// after it's closed.
	//
	// We don't share the mutex to avoid deadlocks (e.g. a call to a
	// sub-balancer may call back into the balancer group inline, which would
	// deadlock if both calls required the same mutex).
	//
	// We should never need to hold multiple locks at the same time in this
	// struct. The case where two locks are held can only happen when the
	// underlying balancer calls back into the balancer group inline. So there's
	// an implicit lock acquisition order: outgoingMu is locked before
	// incomingMu.

	// incomingMu guards all operations in the direction:
	// Sub-balancer-->ClientConn. Including NewSubConn, RemoveSubConn. It also
	// guards the map from SubConn to balancer ID, so updateSubConnState needs
	// to hold it shortly to potentially delete from the map.
	//
	// UpdateState is called by the balancer state aggregator, and the
	// aggregator decides when and whether to call it.
	//
	// The corresponding boolean incomingClosed is used to stop further updates
	// from sub-balancers after they are closed.
	incomingMu      sync.Mutex
	incomingClosed  bool // This boolean only guards calls back to ClientConn.
	scToSubBalancer map[balancer.SubConn]*subBalancerWrapper
}

// Options wraps the arguments to be passed to the BalancerGroup ctor.
type Options struct {
	// CC is a reference to the parent balancer.ClientConn.
	CC balancer.ClientConn
	// BuildOpts contains build options to be used when creating sub-balancers.
	BuildOpts balancer.BuildOptions
	// StateAggregator is an implementation of the BalancerStateAggregator
	// interface to aggregate picker and connectivity states from sub-balancers.
	StateAggregator BalancerStateAggregator
	// Logger is a group specific prefix logger.
	Logger *grpclog.PrefixLogger
	// SubBalancerCloseTimeout is the amount of time deleted sub-balancers spend
	// in the idle cache. A value of zero here disables caching of deleted
	// sub-balancers.
	SubBalancerCloseTimeout time.Duration
}

// New creates a new BalancerGroup. Sub-balancers are built and started as they
// are added to the group.
func New(opts Options) *BalancerGroup {
	var bc *cache.TimeoutCache
	if opts.SubBalancerCloseTimeout != time.Duration(0) {
		bc = cache.NewTimeoutCache(opts.SubBalancerCloseTimeout)
	}

	return &BalancerGroup{
		cc:              opts.CC,
		buildOpts:       opts.BuildOpts,
		stateAggregator: opts.StateAggregator,
		logger:          opts.Logger,

		deletedBalancerCache: bc,
		idToBalancerConfig:   make(map[string]*subBalancerWrapper),
		scToSubBalancer:      make(map[balancer.SubConn]*subBalancerWrapper),
	}
}
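
// Illustrative usage sketch (not part of the original file): how a parent LB
// policy might wire a BalancerGroup together. The child name "cluster-1", the
// round_robin child policy, and the variables cc, buildOpts, agg, logger and
// addrs are hypothetical stand-ins for state owned by the parent policy.
//
//	bg := balancergroup.New(balancergroup.Options{
//		CC:                      cc,        // the parent's balancer.ClientConn
//		BuildOpts:               buildOpts, // balancer.BuildOptions from Build()
//		StateAggregator:         agg,       // aggregator provided by the parent
//		Logger:                  logger,
//		SubBalancerCloseTimeout: 15 * time.Second, // 0 disables caching
//	})
//	bg.Add("cluster-1", balancer.Get("round_robin"))
//	// Forward resolver updates to the child by id.
//	_ = bg.UpdateClientConnState("cluster-1", balancer.ClientConnState{
//		ResolverState: resolver.State{Addresses: addrs},
//	})
//	// ...
//	bg.Remove("cluster-1") // parked in the cache, closed after the timeout
//	bg.Close()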

// AddWithClientConn adds a balancer with the given id to the group. The
// balancer is built with a balancer builder registered with balancerName. The
// given ClientConn is passed to the newly built balancer instead of the
// one passed to balancergroup.New().
//
// TODO: Get rid of the existing Add() API and replace it with this.
func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error {
	bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id)
	builder := balancer.Get(balancerName)
	if builder == nil {
		return fmt.Errorf("balancergroup: unregistered balancer name %q", balancerName)
	}

	// Store data in the static map, after checking that the group has not
	// been closed.
	bg.outgoingMu.Lock()
	defer bg.outgoingMu.Unlock()
	if bg.outgoingClosed {
		return fmt.Errorf("balancergroup: already closed")
	}
	var sbc *subBalancerWrapper
	// Skip searching the cache if disabled.
	if bg.deletedBalancerCache != nil {
		if old, ok := bg.deletedBalancerCache.Remove(id); ok {
			if bg.logger.V(2) {
				bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id)
				bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
			}

			sbc, _ = old.(*subBalancerWrapper)
			if sbc != nil && sbc.builder != builder {
				// If the sub-balancer in the cache was built with a different
				// balancer builder, don't use it. Clean up the old balancer
				// and behave as if the sub-balancer was not found in the
				// cache.
				//
				// NOTE that this will also drop the cached addresses for this
				// sub-balancer, which seems reasonable.
				sbc.stopBalancer()
				// cleanupSubConns must be done before the new balancer starts,
				// otherwise new SubConns created by the new balancer might be
				// removed by mistake.
				bg.cleanupSubConns(sbc)
				sbc = nil
			}
		}
	}
	if sbc == nil {
		sbc = &subBalancerWrapper{
			ClientConn: cc,
			id:         id,
			group:      bg,
			builder:    builder,
			buildOpts:  bg.buildOpts,
		}
		sbc.startBalancer()
	} else {
		// When bringing back a sub-balancer from the cache, re-send the cached
		// picker and state.
		sbc.updateBalancerStateWithCachedPicker()
	}
	bg.idToBalancerConfig[id] = sbc
	return nil
}

// Add adds a balancer built by builder to the group, with given id.
func (bg *BalancerGroup) Add(id string, builder balancer.Builder) {
	bg.AddWithClientConn(id, builder.Name(), bg.cc)
}

// Remove removes the balancer with id from the group.
//
// The balancer is not closed immediately: it is kept in a cache, and the
// cleanup work (closing the sub-balancer and removing subconns) is done when
// the cache timeout fires. If caching is disabled, the balancer is closed and
// cleaned up immediately.
func (bg *BalancerGroup) Remove(id string) {
	bg.logger.Infof("Removing child policy for child %q", id)

	bg.outgoingMu.Lock()
	if bg.outgoingClosed {
		bg.outgoingMu.Unlock()
		return
	}

	sbToRemove, ok := bg.idToBalancerConfig[id]
	if !ok {
		bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id)
		bg.outgoingMu.Unlock()
		return
	}

	// Unconditionally remove the sub-balancer config from the map.
	delete(bg.idToBalancerConfig, id)

	if bg.deletedBalancerCache != nil {
		if bg.logger.V(2) {
			bg.logger.Infof("Adding child policy for child %q to the balancer cache", id)
			bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
		}

		bg.deletedBalancerCache.Add(id, sbToRemove, func() {
			if bg.logger.V(2) {
				bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id)
				bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
			}

			// A sub-balancer evicted from the timeout cache needs to be
			// closed and its subConns need to be removed, unconditionally.
			// There is a possibility that a sub-balancer might be removed
			// (thereby moving it to the cache) around the same time that the
			// balancergroup is closed, and by the time we get here the
			// balancergroup might be closed. Skipping the cleanup in that
			// case (e.g. by checking `outgoingClosed`) would leak the
			// sub-balancer.
			bg.outgoingMu.Lock()
			sbToRemove.stopBalancer()
			bg.outgoingMu.Unlock()
			bg.cleanupSubConns(sbToRemove)
		})
		bg.outgoingMu.Unlock()
		return
	}

	// Remove the sub-balancer with immediate effect if we are not caching.
	sbToRemove.stopBalancer()
	bg.outgoingMu.Unlock()
	bg.cleanupSubConns(sbToRemove)
}
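
// Illustrative sketch (not part of the original file): with a non-zero
// SubBalancerCloseTimeout, a child that is removed and re-added under the same
// id before the timeout fires is revived from the cache, together with its
// SubConns and cached picker, instead of being rebuilt from scratch (see
// AddWithClientConn). "cluster-1" and round_robin are hypothetical.
//
//	bg.Remove("cluster-1")                           // parked in deletedBalancerCache
//	bg.Add("cluster-1", balancer.Get("round_robin")) // revived from the cache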

// Remove(id) doesn't do cleanup for the sub-balancer. This function does the
// cleanup after the timeout.
func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) {
	bg.incomingMu.Lock()
	defer bg.incomingMu.Unlock()
	// Remove SubConns. This is only done after the balancer is
	// actually closed.
	//
	// NOTE: if NewSubConn is called by this (closed) balancer later, the
	// SubConn will be leaked. This shouldn't happen if the balancer
	// implementation is correct. To make sure this never happens, we need to
	// add another layer (balancer manager) between balancer group and the
	// sub-balancers.
	for sc, b := range bg.scToSubBalancer {
		if b == config {
			delete(bg.scToSubBalancer, sc)
		}
	}
}

// connect attempts to connect to all subConns belonging to sb.
func (bg *BalancerGroup) connect(sb *subBalancerWrapper) {
	bg.incomingMu.Lock()
	defer bg.incomingMu.Unlock()
	if bg.incomingClosed {
		return
	}
	for sc, b := range bg.scToSubBalancer {
		if b == sb {
			sc.Connect()
		}
	}
}

// Following are actions from the parent grpc.ClientConn, forwarded to
// sub-balancers.

// updateSubConnState forwards the update to cb and updates scToSubBalancer if
// needed.
func (bg *BalancerGroup) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
	bg.incomingMu.Lock()
	if bg.incomingClosed {
		bg.incomingMu.Unlock()
		return
	}
	if _, ok := bg.scToSubBalancer[sc]; !ok {
		bg.incomingMu.Unlock()
		return
	}
	if state.ConnectivityState == connectivity.Shutdown {
		// Only delete sc from the map when state changed to Shutdown.
		delete(bg.scToSubBalancer, sc)
	}
	bg.incomingMu.Unlock()

	bg.outgoingMu.Lock()
	defer bg.outgoingMu.Unlock()
	if bg.outgoingClosed {
		return
	}
	if cb != nil {
		cb(state)
	}
}

// UpdateSubConnState is not expected to be called. SubConn state updates are
// delivered via the StateListener installed in newSubConn, so a call to this
// method indicates a bug.
func (bg *BalancerGroup) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	bg.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
}

// UpdateClientConnState handles the ClientConnState (including balancer config
// and addresses) from the resolver. It finds the balancer and forwards the
// update.
func (bg *BalancerGroup) UpdateClientConnState(id string, s balancer.ClientConnState) error {
	bg.outgoingMu.Lock()
	defer bg.outgoingMu.Unlock()
	if bg.outgoingClosed {
		return nil
	}
	if config, ok := bg.idToBalancerConfig[id]; ok {
		return config.updateClientConnState(s)
	}
	return nil
}

// ResolverError forwards resolver errors to all sub-balancers.
func (bg *BalancerGroup) ResolverError(err error) {
	bg.outgoingMu.Lock()
	defer bg.outgoingMu.Unlock()
	if bg.outgoingClosed {
		return
	}
	for _, config := range bg.idToBalancerConfig {
		config.resolverError(err)
	}
}

// Following are actions from sub-balancers, forwarded to ClientConn.

// newSubConn forwards the call to the parent ClientConn, and also records the
// mapping from SubConn to sub-balancer, so that state updates can find the
// right balancer.
//
// One note about removing SubConns: the removal is only forwarded to the
// ClientConn; the entry is deleted from the map only when the state changes to
// Shutdown. Since it's just forwarding the action, there's no need for a
// removeSubConn() wrapper function.
func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	// NOTE: if the balancer with this id was already removed, this should also
	// return an error. But since we call stopBalancer when removing the
	// balancer, this shouldn't happen.
	bg.incomingMu.Lock()
	if bg.incomingClosed {
		bg.incomingMu.Unlock()
		return nil, fmt.Errorf("balancergroup: NewSubConn is called after balancer group is closed")
	}
	var sc balancer.SubConn
	oldListener := opts.StateListener
	opts.StateListener = func(state balancer.SubConnState) { bg.updateSubConnState(sc, state, oldListener) }
	sc, err := bg.cc.NewSubConn(addrs, opts)
	if err != nil {
		bg.incomingMu.Unlock()
		return nil, err
	}
	bg.scToSubBalancer[sc] = config
	bg.incomingMu.Unlock()
	return sc, nil
}

// updateBalancerState forwards the new state to the balancer state aggregator.
// The aggregator will create an aggregated picker and an aggregated
// connectivity state, then forward them to the ClientConn.
func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) {
	bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state)

	// Send the new state to the aggregator, without holding incomingMu.
	// incomingMu protects all calls to the parent ClientConn; this update
	// doesn't necessarily trigger a call to the ClientConn, and should already
	// be protected by the aggregator's mutex if necessary.
	if bg.stateAggregator != nil {
		bg.stateAggregator.UpdateState(id, state)
	}
}

// Close closes the balancer. It stops sub-balancers, and removes the subconns.
// When a BalancerGroup is closed, it can not receive further address updates.
func (bg *BalancerGroup) Close() {
	bg.incomingMu.Lock()
	bg.incomingClosed = true
	// Also remove all SubConns.
	for sc := range bg.scToSubBalancer {
		sc.Shutdown()
		delete(bg.scToSubBalancer, sc)
	}
	bg.incomingMu.Unlock()

	bg.outgoingMu.Lock()
	// Setting `outgoingClosed` ensures that no entries are added to
	// `deletedBalancerCache` after this point.
	bg.outgoingClosed = true
	bg.outgoingMu.Unlock()

	// Clear(true) runs the clear function to close the sub-balancers in the
	// cache. It must be called outside the outgoing mutex.
	if bg.deletedBalancerCache != nil {
		bg.deletedBalancerCache.Clear(true)
	}

	bg.outgoingMu.Lock()
	for id, config := range bg.idToBalancerConfig {
		config.stopBalancer()
		delete(bg.idToBalancerConfig, id)
	}
	bg.outgoingMu.Unlock()
}

// ExitIdle should be invoked when the parent LB policy's ExitIdle is invoked.
// It triggers ExitIdle on all sub-balancers, or reconnects their subconns if
// ExitIdle is not supported.
func (bg *BalancerGroup) ExitIdle() {
	bg.outgoingMu.Lock()
	defer bg.outgoingMu.Unlock()
	if bg.outgoingClosed {
		return
	}
	for _, config := range bg.idToBalancerConfig {
		if !config.exitIdle() {
			bg.connect(config)
		}
	}
}

// ExitIdleOne instructs the sub-balancer `id` to exit IDLE state, if
// appropriate and possible.
func (bg *BalancerGroup) ExitIdleOne(id string) {
	bg.outgoingMu.Lock()
	defer bg.outgoingMu.Unlock()
	if bg.outgoingClosed {
		return
	}
	if config := bg.idToBalancerConfig[id]; config != nil {
		if !config.exitIdle() {
			bg.connect(config)
		}
	}
}

// ParseConfig parses a child config list and returns a LB config for the
// gracefulswitch Balancer.
//
// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
// names and configs, in the format of the "loadBalancingConfig" field in
// ServiceConfig. It returns a type that should be passed to
// UpdateClientConnState in the BalancerConfig field.
func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
	return gracefulswitch.ParseConfig(cfg)
}
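
// Illustrative sketch (not part of the original file): parsing a hypothetical
// child config in the "loadBalancingConfig" format and passing the result to a
// child through the BalancerConfig field, as described above.
//
//	lbCfg, err := balancergroup.ParseConfig(json.RawMessage(`[{"round_robin": {}}]`))
//	if err != nil {
//		// Handle the parse error.
//	}
//	_ = bg.UpdateClientConnState("cluster-1", balancer.ClientConnState{
//		BalancerConfig: lbCfg,
//	})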