google.golang.org/grpc@v1.72.2/balancer/weightedtarget/weightedaggregator/aggregator.go

/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package weightedaggregator implements the state aggregator for the
// weighted_target balancer.
//
// This is a separate package so it can be shared by weighted_target and eds.
// The eds balancer will be refactored to use weighted_target directly. After
// that, all functions and structs in this package can be moved to package
// weightedtarget and unexported.
package weightedaggregator

import (
	"errors"
	"fmt"
	"sync"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/wrr"
)

type weightedPickerState struct {
	weight uint32
	state  balancer.State
	// stateToAggregate is the connectivity state used only for state
	// aggregation. It could be different from state.ConnectivityState. For
	// example, when a sub-balancer transitions from TransientFailure to
	// Connecting, state.ConnectivityState is Connecting, but stateToAggregate
	// is still TransientFailure.
	stateToAggregate connectivity.State
}

func (s *weightedPickerState) String() string {
	return fmt.Sprintf("weight:%v,picker:%p,state:%v,stateToAggregate:%v", s.weight, s.state.Picker, s.state.ConnectivityState, s.stateToAggregate)
}

// Aggregator is the weighted balancer state aggregator.
type Aggregator struct {
	cc     balancer.ClientConn
	logger *grpclog.PrefixLogger
	newWRR func() wrr.WRR

	// csEvltr computes the aggregated connectivity state from the recorded
	// per-child state transitions.
	csEvltr *balancer.ConnectivityStateEvaluator

	mu sync.Mutex
	// If started is false, no updates should be sent to the parent cc. A closed
	// sub-balancer could still send pickers to this aggregator. This makes sure
	// that no updates will be forwarded to the parent when the whole balancer
	// group and state aggregator are closed.
	started bool
	// All balancer IDs exist as keys in this map, even if the balancer group
	// is not started.
	//
	// If an ID is not in the map, it was either removed or never added.
	idToPickerState map[string]*weightedPickerState
	// Set when UpdateState call propagation is paused.
	pauseUpdateState bool
	// Set when UpdateState call propagation is paused and an UpdateState call
	// is suppressed.
	needUpdateStateOnResume bool
}

// New creates a new weighted balancer state aggregator.
func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr.WRR) *Aggregator {
	return &Aggregator{
		cc:              cc,
		logger:          logger,
		newWRR:          newWRR,
		csEvltr:         &balancer.ConnectivityStateEvaluator{},
		idToPickerState: make(map[string]*weightedPickerState),
	}
}
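
// The following is a hypothetical usage sketch, not part of the upstream API:
// it shows how a parent balancer might construct an Aggregator, register
// weighted children, and start it. The child names and weights are invented
// for illustration; wrr.NewRandom is the random weighted-round-robin
// constructor from google.golang.org/grpc/internal/wrr.
func exampleBuildAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLogger) *Aggregator {
	wbsa := New(cc, logger, wrr.NewRandom)
	// Children start as placeholders in CONNECTING until their sub-balancers
	// report real state via UpdateState.
	wbsa.Add("cluster_a", 3)
	wbsa.Add("cluster_b", 1)
	// Until Start is called, no updates are forwarded to the parent cc.
	wbsa.Start()
	return wbsa
}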

// Start starts the aggregator. It can be called after Stop to restart the
// aggregator.
func (wbsa *Aggregator) Start() {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	wbsa.started = true
}

// Stop stops the aggregator. When the aggregator is stopped, it won't call
// the parent ClientConn to update the balancer state.
func (wbsa *Aggregator) Stop() {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	wbsa.started = false
	wbsa.clearStates()
}

// Add adds a sub-balancer state with a weight. It adds a placeholder and
// waits for the real sub-balancer to update its state.
func (wbsa *Aggregator) Add(id string, weight uint32) {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	wbsa.idToPickerState[id] = &weightedPickerState{
		weight: weight,
		// Start everything in CONNECTING, so if one of the sub-balancers
		// reports TransientFailure, the RPCs will still wait for the other
		// sub-balancers.
		state: balancer.State{
			ConnectivityState: connectivity.Connecting,
			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
		},
		stateToAggregate: connectivity.Connecting,
	}
	wbsa.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Connecting)

	wbsa.buildAndUpdateLocked()
}

// Remove removes the sub-balancer state. Future updates from this
// sub-balancer, if any, will be ignored.
func (wbsa *Aggregator) Remove(id string) {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	if _, ok := wbsa.idToPickerState[id]; !ok {
		return
	}
	// Setting the state of the deleted sub-balancer to Shutdown gets csEvltr
	// to remove the previous state from any aggregated state evaluations.
	// Transitions to and from connectivity.Shutdown are ignored by csEvltr.
	wbsa.csEvltr.RecordTransition(wbsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown)
	// Remove the id and picker from the picker map. This also causes future
	// updates for this ID to be ignored.
	delete(wbsa.idToPickerState, id)
	wbsa.buildAndUpdateLocked()
}

// UpdateWeight updates the weight for the given id. Note that this doesn't
// trigger an update to the parent ClientConn by itself. The caller should
// decide when an update is necessary and trigger it, e.g. via
// NeedUpdateStateOnResume while updates are paused.
func (wbsa *Aggregator) UpdateWeight(id string, newWeight uint32) {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	pState, ok := wbsa.idToPickerState[id]
	if !ok {
		return
	}
	pState.weight = newWeight
}

// PauseStateUpdates causes UpdateState calls to not propagate to the parent
// ClientConn. The last state will be remembered and propagated when
// ResumeStateUpdates is called.
func (wbsa *Aggregator) PauseStateUpdates() {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	wbsa.pauseUpdateState = true
	wbsa.needUpdateStateOnResume = false
}

// ResumeStateUpdates will resume propagating UpdateState calls to the parent,
// and call UpdateState on the parent if any UpdateState call was suppressed.
func (wbsa *Aggregator) ResumeStateUpdates() {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	wbsa.pauseUpdateState = false
	if wbsa.needUpdateStateOnResume {
		wbsa.cc.UpdateState(wbsa.build())
	}
}

// NeedUpdateStateOnResume sets the needUpdateStateOnResume bool to true,
// letting a picker update be sent once ResumeStateUpdates is called.
func (wbsa *Aggregator) NeedUpdateStateOnResume() {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	wbsa.needUpdateStateOnResume = true
}
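
// The following is a hypothetical sketch, not part of the upstream API: it
// shows how a parent balancer might batch a config change so that only one
// aggregated picker update reaches the parent ClientConn. The child names and
// weights are invented for illustration.
func exampleApplyConfig(wbsa *Aggregator) {
	wbsa.PauseStateUpdates()
	// UpdateWeight alone never triggers an update, so explicitly request one
	// on resume.
	wbsa.UpdateWeight("cluster_a", 5)
	wbsa.NeedUpdateStateOnResume()
	// Add and Remove would normally trigger updates, but while paused they
	// only set needUpdateStateOnResume.
	wbsa.Add("cluster_c", 2)
	wbsa.Remove("cluster_b")
	// ResumeStateUpdates sends the single suppressed update to the parent.
	wbsa.ResumeStateUpdates()
}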

// UpdateState is called to report a balancer state change from a
// sub-balancer. It's usually called by the balancer group.
//
// It calls the parent ClientConn's UpdateState with the new aggregated state.
func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) {
	wbsa.mu.Lock()
	defer wbsa.mu.Unlock()
	state, ok := wbsa.idToPickerState[id]
	if !ok {
		// All state starts with an entry in idToPickerState. If the ID is not
		// in the map, it was either removed or never existed.
		return
	}

	// If the old state is TransientFailure and the new state is Connecting,
	// don't update stateToAggregate, to prevent the aggregated state from
	// always being CONNECTING. Otherwise, stateToAggregate is the same as
	// state.ConnectivityState.
	if !(state.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) {
		wbsa.csEvltr.RecordTransition(state.stateToAggregate, newState.ConnectivityState)
		state.stateToAggregate = newState.ConnectivityState
	}
	state.state = newState

	wbsa.buildAndUpdateLocked()
}
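
// As a concrete walkthrough of the TransientFailure handling above (scenario
// invented for illustration): suppose a child is in TransientFailure and then
// reports Connecting while it retries. Its state.ConnectivityState (and thus
// its picker) becomes Connecting, but its stateToAggregate stays
// TransientFailure, so the single TransientFailure-to-Connecting transition
// cannot flip the aggregated state to CONNECTING. A subsequent report (for
// example Ready) updates stateToAggregate normally.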

// clearStates resets everything to the initial state (Connecting) but keeps
// the entries in the map (to keep the weights).
//
// Caller must hold wbsa.mu.
func (wbsa *Aggregator) clearStates() {
	for _, pState := range wbsa.idToPickerState {
		pState.state = balancer.State{
			ConnectivityState: connectivity.Connecting,
			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
		}
		pState.stateToAggregate = connectivity.Connecting
	}
}

// buildAndUpdateLocked aggregates the connectivity states of the
// sub-balancers, builds a new picker and sends an update to the parent
// ClientConn.
//
// Caller must hold wbsa.mu.
func (wbsa *Aggregator) buildAndUpdateLocked() {
	if !wbsa.started {
		return
	}
	if wbsa.pauseUpdateState {
		// If updates are paused, do not call UpdateState, but remember that we
		// need to call it when they are resumed.
		wbsa.needUpdateStateOnResume = true
		return
	}

	wbsa.cc.UpdateState(wbsa.build())
}

// build combines sub-states into one.
//
// Caller must hold wbsa.mu.
func (wbsa *Aggregator) build() balancer.State {
	wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState)

	if len(wbsa.idToPickerState) == 0 {
		// This is the case when all sub-balancers are removed.
		return balancer.State{
			ConnectivityState: connectivity.TransientFailure,
			Picker:            base.NewErrPicker(errors.New("weighted-target: no targets to pick from")),
		}
	}

	// Make sure the picker's returned error is consistent with the aggregated
	// state.
	pickers := make([]weightedPickerState, 0, len(wbsa.idToPickerState))

	switch aggState := wbsa.csEvltr.CurrentState(); aggState {
	case connectivity.Connecting:
		return balancer.State{
			ConnectivityState: aggState,
			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
		}
	case connectivity.TransientFailure:
		// This means that all sub-balancers are now in TransientFailure.
		for _, ps := range wbsa.idToPickerState {
			pickers = append(pickers, *ps)
		}
		return balancer.State{
			ConnectivityState: aggState,
			Picker:            newWeightedPickerGroup(pickers, wbsa.newWRR),
		}
	default:
		// aggState is Ready or Idle here: include only the sub-balancers that
		// are Ready in the picker group.
		for _, ps := range wbsa.idToPickerState {
			if ps.stateToAggregate == connectivity.Ready {
				pickers = append(pickers, *ps)
			}
		}
		return balancer.State{
			ConnectivityState: aggState,
			Picker:            newWeightedPickerGroup(pickers, wbsa.newWRR),
		}
	}
}
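
// For illustration, a sketch of the aggregation rules build relies on
// (implemented by balancer.ConnectivityStateEvaluator, not in this package):
// the aggregate is Ready if at least one sub-balancer is Ready; else
// Connecting if at least one is Connecting; else Idle if at least one is
// Idle; else TransientFailure. For example, with hypothetical children
// {a: Ready, b: Connecting, c: TransientFailure}, build returns a Ready state
// whose picker group contains only a.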

type weightedPickerGroup struct {
	w wrr.WRR
}

// newWeightedPickerGroup takes pickers with weights, and groups them into one
// picker.
//
// Note it only takes ready pickers. The slice shouldn't contain non-ready
// pickers.
func newWeightedPickerGroup(readyWeightedPickers []weightedPickerState, newWRR func() wrr.WRR) *weightedPickerGroup {
	w := newWRR()
	for _, ps := range readyWeightedPickers {
		w.Add(ps.state.Picker, int64(ps.weight))
	}

	return &weightedPickerGroup{
		w: w,
	}
}

func (pg *weightedPickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	// If the group is empty, Next returns nil and the type assertion fails;
	// report that no SubConn is available in that case.
	p, ok := pg.w.Next().(balancer.Picker)
	if !ok {
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return p.Pick(info)
}
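
// The following is a hypothetical sketch, not part of the upstream API: it
// builds a picker group directly to show the weighting behavior. With weights
// 3 and 1, roughly three of every four Pick calls are delegated to the first
// child's picker. The error pickers stand in for real child pickers purely
// for illustration.
func examplePickerGroup() balancer.Picker {
	ready := []weightedPickerState{
		{weight: 3, state: balancer.State{
			ConnectivityState: connectivity.Ready,
			Picker:            base.NewErrPicker(errors.New("child a")),
		}},
		{weight: 1, state: balancer.State{
			ConnectivityState: connectivity.Ready,
			Picker:            base.NewErrPicker(errors.New("child b")),
		}},
	}
	return newWeightedPickerGroup(ready, wrr.NewRandom)
}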