google.golang.org/grpc@v1.72.2/balancer/weightedroundrobin/scheduler.go

/*
 *
 * Copyright 2023 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package weightedroundrobin

import (
	"math"
)

type scheduler interface {
	nextIndex() int
}

// newScheduler uses the picker's endpoint weights to create a new scheduler
// for selecting endpoints.  It returns a round robin implementation if at
// least len(epWeights)-1 weights are zero or there is only a single endpoint;
// otherwise it returns an Earliest Deadline First (EDF) scheduler
// implementation that selects the endpoints according to their weights.
func (p *picker) newScheduler(recordMetrics bool) scheduler {
	epWeights := p.endpointWeights(recordMetrics)
	n := len(epWeights)
	if n == 0 {
		return nil
	}
	if n == 1 {
		if recordMetrics {
			rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality)
		}
		return &rrScheduler{numSCs: 1, inc: p.inc}
	}
	sum := float64(0)
	numZero := 0
	max := float64(0)
	for _, w := range epWeights {
		sum += w
		if w > max {
			max = w
		}
		if w == 0 {
			numZero++
		}
	}

	if numZero >= n-1 {
		if recordMetrics {
			rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality)
		}
		return &rrScheduler{numSCs: uint32(n), inc: p.inc}
	}
	unscaledMean := sum / float64(n-numZero)
	scalingFactor := maxWeight / max
	mean := uint16(math.Round(scalingFactor * unscaledMean))

	weights := make([]uint16, n)
	allEqual := true
	for i, w := range epWeights {
		if w == 0 {
			// Backends with weight = 0 use the mean.
			weights[i] = mean
		} else {
			scaledWeight := uint16(math.Round(scalingFactor * w))
			weights[i] = scaledWeight
			if scaledWeight != mean {
				allEqual = false
			}
		}
	}

	if allEqual {
		return &rrScheduler{numSCs: uint32(n), inc: p.inc}
	}

	logger.Infof("using edf scheduler with weights: %v", weights)
	return &edfScheduler{weights: weights, inc: p.inc}
}
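
// Illustrative sketch (not part of the upstream source): with three endpoint
// weights {0.5, 1.0, 0.25}, max is 1.0, so scalingFactor = 65535/1.0 = 65535
// and the scaled weights become {32768, 65535, 16384}. The mean of the
// non-zero weights is 1.75/3 ≈ 0.583, which scales to 38229 and would be
// substituted for any zero weight. Since the scaled weights are not all
// equal, newScheduler returns an edfScheduler rather than falling back to
// round robin.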

const maxWeight = math.MaxUint16

// edfScheduler implements EDF using the same algorithm as grpc-c++ here:
//
// https://github.com/grpc/grpc/blob/master/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc
type edfScheduler struct {
	inc     func() uint32
	weights []uint16
}

// Returns the index in s.weights for the picker to choose.
func (s *edfScheduler) nextIndex() int {
	const offset = maxWeight / 2

	for {
		idx := uint64(s.inc())

		// The sequence number (idx) is split in two: the lower %n gives the
		// index of the backend, and the rest gives the number of times we've
		// iterated through all backends. `generation` is used to
		// deterministically decide whether we pick or skip the backend on this
		// iteration, in proportion to the backend's weight.

		backendIndex := idx % uint64(len(s.weights))
		generation := idx / uint64(len(s.weights))
		weight := uint64(s.weights[backendIndex])

		// We pick a backend `weight` times per `maxWeight` generations. The
		// multiply and modulus ~evenly spread out the picks for a given
		// backend between different generations. The offset by `backendIndex`
		// helps to reduce the chance of multiple consecutive non-picks: if we
		// have two consecutive backends with an equal, say, 80% weight of the
		// max, with no offset we would see 1/5 generations that skipped both.
		// TODO(b/190488683): add test for offset efficacy.
		mod := uint64(weight*generation+backendIndex*offset) % maxWeight

		if mod < maxWeight-weight {
			continue
		}
		return int(backendIndex)
	}
}
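
// exampleEDFTrace is an illustrative sketch, not part of the upstream file:
// it drives an edfScheduler by hand to show how picks track the weights.
// With weights {65535, 32768} and a plain counter standing in for the
// picker's atomic inc func (an assumption for illustration), the first calls
// return 0, 1, 0, 0, 1: sequence number 3 is skipped because backend 1 only
// "wins" about half of the generations, so over a long run backend 0 is
// returned roughly twice as often as backend 1.
func exampleEDFTrace() []int {
	var ctr uint32
	s := &edfScheduler{
		weights: []uint16{65535, 32768},
		inc: func() uint32 {
			ctr++
			return ctr - 1
		},
	}
	picks := make([]int, 0, 5)
	for i := 0; i < 5; i++ {
		picks = append(picks, s.nextIndex())
	}
	return picks // [0 1 0 0 1]
}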

// A simple RR scheduler to use for fallback when fewer than two backends have
// non-zero weights, or all backends have the same weight, or when only one
// subconn exists.
type rrScheduler struct {
	inc    func() uint32
	numSCs uint32
}

func (s *rrScheduler) nextIndex() int {
	idx := s.inc()
	return int(idx % s.numSCs)
}
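
// Illustrative sketch (not part of the upstream source): with numSCs = 3 and
// an inc func yielding 0, 1, 2, 3, ..., nextIndex returns 0, 1, 2, 0, 1, 2,
// ..., i.e. plain round robin over the endpoints, ignoring weights entirely.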