github.com/bazelbuild/remote-apis-sdks@v0.0.0-20240425170053-8a36686a6350/go/pkg/balancer/gcp_picker.go

package balancer

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
	"sync"

	pb "github.com/bazelbuild/remote-apis-sdks/go/pkg/balancer/proto"
	"google.golang.org/grpc/balancer"
)

// newGCPPicker returns a picker that distributes RPCs over the given READY
// subconns, creating and binding subconns through gb as needed.
func newGCPPicker(readySCRefs []*subConnRef, gb *gcpBalancer) balancer.Picker {
	return &gcpPicker{
		gcpBalancer: gb,
		scRefs:      readySCRefs,
		poolCfg:     nil,
	}
}

type gcpPicker struct {
	gcpBalancer *gcpBalancer
	mu          sync.Mutex
	scRefs      []*subConnRef
	poolCfg     *poolConfig
}

// Pick selects a subconn for the RPC described by info, taking the affinity
// configuration attached to the RPC context into account.
func (p *gcpPicker) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
	if len(p.scRefs) <= 0 {
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}

	p.mu.Lock()
	defer p.mu.Unlock()

	gcpCtx, hasGcpCtx := info.Ctx.Value(gcpKey).(*gcpContext)
	boundKey := ""

	if hasGcpCtx {
		if p.poolCfg == nil {
			// Initialize poolConfig for picker.
			p.poolCfg = gcpCtx.poolCfg
		}
		affinity := gcpCtx.affinityCfg
		if affinity != nil {
			locator := affinity.GetAffinityKey()
			cmd := affinity.GetCommand()
			if cmd == pb.AffinityConfig_BOUND || cmd == pb.AffinityConfig_UNBIND {
				a, err := getAffinityKeyFromMessage(locator, gcpCtx.reqMsg)
				if err != nil {
					return balancer.PickResult{}, fmt.Errorf(
						"failed to retrieve affinity key from request message: %w", err)
				}
				boundKey = a
			}
		}
	}

	var scRef *subConnRef
	scRef, err = p.getSubConnRef(boundKey)
	if err != nil {
		return balancer.PickResult{}, err
	}
	result.SubConn = scRef.subConn
	scRef.streamsIncr()

	// Define a callback to post-process once the call is done.
	result.Done = func(info balancer.DoneInfo) {
		if info.Err == nil {
			if hasGcpCtx {
				affinity := gcpCtx.affinityCfg
				locator := affinity.GetAffinityKey()
				cmd := affinity.GetCommand()
				if cmd == pb.AffinityConfig_BIND {
					// Bind the affinity key found in the reply to this subconn.
					bindKey, err := getAffinityKeyFromMessage(locator, gcpCtx.replyMsg)
					if err == nil {
						p.gcpBalancer.bindSubConn(bindKey, scRef.subConn)
					}
				} else if cmd == pb.AffinityConfig_UNBIND {
					p.gcpBalancer.unbindSubConn(boundKey)
				}
			}
		}
		scRef.streamsDecr()
	}
	return result, nil
}

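// Illustrative sketch, not part of the original file: assuming an
// AffinityConfig whose affinity key locator is "name" (a hypothetical field),
// the three commands interact with Pick roughly as follows:
//
//	// BIND:   the key is read from the reply; the Done callback binds the
//	//         chosen subconn, e.g. bindSubConn(reply.Name, scRef.subConn).
//	// BOUND:  the key is read from the request; Pick routes to the subconn
//	//         previously bound, via getReadySubConnRef(req.Name).
//	// UNBIND: the key is read from the request; the Done callback releases
//	//         the binding via unbindSubConn(req.Name).
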
// getSubConnRef returns a subConnRef whose subconn is ready to be used by the
// picker: the subconn bound to boundKey if one exists, otherwise the least
// busy ready subconn.
func (p *gcpPicker) getSubConnRef(boundKey string) (*subConnRef, error) {
	if boundKey != "" {
		if ref, ok := p.gcpBalancer.getReadySubConnRef(boundKey); ok {
			return ref, nil
		}
	}

	// Sort ready subconns by the number of active streams, least busy first.
	sort.Slice(p.scRefs, func(i, j int) bool {
		return p.scRefs[i].getStreamsCnt() < p.scRefs[j].getStreamsCnt()
	})

	// If the least busy connection still has capacity, use it.
	if len(p.scRefs) > 0 && p.scRefs[0].getStreamsCnt() < int32(p.poolCfg.maxStream) {
		return p.scRefs[0], nil
	}

	if p.poolCfg.maxConn == 0 || p.gcpBalancer.getConnectionPoolSize() < int(p.poolCfg.maxConn) {
		// Ask the balancer to create a new subconn when all current subconns are
		// busy and the connection pool still has capacity (either unlimited or
		// maxConn is not reached).
		p.gcpBalancer.newSubConn()

		// Let this picker return ErrNoSubConnAvailable because it needs some time
		// for the new subconn to become READY.
		return nil, balancer.ErrNoSubConnAvailable
	}

	if len(p.scRefs) == 0 {
		return nil, balancer.ErrNoSubConnAvailable
	}

	// The pool has no capacity left and every connection has reached the soft
	// limit, so pick the least busy one anyway.
	return p.scRefs[0], nil
}

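// Illustrative sketch, not part of the original file: with a hypothetical
// poolConfig of {maxStream: 100, maxConn: 2}, getSubConnRef("") behaves
// roughly as follows:
//
//	// streams per subconn [100, 42]              -> the subconn with 42 streams
//	// streams [100, 100], pool size 1 < maxConn  -> newSubConn() is requested and
//	//   ErrNoSubConnAvailable is returned until the new subconn is READY
//	// streams [100, 100], pool size 2 == maxConn -> the least busy subconn
//	//   anyway; maxStream is only a soft limit
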
// getAffinityKeyFromMessage retrieves the affinity key from a proto message
// using the key locator defined in the affinity config.
func getAffinityKeyFromMessage(
	locator string,
	msg interface{},
) (affinityKey string, err error) {
	if locator == "" {
		return "", fmt.Errorf("empty affinityKey locator")
	}
	names := strings.Split(locator, ".")

	val := reflect.ValueOf(msg).Elem()

	// Walk the intermediate fields in names, i.e. all but the last one.
	// strings.Title capitalizes the first letter of each segment to match the
	// exported Go field name.
	for _, name := range names[:len(names)-1] {
		valField := val.FieldByName(strings.Title(name))
		// Dereference pointer fields; reflect.Value.Elem panics on plain
		// struct values, so only pointers are dereferenced here.
		if valField.Kind() == reflect.Ptr {
			valField = valField.Elem()
		}
		if valField.Kind() != reflect.Struct {
			return "", fmt.Errorf("invalid locator path for %v", locator)
		}
		val = valField
	}

	valField := val.FieldByName(strings.Title(names[len(names)-1]))
	if valField.Kind() != reflect.String {
		return "", fmt.Errorf("cannot get string value from %v", locator)
	}
	return valField.String(), nil
}

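// Illustrative sketch, not part of the original file: the request and session
// types below are hypothetical. A locator of "session.name" resolves the
// exported fields Session and Name by reflection:
//
//	type session struct{ Name string }
//	type request struct{ Session *session }
//
//	key, err := getAffinityKeyFromMessage("session.name", &request{
//		Session: &session{Name: "projects/p/sessions/s1"},
//	})
//	// key == "projects/p/sessions/s1", err == nil
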
// newErrPicker returns a picker that always returns err on Pick().
func newErrPicker(err error) balancer.Picker {
	return &errPicker{err: err}
}

type errPicker struct {
	err error // Pick() always returns this err.
}

func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{}, p.err
}
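
// Illustrative usage, not part of the original file:
//
//	p := newErrPicker(balancer.ErrNoSubConnAvailable)
//	_, err := p.Pick(balancer.PickInfo{})
//	// err == balancer.ErrNoSubConnAvailable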