gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/xds/internal/resolver/serviceconfig.go

/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package resolver

import (
	"context"
	"encoding/json"
	"fmt"
	"math/bits"
	"strings"
	"sync/atomic"
	"time"

	"gitee.com/ks-custle/core-gm/grpc/codes"
	"gitee.com/ks-custle/core-gm/grpc/internal/envconfig"
	"gitee.com/ks-custle/core-gm/grpc/internal/grpcrand"
	iresolver "gitee.com/ks-custle/core-gm/grpc/internal/resolver"
	"gitee.com/ks-custle/core-gm/grpc/internal/serviceconfig"
	"gitee.com/ks-custle/core-gm/grpc/internal/wrr"
	"gitee.com/ks-custle/core-gm/grpc/metadata"
	"gitee.com/ks-custle/core-gm/grpc/status"
	"gitee.com/ks-custle/core-gm/grpc/xds/internal/balancer/clustermanager"
	"gitee.com/ks-custle/core-gm/grpc/xds/internal/balancer/ringhash"
	"gitee.com/ks-custle/core-gm/grpc/xds/internal/httpfilter"
	"gitee.com/ks-custle/core-gm/grpc/xds/internal/httpfilter/router"
	"gitee.com/ks-custle/core-gm/grpc/xds/internal/xdsclient/xdsresource"
	xxhash "github.com/cespare/xxhash/v2"
)

const (
	cdsName                      = "cds_experimental"
	xdsClusterManagerName        = "xds_cluster_manager_experimental"
	clusterPrefix                = "cluster:"
	clusterSpecifierPluginPrefix = "cluster_specifier_plugin:"
)
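
// The prefixes above namespace the children of the xds_cluster_manager LB
// policy: a CDS cluster is registered as "cluster:<name>" and a cluster
// specifier plugin as "cluster_specifier_plugin:<name>", so the two kinds of
// children can never collide even when they share a name.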

type serviceConfig struct {
	LoadBalancingConfig balancerConfig `json:"loadBalancingConfig"`
}

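// balancerConfig is the JSON form of a list of LB policies: each entry is a
// single-key map from policy name to that policy's configuration, and the
// first recognized entry wins, per the standard gRPC service config
// convention.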
type balancerConfig []map[string]interface{}

func newBalancerConfig(name string, config interface{}) balancerConfig {
	return []map[string]interface{}{{name: config}}
}

type cdsBalancerConfig struct {
	Cluster string `json:"cluster"`
}

type xdsChildConfig struct {
	ChildPolicy balancerConfig `json:"childPolicy"`
}

type xdsClusterManagerConfig struct {
	Children map[string]xdsChildConfig `json:"children"`
}

// pruneActiveClusters deletes entries in r.activeClusters with zero
// references.
func (r *xdsResolver) pruneActiveClusters() {
	for cluster, ci := range r.activeClusters {
		if atomic.LoadInt32(&ci.refCount) == 0 {
			delete(r.activeClusters, cluster)
		}
	}
}

// serviceConfigJSON produces a service config in JSON format representing all
// the clusters referenced in activeClusters.  This includes clusters with zero
// references, so they must be pruned first.
func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) {
	// Generate children (all entries in activeClusters).
	children := make(map[string]xdsChildConfig)
	for cluster, ci := range activeClusters {
		children[cluster] = ci.cfg
	}

	sc := serviceConfig{
		LoadBalancingConfig: newBalancerConfig(
			xdsClusterManagerName, xdsClusterManagerConfig{Children: children},
		),
	}

	bs, err := json.Marshal(sc)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal json: %v", err)
	}
	return bs, nil
}
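
// For illustration, the JSON produced above has roughly the following shape
// (the cluster name "backend" is hypothetical):
//
//	{
//	  "loadBalancingConfig": [{
//	    "xds_cluster_manager_experimental": {
//	      "children": {
//	        "cluster:backend": {
//	          "childPolicy": [{"cds_experimental": {"cluster": "backend"}}]
//	        }
//	      }
//	    }
//	  }]
//	}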

type virtualHost struct {
	// map from filter name to its config
	httpFilterConfigOverride map[string]httpfilter.FilterConfig
	// retry policy present in virtual host
	retryConfig *xdsresource.RetryConfig
}

// routeCluster holds information about a cluster as referenced by a route.
type routeCluster struct {
	name string
	// map from filter name to its config
	httpFilterConfigOverride map[string]httpfilter.FilterConfig
}

type route struct {
	m                 *xdsresource.CompositeMatcher // converted from route matchers
	clusters          wrr.WRR                       // holds *routeCluster entries
	maxStreamDuration time.Duration
	// map from filter name to its config
	httpFilterConfigOverride map[string]httpfilter.FilterConfig
	retryConfig              *xdsresource.RetryConfig
	hashPolicies             []*xdsresource.HashPolicy
}

func (r route) String() string {
	return fmt.Sprintf("%s -> { clusters: %v, maxStreamDuration: %v }", r.m.String(), r.clusters, r.maxStreamDuration)
}

type configSelector struct {
	r                *xdsResolver
	virtualHost      virtualHost
	routes           []route
	clusters         map[string]*clusterInfo
	httpFilterConfig []xdsresource.HTTPFilter
}

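// errNoMatchedRouteFound is returned for RPCs that match none of the
// configured routes; such RPCs must fail rather than be sent to an arbitrary
// cluster.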
var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found")

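// SelectConfig implements iresolver.ConfigSelector; the gRPC channel calls it
// once per RPC to pick the matching route, choose a cluster from that route's
// weighted set, and assemble the per-RPC config (context, method config, and
// filter interceptors).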
func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	if cs == nil {
		return nil, status.Errorf(codes.Unavailable, "no valid clusters")
	}
	var rt *route
	// Loop through routes in order and select first match.
	for _, r := range cs.routes {
		if r.m.Match(rpcInfo) {
			rt = &r
			break
		}
	}
	if rt == nil || rt.clusters == nil {
		return nil, errNoMatchedRouteFound
	}

	cluster, ok := rt.clusters.Next().(*routeCluster)
	if !ok {
		return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster)
	}

	// Add a ref to the selected cluster, as this RPC needs this cluster until
	// it is committed.
	ref := &cs.clusters[cluster.name].refCount
	atomic.AddInt32(ref, 1)

	interceptor, err := cs.newInterceptor(rt, cluster)
	if err != nil {
		return nil, err
	}

	lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name)
	// Request Hashes are only applicable for a Ring Hash LB.
	if envconfig.XDSRingHash {
		lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies))
	}

	config := &iresolver.RPCConfig{
		// Communicate the chosen cluster to the LB policy, along with the
		// request hash when the ring_hash LB policy is in use.
		Context: lbCtx,
		OnCommitted: func() {
			// When the RPC is committed, the cluster is no longer required.
			// Decrease its ref.
			if v := atomic.AddInt32(ref, -1); v == 0 {
				// This entry will be removed from activeClusters when
				// producing the service config for the empty update.
				select {
				case cs.r.updateCh <- suWithError{emptyUpdate: true}:
				default:
				}
			}
		},
		Interceptor: interceptor,
	}

	if rt.maxStreamDuration != 0 {
		config.MethodConfig.Timeout = &rt.maxStreamDuration
	}
	if rt.retryConfig != nil {
		config.MethodConfig.RetryPolicy = retryConfigToPolicy(rt.retryConfig)
	} else if cs.virtualHost.retryConfig != nil {
		config.MethodConfig.RetryPolicy = retryConfigToPolicy(cs.virtualHost.retryConfig)
	}

	return config, nil
}

func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPolicy {
	return &serviceconfig.RetryPolicy{
		MaxAttempts:          int(config.NumRetries) + 1,
		InitialBackoff:       config.RetryBackoff.BaseInterval,
		MaxBackoff:           config.RetryBackoff.MaxInterval,
		BackoffMultiplier:    2,
		RetryableStatusCodes: config.RetryOn,
	}
}
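
// Note that gRPC's MaxAttempts counts the original RPC as well, so an xDS
// policy allowing NumRetries retries becomes NumRetries+1 attempts, and the
// fixed BackoffMultiplier of 2 mirrors the exponential back-off of Envoy's
// retry semantics.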

func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 {
	var hash uint64
	var generatedHash bool
	for _, policy := range hashPolicies {
		var policyHash uint64
		var generatedPolicyHash bool
		switch policy.HashPolicyType {
		case xdsresource.HashPolicyTypeHeader:
			md, ok := metadata.FromOutgoingContext(rpcInfo.Context)
			if !ok {
				continue
			}
			values := md.Get(policy.HeaderName)
			// If the header isn't present, no-op.
			if len(values) == 0 {
				continue
			}
			joinedValues := strings.Join(values, ",")
			if policy.Regex != nil {
				joinedValues = policy.Regex.ReplaceAllString(joinedValues, policy.RegexSubstitution)
			}
			policyHash = xxhash.Sum64String(joinedValues)
			generatedHash = true
			generatedPolicyHash = true
		case xdsresource.HashPolicyTypeChannelID:
			// Hash the ClientConn pointer which logically uniquely
			// identifies the client.
			policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc))
			generatedHash = true
			generatedPolicyHash = true
		}

		// Deterministically combine the hash policies. Rotating prevents
		// duplicate hash policies from cancelling each other out and preserves
		// the 64 bits of entropy.
		if generatedPolicyHash {
			hash = bits.RotateLeft64(hash, 1)
			hash = hash ^ policyHash
		}

		// If the policy is terminal and a hash has already been generated,
		// ignore the remaining policies and use the hash generated so far.
		if policy.Terminal && generatedHash {
			break
		}
	}
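
	// For example, two policies yielding h1 and h2 combine to
	// bits.RotateLeft64(h1, 1) ^ h2 (rotating the zero initial hash is a
	// no-op), so two identical policies produce RotateLeft64(h, 1) ^ h
	// rather than cancelling to zero.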

	if generatedHash {
		return hash
	}
	// If no policy generated a hash, return a random uint64. In effect this
	// maps to choosing a random backend to route the request to.
	return grpcrand.Uint64()
}

func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) {
	if len(cs.httpFilterConfig) == 0 {
		return nil, nil
	}
	interceptors := make([]iresolver.ClientInterceptor, 0, len(cs.httpFilterConfig))
	for _, filter := range cs.httpFilterConfig {
		if router.IsRouterFilter(filter.Filter) {
			// Ignore any filters after the router filter.  The router itself
			// is currently a nop.
			return &interceptorList{interceptors: interceptors}, nil
		}
		override := cluster.httpFilterConfigOverride[filter.Name] // cluster is highest priority
		if override == nil {
			override = rt.httpFilterConfigOverride[filter.Name] // route is second priority
		}
		if override == nil {
			override = cs.virtualHost.httpFilterConfigOverride[filter.Name] // VH is third & lowest priority
		}
		ib, ok := filter.Filter.(httpfilter.ClientInterceptorBuilder)
		if !ok {
			// Should not happen if it passed xdsClient validation.
			return nil, fmt.Errorf("filter does not support use in client")
		}
		i, err := ib.BuildClientInterceptor(filter.Config, override)
		if err != nil {
			return nil, fmt.Errorf("error constructing filter: %v", err)
		}
		if i != nil {
			interceptors = append(interceptors, i)
		}
	}
	return nil, fmt.Errorf("error in xds config: no router filter present")
}

// stop decrements refs of all clusters referenced by this config selector.
func (cs *configSelector) stop() {
	// The resolver's old configSelector may be nil.  Handle that here.
	if cs == nil {
		return
	}
	// If any refs drop to zero, we'll need a service config update to delete
	// the cluster.
	needUpdate := false
	// Loops over cs.clusters, but these are pointers to entries in
	// activeClusters.
	for _, ci := range cs.clusters {
		if v := atomic.AddInt32(&ci.refCount, -1); v == 0 {
			needUpdate = true
		}
	}
	// We stop the old config selector immediately after sending a new config
	// selector; we need another update to delete clusters from the config (if
	// we don't have another update pending already).
	if needUpdate {
		select {
		case cs.r.updateCh <- suWithError{emptyUpdate: true}:
		default:
		}
	}
}

// A global for testing.
var newWRR = wrr.NewRandom

// newConfigSelector creates the config selector for su; may add entries to
// r.activeClusters for previously-unseen clusters.
func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, error) {
	cs := &configSelector{
		r: r,
		virtualHost: virtualHost{
			httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride,
			retryConfig:              su.virtualHost.RetryConfig,
		},
		routes:           make([]route, len(su.virtualHost.Routes)),
		clusters:         make(map[string]*clusterInfo),
		httpFilterConfig: su.ldsConfig.httpFilterConfig,
	}

	for i, rt := range su.virtualHost.Routes {
		clusters := newWRR()
		if rt.ClusterSpecifierPlugin != "" {
			clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin
			clusters.Add(&routeCluster{
				name: clusterName,
			}, 1)
			cs.initializeCluster(clusterName, xdsChildConfig{
				ChildPolicy: balancerConfig(su.clusterSpecifierPlugins[rt.ClusterSpecifierPlugin]),
			})
		} else {
			for cluster, wc := range rt.WeightedClusters {
				clusterName := clusterPrefix + cluster
				clusters.Add(&routeCluster{
					name:                     clusterName,
					httpFilterConfigOverride: wc.HTTPFilterConfigOverride,
				}, int64(wc.Weight))
				cs.initializeCluster(clusterName, xdsChildConfig{
					ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
				})
			}
		}
		cs.routes[i].clusters = clusters

		var err error
		cs.routes[i].m, err = xdsresource.RouteToMatcher(rt)
		if err != nil {
			return nil, err
		}
		if rt.MaxStreamDuration == nil {
			cs.routes[i].maxStreamDuration = su.ldsConfig.maxStreamDuration
		} else {
			cs.routes[i].maxStreamDuration = *rt.MaxStreamDuration
		}

		cs.routes[i].httpFilterConfigOverride = rt.HTTPFilterConfigOverride
		cs.routes[i].retryConfig = rt.RetryConfig
		cs.routes[i].hashPolicies = rt.HashPolicies
	}

	// Account for this config selector's clusters.  Do this after no further
	// errors may occur.  Note: cs.clusters are pointers to entries in
	// activeClusters.
	for _, ci := range cs.clusters {
		atomic.AddInt32(&ci.refCount, 1)
	}

	return cs, nil
}

// initializeCluster initializes entries in the cs.clusters map, creating
// entries in r.activeClusters as necessary.  Any created entries start with a
// ref count of zero; newConfigSelector increments the count for every cluster
// once the selector is fully built.
func (cs *configSelector) initializeCluster(clusterName string, cfg xdsChildConfig) {
	ci := cs.r.activeClusters[clusterName]
	if ci == nil {
		ci = &clusterInfo{refCount: 0}
		cs.r.activeClusters[clusterName] = ci
	}
	cs.clusters[clusterName] = ci
	cs.clusters[clusterName].cfg = cfg
}

type clusterInfo struct {
	// number of references to this cluster; accessed atomically
	refCount int32
	// cfg is the child configuration for this cluster, containing either the
	// csp config or the cds cluster config.
	cfg xdsChildConfig
}

type interceptorList struct {
	interceptors []iresolver.ClientInterceptor
}

// NewStream runs the RPC through every interceptor in the list.  It wraps
// newStream from the inside out, so interceptors[0] ends up outermost and is
// invoked first.
func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) {
	for i := len(il.interceptors) - 1; i >= 0; i-- {
		// Capture the current newStream and interceptor in locals so that
		// each closure wraps its own layer rather than the loop variables.
		ns := newStream
		interceptor := il.interceptors[i]
		newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
			return interceptor.NewStream(ctx, ri, done, ns)
		}
	}
	return newStream(ctx, func() {})
}