istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pilot/pkg/networking/core/cluster.go

// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package core

import (
	"fmt"
	"net"
	"strconv"
	"strings"

	cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
	discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
	"google.golang.org/protobuf/types/known/structpb"
	wrappers "google.golang.org/protobuf/types/known/wrapperspb"

	meshconfig "istio.io/api/mesh/v1alpha1"
	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/networking/core/envoyfilter"
	"istio.io/istio/pilot/pkg/networking/util"
	"istio.io/istio/pilot/pkg/serviceregistry/provider"
	"istio.io/istio/pilot/pkg/util/protoconv"
	"istio.io/istio/pilot/pkg/xds/endpoints"
	"istio.io/istio/pkg/config/host"
	"istio.io/istio/pkg/config/protocol"
	"istio.io/istio/pkg/config/schema/kind"
	"istio.io/istio/pkg/log"
	"istio.io/istio/pkg/security"
	netutil "istio.io/istio/pkg/util/net"
	"istio.io/istio/pkg/util/sets"
)

// deltaConfigTypes are used to detect changes and trigger delta calculations. When a config update has ONLY
// entries in this map, delta calculation is triggered.
var deltaConfigTypes = sets.New(kind.ServiceEntry.String(), kind.DestinationRule.String())
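
// For example, a push whose ConfigsUpdated contains only ServiceEntry and
// DestinationRule keys takes the delta path in BuildDeltaClusters below, while a
// push that also includes, say, a VirtualService update falls back to full
// generation (see shouldUseDelta and deltaAwareConfigTypes).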

// BuildClusters returns the list of clusters for the given proxy. This is the CDS output.
// For outbound: a cluster for each service/subset hostname or CIDR, with SNI set to the service hostname.
// Cluster type is based on resolution.
// For inbound (sidecar only): a cluster for each inbound endpoint port and for each service port.
func (configgen *ConfigGeneratorImpl) BuildClusters(proxy *model.Proxy, req *model.PushRequest) ([]*discovery.Resource, model.XdsLogDetails) {
	// In SotW (state-of-the-world), we care about all services.
	var services []*model.Service
	if features.FilterGatewayClusterConfig && proxy.Type == model.Router {
		services = req.Push.GatewayServices(proxy)
	} else {
		services = proxy.SidecarScope.Services()
	}
	return configgen.buildClusters(proxy, req, services)
}

// BuildDeltaClusters generates the deltas (adds and deletes) for a given proxy. Currently, only service changes are reflected with deltas.
// Otherwise, we fall back to generating everything.
func (configgen *ConfigGeneratorImpl) BuildDeltaClusters(proxy *model.Proxy, updates *model.PushRequest,
	watched *model.WatchedResource,
) ([]*discovery.Resource, []string, model.XdsLogDetails, bool) {
	// if we can't use delta, fall back to generating everything
	if !shouldUseDelta(updates) {
		cl, lg := configgen.BuildClusters(proxy, updates)
		return cl, nil, lg, false
	}

	deletedClusters := sets.New[string]()
	var services []*model.Service
	// Holds clusters per service, keyed by hostname.
	serviceClusters := make(map[string]sets.String)
	// Holds service ports, keyed by hostname. The inner map maps a port to its cluster name.
	// This is mainly used when a service is updated and a port has been removed.
	servicePortClusters := make(map[string]map[int]string)
	// Holds subset clusters per service, keyed by hostname.
	subsetClusters := make(map[string]sets.String)

	for _, cluster := range watched.ResourceNames {
		// WatchedResources.ResourceNames contains the names of the clusters the proxy is subscribed to. We can
		// cross-check these with the name of our service (cluster names are in the format outbound|<port>|<subset>|<hostname>).
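		// As an illustrative sketch (hypothetical hostname), a subscription to
		// "outbound|9080|v2|reviews.default.svc.cluster.local" parses into
		// dir=outbound, subset="v2", svcHost="reviews.default.svc.cluster.local", port=9080.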
		dir, subset, svcHost, port := model.ParseSubsetKey(cluster)
		// Inbound clusters don't have svcHost in their format, so don't add them to serviceClusters.
		if dir == model.TrafficDirectionInbound {
			// Append all inbound clusters because in both SotW and delta we always build all inbound clusters.
			// In reality, the delta building is only for outbound clusters. We need to revisit this once we support delta for inbound.
			// This way, deletedClusters.Difference(builtClusters) gives us the correct deleted inbound clusters.
			deletedClusters.Insert(cluster)
		} else {
			if subset == "" {
				sets.InsertOrNew(serviceClusters, string(svcHost), cluster)
			} else {
				sets.InsertOrNew(subsetClusters, string(svcHost), cluster)
			}
			if servicePortClusters[string(svcHost)] == nil {
				servicePortClusters[string(svcHost)] = make(map[int]string)
			}
			servicePortClusters[string(svcHost)][port] = cluster
		}
	}

	for key := range updates.ConfigsUpdated {
		// deleted clusters for this config.
		var deleted []string
		var svcs []*model.Service
		switch key.Kind {
		case kind.ServiceEntry:
			svcs, deleted = configgen.deltaFromServices(key, proxy, updates.Push, serviceClusters,
				servicePortClusters, subsetClusters)
		case kind.DestinationRule:
			svcs, deleted = configgen.deltaFromDestinationRules(key, proxy, subsetClusters)
		}
		services = append(services, svcs...)
		deletedClusters.InsertAll(deleted...)
	}
	clusters, log := configgen.buildClusters(proxy, updates, services)
	// deletedClusters contains all subset clusters for the deleted or updated DestinationRules.
	// When clusters are rebuilt, we rebuild the subset clusters as well, so we know which
	// subset clusters are really needed. If a deleted cluster is not rebuilt, it is really deleted.
	builtClusters := sets.New[string]()
	for _, c := range clusters {
		builtClusters.Insert(c.Name)
	}
	// Remove anything we built from the deleted list
	deletedClusters = deletedClusters.DifferenceInPlace(builtClusters)
	return clusters, sets.SortedList(deletedClusters), log, true
}

// deltaFromServices computes the delta clusters from the updated services.
func (configgen *ConfigGeneratorImpl) deltaFromServices(key model.ConfigKey, proxy *model.Proxy, push *model.PushContext,
	serviceClusters map[string]sets.String, servicePortClusters map[string]map[int]string, subsetClusters map[string]sets.String,
) ([]*model.Service, []string) {
	var deletedClusters []string
	var services []*model.Service
	service := push.ServiceForHostname(proxy, host.Name(key.Name))
	// push.ServiceForHostname will return nil if the proxy doesn't care about the service OR it was deleted.
	// We can cross-reference with WatchedResources to figure out which services were deleted.
	if service == nil {
		// We assume the service was deleted and delete all clusters for that service.
		deletedClusters = append(deletedClusters, serviceClusters[key.Name].UnsortedList()...)
		deletedClusters = append(deletedClusters, subsetClusters[key.Name].UnsortedList()...)
	} else {
		// Service exists. If the service update changed its ports, we need to remove the corresponding port clusters.
		services = append(services, service)
		for port, cluster := range servicePortClusters[service.Hostname.String()] {
			// If this service port was removed, we can conclude that its cluster was removed as well.
			if _, exists := service.Ports.GetByPort(port); !exists {
				deletedClusters = append(deletedClusters, cluster)
			}
		}
	}
	return services, deletedClusters
}
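
// For example (hypothetical service), if "foo.default.svc.cluster.local" previously
// exposed ports 8080 and 9090 and the updated ServiceEntry drops 9090, the watched
// cluster "outbound|9090||foo.default.svc.cluster.local" lands in deletedClusters,
// while port 8080's cluster is simply rebuilt.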

// deltaFromDestinationRules computes the delta clusters from the updated destination rules.
func (configgen *ConfigGeneratorImpl) deltaFromDestinationRules(updatedDr model.ConfigKey, proxy *model.Proxy,
	subsetClusters map[string]sets.String,
) ([]*model.Service, []string) {
	var deletedClusters []string
	var services []*model.Service
	cfg := proxy.SidecarScope.DestinationRuleByName(updatedDr.Name, updatedDr.Namespace)
	if cfg == nil {
		// The DestinationRule was deleted. Find matching services from the previous DestinationRule.
		prevCfg := proxy.PrevSidecarScope.DestinationRuleByName(updatedDr.Name, updatedDr.Namespace)
		if prevCfg == nil {
			log.Debugf("Prev DestinationRule from PrevSidecarScope is missing for %s/%s", updatedDr.Namespace, updatedDr.Name)
			return nil, nil
		}
		dr := prevCfg.Spec.(*networking.DestinationRule)
		services = append(services, proxy.SidecarScope.ServicesForHostname(host.Name(dr.Host))...)
	} else {
		dr := cfg.Spec.(*networking.DestinationRule)
		// The DestinationRule was updated. Find matching services from the updated DestinationRule.
		services = append(services, proxy.SidecarScope.ServicesForHostname(host.Name(dr.Host))...)
		// If the DestinationRule host changed, we also need to add the services matching the previous host.
		prevCfg := proxy.PrevSidecarScope.DestinationRuleByName(updatedDr.Name, updatedDr.Namespace)
		if prevCfg != nil {
			prevDr := prevCfg.Spec.(*networking.DestinationRule)
			if dr.Host != prevDr.Host {
				services = append(services, proxy.SidecarScope.ServicesForHostname(host.Name(prevDr.Host))...)
			}
		}
	}

	// Remove all matched service subsets. When we rebuild clusters, we will rebuild the subset clusters as well.
	// We can reconcile the actual subsets that are needed when we rebuild the clusters.
	for _, matchedSvc := range services {
		if subsetClusters[matchedSvc.Hostname.String()] != nil {
			deletedClusters = append(deletedClusters, subsetClusters[matchedSvc.Hostname.String()].UnsortedList()...)
		}
	}
	return services, deletedClusters
}
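
// For example (hypothetical rule), deleting a DestinationRule that defined subsets v1 and v2
// for "reviews.default.svc.cluster.local" marks "outbound|9080|v1|reviews.default.svc.cluster.local"
// and the v2 equivalent as deleted; BuildDeltaClusters then rebuilds the service's clusters and
// filters any still-needed subset clusters back out of the deleted set.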

// buildClusters builds clusters for the proxy with the services passed.
func (configgen *ConfigGeneratorImpl) buildClusters(proxy *model.Proxy, req *model.PushRequest,
	services []*model.Service,
) ([]*discovery.Resource, model.XdsLogDetails) {
	clusters := make([]*cluster.Cluster, 0)
	resources := model.Resources{}
	envoyFilterPatches := req.Push.EnvoyFilters(proxy)
	cb := NewClusterBuilder(proxy, req, configgen.Cache)
	instances := proxy.ServiceTargets
	cacheStats := cacheStats{}
	switch proxy.Type {
	case model.SidecarProxy:
		// Setup outbound clusters
		outboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_OUTBOUND}
		ob, cs := configgen.buildOutboundClusters(cb, proxy, outboundPatcher, services)
		cacheStats = cacheStats.merge(cs)
		resources = append(resources, ob...)
		// Add a blackhole and passthrough cluster for catching traffic to unresolved routes
		clusters = outboundPatcher.conditionallyAppend(clusters, nil, cb.buildBlackHoleCluster(), cb.buildDefaultPassthroughCluster())
		clusters = append(clusters, outboundPatcher.insertedClusters()...)
		// Setup inbound clusters
		inboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_INBOUND}
		clusters = append(clusters, configgen.buildInboundClusters(cb, proxy, instances, inboundPatcher)...)
		if proxy.EnableHBONEListen() {
			clusters = append(clusters, configgen.buildInboundHBONEClusters())
		}
		// Passthrough clusters for inbound traffic. These clusters bind a loopback-ish source address to access node-local services.
		clusters = inboundPatcher.conditionallyAppend(clusters, nil, cb.buildInboundPassthroughClusters()...)
		clusters = append(clusters, inboundPatcher.insertedClusters()...)
	case model.Waypoint:
		_, wps := findWaypointResources(proxy, req.Push)
		// Waypoint proxies do not need outbound clusters in most cases, unless we have a route pointing to something
		outboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_OUTBOUND}
		ob, cs := configgen.buildOutboundClusters(cb, proxy, outboundPatcher, filterWaypointOutboundServices(
			req.Push.ServicesAttachedToMesh(), wps.services, req.Push.ExtraWaypointServices(proxy), services))
		cacheStats = cacheStats.merge(cs)
		resources = append(resources, ob...)
		// Setup inbound clusters
		inboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_INBOUND}
		clusters = append(clusters, configgen.buildWaypointInboundClusters(cb, proxy, req.Push, wps.services)...)
		clusters = append(clusters, inboundPatcher.insertedClusters()...)
	default: // Gateways
		patcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_GATEWAY}
		ob, cs := configgen.buildOutboundClusters(cb, proxy, patcher, services)
		cacheStats = cacheStats.merge(cs)
		resources = append(resources, ob...)
		// Gateways do not require the default passthrough cluster as they do not have original dst listeners.
		clusters = patcher.conditionallyAppend(clusters, nil, cb.buildBlackHoleCluster())
		if proxy.Type == model.Router && proxy.MergedGateway != nil && proxy.MergedGateway.ContainsAutoPassthroughGateways {
			clusters = append(clusters, configgen.buildOutboundSniDnatClusters(proxy, req, patcher)...)
		}
		clusters = append(clusters, patcher.insertedClusters()...)
	}

	// The OutboundTunnel cluster is needed for sidecars and gateways.
	if features.EnableHBONESend && proxy.Type != model.Waypoint && bool(!proxy.Metadata.DisableHBONESend) {
		clusters = append(clusters, cb.buildConnectOriginate(proxy, req.Push, nil))
	}

	// If the credential socket exists, create a cluster for it.
	if proxy.Metadata != nil && proxy.Metadata.Raw[security.CredentialMetaDataName] == "true" {
		clusters = append(clusters, cb.buildExternalSDSCluster(security.CredentialNameSocketPath))
	}
	for _, c := range clusters {
		resources = append(resources, &discovery.Resource{Name: c.Name, Resource: protoconv.MessageToAny(c)})
	}
	resources = cb.normalizeClusters(resources)

	if cacheStats.empty() {
		return resources, model.DefaultXdsLogDetails
	}
	return resources, model.XdsLogDetails{AdditionalInfo: fmt.Sprintf("cached:%v/%v", cacheStats.hits, cacheStats.hits+cacheStats.miss)}
}
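
// For example, if 10 of 15 outbound clusters were served from the CDS cache, the
// XDS log line carries the additional detail "cached:10/15" (hits over hits+misses).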

func shouldUseDelta(updates *model.PushRequest) bool {
	return updates != nil && deltaAwareConfigTypes(updates.ConfigsUpdated) && len(updates.ConfigsUpdated) > 0
}

// deltaAwareConfigTypes returns true if all updated configs are delta enabled.
func deltaAwareConfigTypes(cfgs sets.Set[model.ConfigKey]) bool {
	for k := range cfgs {
		if !deltaConfigTypes.Contains(k.Kind.String()) {
			return false
		}
	}
	return true
}

// buildOutboundClusters generates all outbound (including subsets) clusters for a given proxy.
func (configgen *ConfigGeneratorImpl) buildOutboundClusters(cb *ClusterBuilder, proxy *model.Proxy, cp clusterPatcher,
	services []*model.Service,
) ([]*discovery.Resource, cacheStats) {
	resources := make([]*discovery.Resource, 0)
	efKeys := cp.efw.KeysApplyingTo(networking.EnvoyFilter_CLUSTER)
	hit, miss := 0, 0
	for _, service := range services {
		if service.Resolution == model.Alias {
			continue
		}
		for _, port := range service.Ports {
			if port.Protocol == protocol.UDP {
				continue
			}
			clusterKey := buildClusterKey(service, port, cb, proxy, efKeys)
			cached, allFound := cb.getAllCachedSubsetClusters(clusterKey)
			if allFound && !features.EnableUnsafeAssertions {
				hit += len(cached)
				resources = append(resources, cached...)
				continue
			}
			miss += len(cached)

			// We have a cache miss, so we will re-generate the cluster and later store it in the cache.
			var lbEndpoints []*endpoint.LocalityLbEndpoints
			if clusterKey.endpointBuilder != nil {
				lbEndpoints = clusterKey.endpointBuilder.FromServiceEndpoints()
			}

			// create the default cluster
			discoveryType := convertResolution(cb.proxyType, service)
			defaultCluster := cb.buildCluster(clusterKey.clusterName, discoveryType, lbEndpoints, model.TrafficDirectionOutbound, port, service, nil, "")
			if defaultCluster == nil {
				continue
			}

			// If the service uses persistent sessions, the override status allows
			// DRAINING endpoints to be kept as 'UNHEALTHY' coarse status in Envoy.
			// They will not be used for normal traffic, only when an explicit override selects them.
			if service.Attributes.Labels[features.PersistentSessionLabel] != "" {
				// The default is UNKNOWN, HEALTHY, DEGRADED. Without this change, Envoy will drop endpoints with any other
				// status received in EDS. With this setting, the DRAINING and UNHEALTHY endpoints are kept - both marked
				// as UNHEALTHY ('coarse state'), which is what will show in config dumps.
				// DRAINING/UNHEALTHY will not be used normally for new requests. They will be used if a cookie/header
				// selects them.
				defaultCluster.cluster.CommonLbConfig.OverrideHostStatus = &core.HealthStatusSet{
					Statuses: []core.HealthStatus{
						core.HealthStatus_HEALTHY,
						core.HealthStatus_DRAINING, core.HealthStatus_UNKNOWN, core.HealthStatus_DEGRADED,
					},
				}
			}

			subsetClusters := cb.applyDestinationRule(defaultCluster, DefaultClusterMode, service, port,
				clusterKey.endpointBuilder, clusterKey.destinationRule.GetRule(), clusterKey.serviceAccounts)

			if patched := cp.patch(nil, defaultCluster.build()); patched != nil {
				resources = append(resources, patched)
				if features.EnableCDSCaching {
					cb.cache.Add(&clusterKey, cb.req, patched)
				}
			}
			for _, ss := range subsetClusters {
				if patched := cp.patch(nil, ss); patched != nil {
					resources = append(resources, patched)
					if features.EnableCDSCaching {
						nk := clusterKey
						nk.clusterName = ss.Name
						cb.cache.Add(&nk, cb.req, patched)
					}
				}
			}
		}
	}

	return resources, cacheStats{hits: hit, miss: miss}
}

type clusterPatcher struct {
	efw  *model.EnvoyFilterWrapper
	pctx networking.EnvoyFilter_PatchContext
}

func (p clusterPatcher) patch(hosts []host.Name, c *cluster.Cluster) *discovery.Resource {
	cluster := p.doPatch(hosts, c)
	if cluster == nil {
		return nil
	}
	return &discovery.Resource{Name: cluster.Name, Resource: protoconv.MessageToAny(cluster)}
}

func (p clusterPatcher) doPatch(hosts []host.Name, c *cluster.Cluster) *cluster.Cluster {
	if !envoyfilter.ShouldKeepCluster(p.pctx, p.efw, c, hosts) {
		return nil
	}
	return envoyfilter.ApplyClusterMerge(p.pctx, p.efw, c, hosts)
}

func (p clusterPatcher) conditionallyAppend(l []*cluster.Cluster, hosts []host.Name, clusters ...*cluster.Cluster) []*cluster.Cluster {
	if !p.hasPatches() {
		return append(l, clusters...)
	}
	for _, c := range clusters {
		if patched := p.doPatch(hosts, c); patched != nil {
			l = append(l, patched)
		}
	}
	return l
}

func (p clusterPatcher) insertedClusters() []*cluster.Cluster {
	return envoyfilter.InsertedClusters(p.pctx, p.efw)
}

func (p clusterPatcher) hasPatches() bool {
	return p.efw != nil && len(p.efw.Patches[networking.EnvoyFilter_CLUSTER]) > 0
}
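
// In short: when no EnvoyFilter CLUSTER patches apply (hasPatches is false),
// conditionallyAppend is a plain append; otherwise each cluster is first filtered
// through ShouldKeepCluster and then merged via ApplyClusterMerge before being kept.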

// SniDnat clusters do not have any TLS settings, as they simply forward traffic to the upstream.
// All SniDnat clusters are internal services in the mesh.
// TODO enable caching - there are no blockers here, it was skipped to simplify the original caching implementation
func (configgen *ConfigGeneratorImpl) buildOutboundSniDnatClusters(proxy *model.Proxy, req *model.PushRequest,
	cp clusterPatcher,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)
	cb := NewClusterBuilder(proxy, req, nil)

	for _, service := range proxy.SidecarScope.Services() {
		if service.MeshExternal {
			continue
		}

		destRule := proxy.SidecarScope.DestinationRule(model.TrafficDirectionOutbound, proxy, service.Hostname)
		for _, port := range service.Ports {
			if port.Protocol == protocol.UDP {
				continue
			}

			// create the default cluster
			discoveryType := convertResolution(cb.proxyType, service)
			clusterName := model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "",
				service.Hostname, port.Port)

			var lbEndpoints []*endpoint.LocalityLbEndpoints
			var endpointBuilder *endpoints.EndpointBuilder
			if service.Resolution == model.DNSLB || service.Resolution == model.DNSRoundRobinLB {
				endpointBuilder = endpoints.NewCDSEndpointBuilder(proxy, cb.req.Push,
					clusterName, model.TrafficDirectionOutbound, "", service.Hostname, port.Port,
					service, destRule,
				)
				lbEndpoints = endpointBuilder.FromServiceEndpoints()
			}

			defaultCluster := cb.buildCluster(clusterName, discoveryType, lbEndpoints, model.TrafficDirectionOutbound, port, service, nil, "")
			if defaultCluster == nil {
				continue
			}
			subsetClusters := cb.applyDestinationRule(defaultCluster, SniDnatClusterMode, service, port, endpointBuilder, destRule.GetRule(), nil)
			clusters = cp.conditionallyAppend(clusters, nil, defaultCluster.build())
			clusters = cp.conditionallyAppend(clusters, nil, subsetClusters...)
		}
	}

	return clusters
}
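
// As a sketch (hypothetical hostname), BuildDNSSrvSubsetKey produces names of the form
// "outbound_.9080_._.productpage.default.svc.cluster.local", the underscore-delimited
// variant of the usual "outbound|9080||..." key, used for auto-passthrough gateways.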

func buildInboundLocalityLbEndpoints(bind string, port uint32) []*endpoint.LocalityLbEndpoints {
	if bind == "" {
		return nil
	}
	address := util.BuildAddress(bind, port)
	lbEndpoint := &endpoint.LbEndpoint{
		HostIdentifier: &endpoint.LbEndpoint_Endpoint{
			Endpoint: &endpoint.Endpoint{
				Address: address,
			},
		},
	}
	return []*endpoint.LocalityLbEndpoints{
		{
			LbEndpoints: []*endpoint.LbEndpoint{lbEndpoint},
		},
	}
}
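
// For example, buildInboundLocalityLbEndpoints("127.0.0.1", 8080) yields a single
// locality with one endpoint at 127.0.0.1:8080, while an empty bind yields nil,
// which downstream cluster building treats as ORIGINAL_DST-style passthrough
// (see the defaultEndpoint comment in buildInboundClustersFromSidecar).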

func buildInboundClustersFromServiceInstances(cb *ClusterBuilder, proxy *model.Proxy,
	instances []model.ServiceTarget, cp clusterPatcher,
	enableSidecarServiceInboundListenerMerge bool,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)
	_, actualLocalHosts := getWildcardsAndLocalHost(proxy.GetIPMode())
	clustersToBuild := make(map[int][]model.ServiceTarget)

	ingressPortListSet := sets.New[int]()
	sidecarScope := proxy.SidecarScope
	if enableSidecarServiceInboundListenerMerge && sidecarScope.HasIngressListener() {
		ingressPortListSet = getSidecarIngressPortList(proxy)
	}
	for _, instance := range instances {
		// For service instances with the same port, we still need to capture all the instances on
		// this port, as it is required to populate telemetry metadata.
		// The first instance is used as the "primary" instance; this means that if there is a conflict
		// between Services, the first one wins.
		port := int(instance.Port.TargetPort)
		clustersToBuild[port] = append(clustersToBuild[port], instance)
	}

	bind := actualLocalHosts[0]
	if cb.req.Push.Mesh.GetInboundTrafficPolicy().GetMode() == meshconfig.MeshConfig_InboundTrafficPolicy_PASSTHROUGH {
		bind = ""
	}
	// For each workload port, we will construct a cluster
	for epPort, instances := range clustersToBuild {
		if ingressPortListSet.Contains(int(instances[0].Port.TargetPort)) {
			// If a port is declared in both the service and the sidecar ingress, the sidecar ingress wins
			// and we keep the other service ports.
			// E.g. with ports 1,2,3 in the service and 3,4 in the sidecar ingress,
			// we still generate listeners for 1,2,3,4, where 3 is taken from the sidecar ingress.
			// The port is present in the sidecar ingress listener, so let the sidecar take precedence.
			continue
		}
		localCluster := cb.buildInboundCluster(epPort, bind, proxy, instances[0], instances)
		// If the inbound cluster match has a service, we should see if it matches any host name across all instances.
		hosts := make([]host.Name, 0, len(instances))
		for _, si := range instances {
			hosts = append(hosts, si.Service.Hostname)
		}
		clusters = cp.conditionallyAppend(clusters, hosts, localCluster.build())
	}
	return clusters
}

func (configgen *ConfigGeneratorImpl) buildInboundClusters(cb *ClusterBuilder, proxy *model.Proxy, instances []model.ServiceTarget,
	cp clusterPatcher,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)

	// The inbound clusters for a node depend on whether the node has a SidecarScope with inbound listeners
	// or not. If the node has a SidecarScope with ingress listeners, we only return clusters corresponding
	// to those listeners, i.e. clusters made out of the defaultEndpoint field.
	// If the node has no SidecarScope and has interception mode set to NONE, then we should skip the inbound
	// clusters, because there would be no corresponding inbound listeners.
	sidecarScope := proxy.SidecarScope
	noneMode := proxy.GetInterceptionMode() == model.InterceptionNone
	// No user-supplied sidecar scope, or the user-supplied one has no ingress listeners
	if !sidecarScope.HasIngressListener() {
		// We should not create inbound listeners in NONE mode based on the service instances.
		// Doing so will prevent the workloads from starting as they would be listening on the same port.
		// Users are required to provide the sidecar config to define the inbound listeners.
		if noneMode {
			return nil
		}
		clusters = buildInboundClustersFromServiceInstances(cb, proxy, instances, cp, false)
		return clusters
	}

	if features.EnableSidecarServiceInboundListenerMerge {
		// Only merge inbound listeners if the sidecar has an ingress listener and pilot has the
		// environment flag EnableSidecarServiceInboundListenerMerge set.
		clusters = buildInboundClustersFromServiceInstances(cb, proxy, instances, cp, true)
	}
	clusters = append(clusters, buildInboundClustersFromSidecar(cb, proxy, instances, cp)...)
	return clusters
}

func buildInboundClustersFromSidecar(cb *ClusterBuilder, proxy *model.Proxy,
	instances []model.ServiceTarget, cp clusterPatcher,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)
	_, actualLocalHosts := getWildcardsAndLocalHost(proxy.GetIPMode())
	sidecarScope := proxy.SidecarScope
	for _, ingressListener := range sidecarScope.Sidecar.Ingress {
		// LDS would have set up the inbound clusters
		// as inbound|portNumber|portName|Hostname[or]SidecarScopeID
		listenPort := &model.Port{
			Port:     int(ingressListener.Port.Number),
			Protocol: protocol.Parse(ingressListener.Port.Protocol),
			Name:     ingressListener.Port.Name,
		}

		// Set up the endpoint. By default, we set this empty, which will use ORIGINAL_DST passthrough.
		// This can be overridden by ingress.defaultEndpoint.
		// * 127.0.0.1: send to localhost
		// * 0.0.0.0: send to INSTANCE_IP
		// * unix:///...: send to the configured unix domain socket
		endpointAddress := ""
		port := 0
		if strings.HasPrefix(ingressListener.DefaultEndpoint, model.UnixAddressPrefix) {
			// This is a UDS endpoint; assign it as-is.
			endpointAddress = ingressListener.DefaultEndpoint
		} else if len(ingressListener.DefaultEndpoint) > 0 {
			// Parse the IP and port. Validation guarantees the presence of ':'.
			hostIP, hostPort, hostErr := net.SplitHostPort(ingressListener.DefaultEndpoint)
			if hostPort == "" || hostErr != nil {
				continue
			}
			var err error
			if port, err = strconv.Atoi(hostPort); err != nil {
				continue
			}
			if hostIP == model.PodIPAddressPrefix {
				for _, proxyIPAddr := range cb.proxyIPAddresses {
					if netutil.IsIPv4Address(proxyIPAddr) {
						endpointAddress = proxyIPAddr
						break
					}
				}
				// if there is no IPv4 address in proxyIPAddresses
				if endpointAddress == "" {
					endpointAddress = model.LocalhostAddressPrefix
				}
			} else if hostIP == model.PodIPv6AddressPrefix {
				for _, proxyIPAddr := range cb.proxyIPAddresses {
					if netutil.IsIPv6Address(proxyIPAddr) {
						endpointAddress = proxyIPAddr
						break
					}
				}
				// if there is no IPv6 address in proxyIPAddresses
				if endpointAddress == "" {
					endpointAddress = model.LocalhostIPv6AddressPrefix
				}
			} else if hostIP == model.LocalhostAddressPrefix {
				// prefer 127.0.0.1 to ::1, but if given no option choose ::1
				ipV6EndpointAddress := ""
				for _, host := range actualLocalHosts {
					if netutil.IsIPv4Address(host) {
						endpointAddress = host
						break
					}
					if netutil.IsIPv6Address(host) {
						ipV6EndpointAddress = host
					}
				}
				if endpointAddress == "" {
					endpointAddress = ipV6EndpointAddress
				}
			} else if hostIP == model.LocalhostIPv6AddressPrefix {
				// prefer ::1 to 127.0.0.1, but if given no option choose 127.0.0.1
				ipV4EndpointAddress := ""
				for _, host := range actualLocalHosts {
					if netutil.IsIPv6Address(host) {
						endpointAddress = host
						break
					}
					if netutil.IsIPv4Address(host) {
						ipV4EndpointAddress = host
					}
				}
				if endpointAddress == "" {
					endpointAddress = ipV4EndpointAddress
				}
			}
		}
		// Find the service instance that corresponds to this ingress listener by looking
		// for a service instance that matches this ingress port, as this will allow us
		// to generate the right cluster name that LDS expects: inbound|portNumber|portName|Hostname
		svc := findOrCreateService(instances, ingressListener, sidecarScope.Name, sidecarScope.Namespace)
		endpoint := model.ServiceTarget{
			Service: svc,
			Port: model.ServiceInstancePort{
				ServicePort: listenPort,
				TargetPort:  uint32(port),
			},
		}
		localCluster := cb.buildInboundCluster(int(ingressListener.Port.Number), endpointAddress, proxy, endpoint, nil)
		clusters = cp.conditionallyAppend(clusters, []host.Name{endpoint.Service.Hostname}, localCluster.build())
	}
	return clusters
}

func findOrCreateService(instances []model.ServiceTarget,
	ingressListener *networking.IstioIngressListener, sidecar string, sidecarns string,
) *model.Service {
	for _, realInstance := range instances {
		if realInstance.Port.TargetPort == ingressListener.Port.Number {
			return realInstance.Service
		}
	}
	// We didn't find a matching instance. Create a dummy one because we need the right
	// params to generate the right cluster name, i.e. inbound|portNumber|portName|SidecarScopeID - which is uniformly generated by LDS/CDS.
	return &model.Service{
		Hostname: host.Name(sidecar + "." + sidecarns),
		Attributes: model.ServiceAttributes{
			Name: sidecar,
			// This will ensure that the right AuthN policies are selected
			Namespace: sidecarns,
		},
	}
}
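
// For example (hypothetical names), a Sidecar "default-sidecar" in namespace "foo"
// with an ingress port matching no service instance yields a dummy Service with
// hostname "default-sidecar.foo", just enough to build the expected cluster name.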

func convertResolution(proxyType model.NodeType, service *model.Service) cluster.Cluster_DiscoveryType {
	switch service.Resolution {
	case model.ClientSideLB:
		return cluster.Cluster_EDS
	case model.DNSLB:
		return cluster.Cluster_STRICT_DNS
	case model.DNSRoundRobinLB:
		return cluster.Cluster_LOGICAL_DNS
	case model.Passthrough:
		// Gateways cannot use passthrough clusters, so fall back to EDS
		if proxyType == model.Router {
			return cluster.Cluster_EDS
		}
		if service.Attributes.ServiceRegistry == provider.Kubernetes && features.EnableEDSForHeadless {
			return cluster.Cluster_EDS
		}
		return cluster.Cluster_ORIGINAL_DST
	default:
		return cluster.Cluster_EDS
	}
}

// ClusterMode defines whether the cluster is being built for SNI-DNATing (SNI passthrough) or not
type ClusterMode string

const (
	// SniDnatClusterMode indicates the cluster is being built for SNI DNAT mode
	SniDnatClusterMode ClusterMode = "sni-dnat"
	// DefaultClusterMode indicates a usual cluster with mTLS et al.
	DefaultClusterMode ClusterMode = "outbound"
)

type buildClusterOpts struct {
	mesh            *meshconfig.MeshConfig
	mutable         *clusterWrapper
	policy          *networking.TrafficPolicy
	port            *model.Port
	serviceAccounts []string
	serviceTargets  []model.ServiceTarget
	// Used for traffic across multiple network clusters;
	// the east-west gateway in a remote cluster will use this value to route
	// traffic to the appropriate service.
	istioMtlsSni    string
	clusterMode     ClusterMode
	direction       model.TrafficDirection
	meshExternal    bool
	serviceMTLSMode model.MutualTLSMode
	// Indicates the service registry of the cluster being built.
	serviceRegistry provider.ID
	// Indicates if the destinationRule has a workloadSelector
	isDrWithSelector bool
}

func applyTCPKeepalive(mesh *meshconfig.MeshConfig, c *cluster.Cluster, tcp *networking.ConnectionPoolSettings_TCPSettings) {
	// Apply mesh-wide TCP keepalive if available.
	setKeepAliveSettings(c, mesh.TcpKeepalive)

	// Apply/override individual attributes with DestinationRule TCP keepalive if set.
	if tcp != nil {
		setKeepAliveSettings(c, tcp.TcpKeepalive)
	}
}

func setKeepAliveSettings(c *cluster.Cluster, keepalive *networking.ConnectionPoolSettings_TCPSettings_TcpKeepalive) {
	if keepalive == nil {
		return
	}
	// Start with an empty tcp_keepalive, which would set SO_KEEPALIVE on the socket with OS default values.
	if c.UpstreamConnectionOptions == nil {
		c.UpstreamConnectionOptions = &cluster.UpstreamConnectionOptions{
			TcpKeepalive: &core.TcpKeepalive{},
		}
	}
	if keepalive.Probes > 0 {
		c.UpstreamConnectionOptions.TcpKeepalive.KeepaliveProbes = &wrappers.UInt32Value{Value: keepalive.Probes}
	}

	if keepalive.Time != nil {
		c.UpstreamConnectionOptions.TcpKeepalive.KeepaliveTime = &wrappers.UInt32Value{Value: uint32(keepalive.Time.Seconds)}
	}

	if keepalive.Interval != nil {
		c.UpstreamConnectionOptions.TcpKeepalive.KeepaliveInterval = &wrappers.UInt32Value{Value: uint32(keepalive.Interval.Seconds)}
	}
}
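
// For example (hypothetical DestinationRule values), connectionPool.tcp.tcpKeepalive
// with time=300s, interval=75s, probes=9 yields keepalive_time=300, keepalive_interval=75
// and keepalive_probes=9 on the cluster's upstream_connection_options.tcp_keepalive.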

// buildServiceMetadata builds a struct containing service metadata, which is added into the cluster metadata.
func buildServiceMetadata(svc *model.Service) *structpb.Value {
	return &structpb.Value{
		Kind: &structpb.Value_StructValue{
			StructValue: &structpb.Struct{
				Fields: map[string]*structpb.Value{
					// service FQDN
					"host": {
						Kind: &structpb.Value_StringValue{
							StringValue: string(svc.Hostname),
						},
					},
					// short name of the service
					"name": {
						Kind: &structpb.Value_StringValue{
							StringValue: svc.Attributes.Name,
						},
					},
					// namespace of the service
					"namespace": {
						Kind: &structpb.Value_StringValue{
							StringValue: svc.Attributes.Namespace,
						},
					},
				},
			},
		},
	}
}
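
// For example (hypothetical service), the resulting value rendered in a config dump
// looks like: {"host": "reviews.default.svc.cluster.local", "name": "reviews", "namespace": "default"}.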

func getOrCreateIstioMetadata(cluster *cluster.Cluster) *structpb.Struct {
	if cluster.Metadata == nil {
		cluster.Metadata = &core.Metadata{
			FilterMetadata: map[string]*structpb.Struct{},
		}
	}
	// Create Istio metadata if it does not exist yet
	if _, ok := cluster.Metadata.FilterMetadata[util.IstioMetadataKey]; !ok {
		cluster.Metadata.FilterMetadata[util.IstioMetadataKey] = &structpb.Struct{
			Fields: map[string]*structpb.Value{},
		}
	}
	return cluster.Metadata.FilterMetadata[util.IstioMetadataKey]
}
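
// A minimal usage sketch (illustrative; the "services" key mirrors how callers attach
// buildServiceMetadata output, visible in config dumps under metadata.filter_metadata.istio):
//
//	md := getOrCreateIstioMetadata(c)
//	md.Fields["services"] = &structpb.Value{Kind: &structpb.Value_ListValue{
//		ListValue: &structpb.ListValue{Values: []*structpb.Value{buildServiceMetadata(svc)}},
//	}}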