istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pilot/pkg/networking/core/cluster_builder.go

// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package core

import (
	"fmt"
	"time"

	cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
	http "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
	discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
	"google.golang.org/protobuf/proto"
	anypb "google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/structpb"
	wrappers "google.golang.org/protobuf/types/known/wrapperspb"

	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/networking/telemetry"
	"istio.io/istio/pilot/pkg/networking/util"
	networkutil "istio.io/istio/pilot/pkg/util/network"
	"istio.io/istio/pilot/pkg/util/protoconv"
	"istio.io/istio/pilot/pkg/xds/endpoints"
	xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
	v3 "istio.io/istio/pilot/pkg/xds/v3"
	"istio.io/istio/pkg/config"
	"istio.io/istio/pkg/config/host"
	"istio.io/istio/pkg/log"
	"istio.io/istio/pkg/security"
	"istio.io/istio/pkg/util/sets"
)

// passthroughHttpProtocolOptions are http protocol options used for pass through clusters.
// nolint
// revive:disable-next-line
var passthroughHttpProtocolOptions = protoconv.MessageToAny(&http.HttpProtocolOptions{
	CommonHttpProtocolOptions: &core.HttpProtocolOptions{
		IdleTimeout: durationpb.New(5 * time.Minute),
	},
	UpstreamProtocolOptions: &http.HttpProtocolOptions_UseDownstreamProtocolConfig{
		UseDownstreamProtocolConfig: &http.HttpProtocolOptions_UseDownstreamHttpConfig{
			HttpProtocolOptions:  &core.Http1ProtocolOptions{},
			Http2ProtocolOptions: http2ProtocolOptions(),
		},
	},
})

// clusterWrapper wraps Cluster object along with upstream protocol options.
type clusterWrapper struct {
	cluster *cluster.Cluster
	// httpProtocolOptions stores the HttpProtocolOptions which will be marshaled when build is called.
	httpProtocolOptions *http.HttpProtocolOptions
}

// metadataCerts hosts client certificate related metadata specified in proxy metadata.
type metadataCerts struct {
	// tlsClientCertChain is the absolute path to client cert-chain file
	tlsClientCertChain string
	// tlsClientKey is the absolute path to client private key file
	tlsClientKey string
	// tlsClientRootCert is the absolute path to client root cert file
	tlsClientRootCert string
}
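// For reference, passthroughHttpProtocolOptions above marshals to roughly the
// following typed_extension_protocol_options entry in the emitted cluster (a
// sketch; field names follow envoy.extensions.upstreams.http.v3.HttpProtocolOptions,
// and the 300s value is the 5-minute idle timeout set above):
//
//	envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
//	  common_http_protocol_options:
//	    idle_timeout: 300s
//	  use_downstream_protocol_config:
//	    http_protocol_options: {}
//	    http2_protocol_options: {}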
// ClusterBuilder provides an abstraction for building Envoy Clusters.
type ClusterBuilder struct {
	// Proxy related information used to build clusters.
	serviceTargets     []model.ServiceTarget // Service targets of Proxy.
	metadataCerts      *metadataCerts        // Client certificates specified in metadata.
	clusterID          string                // Cluster in which the proxy is running.
	proxyID            string                // Identifier that uniquely identifies a proxy.
	proxyVersion       string                // Version of Proxy.
	proxyType          model.NodeType        // Indicates whether the proxy is sidecar or gateway.
	sidecarScope       *model.SidecarScope   // Computed sidecar scope for the proxy.
	passThroughBindIPs []string              // Passthrough IPs to be used while building clusters.
	supportsIPv4       bool                  // Whether the proxy's IPs include an IPv4 address.
	supportsIPv6       bool                  // Whether the proxy's IPs include an IPv6 address.
	sendHbone          bool                  // Whether the proxy supports HBONE.
	locality           *core.Locality        // Locality information of proxy.
	proxyLabels        map[string]string     // Proxy labels.
	proxyView          model.ProxyView       // Proxy view of endpoints.
	proxyIPAddresses   []string              // IP addresses the proxy is listening on.
	configNamespace    string                // Proxy config namespace.
	// PushRequest to look for updates.
	req                   *model.PushRequest
	cache                 model.XdsCache
	credentialSocketExist bool
}

// NewClusterBuilder builds an instance of ClusterBuilder.
func NewClusterBuilder(proxy *model.Proxy, req *model.PushRequest, cache model.XdsCache) *ClusterBuilder {
	cb := &ClusterBuilder{
		serviceTargets:     proxy.ServiceTargets,
		proxyID:            proxy.ID,
		proxyType:          proxy.Type,
		proxyVersion:       proxy.Metadata.IstioVersion,
		sidecarScope:       proxy.SidecarScope,
		passThroughBindIPs: getPassthroughBindIPs(proxy.GetIPMode()),
		supportsIPv4:       proxy.SupportsIPv4(),
		supportsIPv6:       proxy.SupportsIPv6(),
		sendHbone:          features.EnableHBONESend || proxy.IsWaypointProxy(),
		locality:           proxy.Locality,
		proxyLabels:        proxy.Labels,
		proxyView:          proxy.GetView(),
		proxyIPAddresses:   proxy.IPAddresses,
		configNamespace:    proxy.ConfigNamespace,
		req:                req,
		cache:              cache,
	}
	if proxy.Metadata != nil {
		if proxy.Metadata.TLSClientCertChain != "" {
			cb.metadataCerts = &metadataCerts{
				tlsClientCertChain: proxy.Metadata.TLSClientCertChain,
				tlsClientKey:       proxy.Metadata.TLSClientKey,
				tlsClientRootCert:  proxy.Metadata.TLSClientRootCert,
			}
		}
		cb.clusterID = string(proxy.Metadata.ClusterID)
		if proxy.Metadata.Raw[security.CredentialMetaDataName] == "true" {
			cb.credentialSocketExist = true
		}
	}
	return cb
}

func (m *metadataCerts) String() string {
	return m.tlsClientCertChain + "~" + m.tlsClientKey + "~" + m.tlsClientRootCert
}

// newClusterWrapper initializes clusterWrapper with the cluster passed.
func newClusterWrapper(cluster *cluster.Cluster) *clusterWrapper {
	return &clusterWrapper{
		cluster: cluster,
	}
}
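// As an illustration, with the conventional sidecar cert paths (the values here
// are hypothetical; the real ones come from proxy metadata), metadataCerts.String
// yields:
//
//	/etc/certs/cert-chain.pem~/etc/certs/key.pem~/etc/certs/root-cert.pem
//
// The "~"-joined form serves as an opaque identity for the certificate set and
// is not parsed back into its parts.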
// sidecarProxy returns true if the clusters are being built for a sidecar proxy, otherwise false.
func (cb *ClusterBuilder) sidecarProxy() bool {
	return cb.proxyType == model.SidecarProxy
}

func (cb *ClusterBuilder) buildSubsetCluster(
	opts buildClusterOpts, destRule *config.Config, subset *networking.Subset, service *model.Service,
	endpointBuilder *endpoints.EndpointBuilder,
) *cluster.Cluster {
	opts.serviceMTLSMode = cb.req.Push.BestEffortInferServiceMTLSMode(subset.GetTrafficPolicy(), service, opts.port)
	var subsetClusterName string
	var defaultSni string
	if opts.clusterMode == DefaultClusterMode {
		subsetClusterName = model.BuildSubsetKey(model.TrafficDirectionOutbound, subset.Name, service.Hostname, opts.port.Port)
		defaultSni = model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, subset.Name, service.Hostname, opts.port.Port)
	} else {
		subsetClusterName = model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, subset.Name, service.Hostname, opts.port.Port)
	}
	// Clusters with discovery type STATIC or STRICT_DNS rely on the cluster.LoadAssignment field.
	// ServiceEntries need to filter hosts based on subset.labels in order to perform weighted routing.
	var lbEndpoints []*endpoint.LocalityLbEndpoints

	isPassthrough := subset.GetTrafficPolicy().GetLoadBalancer().GetSimple() == networking.LoadBalancerSettings_PASSTHROUGH
	clusterType := opts.mutable.cluster.GetType()
	if isPassthrough {
		clusterType = cluster.Cluster_ORIGINAL_DST
	}
	if !(isPassthrough || clusterType == cluster.Cluster_EDS) {
		lbEndpoints = endpointBuilder.WithSubset(subset.Name).FromServiceEndpoints()
	}

	subsetCluster := cb.buildCluster(subsetClusterName, clusterType, lbEndpoints, model.TrafficDirectionOutbound, opts.port, service, nil, subset.Name)
	if subsetCluster == nil {
		return nil
	}

	// Apply traffic policy for the subset cluster with the destination rule traffic policy.
	opts.mutable = subsetCluster
	opts.istioMtlsSni = defaultSni

	// If the subset has a traffic policy, apply it so that it overrides the destination rule traffic policy.
	opts.policy = util.MergeSubsetTrafficPolicy(opts.policy, subset.TrafficPolicy, opts.port)

	if destRule != nil {
		destinationRule := CastDestinationRule(destRule)
		opts.isDrWithSelector = destinationRule.GetWorkloadSelector() != nil
	}
	// Apply traffic policy for the subset cluster.
	cb.applyTrafficPolicy(opts)

	maybeApplyEdsConfig(subsetCluster.cluster)

	cb.applyMetadataExchange(opts.mutable.cluster)

	// Add the DestinationRule+subsets metadata. Metadata here is generated on a per-cluster
	// basis in buildCluster, so we can just insert without a copy.
	subsetCluster.cluster.Metadata = util.AddConfigInfoMetadata(subsetCluster.cluster.Metadata, destRule.Meta)
	util.AddSubsetToMetadata(subsetCluster.cluster.Metadata, subset.Name)
	subsetCluster.cluster.Metadata = util.AddALPNOverrideToMetadata(subsetCluster.cluster.Metadata, opts.policy.GetTls().GetMode())
	return subsetCluster.build()
}
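// To make the two naming schemes above concrete: for subset "v1" of host
// "reviews.default.svc.cluster.local" on port 9080 (an illustrative service),
// the two key builders produce
//
//	model.BuildSubsetKey(...)       -> outbound|9080|v1|reviews.default.svc.cluster.local
//	model.BuildDNSSrvSubsetKey(...) -> outbound_.9080_.v1_.reviews.default.svc.cluster.local
//
// The DNS-SRV-style form doubles as the default SNI for Istio mTLS in
// DefaultClusterMode, and as the cluster name itself in SniDnatClusterMode.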
// applyDestinationRule applies the destination rule if it exists for the Service.
// It returns any subset clusters created as it applies the destination rule.
func (cb *ClusterBuilder) applyDestinationRule(mc *clusterWrapper, clusterMode ClusterMode, service *model.Service,
	port *model.Port, eb *endpoints.EndpointBuilder, destRule *config.Config, serviceAccounts []string,
) []*cluster.Cluster {
	destinationRule := CastDestinationRule(destRule)
	// Merge applicable port level traffic policy settings.
	trafficPolicy, _ := util.GetPortLevelTrafficPolicy(destinationRule.GetTrafficPolicy(), port)
	opts := buildClusterOpts{
		mesh:           cb.req.Push.Mesh,
		serviceTargets: cb.serviceTargets,
		mutable:        mc,
		policy:         trafficPolicy,
		port:           port,
		clusterMode:    clusterMode,
		direction:      model.TrafficDirectionOutbound,
	}

	if clusterMode == DefaultClusterMode {
		opts.serviceAccounts = serviceAccounts
		opts.istioMtlsSni = model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port.Port)
		opts.meshExternal = service.MeshExternal
		opts.serviceRegistry = service.Attributes.ServiceRegistry
		opts.serviceMTLSMode = cb.req.Push.BestEffortInferServiceMTLSMode(destinationRule.GetTrafficPolicy(), service, port)
	}

	if destRule != nil {
		opts.isDrWithSelector = destinationRule.GetWorkloadSelector() != nil
	}
	// Apply traffic policy for the main default cluster.
	cb.applyTrafficPolicy(opts)

	// Apply EdsConfig if needed. This should be called after the traffic policy is applied
	// because the traffic policy might change the discovery type.
	maybeApplyEdsConfig(mc.cluster)

	cb.applyMetadataExchange(opts.mutable.cluster)

	if service.MeshExternal {
		im := getOrCreateIstioMetadata(mc.cluster)
		im.Fields["external"] = &structpb.Value{
			Kind: &structpb.Value_BoolValue{
				BoolValue: true,
			},
		}
	}

	if destRule != nil {
		mc.cluster.Metadata = util.AddConfigInfoMetadata(mc.cluster.Metadata, destRule.Meta)
		mc.cluster.Metadata = util.AddALPNOverrideToMetadata(mc.cluster.Metadata, opts.policy.GetTls().GetMode())
	}
	subsetClusters := make([]*cluster.Cluster, 0)
	for _, subset := range destinationRule.GetSubsets() {
		subsetCluster := cb.buildSubsetCluster(opts, destRule, subset, service, eb)
		if subsetCluster != nil {
			subsetClusters = append(subsetClusters, subsetCluster)
		}
	}
	return subsetClusters
}

func (cb *ClusterBuilder) applyMetadataExchange(c *cluster.Cluster) {
	if features.MetadataExchange {
		c.Filters = append(c.Filters, xdsfilters.TCPClusterMx)
	}
}
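// Putting applyDestinationRule together: a DestinationRule with two subsets on an
// illustrative host yields one default cluster (mutated in place via mc) plus one
// cluster per subset (returned). A sketch, assuming host
// reviews.default.svc.cluster.local and port 9080:
//
//	outbound|9080||reviews.default.svc.cluster.local   <- default cluster (mc)
//	outbound|9080|v1|reviews.default.svc.cluster.local <- returned subset cluster
//	outbound|9080|v2|reviews.default.svc.cluster.local <- returned subset cluster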
// buildCluster builds the default cluster and also applies global options.
// It is used for building both inbound and outbound clusters.
func (cb *ClusterBuilder) buildCluster(name string, discoveryType cluster.Cluster_DiscoveryType,
	localityLbEndpoints []*endpoint.LocalityLbEndpoints, direction model.TrafficDirection,
	port *model.Port, service *model.Service, inboundServices []model.ServiceTarget,
	subset string,
) *clusterWrapper {
	c := &cluster.Cluster{
		Name:                 name,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: discoveryType},
		CommonLbConfig:       &cluster.Cluster_CommonLbConfig{},
	}
	switch discoveryType {
	case cluster.Cluster_STRICT_DNS, cluster.Cluster_LOGICAL_DNS:
		if networkutil.AllIPv4(cb.proxyIPAddresses) {
			// IPv4 only
			c.DnsLookupFamily = cluster.Cluster_V4_ONLY
		} else if networkutil.AllIPv6(cb.proxyIPAddresses) {
			// IPv6 only
			c.DnsLookupFamily = cluster.Cluster_V6_ONLY
		} else {
			// Dual stack
			if features.EnableDualStack {
				// Use Cluster_ALL to enable Happy Eyeballs for upstream connections.
				c.DnsLookupFamily = cluster.Cluster_ALL
			} else {
				// Keep the original logic if dual stack is disabled.
				c.DnsLookupFamily = cluster.Cluster_V4_ONLY
			}
		}
		c.DnsRefreshRate = cb.req.Push.Mesh.DnsRefreshRate
		c.RespectDnsTtl = true
		// We want to run all the STATIC parts as well to build the load assignment.
		fallthrough
	case cluster.Cluster_STATIC:
		if len(localityLbEndpoints) == 0 {
			log.Debugf("locality endpoints missing for cluster %s", c.Name)
			cb.req.Push.AddMetric(model.DNSNoEndpointClusters, c.Name, cb.proxyID,
				fmt.Sprintf("%s cluster without endpoints %s found while pushing CDS", discoveryType.String(), c.Name))
			return nil
		}
		c.LoadAssignment = &endpoint.ClusterLoadAssignment{
			ClusterName: name,
			Endpoints:   localityLbEndpoints,
		}
	case cluster.Cluster_ORIGINAL_DST:
		if features.PassthroughTargetPort {
			if override, f := service.Attributes.PassthroughTargetPorts[uint32(port.Port)]; f {
				c.LbConfig = &cluster.Cluster_OriginalDstLbConfig_{
					OriginalDstLbConfig: &cluster.Cluster_OriginalDstLbConfig{
						UpstreamPortOverride: wrappers.UInt32(override),
					},
				}
			}
		}
	}

	ec := newClusterWrapper(c)
	cb.setUpstreamProtocol(ec, port)
	addTelemetryMetadata(c, port, service, direction, inboundServices)
	if direction == model.TrafficDirectionOutbound {
		// If stat name is configured, build the alternate stat name.
		if len(cb.req.Push.Mesh.OutboundClusterStatName) != 0 {
			ec.cluster.AltStatName = telemetry.BuildStatPrefix(cb.req.Push.Mesh.OutboundClusterStatName,
				string(service.Hostname), subset, port, 0, &service.Attributes)
		}
	}

	return ec
}
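// As a sketch of the DNS branch above, a STRICT_DNS cluster for an IPv4-only
// proxy comes out roughly as follows (host and port are illustrative;
// dns_refresh_rate comes from the mesh config):
//
//	name: outbound|443||external.example.com
//	type: STRICT_DNS
//	dns_lookup_family: V4_ONLY
//	dns_refresh_rate: 60s
//	respect_dns_ttl: true
//	load_assignment: {...}  # built by the fallthrough into the STATIC case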
// buildInboundCluster constructs a single inbound cluster. The cluster will be bound to
// `inbound|clusterPort||`, and send traffic to <bind>:<instance.Endpoint.EndpointPort>. A workload
// will have a single inbound cluster per port. In general this works properly, with the exceptions of
// the Service-oriented DestinationRule and upstream protocol selection. Our documentation currently
// requires a single protocol per port, and the DestinationRule issue is slated to move to Sidecar.
// Note: clusterPort and instance.Endpoint.EndpointPort are identical for standard Services; however,
// Sidecar.Ingress allows these to be different.
func (cb *ClusterBuilder) buildInboundCluster(clusterPort int, bind string,
	proxy *model.Proxy, instance model.ServiceTarget, inboundServices []model.ServiceTarget,
) *clusterWrapper {
	clusterName := model.BuildInboundSubsetKey(clusterPort)
	localityLbEndpoints := buildInboundLocalityLbEndpoints(bind, instance.Port.TargetPort)
	clusterType := cluster.Cluster_ORIGINAL_DST
	if len(localityLbEndpoints) > 0 {
		clusterType = cluster.Cluster_STATIC
	}
	localCluster := cb.buildCluster(clusterName, clusterType, localityLbEndpoints,
		model.TrafficDirectionInbound, instance.Port.ServicePort, instance.Service, inboundServices, "")
	// If stat name is configured, build the alternate stat name.
	if len(cb.req.Push.Mesh.InboundClusterStatName) != 0 {
		localCluster.cluster.AltStatName = telemetry.BuildStatPrefix(cb.req.Push.Mesh.InboundClusterStatName,
			string(instance.Service.Hostname), "", instance.Port.ServicePort, clusterPort, &instance.Service.Attributes)
	}

	opts := buildClusterOpts{
		mesh:            cb.req.Push.Mesh,
		mutable:         localCluster,
		policy:          nil,
		port:            instance.Port.ServicePort,
		serviceAccounts: nil,
		serviceTargets:  cb.serviceTargets,
		istioMtlsSni:    "",
		clusterMode:     DefaultClusterMode,
		direction:       model.TrafficDirectionInbound,
	}
	// When users specify circuit breakers, they need to be set on the receiver end
	// (server side) as well as the client side, so that the server has enough capacity
	// (not the defaults) to handle the increased traffic volume.
	// TODO: This is not foolproof - if the instance is part of multiple services listening on the same port,
	// the choice of inbound cluster is arbitrary. So the connection pool settings may not apply cleanly.
	cfg := proxy.SidecarScope.DestinationRule(model.TrafficDirectionInbound, proxy, instance.Service.Hostname).GetRule()
	if cfg != nil {
		destinationRule := CastDestinationRule(cfg)
		opts.isDrWithSelector = destinationRule.GetWorkloadSelector() != nil
		if destinationRule.TrafficPolicy != nil {
			opts.policy, _ = util.GetPortLevelTrafficPolicy(destinationRule.TrafficPolicy, instance.Port.ServicePort)
			util.AddConfigInfoMetadata(localCluster.cluster.Metadata, cfg.Meta)
		}
	}
	// If there's a connection pool set on the Sidecar then override any settings derived from the DestinationRule
	// with those set by the Sidecar resource. This allows the user to resolve any ambiguity, e.g. in the case that
	// multiple services are listening on the same port (see the illustrative Sidecar snippet below).
	if sidecarConnPool := proxy.SidecarScope.InboundConnectionPoolForPort(clusterPort); sidecarConnPool != nil {
		if opts.policy == nil {
			// There was no destination rule, so no inbound traffic policy; we'll create a default.
			opts.policy = &networking.TrafficPolicy{}
		} else {
			// Copy the policy to prevent mutating the original destinationRule trafficPolicy.
			opts.policy = util.ShallowCopyTrafficPolicy(opts.policy)
		}
		opts.policy.ConnectionPool = sidecarConnPool
	}
	cb.applyTrafficPolicy(opts)
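	// Illustrative Sidecar resource for the override above (a sketch; values are
	// hypothetical). Sidecar.spec.inboundConnectionPool applies to all inbound
	// ports, while a per-ingress-listener connectionPool is more specific:
	//
	//	apiVersion: networking.istio.io/v1alpha3
	//	kind: Sidecar
	//	metadata:
	//	  name: example
	//	spec:
	//	  inboundConnectionPool:
	//	    http:
	//	      http1MaxPendingRequests: 1024
	//	      maxRequestsPerConnection: 100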
	if bind != LocalhostAddress && bind != LocalhostIPv6Address {
		// iptables will redirect our own traffic to localhost back to us if we do not use the "magic" upstream bind
		// config which will be skipped.
		localCluster.cluster.UpstreamBindConfig = &core.BindConfig{
			SourceAddress: &core.SocketAddress{
				Address: cb.passThroughBindIPs[0],
				PortSpecifier: &core.SocketAddress_PortValue{
					PortValue: uint32(0),
				},
			},
		}
		// There is a usage doc here:
		// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/address.proto#config-core-v3-bindconfig
		// to support dual stack via Envoy BindConfig, and below are the related issue and PR in Envoy:
		// https://github.com/envoyproxy/envoy/issues/9811
		// https://github.com/envoyproxy/envoy/pull/22639
		// The extra source address for UpstreamBindConfig should be added if dual stack is enabled and there is
		// more than one IP for the proxy.
		if features.EnableDualStack && len(cb.passThroughBindIPs) > 1 {
			// Add extra source addresses to the cluster builder.
			var extraSrcAddrs []*core.ExtraSourceAddress
			for _, extraBdIP := range cb.passThroughBindIPs[1:] {
				extraSrcAddr := &core.ExtraSourceAddress{
					Address: &core.SocketAddress{
						Address: extraBdIP,
						PortSpecifier: &core.SocketAddress_PortValue{
							PortValue: uint32(0),
						},
					},
				}
				extraSrcAddrs = append(extraSrcAddrs, extraSrcAddr)
			}
			localCluster.cluster.UpstreamBindConfig.ExtraSourceAddresses = extraSrcAddrs
		}
	}
	return localCluster
}

// buildInboundPassthroughClusters builds passthrough clusters for inbound.
func (cb *ClusterBuilder) buildInboundPassthroughClusters() []*cluster.Cluster {
	// IPv4 and IPv6 feature detection. Envoy cannot ignore a config where the IP version is not supported.
	clusters := make([]*cluster.Cluster, 0, 2)
	if cb.supportsIPv4 {
		inboundPassthroughClusterIpv4 := cb.buildDefaultPassthroughCluster()
		inboundPassthroughClusterIpv4.Name = util.InboundPassthroughClusterIpv4
		inboundPassthroughClusterIpv4.Filters = nil
		inboundPassthroughClusterIpv4.UpstreamBindConfig = &core.BindConfig{
			SourceAddress: &core.SocketAddress{
				Address: InboundPassthroughBindIpv4,
				PortSpecifier: &core.SocketAddress_PortValue{
					PortValue: uint32(0),
				},
			},
		}
		clusters = append(clusters, inboundPassthroughClusterIpv4)
	}
	if cb.supportsIPv6 {
		inboundPassthroughClusterIpv6 := cb.buildDefaultPassthroughCluster()
		inboundPassthroughClusterIpv6.Name = util.InboundPassthroughClusterIpv6
		inboundPassthroughClusterIpv6.Filters = nil
		inboundPassthroughClusterIpv6.UpstreamBindConfig = &core.BindConfig{
			SourceAddress: &core.SocketAddress{
				Address: InboundPassthroughBindIpv6,
				PortSpecifier: &core.SocketAddress_PortValue{
					PortValue: uint32(0),
				},
			},
		}
		clusters = append(clusters, inboundPassthroughClusterIpv6)
	}
	return clusters
}

// buildBlackHoleCluster generates a cluster that sends traffic to a dummy local port 0.
// This cluster is used to catch all traffic to unresolved destinations in a virtual service.
func (cb *ClusterBuilder) buildBlackHoleCluster() *cluster.Cluster {
	c := &cluster.Cluster{
		Name:                 util.BlackHoleCluster,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_STATIC},
		ConnectTimeout:       proto.Clone(cb.req.Push.Mesh.ConnectTimeout).(*durationpb.Duration),
		LbPolicy:             cluster.Cluster_ROUND_ROBIN,
	}
	return c
}
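// Side by side, the two catch-all clusters built here and below render roughly
// as follows (a sketch; connect_timeout comes from the mesh config):
//
//	name: BlackHoleCluster
//	type: STATIC
//	# no load_assignment: traffic sent here fails fast
//
//	name: PassthroughCluster
//	type: ORIGINAL_DST
//	lb_policy: CLUSTER_PROVIDED
//	# forwards to the connection's original destination address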
// buildDefaultPassthroughCluster generates a cluster that sends traffic to the original destination.
// This cluster is used to catch all traffic to unknown listener ports.
func (cb *ClusterBuilder) buildDefaultPassthroughCluster() *cluster.Cluster {
	c := &cluster.Cluster{
		Name:                 util.PassthroughCluster,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_ORIGINAL_DST},
		ConnectTimeout:       proto.Clone(cb.req.Push.Mesh.ConnectTimeout).(*durationpb.Duration),
		LbPolicy:             cluster.Cluster_CLUSTER_PROVIDED,
		TypedExtensionProtocolOptions: map[string]*anypb.Any{
			v3.HttpProtocolOptionsType: passthroughHttpProtocolOptions,
		},
	}
	cb.applyConnectionPool(cb.req.Push.Mesh, newClusterWrapper(c), &networking.ConnectionPoolSettings{})
	cb.applyMetadataExchange(c)
	return c
}

// setH2Options makes the cluster an h2 cluster by setting http2ProtocolOptions.
func setH2Options(mc *clusterWrapper) {
	if mc == nil {
		return
	}
	if mc.httpProtocolOptions == nil {
		mc.httpProtocolOptions = &http.HttpProtocolOptions{}
	}
	options := mc.httpProtocolOptions
	if options.UpstreamHttpProtocolOptions == nil {
		options.UpstreamProtocolOptions = &http.HttpProtocolOptions_ExplicitHttpConfig_{
			ExplicitHttpConfig: &http.HttpProtocolOptions_ExplicitHttpConfig{
				ProtocolConfig: &http.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{
					Http2ProtocolOptions: http2ProtocolOptions(),
				},
			},
		}
	}
}

type mtlsContextType int

const (
	userSupplied mtlsContextType = iota
	autoDetected
)

func (cb *ClusterBuilder) setUseDownstreamProtocol(mc *clusterWrapper) {
	if mc.httpProtocolOptions == nil {
		mc.httpProtocolOptions = &http.HttpProtocolOptions{}
	}
	options := mc.httpProtocolOptions
	options.UpstreamProtocolOptions = &http.HttpProtocolOptions_UseDownstreamProtocolConfig{
		UseDownstreamProtocolConfig: &http.HttpProtocolOptions_UseDownstreamHttpConfig{
			HttpProtocolOptions:  &core.Http1ProtocolOptions{},
			Http2ProtocolOptions: http2ProtocolOptions(),
		},
	}
}

func http2ProtocolOptions() *core.Http2ProtocolOptions {
	return &core.Http2ProtocolOptions{}
}

// nolint
// revive:disable-next-line
func (cb *ClusterBuilder) isHttp2Cluster(mc *clusterWrapper) bool {
	options := mc.httpProtocolOptions
	return options != nil && options.GetExplicitHttpConfig().GetHttp2ProtocolOptions() != nil
}
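// Once marshaled in build(), the options set by setH2Options come out roughly
// as the following typed_extension_protocol_options entry (a sketch):
//
//	envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
//	  explicit_http_config:
//	    http2_protocol_options: {}
//
// whereas setUseDownstreamProtocol produces use_downstream_protocol_config with
// both http_protocol_options and http2_protocol_options populated, mirroring
// passthroughHttpProtocolOptions at the top of this file.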
// setUpstreamProtocol is called after the traffic policy has been applied.
func (cb *ClusterBuilder) setUpstreamProtocol(cluster *clusterWrapper, port *model.Port) {
	if port.Protocol.IsHTTP2() {
		setH2Options(cluster)
		return
	}

	// Add use_downstream_protocol for sidecar proxies only when protocol sniffing is enabled;
	// protocol detection is disabled for gateways, and use_downstream_protocol exists to let a
	// cluster select the upstream connection protocol when the service port is unnamed.
	// use_downstream_protocol should stay disabled for gateways: while it sort of makes sense
	// there even without sniffing, the concern is that clients will do ALPN negotiation, and we
	// always advertise h2. Clients would then connect with h2, while the upstream may not support
	// it. This is not a concern for plaintext, but we have no way to distinguish https from http
	// here. Gateway users who want this behavior can configure UseClientProtocol explicitly.
	if cb.sidecarProxy() && port.Protocol.IsUnsupported() {
		// Use the downstream protocol: if incoming traffic uses HTTP/1.1, the upstream cluster
		// will use HTTP/1.1; if incoming traffic uses HTTP/2, the upstream cluster will use HTTP/2.
		cb.setUseDownstreamProtocol(cluster)
	}
}

// normalizeClusters normalizes clusters to avoid duplicate clusters. This should be called
// at the end, before adding the clusters to the list of clusters.
func (cb *ClusterBuilder) normalizeClusters(clusters []*discovery.Resource) []*discovery.Resource {
	// Resolve cluster name conflicts. There can be duplicate cluster names if there are conflicting service definitions.
	// For any clusters that share the same name, the first cluster is kept and the others are discarded.
	have := sets.String{}
	out := make([]*discovery.Resource, 0, len(clusters))
	for _, c := range clusters {
		if !have.InsertContains(c.Name) {
			out = append(out, c)
		} else {
			cb.req.Push.AddMetric(model.DuplicatedClusters, c.Name, cb.proxyID,
				fmt.Sprintf("Duplicate cluster %s found while pushing CDS", c.Name))
		}
	}
	return out
}

// getAllCachedSubsetClusters either fetches all cached clusters for a given key (there may be multiple due to subsets)
// and returns them along with allFound=true, or returns allFound=false indicating a cache miss. In either case,
// the cache tokens are returned to allow future writes to the cache.
// This code will only trigger a cache hit if all subset clusters are present. This simplifies the code a bit,
// as the non-subset and subset cluster generation are tightly coupled, in exchange for a likely trivial cache hit rate impact.
func (cb *ClusterBuilder) getAllCachedSubsetClusters(clusterKey clusterCache) ([]*discovery.Resource, bool) {
	if !features.EnableCDSCaching {
		return nil, false
	}
	destinationRule := CastDestinationRule(clusterKey.destinationRule.GetRule())
	res := make([]*discovery.Resource, 0, 1+len(destinationRule.GetSubsets()))
	cachedCluster := cb.cache.Get(&clusterKey)
	allFound := cachedCluster != nil
	res = append(res, cachedCluster)
	dir, _, host, port := model.ParseSubsetKey(clusterKey.clusterName)
	for _, ss := range destinationRule.GetSubsets() {
		clusterKey.clusterName = model.BuildSubsetKey(dir, ss.Name, host, port)
		cachedCluster := cb.cache.Get(&clusterKey)
		if cachedCluster == nil {
			allFound = false
		}
		res = append(res, cachedCluster)
	}
	return res, allFound
}
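// The per-subset lookup above rewrites the cache key's cluster name, e.g.
// (an illustrative host):
//
//	outbound|9080||reviews.default.svc.cluster.local    <- non-subset lookup
//	outbound|9080|v1|reviews.default.svc.cluster.local  <- per-subset lookups
//	outbound|9080|v2|reviews.default.svc.cluster.local
//
// A miss on any one of them reports allFound=false, so a partially populated
// cache never produces a partial CDS response.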
// build does any final build operations needed, like marshaling etc.
func (mc *clusterWrapper) build() *cluster.Cluster {
	if mc == nil {
		return nil
	}
	// Marshal HTTP protocol options if they exist.
	if mc.httpProtocolOptions != nil {
		// UpstreamProtocolOptions is a required field in Envoy. If we have not set this option
		// earlier, we need to set it to the default HTTP protocol options.
		if mc.httpProtocolOptions.UpstreamProtocolOptions == nil {
			mc.httpProtocolOptions.UpstreamProtocolOptions = &http.HttpProtocolOptions_ExplicitHttpConfig_{
				ExplicitHttpConfig: &http.HttpProtocolOptions_ExplicitHttpConfig{
					ProtocolConfig: &http.HttpProtocolOptions_ExplicitHttpConfig_HttpProtocolOptions{},
				},
			}
		}
		mc.cluster.TypedExtensionProtocolOptions = map[string]*anypb.Any{
			v3.HttpProtocolOptionsType: protoconv.MessageToAny(mc.httpProtocolOptions),
		}
	}
	return mc.cluster
}

// CastDestinationRule returns the destination rule enclosed by the config, if not nil.
// Otherwise, it returns nil.
func CastDestinationRule(config *config.Config) *networking.DestinationRule {
	if config != nil {
		return config.Spec.(*networking.DestinationRule)
	}

	return nil
}

// maybeApplyEdsConfig applies EdsClusterConfig on the passed-in cluster if it is an EDS type of cluster.
func maybeApplyEdsConfig(c *cluster.Cluster) {
	if c.GetType() != cluster.Cluster_EDS {
		return
	}

	c.EdsClusterConfig = &cluster.Cluster_EdsClusterConfig{
		ServiceName: c.Name,
		EdsConfig: &core.ConfigSource{
			ConfigSourceSpecifier: &core.ConfigSource_Ads{
				Ads: &core.AggregatedConfigSource{},
			},
			InitialFetchTimeout: durationpb.New(0),
			ResourceApiVersion:  core.ApiVersion_V3,
		},
	}
}

// buildExternalSDSCluster generates a cluster that acts as an external SDS server.
func (cb *ClusterBuilder) buildExternalSDSCluster(addr string) *cluster.Cluster {
	ep := &endpoint.LbEndpoint{
		HostIdentifier: &endpoint.LbEndpoint_Endpoint{
			Endpoint: &endpoint.Endpoint{
				Address: &core.Address{
					Address: &core.Address_Pipe{
						Pipe: &core.Pipe{
							Path: addr,
						},
					},
				},
			},
		},
	}
	options := &http.HttpProtocolOptions{}
	options.UpstreamProtocolOptions = &http.HttpProtocolOptions_ExplicitHttpConfig_{
		ExplicitHttpConfig: &http.HttpProtocolOptions_ExplicitHttpConfig{
			ProtocolConfig: &http.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{
				Http2ProtocolOptions: http2ProtocolOptions(),
			},
		},
	}
	c := &cluster.Cluster{
		Name:                 security.SDSExternalClusterName,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_STATIC},
		ConnectTimeout:       proto.Clone(cb.req.Push.Mesh.ConnectTimeout).(*durationpb.Duration),
		LoadAssignment: &endpoint.ClusterLoadAssignment{
			ClusterName: security.SDSExternalClusterName,
			Endpoints: []*endpoint.LocalityLbEndpoints{
				{
					LbEndpoints: []*endpoint.LbEndpoint{ep},
				},
			},
		},
		TypedExtensionProtocolOptions: map[string]*anypb.Any{
			v3.HttpProtocolOptionsType: protoconv.MessageToAny(options),
		},
	}
	return c
}
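// addTelemetryMetadata below attaches a `services` list under the `istio`
// filter-metadata key; the telemetry v2 filters consume it for metric labels.
// The result looks roughly like this (a sketch; the hostname and namespace are
// illustrative, and the exact per-service fields come from buildServiceMetadata):
//
//	metadata:
//	  filter_metadata:
//	    istio:
//	      services:
//	      - host: reviews.default.svc.cluster.local
//	        name: reviews
//	        namespace: default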
func addTelemetryMetadata(cluster *cluster.Cluster,
	port *model.Port, service *model.Service,
	direction model.TrafficDirection, inboundServices []model.ServiceTarget,
) {
	if !features.EnableTelemetryLabel {
		return
	}
	if cluster == nil {
		return
	}
	if direction == model.TrafficDirectionInbound && (len(inboundServices) == 0 || port == nil) {
		// At inbound, the port and local service instances have to be provided.
		return
	}
	if direction == model.TrafficDirectionOutbound && service == nil {
		// At outbound, the service corresponding to the cluster has to be provided.
		return
	}

	im := getOrCreateIstioMetadata(cluster)

	// Add the services field into istio metadata.
	im.Fields["services"] = &structpb.Value{
		Kind: &structpb.Value_ListValue{
			ListValue: &structpb.ListValue{
				Values: []*structpb.Value{},
			},
		},
	}

	svcMetaList := im.Fields["services"].GetListValue()

	// Add service related metadata. This will be consumed by the telemetry v2 filter for metric labels.
	if direction == model.TrafficDirectionInbound {
		// For an inbound cluster, add all services on the cluster port.
		have := sets.New[host.Name]()
		for _, svc := range inboundServices {
			if svc.Port.Port != port.Port {
				// If the service port is different from the port of the cluster that is being built,
				// skip adding telemetry metadata for the service to the cluster.
				continue
			}
			if have.Contains(svc.Service.Hostname) {
				// Skip adding metadata for instances with the same hostname.
				// This could happen when a service has multiple IPs.
				continue
			}
			svcMetaList.Values = append(svcMetaList.Values, buildServiceMetadata(svc.Service))
			have.Insert(svc.Service.Hostname)
		}
	} else if direction == model.TrafficDirectionOutbound {
		// For an outbound cluster, add telemetry metadata based on the service that the cluster is built for.
		svcMetaList.Values = append(svcMetaList.Values, buildServiceMetadata(service))
	}
}