k8s.io/kubernetes@v1.29.3/pkg/proxy/winkernel/proxier.go

//go:build windows
// +build windows

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winkernel

import (
	"fmt"
	"net"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Microsoft/hcsshim/hcn"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	apiutil "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/tools/events"
	"k8s.io/klog/v2"
	kubefeatures "k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/apis/config"
	proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
	"k8s.io/kubernetes/pkg/proxy/healthcheck"
	"k8s.io/kubernetes/pkg/proxy/metaproxier"
	"k8s.io/kubernetes/pkg/proxy/metrics"
	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
	"k8s.io/kubernetes/pkg/util/async"
	netutils "k8s.io/utils/net"
)

// KernelCompatTester tests whether the required kernel capabilities are
// present to run the windows kernel proxier.
type KernelCompatTester interface {
	IsCompatible() error
}

// CanUseWinKernelProxier returns true if we should use the Kernel Proxier
// instead of the "classic" userspace Proxier. This is determined by checking
// the windows kernel version and for the existence of kernel features.
func CanUseWinKernelProxier(kcompat KernelCompatTester) (bool, error) {
	// Check that the kernel supports what we need.
	if err := kcompat.IsCompatible(); err != nil {
		return false, err
	}
	return true, nil
}

type WindowsKernelCompatTester struct{}

// IsCompatible returns nil if winkernel can support this mode of proxy
func (lkct WindowsKernelCompatTester) IsCompatible() error {
	_, err := hcsshim.HNSListPolicyListRequest()
	if err != nil {
		return fmt.Errorf("Windows kernel is not compatible for Kernel mode")
	}
	return nil
}

type externalIPInfo struct {
	ip    string
	hnsID string
}

type loadBalancerIngressInfo struct {
	ip               string
	hnsID            string
	healthCheckHnsID string
}

type loadBalancerInfo struct {
	hnsID string
}

type loadBalancerIdentifier struct {
	protocol      uint16
	internalPort  uint16
	externalPort  uint16
	vip           string
	endpointsHash [20]byte
}

type loadBalancerFlags struct {
	isILB           bool
	isDSR           bool
	isVipExternalIP bool
	localRoutedVIP  bool
	useMUX          bool
	preserveDIP     bool
	sessionAffinity bool
	isIPv6          bool
}

// internal struct for storing service information
type serviceInfo struct {
	*proxy.BaseServicePortInfo
	targetPort             int
	externalIPs            []*externalIPInfo
	loadBalancerIngressIPs []*loadBalancerIngressInfo
	hnsID                  string
	nodePorthnsID          string
	policyApplied          bool
	remoteEndpoint         *endpointInfo
	hns                    HostNetworkService
	preserveDIP            bool
	localTrafficDSR        bool
	internalTrafficLocal   bool
	winProxyOptimization   bool
}

type hnsNetworkInfo struct {
	name          string
	id            string
	networkType   string
	remoteSubnets []*remoteSubnetInfo
}

type remoteSubnetInfo struct {
	destinationPrefix string
	isolationID       uint16
	providerAddress   string
	drMacAddress      string
}

const (
	NETWORK_TYPE_OVERLAY = "overlay"
	// MAX_COUNT_STALE_LOADBALANCERS is the maximum number of stale loadbalancers
	// which are cleaned up in a single syncProxyRules run. If there are more
	// stale loadbalancers to clean, they are handled in the next iteration of
	// syncProxyRules.
	MAX_COUNT_STALE_LOADBALANCERS = 20
)

func newHostNetworkService(hcnImpl HcnService) (HostNetworkService, hcn.SupportedFeatures) {
	var h HostNetworkService
	supportedFeatures := hcnImpl.GetSupportedFeatures()
	if supportedFeatures.Api.V2 {
		h = hns{
			hcn: hcnImpl,
		}
	} else {
		panic("Windows HNS Api V2 required. This version of windows does not support API V2")
	}
	return h, supportedFeatures
}

// logFormattedEndpoints logs all endpoints and their states that take part in
// an endpoints-map change. This is mostly for debugging purposes; verbosity
// is set to 5.
func logFormattedEndpoints(logMsg string, logLevel klog.Level, svcPortName proxy.ServicePortName, eps []proxy.Endpoint) {
	if klog.V(logLevel).Enabled() {
		var epInfo string
		for _, v := range eps {
			epInfo = epInfo + fmt.Sprintf("\n %s={Ready:%v,Serving:%v,Terminating:%v,IsRemote:%v}", v.String(), v.IsReady(), v.IsServing(), v.IsTerminating(), !v.IsLocal())
		}
		klog.V(logLevel).InfoS(logMsg, "svcPortName", svcPortName, "endpoints", epInfo)
	}
}

// cleanupStaleLoadbalancers cleans up stale load balancers which were left
// pending delete in the last iteration. It acts as self-healing for stale
// loadbalancer entries.
func (proxier *Proxier) cleanupStaleLoadbalancers() {
	i := 0
	countStaleLB := len(proxier.mapStaleLoadbalancers)
	if countStaleLB == 0 {
		return
	}
	klog.V(3).InfoS("Cleanup of stale loadbalancers triggered", "LB Count", countStaleLB)
	for lbID := range proxier.mapStaleLoadbalancers {
		i++
		if err := proxier.hns.deleteLoadBalancer(lbID); err == nil {
			delete(proxier.mapStaleLoadbalancers, lbID)
		}
		if i == MAX_COUNT_STALE_LOADBALANCERS {
			// The remaining stale loadbalancers will be cleaned up in the next iteration
			break
		}
	}
	countStaleLB = len(proxier.mapStaleLoadbalancers)
	if countStaleLB > 0 {
		klog.V(3).InfoS("Stale loadbalancers still remaining", "LB Count", countStaleLB, "stale_lb_ids", proxier.mapStaleLoadbalancers)
	}
}

func getNetworkName(hnsNetworkName string) (string, error) {
	if len(hnsNetworkName) == 0 {
		klog.V(3).InfoS("Flag --network-name not set, checking environment variable")
		hnsNetworkName = os.Getenv("KUBE_NETWORK")
		if len(hnsNetworkName) == 0 {
			return "", fmt.Errorf("Environment variable KUBE_NETWORK and network-flag not initialized")
		}
	}
	return hnsNetworkName, nil
}

func getNetworkInfo(hns HostNetworkService, hnsNetworkName string) (*hnsNetworkInfo, error) {
	hnsNetworkInfo, err := hns.getNetworkByName(hnsNetworkName)
	for err != nil {
		klog.ErrorS(err, "Unable to find HNS Network specified, please check network name and CNI deployment", "hnsNetworkName", hnsNetworkName)
		time.Sleep(1 * time.Second)
		hnsNetworkInfo, err = hns.getNetworkByName(hnsNetworkName)
	}
	return hnsNetworkInfo, err
}

func isOverlay(hnsNetworkInfo *hnsNetworkInfo) bool {
	return strings.EqualFold(hnsNetworkInfo.networkType, NETWORK_TYPE_OVERLAY)
}

// StackCompatTester tests whether the required kernel and network are dualstack capable
type StackCompatTester interface {
	DualStackCompatible(networkName string) bool
}

type DualStackCompatTester struct{}

func (t DualStackCompatTester) DualStackCompatible(networkName string) bool {
	hcnImpl := newHcnImpl()
	// First tag of hcsshim that has a proper check for dual stack support is v0.8.22 due to a bug.
	if err := hcnImpl.Ipv6DualStackSupported(); err != nil {
		// Hcn *can* fail the query to grab the version of hcn itself (which this call will do internally before parsing
		// to see if dual stack is supported), but the only time this can happen, at least that can be discerned, is if the host
		// is pre-1803 and hcn didn't exist. hcsshim should truthfully return a known error if this happened that we can
		// check against, and the case where 'err != this known error' would be the 'this feature isn't supported' case, as is being
		// used here. For now, seeing as how nothing before ws2019 (1809) is listed as supported for k8s we can pretty much assume
		// any error here isn't because the query failed, it's just that dualstack simply isn't supported on the host. With all
		// that in mind, just log as info and not error to let the user know we're falling back.
		klog.InfoS("This version of Windows does not support dual-stack, falling back to single-stack", "err", err.Error())
		return false
	}

	// check if network is using overlay
	hns, _ := newHostNetworkService(hcnImpl)
	networkName, err := getNetworkName(networkName)
	if err != nil {
		klog.ErrorS(err, "Unable to determine dual-stack status, falling back to single-stack")
		return false
	}
	networkInfo, err := getNetworkInfo(hns, networkName)
	if err != nil {
		klog.ErrorS(err, "Unable to determine dual-stack status, falling back to single-stack")
		return false
	}

	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinOverlay) && isOverlay(networkInfo) {
		// Overlay (VXLAN) networks on Windows do not support dual-stack networking today
		klog.InfoS("Winoverlay does not support dual-stack, falling back to single-stack")
		return false
	}

	return true
}

// internal struct for endpoints information
type endpointInfo struct {
	ip              string
	port            uint16
	isLocal         bool
	macAddress      string
	hnsID           string
	refCount        *uint16
	providerAddress string
	hns             HostNetworkService

	// conditions
	ready       bool
	serving     bool
	terminating bool
}

// String is part of proxy.Endpoint interface.
func (info *endpointInfo) String() string {
	return net.JoinHostPort(info.ip, strconv.Itoa(int(info.port)))
}

// IsLocal is part of proxy.Endpoint interface.
func (info *endpointInfo) IsLocal() bool {
	return info.isLocal
}

// IsReady returns true if an endpoint is ready and not terminating.
func (info *endpointInfo) IsReady() bool {
	return info.ready
}

// IsServing returns true if an endpoint is ready, regardless of its terminating state.
func (info *endpointInfo) IsServing() bool {
	return info.serving
}

// IsTerminating returns true if an endpoint is terminating.
func (info *endpointInfo) IsTerminating() bool {
	return info.terminating
}

// ZoneHints returns the zone hints for the endpoint.
func (info *endpointInfo) ZoneHints() sets.Set[string] {
	return sets.Set[string]{}
}

// IP returns just the IP part of the endpoint; it's part of the proxy.Endpoint interface.
func (info *endpointInfo) IP() string {
	return info.ip
}

// Port returns just the Port part of the endpoint.
func (info *endpointInfo) Port() int {
	return int(info.port)
}

// conjureMac uses the MAC prefix and the IP address to return a MAC address.
// This ensures MAC addresses are unique for proper load balancing.
// There is a possibility of MAC collisions, but this MAC address is used for
// remote endpoints only and not sent on the wire.
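// For example, with the "02-11" prefix used by this proxier, the IPv4
// endpoint 10.244.1.5 yields "02-11-0a-f4-01-05"; for an IPv6 endpoint the
// last four bytes of the address are used in reverse order.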
func conjureMac(macPrefix string, ip net.IP) string {
	if ip4 := ip.To4(); ip4 != nil {
		a, b, c, d := ip4[0], ip4[1], ip4[2], ip4[3]
		return fmt.Sprintf("%v-%02x-%02x-%02x-%02x", macPrefix, a, b, c, d)
	} else if ip6 := ip.To16(); ip6 != nil {
		a, b, c, d := ip6[15], ip6[14], ip6[13], ip6[12]
		return fmt.Sprintf("%v-%02x-%02x-%02x-%02x", macPrefix, a, b, c, d)
	}
	return "02-11-22-33-44-55"
}

func (proxier *Proxier) endpointsMapChange(oldEndpointsMap, newEndpointsMap proxy.EndpointsMap) {
	// This will optimize remote endpoint and loadbalancer deletion based on the annotation
	var svcPortMap = make(map[proxy.ServicePortName]bool)
	var logLevel klog.Level = 5
	for svcPortName, eps := range oldEndpointsMap {
		logFormattedEndpoints("endpointsMapChange oldEndpointsMap", logLevel, svcPortName, eps)
		svcPortMap[svcPortName] = true
		proxier.onEndpointsMapChange(&svcPortName, false)
	}

	for svcPortName, eps := range newEndpointsMap {
		logFormattedEndpoints("endpointsMapChange newEndpointsMap", logLevel, svcPortName, eps)
		// redundantCleanup true means cleanup is called a second time on the same svcPort
		redundantCleanup := svcPortMap[svcPortName]
		proxier.onEndpointsMapChange(&svcPortName, redundantCleanup)
	}
}

func (proxier *Proxier) onEndpointsMapChange(svcPortName *proxy.ServicePortName, redundantCleanup bool) {

	svc, exists := proxier.svcPortMap[*svcPortName]

	if exists {
		svcInfo, ok := svc.(*serviceInfo)

		if !ok {
			klog.ErrorS(nil, "Failed to cast serviceInfo", "servicePortName", svcPortName)
			return
		}

		if svcInfo.winProxyOptimization && redundantCleanup {
			// This is a second cleanup call.
			// A second cleanup on the same svcPort is ignored when
			// winProxyOptimization is enabled.
			return
		}

		klog.V(3).InfoS("Endpoints are modified. Service is stale", "servicePortName", svcPortName)
Service is stale", "servicePortName", svcPortName) 385 svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName], proxier.mapStaleLoadbalancers, true) 386 } else { 387 // If no service exists, just cleanup the remote endpoints 388 klog.V(3).InfoS("Endpoints are orphaned, cleaning up") 389 // Cleanup Endpoints references 390 epInfos, exists := proxier.endpointsMap[*svcPortName] 391 392 if exists { 393 // Cleanup Endpoints references 394 for _, ep := range epInfos { 395 epInfo, ok := ep.(*endpointInfo) 396 397 if ok { 398 epInfo.Cleanup() 399 } 400 401 } 402 } 403 } 404 } 405 406 func (proxier *Proxier) serviceMapChange(previous, current proxy.ServicePortMap) { 407 for svcPortName := range current { 408 proxier.onServiceMapChange(&svcPortName) 409 } 410 411 for svcPortName := range previous { 412 if _, ok := current[svcPortName]; ok { 413 continue 414 } 415 proxier.onServiceMapChange(&svcPortName) 416 } 417 } 418 419 func (proxier *Proxier) onServiceMapChange(svcPortName *proxy.ServicePortName) { 420 421 svc, exists := proxier.svcPortMap[*svcPortName] 422 423 if exists { 424 svcInfo, ok := svc.(*serviceInfo) 425 426 if !ok { 427 klog.ErrorS(nil, "Failed to cast serviceInfo", "servicePortName", svcPortName) 428 return 429 } 430 431 klog.V(3).InfoS("Updating existing service port", "servicePortName", svcPortName, "clusterIP", svcInfo.ClusterIP(), "port", svcInfo.Port(), "protocol", svcInfo.Protocol()) 432 svcInfo.cleanupAllPolicies(proxier.endpointsMap[*svcPortName], proxier.mapStaleLoadbalancers, false) 433 } 434 } 435 436 // returns a new proxy.Endpoint which abstracts a endpointInfo 437 func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo, _ *proxy.ServicePortName) proxy.Endpoint { 438 439 info := &endpointInfo{ 440 ip: baseInfo.IP(), 441 port: uint16(baseInfo.Port()), 442 isLocal: baseInfo.IsLocal(), 443 macAddress: conjureMac("02-11", netutils.ParseIPSloppy(baseInfo.IP())), 444 refCount: new(uint16), 445 hnsID: "", 446 hns: proxier.hns, 447 448 ready: baseInfo.IsReady(), 449 serving: baseInfo.IsServing(), 450 terminating: baseInfo.IsTerminating(), 451 } 452 453 return info 454 } 455 456 func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, providerAddress string) (*endpointInfo, error) { 457 hnsEndpoint := &endpointInfo{ 458 ip: ip, 459 isLocal: true, 460 macAddress: mac, 461 providerAddress: providerAddress, 462 463 ready: true, 464 serving: true, 465 terminating: false, 466 } 467 ep, err := hns.createEndpoint(hnsEndpoint, network) 468 return ep, err 469 } 470 471 func (ep *endpointInfo) DecrementRefCount() { 472 klog.V(3).InfoS("Decrementing Endpoint RefCount", "endpointInfo", ep) 473 if !ep.IsLocal() && ep.refCount != nil && *ep.refCount > 0 { 474 *ep.refCount-- 475 } 476 } 477 478 func (ep *endpointInfo) Cleanup() { 479 klog.V(3).InfoS("Endpoint cleanup", "endpointInfo", ep) 480 if !ep.IsLocal() && ep.refCount != nil { 481 *ep.refCount-- 482 483 // Remove the remote hns endpoint, if no service is referring it 484 // Never delete a Local Endpoint. Local Endpoints are already created by other entities. 
		// Remove only remote endpoints created by this service
		if *ep.refCount <= 0 && !ep.IsLocal() {
			klog.V(4).InfoS("Removing endpoints, since no one is referencing it", "endpoint", ep)
			err := ep.hns.deleteEndpoint(ep.hnsID)
			if err == nil {
				ep.hnsID = ""
			} else {
				klog.ErrorS(err, "Endpoint deletion failed", "ip", ep.IP())
			}
		}

		ep.refCount = nil
	}
}

func (refCountMap endPointsReferenceCountMap) getRefCount(hnsID string) *uint16 {
	refCount, exists := refCountMap[hnsID]
	if !exists {
		refCountMap[hnsID] = new(uint16)
		refCount = refCountMap[hnsID]
	}
	return refCount
}

// returns a new proxy.ServicePort which abstracts a serviceInfo
func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service, bsvcPortInfo *proxy.BaseServicePortInfo) proxy.ServicePort {
	info := &serviceInfo{BaseServicePortInfo: bsvcPortInfo}
	preserveDIP := service.Annotations["preserve-destination"] == "true"
	// Annotation introduced to enable optimized loadbalancing
	winProxyOptimization := !(strings.ToUpper(service.Annotations["winProxyOptimization"]) == "DISABLED")
	localTrafficDSR := service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal
	var internalTrafficLocal bool
	if service.Spec.InternalTrafficPolicy != nil {
		internalTrafficLocal = *service.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal
	}
	hcnImpl := proxier.hcn
	err := hcnImpl.DsrSupported()
	if err != nil {
		preserveDIP = false
		localTrafficDSR = false
	}
	// targetPort is zero if it is specified as a name in port.TargetPort.
	// Its real value is obtained later from endpoints.
	targetPort := 0
	if port.TargetPort.Type == intstr.Int {
		targetPort = port.TargetPort.IntValue()
	}

	info.preserveDIP = preserveDIP
	info.targetPort = targetPort
	info.hns = proxier.hns
	info.localTrafficDSR = localTrafficDSR
	info.internalTrafficLocal = internalTrafficLocal
	info.winProxyOptimization = winProxyOptimization
	klog.V(3).InfoS("Flags enabled for service", "service", service.Name, "localTrafficDSR", localTrafficDSR, "internalTrafficLocal", internalTrafficLocal, "preserveDIP", preserveDIP, "winProxyOptimization", winProxyOptimization)

	for _, eip := range service.Spec.ExternalIPs {
		info.externalIPs = append(info.externalIPs, &externalIPInfo{ip: eip})
	}

	for _, ingress := range service.Status.LoadBalancer.Ingress {
		if netutils.ParseIPSloppy(ingress.IP) != nil {
			info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP})
		}
	}
	return info
}

func (network hnsNetworkInfo) findRemoteSubnetProviderAddress(ip string) string {
	var providerAddress string
	for _, rs := range network.remoteSubnets {
		_, ipNet, err := netutils.ParseCIDRSloppy(rs.destinationPrefix)
		if err != nil {
			klog.ErrorS(err, "Failed to parse CIDR")
		}
		if ipNet.Contains(netutils.ParseIPSloppy(ip)) {
			providerAddress = rs.providerAddress
		}
		if ip == rs.providerAddress {
			providerAddress = rs.providerAddress
		}
	}

	return providerAddress
}

type endPointsReferenceCountMap map[string]*uint16

// Proxier is an hns based proxy for connections between a localhost:lport
// and services that provide the actual backends.
type Proxier struct {
	// ipFamily defines the IP family which this proxier is tracking.
	ipFamily v1.IPFamily
	// TODO(imroc): implement node handler for winkernel proxier.
	proxyconfig.NoopNodeHandler

	// endpointsChanges and serviceChanges contain all changes to endpoints and
	// services that happened since policies were synced. For a single object,
	// changes are accumulated, i.e. previous is the state from before all of them,
	// current is the state after applying all of those.
	endpointsChanges  *proxy.EndpointsChangeTracker
	serviceChanges    *proxy.ServiceChangeTracker
	endPointsRefCount endPointsReferenceCountMap
	mu                sync.Mutex // protects the following fields
	svcPortMap        proxy.ServicePortMap
	endpointsMap      proxy.EndpointsMap
	// endpointSlicesSynced and servicesSynced are set to true when corresponding
	// objects are synced after startup. This is used to avoid updating hns policies
	// with some partial data after kube-proxy restart.
	endpointSlicesSynced bool
	servicesSynced       bool
	initialized          int32
	syncRunner           *async.BoundedFrequencyRunner // governs calls to syncProxyRules
	// These are effectively const and do not need the mutex to be held.
	clusterCIDR string
	hostname    string
	nodeIP      net.IP
	recorder    events.EventRecorder

	serviceHealthServer healthcheck.ServiceHealthServer
	healthzServer       *healthcheck.ProxierHealthServer

	hns               HostNetworkService
	hcn               HcnService
	network           hnsNetworkInfo
	sourceVip         string
	hostMac           string
	isDSR             bool
	supportedFeatures hcn.SupportedFeatures
	healthzPort       int

	forwardHealthCheckVip bool
	rootHnsEndpointName   string
	mapStaleLoadbalancers map[string]bool // This maintains entries of stale load balancers which were left pending delete in the last iteration
}

type localPort struct {
	desc     string
	ip       string
	port     int
	protocol string
}

func (lp *localPort) String() string {
	return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol)
}

func Enum(p v1.Protocol) uint16 {
	if p == v1.ProtocolTCP {
		return 6
	}
	if p == v1.ProtocolUDP {
		return 17
	}
	if p == v1.ProtocolSCTP {
		return 132
	}
	return 0
}

type closeable interface {
	Close() error
}

// Proxier implements proxy.Provider
var _ proxy.Provider = &Proxier{}

// NewProxier returns a new Proxier
func NewProxier(
	ipFamily v1.IPFamily,
	syncPeriod time.Duration,
	minSyncPeriod time.Duration,
	clusterCIDR string,
	hostname string,
	nodeIP net.IP,
	recorder events.EventRecorder,
	healthzServer *healthcheck.ProxierHealthServer,
	config config.KubeProxyWinkernelConfiguration,
	healthzPort int,
) (*Proxier, error) {
	if nodeIP == nil {
		klog.InfoS("Invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
		nodeIP = netutils.ParseIPSloppy("127.0.0.1")
	}

	if len(clusterCIDR) == 0 {
		klog.InfoS("ClusterCIDR not specified, unable to distinguish between internal and external traffic")
	}

	// windows listens to all node addresses
	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil)
	serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)

	hcnImpl := newHcnImpl()
	hns, supportedFeatures := newHostNetworkService(hcnImpl)
	hnsNetworkName, err := getNetworkName(config.NetworkName)
	if err != nil {
		return nil, err
	}

	klog.V(3).InfoS("Cleaning up old HNS policy lists")
	hcnImpl.DeleteAllHnsLoadBalancerPolicy()

	// Get HNS network information
	hnsNetworkInfo, err := getNetworkInfo(hns, hnsNetworkName)
	if err != nil {
		return nil, err
	}

	// The network could have been detected before Remote Subnet Routes are applied or the ManagementIP is updated.
	// Sleep and update the network to include new information.
	if isOverlay(hnsNetworkInfo) {
		time.Sleep(10 * time.Second)
		hnsNetworkInfo, err = hns.getNetworkByName(hnsNetworkName)
		if err != nil {
			return nil, fmt.Errorf("could not find HNS network %s", hnsNetworkName)
		}
	}

	klog.V(1).InfoS("Hns Network loaded", "hnsNetworkInfo", hnsNetworkInfo)
	isDSR := config.EnableDSR
	if isDSR && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinDSR) {
		return nil, fmt.Errorf("WinDSR feature gate not enabled")
	}

	err = hcnImpl.DsrSupported()
	if isDSR && err != nil {
		return nil, err
	}

	var sourceVip string
	var hostMac string
	if isOverlay(hnsNetworkInfo) {
		if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinOverlay) {
			return nil, fmt.Errorf("WinOverlay feature gate not enabled")
		}
		err = hcn.RemoteSubnetSupported()
		if err != nil {
			return nil, err
		}
		sourceVip = config.SourceVip
		if len(sourceVip) == 0 {
			return nil, fmt.Errorf("source-vip flag not set")
		}

		if nodeIP.IsUnspecified() {
			// attempt to get the correct ip address
			klog.V(2).InfoS("Node ip was unspecified, attempting to find node ip")
			nodeIP, err = apiutil.ResolveBindAddress(nodeIP)
			if err != nil {
				klog.InfoS("Failed to find an ip. You may need to set the --bind-address flag", "err", err)
			}
		}

		interfaces, _ := net.Interfaces() //TODO create interfaces
		for _, inter := range interfaces {
			addresses, _ := inter.Addrs()
			for _, addr := range addresses {
				addrIP, _, _ := netutils.ParseCIDRSloppy(addr.String())
				if addrIP.String() == nodeIP.String() {
					klog.V(2).InfoS("Record Host MAC address", "addr", inter.HardwareAddr)
					hostMac = inter.HardwareAddr.String()
				}
			}
		}
		if len(hostMac) == 0 {
			return nil, fmt.Errorf("could not find host mac address for %s", nodeIP)
		}
	}

	proxier := &Proxier{
		ipFamily:              ipFamily,
		endPointsRefCount:     make(endPointsReferenceCountMap),
		svcPortMap:            make(proxy.ServicePortMap),
		endpointsMap:          make(proxy.EndpointsMap),
		clusterCIDR:           clusterCIDR,
		hostname:              hostname,
		nodeIP:                nodeIP,
		recorder:              recorder,
		serviceHealthServer:   serviceHealthServer,
		healthzServer:         healthzServer,
		hns:                   hns,
		hcn:                   hcnImpl,
		network:               *hnsNetworkInfo,
		sourceVip:             sourceVip,
		hostMac:               hostMac,
		isDSR:                 isDSR,
		supportedFeatures:     supportedFeatures,
		healthzPort:           healthzPort,
		rootHnsEndpointName:   config.RootHnsEndpointName,
		forwardHealthCheckVip: config.ForwardHealthCheckVip,
		mapStaleLoadbalancers: make(map[string]bool),
	}

	serviceChanges := proxy.NewServiceChangeTracker(proxier.newServiceInfo, ipFamily, recorder, proxier.serviceMapChange)
	endPointChangeTracker := proxy.NewEndpointsChangeTracker(hostname, proxier.newEndpointInfo, ipFamily, recorder, proxier.endpointsMapChange)
	proxier.endpointsChanges = endPointChangeTracker
	proxier.serviceChanges = serviceChanges

	burstSyncs := 2
	klog.V(3).InfoS("Record sync param", "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", burstSyncs)
"syncPeriod", syncPeriod, "burstSyncs", burstSyncs) 786 proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) 787 return proxier, nil 788 } 789 790 func NewDualStackProxier( 791 syncPeriod time.Duration, 792 minSyncPeriod time.Duration, 793 clusterCIDR string, 794 hostname string, 795 nodeIPs map[v1.IPFamily]net.IP, 796 recorder events.EventRecorder, 797 healthzServer *healthcheck.ProxierHealthServer, 798 config config.KubeProxyWinkernelConfiguration, 799 healthzPort int, 800 ) (proxy.Provider, error) { 801 802 // Create an ipv4 instance of the single-stack proxier 803 ipv4Proxier, err := NewProxier(v1.IPv4Protocol, syncPeriod, minSyncPeriod, 804 clusterCIDR, hostname, nodeIPs[v1.IPv4Protocol], recorder, healthzServer, 805 config, healthzPort) 806 807 if err != nil { 808 return nil, fmt.Errorf("unable to create ipv4 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIPs[v1.IPv4Protocol]) 809 } 810 811 ipv6Proxier, err := NewProxier(v1.IPv6Protocol, syncPeriod, minSyncPeriod, 812 clusterCIDR, hostname, nodeIPs[v1.IPv6Protocol], recorder, healthzServer, 813 config, healthzPort) 814 if err != nil { 815 return nil, fmt.Errorf("unable to create ipv6 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIPs[v1.IPv6Protocol]) 816 } 817 818 // Return a meta-proxier that dispatch calls between the two 819 // single-stack proxier instances 820 return metaproxier.NewMetaProxier(ipv4Proxier, ipv6Proxier), nil 821 } 822 823 // CleanupLeftovers removes all hns rules created by the Proxier 824 // It returns true if an error was encountered. Errors are logged. 825 func CleanupLeftovers() (encounteredError bool) { 826 // Delete all Hns Load Balancer Policies 827 newHcnImpl().DeleteAllHnsLoadBalancerPolicy() 828 // TODO 829 // Delete all Hns Remote endpoints 830 831 return encounteredError 832 } 833 834 func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []proxy.Endpoint, mapStaleLoadbalancers map[string]bool, isEndpointChange bool) { 835 klog.V(3).InfoS("Service cleanup", "serviceInfo", svcInfo) 836 // if it's an endpoint change and winProxyOptimization annotation enable, skip lb deletion and remoteEndpoint deletion 837 winProxyOptimization := isEndpointChange && svcInfo.winProxyOptimization 838 if winProxyOptimization { 839 klog.V(3).InfoS("Skipped loadbalancer deletion.", "hnsID", svcInfo.hnsID, "nodePorthnsID", svcInfo.nodePorthnsID, "winProxyOptimization", svcInfo.winProxyOptimization, "isEndpointChange", isEndpointChange) 840 } else { 841 // Skip the svcInfo.policyApplied check to remove all the policies 842 svcInfo.deleteLoadBalancerPolicy(mapStaleLoadbalancers) 843 } 844 // Cleanup Endpoints references 845 for _, ep := range endpoints { 846 epInfo, ok := ep.(*endpointInfo) 847 if ok { 848 if winProxyOptimization { 849 epInfo.DecrementRefCount() 850 } else { 851 epInfo.Cleanup() 852 } 853 } 854 } 855 if svcInfo.remoteEndpoint != nil { 856 svcInfo.remoteEndpoint.Cleanup() 857 } 858 859 svcInfo.policyApplied = false 860 } 861 862 func (svcInfo *serviceInfo) deleteLoadBalancerPolicy(mapStaleLoadbalancer map[string]bool) { 863 // Remove the Hns Policy corresponding to this service 864 hns := svcInfo.hns 865 if err := hns.deleteLoadBalancer(svcInfo.hnsID); err != nil { 866 mapStaleLoadbalancer[svcInfo.hnsID] = true 867 klog.V(1).ErrorS(err, "Error deleting Hns loadbalancer policy resource.", "hnsID", svcInfo.hnsID, "ClusterIP", svcInfo.ClusterIP()) 868 
	} else {
		// On successful delete, remove hnsId
		svcInfo.hnsID = ""
	}

	if err := hns.deleteLoadBalancer(svcInfo.nodePorthnsID); err != nil {
		mapStaleLoadbalancer[svcInfo.nodePorthnsID] = true
		klog.V(1).ErrorS(err, "Error deleting Hns NodePort policy resource.", "hnsID", svcInfo.nodePorthnsID, "NodePort", svcInfo.NodePort())
	} else {
		// On successful delete, remove hnsId
		svcInfo.nodePorthnsID = ""
	}

	for _, externalIP := range svcInfo.externalIPs {
		mapStaleLoadbalancer[externalIP.hnsID] = true
		if err := hns.deleteLoadBalancer(externalIP.hnsID); err != nil {
			klog.V(1).ErrorS(err, "Error deleting Hns ExternalIP policy resource.", "hnsID", externalIP.hnsID, "IP", externalIP.ip)
		} else {
			// On successful delete, remove hnsId
			externalIP.hnsID = ""
		}
	}
	for _, lbIngressIP := range svcInfo.loadBalancerIngressIPs {
		klog.V(3).InfoS("Loadbalancer Hns LoadBalancer delete triggered for loadBalancer Ingress resources in cleanup", "lbIngressIP", lbIngressIP)
		if err := hns.deleteLoadBalancer(lbIngressIP.hnsID); err != nil {
			mapStaleLoadbalancer[lbIngressIP.hnsID] = true
			klog.V(1).ErrorS(err, "Error deleting Hns IngressIP policy resource.", "hnsID", lbIngressIP.hnsID, "IP", lbIngressIP.ip)
		} else {
			// On successful delete, remove hnsId
			lbIngressIP.hnsID = ""
		}

		if lbIngressIP.healthCheckHnsID != "" {
			if err := hns.deleteLoadBalancer(lbIngressIP.healthCheckHnsID); err != nil {
				mapStaleLoadbalancer[lbIngressIP.healthCheckHnsID] = true
				klog.V(1).ErrorS(err, "Error deleting Hns IngressIP HealthCheck policy resource.", "hnsID", lbIngressIP.healthCheckHnsID, "IP", lbIngressIP.ip)
			} else {
				// On successful delete, remove hnsId
				lbIngressIP.healthCheckHnsID = ""
			}
		}
	}
}

// Sync is called to synchronize the proxier state to hns as soon as possible.
func (proxier *Proxier) Sync() {
	if proxier.healthzServer != nil {
		proxier.healthzServer.QueuedUpdate(proxier.ipFamily)
	}
	metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime()
	proxier.syncRunner.Run()
}

// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
func (proxier *Proxier) SyncLoop() {
	// Update healthz timestamp at beginning in case Sync() never succeeds.
	if proxier.healthzServer != nil {
		proxier.healthzServer.Updated(proxier.ipFamily)
	}
	// synthesize "last change queued" time as the informers are syncing.
	metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime()
	proxier.syncRunner.Loop(wait.NeverStop)
}

func (proxier *Proxier) setInitialized(value bool) {
	var initialized int32
	if value {
		initialized = 1
	}
	atomic.StoreInt32(&proxier.initialized, initialized)
}

func (proxier *Proxier) isInitialized() bool {
	return atomic.LoadInt32(&proxier.initialized) > 0
}

// OnServiceAdd is called whenever creation of a new service object
// is observed.
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
	proxier.OnServiceUpdate(nil, service)
}

// OnServiceUpdate is called whenever modification of an existing
// service object is observed.
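// A nil oldService indicates an add (see OnServiceAdd); a nil new service
// indicates a delete (see OnServiceDelete).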
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
	if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnServiceDelete is called whenever deletion of an existing service
// object is observed.
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
	proxier.OnServiceUpdate(service, nil)
}

// OnServiceSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnServiceSynced() {
	proxier.mu.Lock()
	proxier.servicesSynced = true
	proxier.setInitialized(proxier.endpointSlicesSynced)
	proxier.mu.Unlock()

	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}

// OnEndpointSliceAdd is called whenever creation of a new endpoint slice object
// is observed.
func (proxier *Proxier) OnEndpointSliceAdd(endpointSlice *discovery.EndpointSlice) {
	if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnEndpointSliceUpdate is called whenever modification of an existing endpoint
// slice object is observed.
func (proxier *Proxier) OnEndpointSliceUpdate(_, endpointSlice *discovery.EndpointSlice) {
	if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, false) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnEndpointSliceDelete is called whenever deletion of an existing endpoint slice
// object is observed.
func (proxier *Proxier) OnEndpointSliceDelete(endpointSlice *discovery.EndpointSlice) {
	if proxier.endpointsChanges.EndpointSliceUpdate(endpointSlice, true) && proxier.isInitialized() {
		proxier.Sync()
	}
}

// OnEndpointSlicesSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnEndpointSlicesSynced() {
	proxier.mu.Lock()
	proxier.endpointSlicesSynced = true
	proxier.setInitialized(proxier.servicesSynced)
	proxier.mu.Unlock()

	// Sync unconditionally - this is called once per lifetime.
	proxier.syncProxyRules()
}

func (proxier *Proxier) cleanupAllPolicies() {
	for svcName, svc := range proxier.svcPortMap {
		svcInfo, ok := svc.(*serviceInfo)
		if !ok {
			klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName)
			continue
		}
		svcInfo.cleanupAllPolicies(proxier.endpointsMap[svcName], proxier.mapStaleLoadbalancers, false)
	}
}

func isNetworkNotFoundError(err error) bool {
	if err == nil {
		return false
	}
	if _, ok := err.(hcn.NetworkNotFoundError); ok {
		return true
	}
	if _, ok := err.(hcsshim.NetworkNotFoundError); ok {
		return true
	}
	return false
}

// isAllEndpointsTerminating returns true if all the endpoints are terminating.
// If at least one is not terminating, it returns false.
func (proxier *Proxier) isAllEndpointsTerminating(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool {
	for _, epInfo := range proxier.endpointsMap[svcName] {
		ep, ok := epInfo.(*endpointInfo)
		if !ok {
			continue
		}
		if isLocalTrafficDSR && !ep.IsLocal() {
			// KEP-1669: Ignore remote endpoints when the ExternalTrafficPolicy is Local (DSR Mode)
			continue
		}
		// If the Readiness Probe fails and the pod is not under delete, then
		// the state of the endpoint will be - Ready:False, Serving:False, Terminating:False
		if !ep.IsReady() && !ep.IsTerminating() {
			// Ready:false, Terminating:False, ignore
			continue
		}
		if !ep.IsTerminating() {
			return false
		}
	}
	return true
}

// isAllEndpointsNonServing returns true if all the endpoints are non-serving.
// If at least one is serving, it returns false.
func (proxier *Proxier) isAllEndpointsNonServing(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool {
	for _, epInfo := range proxier.endpointsMap[svcName] {
		ep, ok := epInfo.(*endpointInfo)
		if !ok {
			continue
		}
		if isLocalTrafficDSR && !ep.IsLocal() {
			continue
		}
		if ep.IsServing() {
			return false
		}
	}
	return true
}

// updateQueriedEndpoints updates the queriedEndpoints map with newly created endpoint details
func updateQueriedEndpoints(newHnsEndpoint *endpointInfo, queriedEndpoints map[string]*endpointInfo) {
	// store newly created endpoints in queriedEndpoints
	queriedEndpoints[newHnsEndpoint.hnsID] = newHnsEndpoint
	queriedEndpoints[newHnsEndpoint.ip] = newHnsEndpoint
}

// This is where all of the hns save/restore calls happen.
// It acquires proxier.mu itself.
func (proxier *Proxier) syncProxyRules() {
	proxier.mu.Lock()
	defer proxier.mu.Unlock()

	// don't sync rules till we've received services and endpoints
	if !proxier.isInitialized() {
		klog.V(2).InfoS("Not syncing hns until Services and Endpoints have been received from master")
		return
	}

	// Keep track of how long syncs take.
	start := time.Now()
	defer func() {
		metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
		klog.V(4).InfoS("Syncing proxy rules complete", "elapsed", time.Since(start))
	}()

	hnsNetworkName := proxier.network.name
	hns := proxier.hns

	var gatewayHnsendpoint *endpointInfo
	if proxier.forwardHealthCheckVip {
		gatewayHnsendpoint, _ = hns.getEndpointByName(proxier.rootHnsEndpointName)
	}

	prevNetworkID := proxier.network.id
	updatedNetwork, err := hns.getNetworkByName(hnsNetworkName)
	if updatedNetwork == nil || updatedNetwork.id != prevNetworkID || isNetworkNotFoundError(err) {
		klog.InfoS("The HNS network is not present or has changed since the last sync, please check the CNI deployment", "hnsNetworkName", hnsNetworkName)
		proxier.cleanupAllPolicies()
		if updatedNetwork != nil {
			proxier.network = *updatedNetwork
		}
		return
	}

	// We assume that if this was called, we really want to sync them,
	// even if nothing changed in the meantime. In other words, callers are
	// responsible for detecting no-op changes and not calling this function.
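	// From here the sync proceeds in three broad steps: (1) apply the
	// accumulated service/endpoints changes to the maps, (2) ensure an HNS
	// endpoint exists for every backend, and (3) create or refresh the HNS
	// load balancer policies for cluster IPs, node ports, external IPs and
	// ingress IPs.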
	serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges)
	endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)

	deletedUDPClusterIPs := serviceUpdateResult.DeletedUDPClusterIPs
	// merge stale services gathered from EndpointsMap.Update
	for _, svcPortName := range endpointUpdateResult.NewlyActiveUDPServices {
		if svcInfo, ok := proxier.svcPortMap[svcPortName]; ok && svcInfo != nil && svcInfo.Protocol() == v1.ProtocolUDP {
			klog.V(2).InfoS("Newly-active UDP service may have stale conntrack entries", "servicePortName", svcPortName)
			deletedUDPClusterIPs.Insert(svcInfo.ClusterIP().String())
		}
	}
	// Query HNS for endpoints and load balancers
	queriedEndpoints, err := hns.getAllEndpointsByNetwork(hnsNetworkName)
	if err != nil {
		klog.ErrorS(err, "Querying HNS for endpoints failed")
		return
	}
	if queriedEndpoints == nil {
		klog.V(4).InfoS("No existing endpoints found in HNS")
		queriedEndpoints = make(map[string]*(endpointInfo))
	}
	queriedLoadBalancers, err := hns.getAllLoadBalancers()
	if queriedLoadBalancers == nil {
		klog.V(4).InfoS("No existing load balancers found in HNS")
		queriedLoadBalancers = make(map[loadBalancerIdentifier]*(loadBalancerInfo))
	}
	if err != nil {
		klog.ErrorS(err, "Querying HNS for load balancers failed")
		return
	}
	if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) {
		if _, ok := queriedEndpoints[proxier.sourceVip]; !ok {
			_, err = newSourceVIP(hns, hnsNetworkName, proxier.sourceVip, proxier.hostMac, proxier.nodeIP.String())
			if err != nil {
				klog.ErrorS(err, "Source Vip endpoint creation failed")
				return
			}
		}
	}

	klog.V(3).InfoS("Syncing Policies")

	// Program HNS by adding corresponding policies for each service.
	for svcName, svc := range proxier.svcPortMap {
		svcInfo, ok := svc.(*serviceInfo)
		if !ok {
			klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName)
			continue
		}

		if svcInfo.policyApplied {
			klog.V(4).InfoS("Policy already applied", "serviceInfo", svcInfo)
			continue
		}

		if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) {
			serviceVipEndpoint := queriedEndpoints[svcInfo.ClusterIP().String()]
			if serviceVipEndpoint == nil {
				klog.V(4).InfoS("No existing remote endpoint", "IP", svcInfo.ClusterIP())
				hnsEndpoint := &endpointInfo{
					ip:              svcInfo.ClusterIP().String(),
					isLocal:         false,
					macAddress:      proxier.hostMac,
					providerAddress: proxier.nodeIP.String(),
				}

				newHnsEndpoint, err := hns.createEndpoint(hnsEndpoint, hnsNetworkName)
				if err != nil {
					klog.ErrorS(err, "Remote endpoint creation failed for service VIP")
					continue
				}

				newHnsEndpoint.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID)
				*newHnsEndpoint.refCount++
				svcInfo.remoteEndpoint = newHnsEndpoint
				updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints)
			}
		}

		var hnsEndpoints []endpointInfo
		var hnsLocalEndpoints []endpointInfo
		klog.V(4).InfoS("Applying Policy", "serviceInfo", svcName)
		// Create Remote endpoints for every endpoint, corresponding to the service
		containsPublicIP := false
		containsNodeIP := false
		var allEndpointsTerminating, allEndpointsNonServing bool
		someEndpointsServing := true

		if len(svcInfo.loadBalancerIngressIPs) > 0 {
			// This check should be done only if the relevant feature gate is
			// enabled, and only if Spec.Type == LoadBalancer.
			allEndpointsTerminating = proxier.isAllEndpointsTerminating(svcName, svcInfo.localTrafficDSR)
			allEndpointsNonServing = proxier.isAllEndpointsNonServing(svcName, svcInfo.localTrafficDSR)
			someEndpointsServing = !allEndpointsNonServing
			klog.V(4).InfoS("Terminating status checked for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "allEndpointsTerminating", allEndpointsTerminating, "allEndpointsNonServing", allEndpointsNonServing, "localTrafficDSR", svcInfo.localTrafficDSR)
		} else {
			klog.V(4).InfoS("Skipped terminating status check for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "ingressLBCount", len(svcInfo.loadBalancerIngressIPs))
		}

		for _, epInfo := range proxier.endpointsMap[svcName] {
			ep, ok := epInfo.(*endpointInfo)
			if !ok {
				klog.ErrorS(nil, "Failed to cast endpointInfo", "serviceName", svcName)
				continue
			}

			if svcInfo.internalTrafficLocal && svcInfo.localTrafficDSR && !ep.IsLocal() {
				// No need to use or create a remote endpoint when both the internal and external traffic policies are local
				klog.V(3).InfoS("Skipping the endpoint. Both internalTraffic and external traffic policies are local", "EpIP", ep.ip, " EpPort", ep.port)
				continue
			}

			if someEndpointsServing {

				if !allEndpointsTerminating && !ep.IsReady() {
					klog.V(3).InfoS("Skipping the endpoint for LB creation. Endpoint is either not ready or not all endpoints are terminating", "EpIP", ep.ip, " EpPort", ep.port, "allEndpointsTerminating", allEndpointsTerminating, "IsEpReady", ep.IsReady())
					continue
				}
				if !ep.IsServing() {
					klog.V(3).InfoS("Skipping the endpoint for LB creation. Endpoint is not serving", "EpIP", ep.ip, " EpPort", ep.port, "IsEpServing", ep.IsServing())
					continue
				}

			}

			var newHnsEndpoint *endpointInfo
			hnsNetworkName := proxier.network.name
			var err error

			// targetPort is zero if it is specified as a name in port.TargetPort, so the real port should be obtained from endpoints.
			// Note that hcsshim.AddLoadBalancer() doesn't support endpoints with different ports, so only the port from the first endpoint is used.
			// TODO(feiskyer): add support for different endpoint ports after hcsshim.AddLoadBalancer() adds that.
			if svcInfo.targetPort == 0 {
				svcInfo.targetPort = int(ep.port)
			}
			// There is a bug in Windows Server 2019 that can cause two endpoints to be created with the same IP address, so we need to check using endpoint ID first.
			// TODO: Remove lookup by endpoint ID, and use the IP address only, so we don't need to maintain multiple keys for lookup.
			if len(ep.hnsID) > 0 {
				newHnsEndpoint = queriedEndpoints[ep.hnsID]
			}

			if newHnsEndpoint == nil {
				// First check if an endpoint resource exists for this IP, on the current host
				// A Local endpoint could exist here already
				// A remote endpoint was already created and proxy was restarted
				newHnsEndpoint = queriedEndpoints[ep.IP()]
			}

			if newHnsEndpoint == nil {
				if ep.IsLocal() {
					klog.ErrorS(err, "Local endpoint not found on network", "ip", ep.IP(), "hnsNetworkName", hnsNetworkName)
					continue
				}

				if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) {
					klog.InfoS("Updating network to check for new remote subnet policies", "networkName", proxier.network.name)
					networkName := proxier.network.name
					updatedNetwork, err := hns.getNetworkByName(networkName)
					if err != nil {
						klog.ErrorS(err, "Unable to find HNS Network specified, please check network name and CNI deployment", "hnsNetworkName", hnsNetworkName)
						proxier.cleanupAllPolicies()
						return
					}
					proxier.network = *updatedNetwork
					providerAddress := proxier.network.findRemoteSubnetProviderAddress(ep.IP())
					if len(providerAddress) == 0 {
						klog.InfoS("Could not find provider address, assuming it is a public IP", "IP", ep.IP())
						providerAddress = proxier.nodeIP.String()
					}

					hnsEndpoint := &endpointInfo{
						ip:              ep.ip,
						isLocal:         false,
						macAddress:      conjureMac("02-11", netutils.ParseIPSloppy(ep.ip)),
						providerAddress: providerAddress,
					}

					newHnsEndpoint, err = hns.createEndpoint(hnsEndpoint, hnsNetworkName)
					if err != nil {
						klog.ErrorS(err, "Remote endpoint creation failed", "endpointInfo", hnsEndpoint)
						continue
					}
					updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints)
				} else {

					hnsEndpoint := &endpointInfo{
						ip:         ep.ip,
						isLocal:    false,
						macAddress: ep.macAddress,
					}

					newHnsEndpoint, err = hns.createEndpoint(hnsEndpoint, hnsNetworkName)
					if err != nil {
						klog.ErrorS(err, "Remote endpoint creation failed")
						continue
					}
					updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints)
				}
			}
			// For Overlay networks, 'SourceVIP' on a Load Balancer Policy can either be chosen as
			// a) Source VIP configured on kube-proxy (or)
			// b) Node IP of the current node
			//
			// For an L2Bridge network the Source VIP is always the NodeIP of the current node, and the same
			// would be configured on kube-proxy as SourceVIP
			//
			// The logic for choosing the SourceVIP in Overlay networks is based on the backend endpoints:
			// a) Endpoints are any IPs outside the cluster ==> Choose NodeIP as the SourceVIP
			// b) Endpoints are IP addresses of a remote node => Choose NodeIP as the SourceVIP
			// c) Everything else (local Pods, remote Pods, Node IP of the current node) ==> Choose the configured SourceVIP
			if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) && !ep.IsLocal() {
				providerAddress := proxier.network.findRemoteSubnetProviderAddress(ep.IP())

				isNodeIP := (ep.IP() == providerAddress)
				isPublicIP := (len(providerAddress) == 0)
				klog.InfoS("Endpoint on overlay network", "ip", ep.IP(), "hnsNetworkName", hnsNetworkName, "isNodeIP", isNodeIP, "isPublicIP", isPublicIP)

				containsNodeIP = containsNodeIP || isNodeIP
				containsPublicIP = containsPublicIP || isPublicIP
			}

			// Save the hnsId for reference
			klog.V(1).InfoS("Hns endpoint resource", "endpointInfo", newHnsEndpoint)

			hnsEndpoints = append(hnsEndpoints, *newHnsEndpoint)
			if newHnsEndpoint.IsLocal() {
				hnsLocalEndpoints = append(hnsLocalEndpoints, *newHnsEndpoint)
			} else {
				// We only share the refCounts for remote endpoints
				ep.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID)
				*ep.refCount++
			}

			ep.hnsID = newHnsEndpoint.hnsID

			klog.V(3).InfoS("Endpoint resource found", "endpointInfo", ep)
		}

		klog.V(3).InfoS("Associated endpoints for service", "endpointInfo", hnsEndpoints, "serviceName", svcName)

		if len(svcInfo.hnsID) > 0 {
			// This should not happen
			klog.InfoS("Load Balancer already exists -- Debug ", "hnsID", svcInfo.hnsID)
		}

		// With ETP:Cluster, if all endpoints are under termination, the list
		// will contain serving-and-terminating endpoints; otherwise only
		// ready-and-serving ones.
		if len(hnsEndpoints) == 0 {
			if svcInfo.winProxyOptimization {
				// Deleting loadbalancers when there are no endpoints to serve.
				klog.V(3).InfoS("Cleanup existing", "endpointInfo", hnsEndpoints, "serviceName", svcName)
				svcInfo.deleteLoadBalancerPolicy(proxier.mapStaleLoadbalancers)
			}
			klog.ErrorS(nil, "Endpoint information not available for service, not applying any policy", "serviceName", svcName)
			continue
		}

		klog.V(4).InfoS("Trying to apply Policies for service", "serviceInfo", svcInfo)
		var hnsLoadBalancer *loadBalancerInfo
		var sourceVip = proxier.sourceVip
		if containsPublicIP || containsNodeIP {
			sourceVip = proxier.nodeIP.String()
		}

		sessionAffinityClientIP := svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP
		if sessionAffinityClientIP && !proxier.supportedFeatures.SessionAffinity {
			klog.InfoS("Session Affinity is not supported on this version of Windows")
		}

		endpointsAvailableForLB := !allEndpointsTerminating && !allEndpointsNonServing
		proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &svcInfo.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), hnsEndpoints, queriedLoadBalancers)

		// clusterIPEndpoints is the endpoint list used for creating the ClusterIP loadbalancer.
		clusterIPEndpoints := hnsEndpoints
		if svcInfo.internalTrafficLocal {
			// Take local endpoints for the clusterip loadbalancer when internal traffic policy is local.
			clusterIPEndpoints = hnsLocalEndpoints
		}

		if len(clusterIPEndpoints) > 0 {

			// If all endpoints are terminating, then no need to create the Cluster IP LoadBalancer
			// Cluster IP LoadBalancer creation
			hnsLoadBalancer, err := hns.getLoadBalancer(
				clusterIPEndpoints,
				loadBalancerFlags{isDSR: proxier.isDSR, isIPv6: proxier.ipFamily == v1.IPv6Protocol, sessionAffinity: sessionAffinityClientIP},
				sourceVip,
				svcInfo.ClusterIP().String(),
				Enum(svcInfo.Protocol()),
				uint16(svcInfo.targetPort),
				uint16(svcInfo.Port()),
				queriedLoadBalancers,
			)
			if err != nil {
				klog.ErrorS(err, "Policy creation failed")
				continue
			}

			svcInfo.hnsID = hnsLoadBalancer.hnsID
			klog.V(3).InfoS("Hns LoadBalancer resource created for cluster ip resources", "clusterIP", svcInfo.ClusterIP(), "hnsID", hnsLoadBalancer.hnsID)

		} else {
			klog.V(3).InfoS("Skipped creating Hns LoadBalancer for cluster ip resources. Reason : all endpoints are terminating", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "allEndpointsTerminating", allEndpointsTerminating)
		}

		// If nodePort is specified, the user should be able to use nodeIP:nodePort to reach the backend endpoints
		if svcInfo.NodePort() > 0 {
			// If the preserve-destination service annotation is present, we will disable routing mesh for NodePort.
			// This means that health services can use Node Port without falsely getting results from a different node.
			nodePortEndpoints := hnsEndpoints
			if svcInfo.preserveDIP || svcInfo.localTrafficDSR {
				nodePortEndpoints = hnsLocalEndpoints
			}

			proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &svcInfo.nodePorthnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), nodePortEndpoints, queriedLoadBalancers)

			if len(nodePortEndpoints) > 0 && endpointsAvailableForLB {
				// If all endpoints are in the terminating stage, then no need to create the Node Port LoadBalancer
				hnsLoadBalancer, err := hns.getLoadBalancer(
					nodePortEndpoints,
					loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, localRoutedVIP: true, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.ipFamily == v1.IPv6Protocol},
					sourceVip,
					"",
					Enum(svcInfo.Protocol()),
					uint16(svcInfo.targetPort),
					uint16(svcInfo.NodePort()),
					queriedLoadBalancers,
				)
				if err != nil {
					klog.ErrorS(err, "Policy creation failed")
					continue
				}

				svcInfo.nodePorthnsID = hnsLoadBalancer.hnsID
				klog.V(3).InfoS("Hns LoadBalancer resource created for nodePort resources", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "hnsID", hnsLoadBalancer.hnsID)
			} else {
				klog.V(3).InfoS("Skipped creating Hns LoadBalancer for nodePort resources", "clusterIP", svcInfo.ClusterIP(), "nodeport", svcInfo.NodePort(), "allEndpointsTerminating", allEndpointsTerminating)
			}
		}

		// Create a Load Balancer Policy for each external IP
		for _, externalIP := range svcInfo.externalIPs {
			// Disable routing mesh if ExternalTrafficPolicy is set to local
			externalIPEndpoints := hnsEndpoints
			if svcInfo.localTrafficDSR {
				externalIPEndpoints = hnsLocalEndpoints
			}

			proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &externalIP.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), externalIPEndpoints, queriedLoadBalancers)

			if len(externalIPEndpoints) > 0 && endpointsAvailableForLB {
				// If all endpoints are in the terminating stage, then no need to create the External IP LoadBalancer
				// Try loading existing policies, if already available
				hnsLoadBalancer, err = hns.getLoadBalancer(
					externalIPEndpoints,
					loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.ipFamily == v1.IPv6Protocol},
					sourceVip,
					externalIP.ip,
					Enum(svcInfo.Protocol()),
					uint16(svcInfo.targetPort),
					uint16(svcInfo.Port()),
					queriedLoadBalancers,
				)
				if err != nil {
					klog.ErrorS(err, "Policy creation failed")
					continue
				}
				externalIP.hnsID = hnsLoadBalancer.hnsID
				klog.V(3).InfoS("Hns LoadBalancer resource created for externalIP resources", "externalIP", externalIP, "hnsID", hnsLoadBalancer.hnsID)
			} else {
				klog.V(3).InfoS("Skipped creating Hns LoadBalancer for externalIP resources", "externalIP", externalIP, "allEndpointsTerminating", allEndpointsTerminating)
			}
		}
		// Create a Load Balancer Policy for each loadbalancer ingress
		for _, lbIngressIP := range svcInfo.loadBalancerIngressIPs {
			// Try loading existing policies, if already available
			lbIngressEndpoints := hnsEndpoints
			if svcInfo.preserveDIP || svcInfo.localTrafficDSR {
				lbIngressEndpoints = hnsLocalEndpoints
			}

			proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.hnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), lbIngressEndpoints, queriedLoadBalancers)

			if len(lbIngressEndpoints) > 0 {
				hnsLoadBalancer, err := hns.getLoadBalancer(
					lbIngressEndpoints,
					loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.preserveDIP || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.ipFamily == v1.IPv6Protocol},
					sourceVip,
					lbIngressIP.ip,
					Enum(svcInfo.Protocol()),
					uint16(svcInfo.targetPort),
					uint16(svcInfo.Port()),
					queriedLoadBalancers,
				)
				if err != nil {
					klog.ErrorS(err, "Policy creation failed")
					continue
				}
				lbIngressIP.hnsID = hnsLoadBalancer.hnsID
				klog.V(3).InfoS("Hns LoadBalancer resource created for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP)
			} else {
				klog.V(3).InfoS("Skipped creating Hns LoadBalancer for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP)
			}

			if proxier.forwardHealthCheckVip && gatewayHnsendpoint != nil && endpointsAvailableForLB {
				// Avoid creating the health check loadbalancer if all the endpoints are terminating
				nodeport := proxier.healthzPort
				if svcInfo.HealthCheckNodePort() != 0 {
					nodeport = svcInfo.HealthCheckNodePort()
				}

				proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.healthCheckHnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), []endpointInfo{*gatewayHnsendpoint}, queriedLoadBalancers)

				hnsHealthCheckLoadBalancer, err := hns.getLoadBalancer(
					[]endpointInfo{*gatewayHnsendpoint},
					loadBalancerFlags{isDSR: false, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP},
					sourceVip,
					lbIngressIP.ip,
					Enum(svcInfo.Protocol()),
					uint16(nodeport),
					uint16(nodeport),
					queriedLoadBalancers,
				)
				if err != nil {
					klog.ErrorS(err, "Policy creation failed")
					continue
				}
				lbIngressIP.healthCheckHnsID = hnsHealthCheckLoadBalancer.hnsID
				klog.V(3).InfoS("Hns Health Check LoadBalancer resource created for loadBalancer Ingress resources", "ip", lbIngressIP)
			} else {
				klog.V(3).InfoS("Skipped creating Hns Health Check LoadBalancer for loadBalancer Ingress resources", "ip", lbIngressIP, "allEndpointsTerminating", allEndpointsTerminating)
			}
		}
		svcInfo.policyApplied = true
		klog.V(2).InfoS("Policy successfully applied for service", "serviceInfo", svcInfo)
	}

	if proxier.healthzServer != nil {
		proxier.healthzServer.Updated(proxier.ipFamily)
	}
	metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime()

	// Update service healthchecks. The endpoints list might include services that are
	// not "OnlyLocal", but the services list will not, and the serviceHealthServer
	// will just drop those endpoints.
	if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil {
		klog.ErrorS(err, "Error syncing healthcheck services")
	}
	if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil {
		klog.ErrorS(err, "Error syncing healthcheck endpoints")
	}

	// Finish housekeeping.
	// TODO: these could be made more consistent.
	for _, svcIP := range deletedUDPClusterIPs.UnsortedList() {
		// TODO : Check if this is required to cleanup stale services here
		klog.V(5).InfoS("Pending delete stale service IP connections", "IP", svcIP)
	}

	// remove stale endpoint refcount entries
	for hnsID, referenceCount := range proxier.endPointsRefCount {
		if *referenceCount <= 0 {
			klog.V(3).InfoS("Deleting unreferenced remote endpoint", "hnsID", hnsID)
			proxier.hns.deleteEndpoint(hnsID)
			delete(proxier.endPointsRefCount, hnsID)
		}
	}
	// This will clean up stale load balancers which were left pending delete
	// in the last iteration
	proxier.cleanupStaleLoadbalancers()
}

// deleteExistingLoadBalancer checks whether a loadbalancer delete is needed or not.
// If it is needed, the function deletes the existing loadbalancer and returns true, else false.
func (proxier *Proxier) deleteExistingLoadBalancer(hns HostNetworkService, winProxyOptimization bool, lbHnsID *string, sourceVip string, protocol, intPort, extPort uint16, endpoints []endpointInfo, queriedLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) bool {

	if !winProxyOptimization || *lbHnsID == "" {
		// Loadbalancer delete not needed
		return false
	}

	lbID, lbIdErr := findLoadBalancerID(
		endpoints,
		sourceVip,
		protocol,
		intPort,
		extPort,
	)

	if lbIdErr != nil {
		return proxier.deleteLoadBalancer(hns, lbHnsID)
	}

	if _, ok := queriedLoadBalancers[lbID]; ok {
		// The existing loadbalancer in the system is the same as the one we are trying to delete and recreate, so we skip deleting.
		return false
	}

	return proxier.deleteLoadBalancer(hns, lbHnsID)
}

func (proxier *Proxier) deleteLoadBalancer(hns HostNetworkService, lbHnsID *string) bool {
	klog.V(3).InfoS("Hns LoadBalancer delete triggered for loadBalancer resources", "lbHnsID", *lbHnsID)
	if err := hns.deleteLoadBalancer(*lbHnsID); err != nil {
		// This will be cleaned up by the cleanupStaleLoadbalancers function.
		proxier.mapStaleLoadbalancers[*lbHnsID] = true
	}
	*lbHnsID = ""
	return true
}
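
// A minimal wiring sketch (assumed values, for illustration only): construct a
// proxier and run its sync loop. The clusterCIDR, node details and healthz
// port below are placeholders, not defaults.
//
//	proxier, err := NewProxier(v1.IPv4Protocol, 30*time.Second, time.Second,
//		"10.244.0.0/16", hostname, nodeIP, recorder, healthzServer, cfg, 10256)
//	if err != nil {
//		klog.ErrorS(err, "Failed to create winkernel proxier")
//		return
//	}
//	go proxier.SyncLoop()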