k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/proxy/nftables/proxier_test.go (about)

     1  //go:build linux
     2  // +build linux
     3  
     4  /*
     5  Copyright 2015 The Kubernetes Authors.
     6  
     7  Licensed under the Apache License, Version 2.0 (the "License");
     8  you may not use this file except in compliance with the License.
     9  You may obtain a copy of the License at
    10  
    11      http://www.apache.org/licenses/LICENSE-2.0
    12  
    13  Unless required by applicable law or agreed to in writing, software
    14  distributed under the License is distributed on an "AS IS" BASIS,
    15  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    16  See the License for the specific language governing permissions and
    17  limitations under the License.
    18  */
    19  
    20  package nftables
    21  
    22  import (
    23  	"fmt"
    24  	"net"
    25  	"reflect"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/lithammer/dedent"
    30  	"github.com/stretchr/testify/assert"
    31  
    32  	v1 "k8s.io/api/core/v1"
    33  	discovery "k8s.io/api/discovery/v1"
    34  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    35  	"k8s.io/apimachinery/pkg/types"
    36  	"k8s.io/apimachinery/pkg/util/intstr"
    37  	"k8s.io/apimachinery/pkg/util/sets"
    38  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    39  	featuregatetesting "k8s.io/component-base/featuregate/testing"
    40  	"k8s.io/component-base/metrics/testutil"
    41  	"k8s.io/kubernetes/pkg/features"
    42  	"k8s.io/kubernetes/pkg/proxy"
    43  	kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
    44  	"k8s.io/kubernetes/pkg/proxy/conntrack"
    45  	"k8s.io/kubernetes/pkg/proxy/healthcheck"
    46  	"k8s.io/kubernetes/pkg/proxy/metrics"
    47  	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
    48  	proxyutiltest "k8s.io/kubernetes/pkg/proxy/util/testing"
    49  	"k8s.io/kubernetes/pkg/util/async"
    50  	netutils "k8s.io/utils/net"
    51  	"k8s.io/utils/ptr"
    52  	"sigs.k8s.io/knftables"
    53  )
    54  
    55  // Conventions for tests using NewFakeProxier:
    56  //
    57  // Pod IPs:             10.0.0.0/8
    58  // Service ClusterIPs:  172.30.0.0/16
    59  // Node IPs:            192.168.0.0/24
    60  // Local Node IP:       192.168.0.2
    61  // Service ExternalIPs: 192.168.99.0/24
    62  // LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
    63  // Non-cluster IPs:     203.0.113.0/24
    64  // LB Source Range:     203.0.113.0/25
    65  
// Well-known addresses used throughout these tests; see the conventions
// comment above for the address-range plan they fit into.
const testHostname = "test-hostname" // node name the fake proxier runs on
const testNodeIP = "192.168.0.2"     // primary (local) node IP
const testNodeIPAlt = "192.168.1.2"  // secondary node IP (not a NodePort address)
const testExternalIP = "192.168.99.11"
const testNodeIPv6 = "2001:db8::1"    // primary node IPv6 address
const testNodeIPv6Alt = "2001:db8:1::2"
const testExternalClient = "203.0.113.2"          // inside LB source range
const testExternalClientBlocked = "203.0.113.130" // outside LB source range

// testNodeIPs lists every IP assigned to the fake node's interfaces.
var testNodeIPs = []string{testNodeIP, testNodeIPAlt, testExternalIP, testNodeIPv6, testNodeIPv6Alt}
    76  
    77  func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
    78  	// TODO: Call NewProxier after refactoring out the goroutine
    79  	// invocation into a Run() method.
    80  	nftablesFamily := knftables.IPv4Family
    81  	podCIDR := "10.0.0.0/8"
    82  	serviceCIDRs := "172.30.0.0/16"
    83  	if ipFamily == v1.IPv6Protocol {
    84  		nftablesFamily = knftables.IPv6Family
    85  		podCIDR = "fd00:10::/64"
    86  		serviceCIDRs = "fd00:10:96::/112"
    87  	}
    88  	detectLocal := proxyutil.NewDetectLocalByCIDR(podCIDR)
    89  	nodePortAddresses := []string{fmt.Sprintf("%s/32", testNodeIP), fmt.Sprintf("%s/128", testNodeIPv6)}
    90  
    91  	networkInterfacer := proxyutiltest.NewFakeNetwork()
    92  	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
    93  	addrs := []net.Addr{
    94  		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
    95  		&net.IPNet{IP: netutils.ParseIPSloppy("::1/128"), Mask: net.CIDRMask(128, 128)},
    96  	}
    97  	networkInterfacer.AddInterfaceAddr(&itf, addrs)
    98  	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
    99  	addrs1 := []net.Addr{
   100  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)},
   101  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPAlt), Mask: net.CIDRMask(24, 32)},
   102  		&net.IPNet{IP: netutils.ParseIPSloppy(testExternalIP), Mask: net.CIDRMask(24, 32)},
   103  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6), Mask: net.CIDRMask(64, 128)},
   104  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6Alt), Mask: net.CIDRMask(64, 128)},
   105  	}
   106  	networkInterfacer.AddInterfaceAddr(&itf1, addrs1)
   107  
   108  	nft := knftables.NewFake(nftablesFamily, kubeProxyTable)
   109  
   110  	var nodeIP net.IP
   111  	if ipFamily == v1.IPv4Protocol {
   112  		nodeIP = netutils.ParseIPSloppy(testNodeIP)
   113  	} else {
   114  		nodeIP = netutils.ParseIPSloppy(testNodeIPv6)
   115  	}
   116  	p := &Proxier{
   117  		ipFamily:            ipFamily,
   118  		svcPortMap:          make(proxy.ServicePortMap),
   119  		serviceChanges:      proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, nil, nil),
   120  		endpointsMap:        make(proxy.EndpointsMap),
   121  		endpointsChanges:    proxy.NewEndpointsChangeTracker(testHostname, newEndpointInfo, ipFamily, nil, nil),
   122  		nftables:            nft,
   123  		masqueradeMark:      "0x4000",
   124  		conntrack:           conntrack.NewFake(),
   125  		localDetector:       detectLocal,
   126  		hostname:            testHostname,
   127  		serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
   128  		nodeIP:              nodeIP,
   129  		nodePortAddresses:   proxyutil.NewNodePortAddresses(ipFamily, nodePortAddresses),
   130  		networkInterfacer:   networkInterfacer,
   131  		staleChains:         make(map[string]time.Time),
   132  		serviceCIDRs:        serviceCIDRs,
   133  	}
   134  	p.setInitialized(true)
   135  	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
   136  
   137  	return nft, p
   138  }
   139  
// TestOverallNFTablesRules creates a variety of services and verifies that the generated
// rules are exactly as expected. It covers a plain ClusterIP service, a
// Local-traffic-policy LoadBalancer, a NodePort service, an ExternalIP service,
// a Cluster-traffic-policy LoadBalancer with source ranges and session
// affinity, and a ClusterIP service with no endpoints, then compares the full
// nftables transaction against a golden dump.
func TestOverallNFTablesRules(t *testing.T) {
	nft, fp := NewFakeProxier(v1.IPv4Protocol)
	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeNFTables)

	makeServiceMap(fp,
		// create ClusterIP service
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.ClusterIP = "172.30.0.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		// create LoadBalancer service with Local traffic policy
		makeTestService("ns2", "svc2", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
			svc.Spec.ClusterIP = "172.30.0.42"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3001,
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "1.2.3.4",
			}}
			svc.Spec.ExternalIPs = []string{"192.168.99.22"}
			svc.Spec.HealthCheckNodePort = 30000
		}),
		// create NodePort service
		makeTestService("ns3", "svc3", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "172.30.0.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3003,
			}}
		}),
		// create ExternalIP service
		makeTestService("ns4", "svc4", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "172.30.0.44"
			svc.Spec.ExternalIPs = []string{"192.168.99.33"}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       "p80",
				Port:       80,
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt32(80),
			}}
		}),
		// create LoadBalancer service with Cluster traffic policy, source ranges,
		// and session affinity
		makeTestService("ns5", "svc5", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
			svc.Spec.ClusterIP = "172.30.0.45"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3002,
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "5.6.7.8",
			}}
			svc.Spec.HealthCheckNodePort = 30000
			// Extra whitespace to ensure that invalid value will not result
			// in a crash, for backward compatibility.
			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}

			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{
					TimeoutSeconds: ptr.To[int32](10800),
				},
			}
		}),
		// create ClusterIP service with no endpoints
		makeTestService("ns6", "svc6", func(svc *v1.Service) {
			svc.Spec.Type = "ClusterIP"
			svc.Spec.ClusterIP = "172.30.0.46"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       "p80",
				Port:       80,
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt32(80),
			}}
		}),
	)
	populateEndpointSlices(fp,
		// create ClusterIP service endpoints
		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.1"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To("p80"),
				Port:     ptr.To[int32](80),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
		// create Local LoadBalancer endpoints. Note that since we aren't setting
		// its NodeName, this endpoint will be considered non-local and ignored.
		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.2"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To("p80"),
				Port:     ptr.To[int32](80),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
		// create NodePort service endpoints
		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.3"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To("p80"),
				Port:     ptr.To[int32](80),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
		// create ExternalIP service endpoints (one remote, one local)
		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.4"},
			}, {
				Addresses: []string{"10.180.0.5"},
				NodeName:  ptr.To(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To("p80"),
				Port:     ptr.To[int32](80),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
		// create Cluster LoadBalancer endpoints
		makeTestEndpointSlice("ns5", "svc5", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.3"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To("p80"),
				Port:     ptr.To[int32](80),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
	)

	fp.syncProxyRules()

	// The golden transaction in knftables "dump" format: base chains/sets/maps
	// first, then per-service chains, rules, and map/set elements. Any change
	// to rule generation must be reflected here verbatim.
	expected := dedent.Dedent(`
		add table ip kube-proxy { comment "rules for kube-proxy" ; }

		add chain ip kube-proxy mark-for-masquerade
		add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
		add chain ip kube-proxy masquerading
		add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
		add rule ip kube-proxy masquerading mark set mark xor 0x4000
		add rule ip kube-proxy masquerading masquerade fully-random
		add chain ip kube-proxy services
		add chain ip kube-proxy service-endpoints-check
		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
		add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
		add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
		add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
		add rule ip kube-proxy filter-forward ct state new jump service-endpoints-check
		add rule ip kube-proxy filter-forward ct state new jump cluster-ips-check
		add chain ip kube-proxy filter-input { type filter hook input priority -110 ; }
		add rule ip kube-proxy filter-input ct state new jump nodeport-endpoints-check
		add rule ip kube-proxy filter-input ct state new jump service-endpoints-check
		add chain ip kube-proxy filter-output { type filter hook output priority -110 ; }
		add rule ip kube-proxy filter-output ct state new jump service-endpoints-check
		add rule ip kube-proxy filter-output ct state new jump firewall-check
		add chain ip kube-proxy filter-output-post-dnat { type filter hook output priority -90 ; }
		add rule ip kube-proxy filter-output-post-dnat ct state new jump cluster-ips-check
		add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
		add rule ip kube-proxy nat-output jump services
		add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
		add rule ip kube-proxy nat-postrouting jump masquerading
		add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
		add rule ip kube-proxy nat-prerouting jump services
		add chain ip kube-proxy nodeport-endpoints-check
		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports

		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
		add chain ip kube-proxy cluster-ips-check
		add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
		add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"

		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
		add chain ip kube-proxy firewall-check
		add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips

		add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
		add rule ip kube-proxy reject-chain reject

		add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }

		add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
		add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
		add element ip kube-proxy nodeport-ips { 192.168.0.2 }

		# svc1
		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 }

		add chain ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80
		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 jump mark-for-masquerade
		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80

		add element ip kube-proxy cluster-ips { 172.30.0.41 }
		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }

		# svc2
		add chain ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80
		add rule ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80 ip daddr 172.30.0.42 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
		add rule ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 }
		add chain ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80
		add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 ip saddr 10.0.0.0/8 goto service-42NFTM6N-ns2/svc2/tcp/p80 comment "short-circuit pod traffic"
		add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 fib saddr type local jump mark-for-masquerade comment "masquerade local traffic"
		add rule ip kube-proxy external-42NFTM6N-ns2/svc2/tcp/p80 fib saddr type local goto service-42NFTM6N-ns2/svc2/tcp/p80 comment "short-circuit local traffic"
		add chain ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80
		add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 ip saddr 10.180.0.2 jump mark-for-masquerade
		add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 meta l4proto tcp dnat to 10.180.0.2:80

		add element ip kube-proxy cluster-ips { 172.30.0.42 }
		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 80 : goto service-42NFTM6N-ns2/svc2/tcp/p80 }
		add element ip kube-proxy service-ips { 192.168.99.22 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
		add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
		add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }

		add element ip kube-proxy no-endpoint-nodeports { tcp . 3001 comment "ns2/svc2:p80" : drop }
		add element ip kube-proxy no-endpoint-services { 1.2.3.4 . tcp . 80 comment "ns2/svc2:p80" : drop }
		add element ip kube-proxy no-endpoint-services { 192.168.99.22 . tcp . 80 comment "ns2/svc2:p80" : drop }

		# svc3
		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 }
		add chain ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80
		add rule ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80 jump mark-for-masquerade
		add rule ip kube-proxy external-4AT6LBPK-ns3/svc3/tcp/p80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
		add chain ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80
		add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 jump mark-for-masquerade
		add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80

		add element ip kube-proxy cluster-ips { 172.30.0.43 }
		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
		add element ip kube-proxy service-nodeports { tcp . 3003 : goto external-4AT6LBPK-ns3/svc3/tcp/p80 }

		# svc4
		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 , 1 : goto endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 }
		add chain ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80
		add rule ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80 jump mark-for-masquerade
		add rule ip kube-proxy external-LAUZTJTB-ns4/svc4/tcp/p80 goto service-LAUZTJTB-ns4/svc4/tcp/p80
		add chain ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80
		add rule ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 ip saddr 10.180.0.5 jump mark-for-masquerade
		add rule ip kube-proxy endpoint-5RFCDDV7-ns4/svc4/tcp/p80__10.180.0.5/80 meta l4proto tcp dnat to 10.180.0.5:80
		add chain ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80
		add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 ip saddr 10.180.0.4 jump mark-for-masquerade
		add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 meta l4proto tcp dnat to 10.180.0.4:80

		add element ip kube-proxy cluster-ips { 172.30.0.44 }
		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
		add element ip kube-proxy service-ips { 192.168.99.33 . tcp . 80 : goto external-LAUZTJTB-ns4/svc4/tcp/p80 }

		# svc5
		add set ip kube-proxy affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { type ipv4_addr ; flags dynamic,timeout ; timeout 10800s ; }
		add chain ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80
		add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 ip daddr 172.30.0.45 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
		add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 ip saddr @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 goto endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80
		add rule ip kube-proxy service-HVFWP5L3-ns5/svc5/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 }
		add chain ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80
		add rule ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80 jump mark-for-masquerade
		add rule ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80 goto service-HVFWP5L3-ns5/svc5/tcp/p80

		add chain ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80
		add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 jump mark-for-masquerade
		add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 update @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { ip saddr }
		add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80

		add chain ip kube-proxy firewall-HVFWP5L3-ns5/svc5/tcp/p80
		add rule ip kube-proxy firewall-HVFWP5L3-ns5/svc5/tcp/p80 ip saddr != { 203.0.113.0/25 } drop

		add element ip kube-proxy cluster-ips { 172.30.0.45 }
		add element ip kube-proxy service-ips { 172.30.0.45 . tcp . 80 : goto service-HVFWP5L3-ns5/svc5/tcp/p80 }
		add element ip kube-proxy service-ips { 5.6.7.8 . tcp . 80 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
		add element ip kube-proxy service-nodeports { tcp . 3002 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
		add element ip kube-proxy firewall-ips { 5.6.7.8 . tcp . 80 comment "ns5/svc5:p80" : goto firewall-HVFWP5L3-ns5/svc5/tcp/p80 }

		# svc6
		add element ip kube-proxy cluster-ips { 172.30.0.46 }
		add element ip kube-proxy no-endpoint-services { 172.30.0.46 . tcp . 80 comment "ns6/svc6:p80" : goto reject-chain }
		`)

	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
}
   458  
// TestNoEndpointsReject tests that a service with no endpoints rejects connections to
// its ClusterIP, ExternalIPs, NodePort, and LoadBalancer IP. Each destination
// is probed from both a pod source and an external client; every probe must
// produce a REJECT verdict.
func TestNoEndpointsReject(t *testing.T) {
	nft, fp := NewFakeProxier(v1.IPv4Protocol)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcExternalIPs := "192.168.99.11"
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	// Create a LoadBalancer service exposing all four address kinds, but do
	// NOT populate any endpoint slices for it.
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Protocol: v1.ProtocolTCP,
				Port:     int32(svcPort),
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
		}),
	)
	fp.syncProxyRules()

	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
		{
			name:     "pod to cluster IP with no endpoints",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   "REJECT",
		},
		{
			name:     "external to external IP with no endpoints",
			sourceIP: testExternalClient,
			destIP:   svcExternalIPs,
			destPort: svcPort,
			output:   "REJECT",
		},
		{
			name:     "pod to NodePort with no endpoints",
			sourceIP: "10.0.0.2",
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   "REJECT",
		},
		{
			name:     "external to NodePort with no endpoints",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   "REJECT",
		},
		{
			name:     "pod to LoadBalancer IP with no endpoints",
			sourceIP: "10.0.0.2",
			destIP:   svcLBIP,
			destPort: svcPort,
			output:   "REJECT",
		},
		{
			name:     "external to LoadBalancer IP with no endpoints",
			sourceIP: testExternalClient,
			destIP:   svcLBIP,
			destPort: svcPort,
			output:   "REJECT",
		},
	})
}
   536  
   537  // TestClusterIPGeneral tests various basic features of a ClusterIP service
   538  func TestClusterIPGeneral(t *testing.T) {
   539  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
   540  
   541  	makeServiceMap(fp,
   542  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
   543  			svc.Spec.ClusterIP = "172.30.0.41"
   544  			svc.Spec.Ports = []v1.ServicePort{{
   545  				Name:     "http",
   546  				Port:     80,
   547  				Protocol: v1.ProtocolTCP,
   548  			}}
   549  		}),
   550  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
   551  			svc.Spec.ClusterIP = "172.30.0.42"
   552  			svc.Spec.Ports = []v1.ServicePort{
   553  				{
   554  					Name:     "http",
   555  					Port:     80,
   556  					Protocol: v1.ProtocolTCP,
   557  				},
   558  				{
   559  					Name:       "https",
   560  					Port:       443,
   561  					Protocol:   v1.ProtocolTCP,
   562  					TargetPort: intstr.FromInt32(8443),
   563  				},
   564  				{
   565  					Name:     "dns-udp",
   566  					Port:     53,
   567  					Protocol: v1.ProtocolUDP,
   568  				},
   569  				{
   570  					Name:     "dns-tcp",
   571  					Port:     53,
   572  					Protocol: v1.ProtocolTCP,
   573  					// We use TargetPort on TCP but not UDP/SCTP to
   574  					// help disambiguate the output.
   575  					TargetPort: intstr.FromInt32(5353),
   576  				},
   577  				{
   578  					Name:     "dns-sctp",
   579  					Port:     53,
   580  					Protocol: v1.ProtocolSCTP,
   581  				},
   582  			}
   583  		}),
   584  	)
   585  
   586  	populateEndpointSlices(fp,
   587  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
   588  			eps.AddressType = discovery.AddressTypeIPv4
   589  			eps.Endpoints = []discovery.Endpoint{{
   590  				Addresses: []string{"10.180.0.1"},
   591  				NodeName:  ptr.To(testHostname),
   592  			}}
   593  			eps.Ports = []discovery.EndpointPort{{
   594  				Name:     ptr.To("http"),
   595  				Port:     ptr.To[int32](80),
   596  				Protocol: ptr.To(v1.ProtocolTCP),
   597  			}}
   598  		}),
   599  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
   600  			eps.AddressType = discovery.AddressTypeIPv4
   601  			eps.Endpoints = []discovery.Endpoint{
   602  				{
   603  					Addresses: []string{"10.180.0.1"},
   604  					NodeName:  ptr.To(testHostname),
   605  				},
   606  				{
   607  					Addresses: []string{"10.180.2.1"},
   608  					NodeName:  ptr.To("host2"),
   609  				},
   610  			}
   611  			eps.Ports = []discovery.EndpointPort{
   612  				{
   613  					Name:     ptr.To("http"),
   614  					Port:     ptr.To[int32](80),
   615  					Protocol: ptr.To(v1.ProtocolTCP),
   616  				},
   617  				{
   618  					Name:     ptr.To("https"),
   619  					Port:     ptr.To[int32](8443),
   620  					Protocol: ptr.To(v1.ProtocolTCP),
   621  				},
   622  				{
   623  					Name:     ptr.To("dns-udp"),
   624  					Port:     ptr.To[int32](53),
   625  					Protocol: ptr.To(v1.ProtocolUDP),
   626  				},
   627  				{
   628  					Name:     ptr.To("dns-tcp"),
   629  					Port:     ptr.To[int32](5353),
   630  					Protocol: ptr.To(v1.ProtocolTCP),
   631  				},
   632  				{
   633  					Name:     ptr.To("dns-sctp"),
   634  					Port:     ptr.To[int32](53),
   635  					Protocol: ptr.To(v1.ProtocolSCTP),
   636  				},
   637  			}
   638  		}),
   639  	)
   640  
   641  	fp.syncProxyRules()
   642  
   643  	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
   644  		{
   645  			name:     "simple clusterIP",
   646  			sourceIP: "10.180.0.2",
   647  			destIP:   "172.30.0.41",
   648  			destPort: 80,
   649  			output:   "10.180.0.1:80",
   650  			masq:     false,
   651  		},
   652  		{
   653  			name:     "hairpin to cluster IP",
   654  			sourceIP: "10.180.0.1",
   655  			destIP:   "172.30.0.41",
   656  			destPort: 80,
   657  			output:   "10.180.0.1:80",
   658  			masq:     true,
   659  		},
   660  		{
   661  			name:     "clusterIP with multiple endpoints",
   662  			sourceIP: "10.180.0.2",
   663  			destIP:   "172.30.0.42",
   664  			destPort: 80,
   665  			output:   "10.180.0.1:80, 10.180.2.1:80",
   666  			masq:     false,
   667  		},
   668  		{
   669  			name:     "clusterIP with TargetPort",
   670  			sourceIP: "10.180.0.2",
   671  			destIP:   "172.30.0.42",
   672  			destPort: 443,
   673  			output:   "10.180.0.1:8443, 10.180.2.1:8443",
   674  			masq:     false,
   675  		},
   676  		{
   677  			name:     "clusterIP with TCP, UDP, and SCTP on same port (TCP)",
   678  			sourceIP: "10.180.0.2",
   679  			protocol: v1.ProtocolTCP,
   680  			destIP:   "172.30.0.42",
   681  			destPort: 53,
   682  			output:   "10.180.0.1:5353, 10.180.2.1:5353",
   683  			masq:     false,
   684  		},
   685  		{
   686  			name:     "clusterIP with TCP, UDP, and SCTP on same port (TCP)",
   687  			sourceIP: "10.180.0.2",
   688  			protocol: v1.ProtocolUDP,
   689  			destIP:   "172.30.0.42",
   690  			destPort: 53,
   691  			output:   "10.180.0.1:53, 10.180.2.1:53",
   692  			masq:     false,
   693  		},
   694  		{
   695  			name:     "clusterIP with TCP, UDP, and SCTP on same port (SCTP)",
   696  			sourceIP: "10.180.0.2",
   697  			protocol: v1.ProtocolSCTP,
   698  			destIP:   "172.30.0.42",
   699  			destPort: 53,
   700  			output:   "10.180.0.1:53, 10.180.2.1:53",
   701  			masq:     false,
   702  		},
   703  		{
   704  			name:     "TCP-only port does not match UDP traffic",
   705  			sourceIP: "10.180.0.2",
   706  			protocol: v1.ProtocolUDP,
   707  			destIP:   "172.30.0.42",
   708  			destPort: 80,
   709  			output:   "REJECT",
   710  		},
   711  		{
   712  			name:     "svc1 does not accept svc2's ports",
   713  			sourceIP: "10.180.0.2",
   714  			destIP:   "172.30.0.41",
   715  			destPort: 443,
   716  			output:   "REJECT",
   717  		},
   718  		{
   719  			name:     "packet to unallocated cluster ip",
   720  			sourceIP: "10.180.0.2",
   721  			destIP:   "172.30.0.50",
   722  			destPort: 80,
   723  			output:   "DROP",
   724  		},
   725  	})
   726  }
   727  
// TestLoadBalancer tests a LoadBalancer-type service with two LB ingress IPs
// and LoadBalancerSourceRanges, verifying packet flows via the cluster IP,
// the NodePort, and both LB IPs, including source-range filtering.
func TestLoadBalancer(t *testing.T) {
	nft, fp := NewFakeProxier(v1.IPv4Protocol)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP1 := "1.2.3.4"
	svcLBIP2 := "5.6.7.8"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	// LoadBalancer service with a NodePort, two LB ingress IPs, and two
	// allowed source ranges. (The flow tests below expect testExternalClient
	// to fall inside the ranges and testExternalClientBlocked outside.)
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
				{IP: svcLBIP1},
				{IP: svcLBIP2},
			}
			svc.Spec.LoadBalancerSourceRanges = []string{
				"192.168.0.0/24",

				// Regression test that excess whitespace gets ignored
				" 203.0.113.0/25",
			}
		}),
	)

	// A single endpoint (with no NodeName, i.e. not local) backing the service.
	epIP := "10.180.0.1"
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To(svcPortName.Port),
				Port:     ptr.To(int32(svcPort)),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
	)

	fp.syncProxyRules()

	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
		{
			name:     "pod to cluster IP",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     false,
		},
		{
			name:     "external to nodePort",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "nodePort bypasses LoadBalancerSourceRanges",
			sourceIP: testExternalClientBlocked,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "accepted external to LB1",
			sourceIP: testExternalClient,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "accepted external to LB2",
			sourceIP: testExternalClient,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "blocked external to LB1",
			sourceIP: testExternalClientBlocked,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "blocked external to LB2",
			sourceIP: testExternalClientBlocked,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "pod to LB1 (blocked by LoadBalancerSourceRanges)",
			sourceIP: "10.0.0.2",
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "pod to LB2 (blocked by LoadBalancerSourceRanges)",
			sourceIP: "10.0.0.2",
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "node to LB1 (allowed by LoadBalancerSourceRanges)",
			sourceIP: testNodeIP,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "node to LB2 (allowed by LoadBalancerSourceRanges)",
			sourceIP: testNodeIP,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},

		// The LB rules assume that when you connect from a node to a LB IP, that
		// something external to kube-proxy will cause the connection to be
		// SNATted to the LB IP, so if the LoadBalancerSourceRanges include the
		// node IP, then we add a rule allowing traffic from the LB IP as well...
		{
			name:     "same node to LB1, SNATted to LB1 (implicitly allowed)",
			sourceIP: svcLBIP1,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "same node to LB2, SNATted to LB2 (implicitly allowed)",
			sourceIP: svcLBIP2,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
	})
}
   889  
// TestNodePorts tests NodePort services under various combinations of the
// --nodeport-addresses flags.
func TestNodePorts(t *testing.T) {
	testCases := []struct {
		name string

		family            v1.IPFamily
		nodePortAddresses []string

		// allowAltNodeIP is true if we expect NodePort traffic on the alternate
		// node IP to be accepted
		allowAltNodeIP bool
	}{
		{
			name: "ipv4",

			family:            v1.IPv4Protocol,
			nodePortAddresses: nil,

			allowAltNodeIP: false,
		},
		{
			name: "ipv4, multiple nodeport-addresses",

			family:            v1.IPv4Protocol,
			nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},

			allowAltNodeIP: true,
		},
		{
			name: "ipv6",

			family:            v1.IPv6Protocol,
			nodePortAddresses: nil,

			allowAltNodeIP: false,
		},
		{
			name: "ipv6, multiple nodeport-addresses",

			family:            v1.IPv6Protocol,
			nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64", "2001:db8:1::2/128"},

			allowAltNodeIP: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			nft, fp := NewFakeProxier(tc.family)

			// Pick family-appropriate service, endpoint, and node IPs.
			var svcIP, epIP1, epIP2 string
			var nodeIP string
			if tc.family == v1.IPv4Protocol {
				svcIP = "172.30.0.41"
				epIP1 = "10.180.0.1"
				epIP2 = "10.180.2.1"
				nodeIP = testNodeIP
			} else {
				svcIP = "fd00:172:30::41"
				epIP1 = "fd00:10:180::1"
				epIP2 = "fd00:10:180::2:1"
				nodeIP = testNodeIPv6
			}
			if tc.nodePortAddresses != nil {
				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
			}

			makeServiceMap(fp,
				makeTestService("ns1", "svc1", func(svc *v1.Service) {
					svc.Spec.Type = v1.ServiceTypeNodePort
					svc.Spec.ClusterIP = svcIP
					svc.Spec.Ports = []v1.ServicePort{{
						Name:     "p80",
						Port:     80,
						Protocol: v1.ProtocolTCP,
						NodePort: 3001,
					}}
				}),
			)

			// Two endpoints: epIP1 with no NodeName, epIP2 local to this node.
			populateEndpointSlices(fp,
				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
					if tc.family == v1.IPv4Protocol {
						eps.AddressType = discovery.AddressTypeIPv4
					} else {
						eps.AddressType = discovery.AddressTypeIPv6
					}
					eps.Endpoints = []discovery.Endpoint{{
						Addresses: []string{epIP1},
						NodeName:  nil,
					}, {
						Addresses: []string{epIP2},
						NodeName:  ptr.To(testHostname),
					}}
					eps.Ports = []discovery.EndpointPort{{
						Name:     ptr.To("p80"),
						Port:     ptr.To[int32](80),
						Protocol: ptr.To(v1.ProtocolTCP),
					}}
				}),
			)

			fp.syncProxyRules()

			var podIP, externalClientIP, altNodeIP string
			if tc.family == v1.IPv4Protocol {
				podIP = "10.0.0.2"
				externalClientIP = testExternalClient
				altNodeIP = testNodeIPAlt
			} else {
				podIP = "fd00:10::2"
				externalClientIP = "2600:5200::1"
				altNodeIP = testNodeIPv6Alt
			}
			output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80")

			// Basic tests are the same for all cases
			runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
				{
					name:     "pod to cluster IP",
					sourceIP: podIP,
					destIP:   svcIP,
					destPort: 80,
					output:   output,
					masq:     false,
				},
				{
					name:     "external to nodePort",
					sourceIP: externalClientIP,
					destIP:   nodeIP,
					destPort: 3001,
					output:   output,
					masq:     true,
				},
				{
					name:     "node to nodePort",
					sourceIP: nodeIP,
					destIP:   nodeIP,
					destPort: 3001,
					output:   output,
					masq:     true,
				},
			})

			// Whether the alternate node IP accepts NodePort traffic depends on
			// the --nodeport-addresses configuration for this test case.
			if tc.allowAltNodeIP {
				runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
					{
						name:     "external to nodePort on secondary IP",
						sourceIP: externalClientIP,
						destIP:   altNodeIP,
						destPort: 3001,
						output:   output,
						masq:     true,
					},
				})
			} else {
				runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
					{
						name:     "secondary nodeIP ignores NodePorts",
						sourceIP: externalClientIP,
						destIP:   altNodeIP,
						destPort: 3001,
						output:   "",
					},
				})
			}
		})
	}
}
  1060  
  1061  // TestExternalTrafficPolicyLocal tests that traffic to externally-facing IPs does not get
  1062  // masqueraded when using Local traffic policy. For traffic from external sources, that
  1063  // means it can also only be routed to local endpoints, but for traffic from internal
  1064  // sources, it gets routed to all endpoints.
  1065  func TestExternalTrafficPolicyLocal(t *testing.T) {
  1066  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
  1067  
  1068  	svcIP := "172.30.0.41"
  1069  	svcPort := 80
  1070  	svcNodePort := 3001
  1071  	svcHealthCheckNodePort := 30000
  1072  	svcExternalIPs := "192.168.99.11"
  1073  	svcLBIP := "1.2.3.4"
  1074  	svcPortName := proxy.ServicePortName{
  1075  		NamespacedName: makeNSN("ns1", "svc1"),
  1076  		Port:           "p80",
  1077  	}
  1078  
  1079  	makeServiceMap(fp,
  1080  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  1081  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1082  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1083  			svc.Spec.ClusterIP = svcIP
  1084  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  1085  			svc.Spec.Ports = []v1.ServicePort{{
  1086  				Name:       svcPortName.Port,
  1087  				Port:       int32(svcPort),
  1088  				Protocol:   v1.ProtocolTCP,
  1089  				NodePort:   int32(svcNodePort),
  1090  				TargetPort: intstr.FromInt32(int32(svcPort)),
  1091  			}}
  1092  			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  1093  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1094  				IP: svcLBIP,
  1095  			}}
  1096  		}),
  1097  	)
  1098  
  1099  	epIP1 := "10.180.0.1"
  1100  	epIP2 := "10.180.2.1"
  1101  	populateEndpointSlices(fp,
  1102  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  1103  			eps.AddressType = discovery.AddressTypeIPv4
  1104  			eps.Endpoints = []discovery.Endpoint{{
  1105  				Addresses: []string{epIP1},
  1106  			}, {
  1107  				Addresses: []string{epIP2},
  1108  				NodeName:  ptr.To(testHostname),
  1109  			}}
  1110  			eps.Ports = []discovery.EndpointPort{{
  1111  				Name:     ptr.To(svcPortName.Port),
  1112  				Port:     ptr.To(int32(svcPort)),
  1113  				Protocol: ptr.To(v1.ProtocolTCP),
  1114  			}}
  1115  		}),
  1116  	)
  1117  
  1118  	fp.syncProxyRules()
  1119  
  1120  	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
  1121  		{
  1122  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  1123  			sourceIP: "10.0.0.2",
  1124  			destIP:   svcIP,
  1125  			destPort: svcPort,
  1126  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1127  			masq:     false,
  1128  		},
  1129  		{
  1130  			name:     "pod to external IP hits both endpoints, unmasqueraded",
  1131  			sourceIP: "10.0.0.2",
  1132  			destIP:   svcExternalIPs,
  1133  			destPort: svcPort,
  1134  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1135  			masq:     false,
  1136  		},
  1137  		{
  1138  			name:     "external to external IP hits only local endpoint, unmasqueraded",
  1139  			sourceIP: testExternalClient,
  1140  			destIP:   svcExternalIPs,
  1141  			destPort: svcPort,
  1142  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  1143  			masq:     false,
  1144  		},
  1145  		{
  1146  			name:     "pod to LB IP hits only both endpoints, unmasqueraded",
  1147  			sourceIP: "10.0.0.2",
  1148  			destIP:   svcLBIP,
  1149  			destPort: svcPort,
  1150  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1151  			masq:     false,
  1152  		},
  1153  		{
  1154  			name:     "external to LB IP hits only local endpoint, unmasqueraded",
  1155  			sourceIP: testExternalClient,
  1156  			destIP:   svcLBIP,
  1157  			destPort: svcPort,
  1158  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  1159  			masq:     false,
  1160  		},
  1161  		{
  1162  			name:     "pod to NodePort hits both endpoints, unmasqueraded",
  1163  			sourceIP: "10.0.0.2",
  1164  			destIP:   testNodeIP,
  1165  			destPort: svcNodePort,
  1166  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  1167  			masq:     false,
  1168  		},
  1169  		{
  1170  			name:     "external to NodePort hits only local endpoint, unmasqueraded",
  1171  			sourceIP: testExternalClient,
  1172  			destIP:   testNodeIP,
  1173  			destPort: svcNodePort,
  1174  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  1175  			masq:     false,
  1176  		},
  1177  	})
  1178  }
  1179  
// TestExternalTrafficPolicyCluster tests that traffic to an externally-facing IP gets
// masqueraded when using Cluster traffic policy.
func TestExternalTrafficPolicyCluster(t *testing.T) {
	nft, fp := NewFakeProxier(v1.IPv4Protocol)

	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcExternalIPs := "192.168.99.11"
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	// LoadBalancer service with Cluster external traffic policy, an external
	// IP, and a NodePort.
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				NodePort:   int32(svcNodePort),
				TargetPort: intstr.FromInt32(int32(svcPort)),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
		}),
	)

	// Two endpoints: epIP1 with no NodeName, epIP2 local to this node. With
	// Cluster policy, both should receive traffic from all sources.
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
				NodeName:  nil,
			}, {
				Addresses: []string{epIP2},
				NodeName:  ptr.To(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     ptr.To(svcPortName.Port),
				Port:     ptr.To(int32(svcPort)),
				Protocol: ptr.To(v1.ProtocolTCP),
			}}
		}),
	)

	fp.syncProxyRules()

	runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
		{
			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     false,
		},
		{
			name:     "pod to external IP hits both endpoints, masqueraded",
			sourceIP: "10.0.0.2",
			destIP:   svcExternalIPs,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     true,
		},
		{
			name:     "external to external IP hits both endpoints, masqueraded",
			sourceIP: testExternalClient,
			destIP:   svcExternalIPs,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     true,
		},
		{
			name:     "pod to LB IP hits both endpoints, masqueraded",
			sourceIP: "10.0.0.2",
			destIP:   svcLBIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     true,
		},
		{
			name:     "external to LB IP hits both endpoints, masqueraded",
			sourceIP: testExternalClient,
			destIP:   svcLBIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     true,
		},
		{
			name:     "pod to NodePort hits both endpoints, masqueraded",
			sourceIP: "10.0.0.2",
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     true,
		},
		{
			name:     "external to NodePort hits both endpoints, masqueraded",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     true,
		},
	})
}
  1295  
  1296  func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
  1297  	svc := &v1.Service{
  1298  		ObjectMeta: metav1.ObjectMeta{
  1299  			Name:        name,
  1300  			Namespace:   namespace,
  1301  			Annotations: map[string]string{},
  1302  		},
  1303  		Spec:   v1.ServiceSpec{},
  1304  		Status: v1.ServiceStatus{},
  1305  	}
  1306  	svcFunc(svc)
  1307  	return svc
  1308  }
  1309  
  1310  func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
  1311  	svcPort := v1.ServicePort{
  1312  		Name:       name,
  1313  		Protocol:   protocol,
  1314  		Port:       port,
  1315  		NodePort:   nodeport,
  1316  		TargetPort: intstr.FromInt32(int32(targetPort)),
  1317  	}
  1318  	return append(array, svcPort)
  1319  }
  1320  
  1321  func TestBuildServiceMapAddRemove(t *testing.T) {
  1322  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  1323  
  1324  	services := []*v1.Service{
  1325  		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  1326  			svc.Spec.Type = v1.ServiceTypeClusterIP
  1327  			svc.Spec.ClusterIP = "172.30.55.4"
  1328  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  1329  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  1330  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
  1331  		}),
  1332  		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
  1333  			svc.Spec.Type = v1.ServiceTypeNodePort
  1334  			svc.Spec.ClusterIP = "172.30.55.10"
  1335  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
  1336  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
  1337  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
  1338  		}),
  1339  		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
  1340  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1341  			svc.Spec.ClusterIP = "172.30.55.11"
  1342  			svc.Spec.LoadBalancerIP = "1.2.3.4"
  1343  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
  1344  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
  1345  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  1346  				Ingress: []v1.LoadBalancerIngress{
  1347  					{IP: "1.2.3.4"},
  1348  				},
  1349  			}
  1350  		}),
  1351  		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
  1352  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1353  			svc.Spec.ClusterIP = "172.30.55.12"
  1354  			svc.Spec.LoadBalancerIP = "5.6.7.8"
  1355  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
  1356  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
  1357  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  1358  				Ingress: []v1.LoadBalancerIngress{
  1359  					{IP: "5.6.7.8"},
  1360  				},
  1361  			}
  1362  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1363  			svc.Spec.HealthCheckNodePort = 345
  1364  		}),
  1365  	}
  1366  
  1367  	for i := range services {
  1368  		fp.OnServiceAdd(services[i])
  1369  	}
  1370  	result := fp.svcPortMap.Update(fp.serviceChanges)
  1371  	if len(fp.svcPortMap) != 10 {
  1372  		t.Errorf("expected service map length 10, got %v", fp.svcPortMap)
  1373  	}
  1374  
  1375  	if len(result.DeletedUDPClusterIPs) != 0 {
  1376  		// Services only added, so nothing stale yet
  1377  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  1378  	}
  1379  
  1380  	// The only-local-loadbalancer ones get added
  1381  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  1382  	if len(healthCheckNodePorts) != 1 {
  1383  		t.Errorf("expected 1 healthcheck port, got %v", healthCheckNodePorts)
  1384  	} else {
  1385  		nsn := makeNSN("somewhere", "only-local-load-balancer")
  1386  		if port, found := healthCheckNodePorts[nsn]; !found || port != 345 {
  1387  			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, healthCheckNodePorts)
  1388  		}
  1389  	}
  1390  
  1391  	// Remove some stuff
  1392  	// oneService is a modification of services[0] with removed first port.
  1393  	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  1394  		svc.Spec.Type = v1.ServiceTypeClusterIP
  1395  		svc.Spec.ClusterIP = "172.30.55.4"
  1396  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  1397  	})
  1398  
  1399  	fp.OnServiceUpdate(services[0], oneService)
  1400  	fp.OnServiceDelete(services[1])
  1401  	fp.OnServiceDelete(services[2])
  1402  	fp.OnServiceDelete(services[3])
  1403  
  1404  	result = fp.svcPortMap.Update(fp.serviceChanges)
  1405  	if len(fp.svcPortMap) != 1 {
  1406  		t.Errorf("expected service map length 1, got %v", fp.svcPortMap)
  1407  	}
  1408  
  1409  	// All services but one were deleted. While you'd expect only the ClusterIPs
  1410  	// from the three deleted services here, we still have the ClusterIP for
  1411  	// the not-deleted service, because one of it's ServicePorts was deleted.
  1412  	expectedStaleUDPServices := []string{"172.30.55.10", "172.30.55.4", "172.30.55.11", "172.30.55.12"}
  1413  	if len(result.DeletedUDPClusterIPs) != len(expectedStaleUDPServices) {
  1414  		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.DeletedUDPClusterIPs.UnsortedList())
  1415  	}
  1416  	for _, ip := range expectedStaleUDPServices {
  1417  		if !result.DeletedUDPClusterIPs.Has(ip) {
  1418  			t.Errorf("expected stale UDP service service %s", ip)
  1419  		}
  1420  	}
  1421  
  1422  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  1423  	if len(healthCheckNodePorts) != 0 {
  1424  		t.Errorf("expected 0 healthcheck ports, got %v", healthCheckNodePorts)
  1425  	}
  1426  }
  1427  
  1428  func TestBuildServiceMapServiceHeadless(t *testing.T) {
  1429  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  1430  
  1431  	makeServiceMap(fp,
  1432  		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
  1433  			svc.Spec.Type = v1.ServiceTypeClusterIP
  1434  			svc.Spec.ClusterIP = v1.ClusterIPNone
  1435  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
  1436  		}),
  1437  		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
  1438  			svc.Spec.Type = v1.ServiceTypeClusterIP
  1439  			svc.Spec.ClusterIP = v1.ClusterIPNone
  1440  		}),
  1441  	)
  1442  
  1443  	// Headless service should be ignored
  1444  	result := fp.svcPortMap.Update(fp.serviceChanges)
  1445  	if len(fp.svcPortMap) != 0 {
  1446  		t.Errorf("expected service map length 0, got %d", len(fp.svcPortMap))
  1447  	}
  1448  
  1449  	if len(result.DeletedUDPClusterIPs) != 0 {
  1450  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  1451  	}
  1452  
  1453  	// No proxied services, so no healthchecks
  1454  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  1455  	if len(healthCheckNodePorts) != 0 {
  1456  		t.Errorf("expected healthcheck ports length 0, got %d", len(healthCheckNodePorts))
  1457  	}
  1458  }
  1459  
  1460  func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
  1461  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  1462  
  1463  	makeServiceMap(fp,
  1464  		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
  1465  			svc.Spec.Type = v1.ServiceTypeExternalName
  1466  			svc.Spec.ClusterIP = "172.30.55.4" // Should be ignored
  1467  			svc.Spec.ExternalName = "foo2.bar.com"
  1468  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
  1469  		}),
  1470  	)
  1471  
  1472  	result := fp.svcPortMap.Update(fp.serviceChanges)
  1473  	if len(fp.svcPortMap) != 0 {
  1474  		t.Errorf("expected service map length 0, got %v", fp.svcPortMap)
  1475  	}
  1476  	if len(result.DeletedUDPClusterIPs) != 0 {
  1477  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs)
  1478  	}
  1479  	// No proxied services, so no healthchecks
  1480  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  1481  	if len(healthCheckNodePorts) != 0 {
  1482  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  1483  	}
  1484  }
  1485  
// TestBuildServiceMapServiceUpdate verifies that updating a service back and
// forth between ClusterIP and LoadBalancer types keeps the service-port map
// consistent, never reports stale UDP cluster IPs (the ClusterIP itself never
// changes), and adds/removes the health-check node port as the external
// traffic policy requires.
func TestBuildServiceMapServiceUpdate(t *testing.T) {
	_, fp := NewFakeProxier(v1.IPv4Protocol)

	// v1: plain ClusterIP service with one UDP and one TCP port.
	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
	})
	// v2: the same service converted to a LoadBalancer with Local external
	// traffic policy, which requires a health-check node port (345).
	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeLoadBalancer
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.LoadBalancerIP = "1.2.3.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{
				{IP: "1.2.3.4"},
			},
		}
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
		svc.Spec.HealthCheckNodePort = 345
	})

	fp.OnServiceAdd(servicev1)

	// Initial add: two service ports (UDP + TCP), no health checks.
	result := fp.svcPortMap.Update(fp.serviceChanges)
	if len(fp.svcPortMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
	}
	if len(result.DeletedUDPClusterIPs) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
	}
	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
	if len(healthCheckNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
	}

	// Change service to load-balancer
	fp.OnServiceUpdate(servicev1, servicev2)
	result = fp.svcPortMap.Update(fp.serviceChanges)
	if len(fp.svcPortMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
	}
	if len(result.DeletedUDPClusterIPs) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
	}
	// Local external traffic policy means the health-check node port appears.
	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
	if len(healthCheckNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
	}

	// No change; make sure the service map stays the same and there are
	// no health-check changes
	fp.OnServiceUpdate(servicev2, servicev2)
	result = fp.svcPortMap.Update(fp.serviceChanges)
	if len(fp.svcPortMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
	}
	if len(result.DeletedUDPClusterIPs) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
	}
	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
	if len(healthCheckNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
	}

	// And back to ClusterIP
	fp.OnServiceUpdate(servicev2, servicev1)
	result = fp.svcPortMap.Update(fp.serviceChanges)
	if len(fp.svcPortMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
	}
	if len(result.DeletedUDPClusterIPs) != 0 {
		// The UDP ClusterIP is unchanged and still in use, so nothing is stale.
		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
	}
	// Back to Cluster traffic policy: the health-check node port goes away.
	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
	if len(healthCheckNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
	}
}
  1569  
  1570  func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
  1571  	for i := range allEndpointSlices {
  1572  		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
  1573  	}
  1574  }
  1575  
  1576  func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
  1577  	eps := &discovery.EndpointSlice{
  1578  		ObjectMeta: metav1.ObjectMeta{
  1579  			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
  1580  			Namespace: namespace,
  1581  			Labels:    map[string]string{discovery.LabelServiceName: name},
  1582  		},
  1583  	}
  1584  	epsFunc(eps)
  1585  	return eps
  1586  }
  1587  
  1588  func makeNSN(namespace, name string) types.NamespacedName {
  1589  	return types.NamespacedName{Namespace: namespace, Name: name}
  1590  }
  1591  
  1592  func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
  1593  	return proxy.ServicePortName{
  1594  		NamespacedName: makeNSN(ns, name),
  1595  		Port:           port,
  1596  		Protocol:       protocol,
  1597  	}
  1598  }
  1599  
  1600  func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
  1601  	for i := range allServices {
  1602  		proxier.OnServiceAdd(allServices[i])
  1603  	}
  1604  
  1605  	proxier.mu.Lock()
  1606  	defer proxier.mu.Unlock()
  1607  	proxier.servicesSynced = true
  1608  }
  1609  
// endpointExpectation describes one expected endpoint in an EndpointsMap:
// its "IP:port" string and whether it is local to the test host.
type endpointExpectation struct {
	endpoint string // expected value of Endpoint.String(), e.g. "10.1.1.1:11"
	isLocal  bool   // expected value of Endpoint.IsLocal()
}
  1614  
  1615  func checkEndpointExpectations(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]endpointExpectation) {
  1616  	if len(newMap) != len(expected) {
  1617  		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
  1618  	}
  1619  	for x := range expected {
  1620  		if len(newMap[x]) != len(expected[x]) {
  1621  			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
  1622  		} else {
  1623  			for i := range expected[x] {
  1624  				newEp := newMap[x][i]
  1625  				if newEp.String() != expected[x][i].endpoint ||
  1626  					newEp.IsLocal() != expected[x][i].isLocal {
  1627  					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
  1628  				}
  1629  			}
  1630  		}
  1631  	}
  1632  }
  1633  
  1634  func TestUpdateEndpointsMap(t *testing.T) {
  1635  	emptyEndpointSlices := []*discovery.EndpointSlice{
  1636  		makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
  1637  	}
  1638  	subset1 := func(eps *discovery.EndpointSlice) {
  1639  		eps.AddressType = discovery.AddressTypeIPv4
  1640  		eps.Endpoints = []discovery.Endpoint{{
  1641  			Addresses: []string{"10.1.1.1"},
  1642  		}}
  1643  		eps.Ports = []discovery.EndpointPort{{
  1644  			Name:     ptr.To("p11"),
  1645  			Port:     ptr.To[int32](11),
  1646  			Protocol: ptr.To(v1.ProtocolUDP),
  1647  		}}
  1648  	}
  1649  	subset2 := func(eps *discovery.EndpointSlice) {
  1650  		eps.AddressType = discovery.AddressTypeIPv4
  1651  		eps.Endpoints = []discovery.Endpoint{{
  1652  			Addresses: []string{"10.1.1.2"},
  1653  		}}
  1654  		eps.Ports = []discovery.EndpointPort{{
  1655  			Name:     ptr.To("p12"),
  1656  			Port:     ptr.To[int32](12),
  1657  			Protocol: ptr.To(v1.ProtocolUDP),
  1658  		}}
  1659  	}
  1660  	namedPortLocal := []*discovery.EndpointSlice{
  1661  		makeTestEndpointSlice("ns1", "ep1", 1,
  1662  			func(eps *discovery.EndpointSlice) {
  1663  				eps.AddressType = discovery.AddressTypeIPv4
  1664  				eps.Endpoints = []discovery.Endpoint{{
  1665  					Addresses: []string{"10.1.1.1"},
  1666  					NodeName:  ptr.To(testHostname),
  1667  				}}
  1668  				eps.Ports = []discovery.EndpointPort{{
  1669  					Name:     ptr.To("p11"),
  1670  					Port:     ptr.To[int32](11),
  1671  					Protocol: ptr.To(v1.ProtocolUDP),
  1672  				}}
  1673  			}),
  1674  	}
  1675  	namedPort := []*discovery.EndpointSlice{
  1676  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1677  	}
  1678  	namedPortRenamed := []*discovery.EndpointSlice{
  1679  		makeTestEndpointSlice("ns1", "ep1", 1,
  1680  			func(eps *discovery.EndpointSlice) {
  1681  				eps.AddressType = discovery.AddressTypeIPv4
  1682  				eps.Endpoints = []discovery.Endpoint{{
  1683  					Addresses: []string{"10.1.1.1"},
  1684  				}}
  1685  				eps.Ports = []discovery.EndpointPort{{
  1686  					Name:     ptr.To("p11-2"),
  1687  					Port:     ptr.To[int32](11),
  1688  					Protocol: ptr.To(v1.ProtocolUDP),
  1689  				}}
  1690  			}),
  1691  	}
  1692  	namedPortRenumbered := []*discovery.EndpointSlice{
  1693  		makeTestEndpointSlice("ns1", "ep1", 1,
  1694  			func(eps *discovery.EndpointSlice) {
  1695  				eps.AddressType = discovery.AddressTypeIPv4
  1696  				eps.Endpoints = []discovery.Endpoint{{
  1697  					Addresses: []string{"10.1.1.1"},
  1698  				}}
  1699  				eps.Ports = []discovery.EndpointPort{{
  1700  					Name:     ptr.To("p11"),
  1701  					Port:     ptr.To[int32](22),
  1702  					Protocol: ptr.To(v1.ProtocolUDP),
  1703  				}}
  1704  			}),
  1705  	}
  1706  	namedPortsLocalNoLocal := []*discovery.EndpointSlice{
  1707  		makeTestEndpointSlice("ns1", "ep1", 1,
  1708  			func(eps *discovery.EndpointSlice) {
  1709  				eps.AddressType = discovery.AddressTypeIPv4
  1710  				eps.Endpoints = []discovery.Endpoint{{
  1711  					Addresses: []string{"10.1.1.1"},
  1712  				}, {
  1713  					Addresses: []string{"10.1.1.2"},
  1714  					NodeName:  ptr.To(testHostname),
  1715  				}}
  1716  				eps.Ports = []discovery.EndpointPort{{
  1717  					Name:     ptr.To("p11"),
  1718  					Port:     ptr.To[int32](11),
  1719  					Protocol: ptr.To(v1.ProtocolUDP),
  1720  				}, {
  1721  					Name:     ptr.To("p12"),
  1722  					Port:     ptr.To[int32](12),
  1723  					Protocol: ptr.To(v1.ProtocolUDP),
  1724  				}}
  1725  			}),
  1726  	}
  1727  	multipleSubsets := []*discovery.EndpointSlice{
  1728  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1729  		makeTestEndpointSlice("ns1", "ep1", 2, subset2),
  1730  	}
  1731  	subsetLocal := func(eps *discovery.EndpointSlice) {
  1732  		eps.AddressType = discovery.AddressTypeIPv4
  1733  		eps.Endpoints = []discovery.Endpoint{{
  1734  			Addresses: []string{"10.1.1.2"},
  1735  			NodeName:  ptr.To(testHostname),
  1736  		}}
  1737  		eps.Ports = []discovery.EndpointPort{{
  1738  			Name:     ptr.To("p12"),
  1739  			Port:     ptr.To[int32](12),
  1740  			Protocol: ptr.To(v1.ProtocolUDP),
  1741  		}}
  1742  	}
  1743  	multipleSubsetsWithLocal := []*discovery.EndpointSlice{
  1744  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1745  		makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
  1746  	}
  1747  	subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
  1748  		eps.AddressType = discovery.AddressTypeIPv4
  1749  		eps.Endpoints = []discovery.Endpoint{{
  1750  			Addresses: []string{"10.1.1.1"},
  1751  			NodeName:  ptr.To(testHostname),
  1752  		}}
  1753  		eps.Ports = []discovery.EndpointPort{{
  1754  			Name:     ptr.To("p11"),
  1755  			Port:     ptr.To[int32](11),
  1756  			Protocol: ptr.To(v1.ProtocolUDP),
  1757  		}, {
  1758  			Name:     ptr.To("p12"),
  1759  			Port:     ptr.To[int32](12),
  1760  			Protocol: ptr.To(v1.ProtocolUDP),
  1761  		}}
  1762  	}
  1763  	subset3 := func(eps *discovery.EndpointSlice) {
  1764  		eps.AddressType = discovery.AddressTypeIPv4
  1765  		eps.Endpoints = []discovery.Endpoint{{
  1766  			Addresses: []string{"10.1.1.3"},
  1767  		}}
  1768  		eps.Ports = []discovery.EndpointPort{{
  1769  			Name:     ptr.To("p13"),
  1770  			Port:     ptr.To[int32](13),
  1771  			Protocol: ptr.To(v1.ProtocolUDP),
  1772  		}}
  1773  	}
  1774  	multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
  1775  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
  1776  		makeTestEndpointSlice("ns1", "ep1", 2, subset3),
  1777  	}
  1778  	subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
  1779  		eps.AddressType = discovery.AddressTypeIPv4
  1780  		eps.Endpoints = []discovery.Endpoint{{
  1781  			Addresses: []string{"10.1.1.1"},
  1782  		}, {
  1783  			Addresses: []string{"10.1.1.2"},
  1784  			NodeName:  ptr.To(testHostname),
  1785  		}}
  1786  		eps.Ports = []discovery.EndpointPort{{
  1787  			Name:     ptr.To("p11"),
  1788  			Port:     ptr.To[int32](11),
  1789  			Protocol: ptr.To(v1.ProtocolUDP),
  1790  		}, {
  1791  			Name:     ptr.To("p12"),
  1792  			Port:     ptr.To[int32](12),
  1793  			Protocol: ptr.To(v1.ProtocolUDP),
  1794  		}}
  1795  	}
  1796  	subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
  1797  		eps.AddressType = discovery.AddressTypeIPv4
  1798  		eps.Endpoints = []discovery.Endpoint{{
  1799  			Addresses: []string{"10.1.1.3"},
  1800  		}, {
  1801  			Addresses: []string{"10.1.1.4"},
  1802  			NodeName:  ptr.To(testHostname),
  1803  		}}
  1804  		eps.Ports = []discovery.EndpointPort{{
  1805  			Name:     ptr.To("p13"),
  1806  			Port:     ptr.To[int32](13),
  1807  			Protocol: ptr.To(v1.ProtocolUDP),
  1808  		}, {
  1809  			Name:     ptr.To("p14"),
  1810  			Port:     ptr.To[int32](14),
  1811  			Protocol: ptr.To(v1.ProtocolUDP),
  1812  		}}
  1813  	}
  1814  	subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
  1815  		eps.AddressType = discovery.AddressTypeIPv4
  1816  		eps.Endpoints = []discovery.Endpoint{{
  1817  			Addresses: []string{"10.2.2.1"},
  1818  		}, {
  1819  			Addresses: []string{"10.2.2.2"},
  1820  			NodeName:  ptr.To(testHostname),
  1821  		}}
  1822  		eps.Ports = []discovery.EndpointPort{{
  1823  			Name:     ptr.To("p21"),
  1824  			Port:     ptr.To[int32](21),
  1825  			Protocol: ptr.To(v1.ProtocolUDP),
  1826  		}, {
  1827  			Name:     ptr.To("p22"),
  1828  			Port:     ptr.To[int32](22),
  1829  			Protocol: ptr.To(v1.ProtocolUDP),
  1830  		}}
  1831  	}
  1832  	multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
  1833  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
  1834  		makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
  1835  		makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
  1836  	}
  1837  	complexSubset1 := func(eps *discovery.EndpointSlice) {
  1838  		eps.AddressType = discovery.AddressTypeIPv4
  1839  		eps.Endpoints = []discovery.Endpoint{{
  1840  			Addresses: []string{"10.2.2.2"},
  1841  			NodeName:  ptr.To(testHostname),
  1842  		}, {
  1843  			Addresses: []string{"10.2.2.22"},
  1844  			NodeName:  ptr.To(testHostname),
  1845  		}}
  1846  		eps.Ports = []discovery.EndpointPort{{
  1847  			Name:     ptr.To("p22"),
  1848  			Port:     ptr.To[int32](22),
  1849  			Protocol: ptr.To(v1.ProtocolUDP),
  1850  		}}
  1851  	}
  1852  	complexSubset2 := func(eps *discovery.EndpointSlice) {
  1853  		eps.AddressType = discovery.AddressTypeIPv4
  1854  		eps.Endpoints = []discovery.Endpoint{{
  1855  			Addresses: []string{"10.2.2.3"},
  1856  			NodeName:  ptr.To(testHostname),
  1857  		}}
  1858  		eps.Ports = []discovery.EndpointPort{{
  1859  			Name:     ptr.To("p23"),
  1860  			Port:     ptr.To[int32](23),
  1861  			Protocol: ptr.To(v1.ProtocolUDP),
  1862  		}}
  1863  	}
  1864  	complexSubset3 := func(eps *discovery.EndpointSlice) {
  1865  		eps.AddressType = discovery.AddressTypeIPv4
  1866  		eps.Endpoints = []discovery.Endpoint{{
  1867  			Addresses: []string{"10.4.4.4"},
  1868  			NodeName:  ptr.To(testHostname),
  1869  		}, {
  1870  			Addresses: []string{"10.4.4.5"},
  1871  			NodeName:  ptr.To(testHostname),
  1872  		}}
  1873  		eps.Ports = []discovery.EndpointPort{{
  1874  			Name:     ptr.To("p44"),
  1875  			Port:     ptr.To[int32](44),
  1876  			Protocol: ptr.To(v1.ProtocolUDP),
  1877  		}}
  1878  	}
  1879  	complexSubset4 := func(eps *discovery.EndpointSlice) {
  1880  		eps.AddressType = discovery.AddressTypeIPv4
  1881  		eps.Endpoints = []discovery.Endpoint{{
  1882  			Addresses: []string{"10.4.4.6"},
  1883  			NodeName:  ptr.To(testHostname),
  1884  		}}
  1885  		eps.Ports = []discovery.EndpointPort{{
  1886  			Name:     ptr.To("p45"),
  1887  			Port:     ptr.To[int32](45),
  1888  			Protocol: ptr.To(v1.ProtocolUDP),
  1889  		}}
  1890  	}
  1891  	complexSubset5 := func(eps *discovery.EndpointSlice) {
  1892  		eps.AddressType = discovery.AddressTypeIPv4
  1893  		eps.Endpoints = []discovery.Endpoint{{
  1894  			Addresses: []string{"10.1.1.1"},
  1895  		}, {
  1896  			Addresses: []string{"10.1.1.11"},
  1897  		}}
  1898  		eps.Ports = []discovery.EndpointPort{{
  1899  			Name:     ptr.To("p11"),
  1900  			Port:     ptr.To[int32](11),
  1901  			Protocol: ptr.To(v1.ProtocolUDP),
  1902  		}}
  1903  	}
  1904  	complexSubset6 := func(eps *discovery.EndpointSlice) {
  1905  		eps.AddressType = discovery.AddressTypeIPv4
  1906  		eps.Endpoints = []discovery.Endpoint{{
  1907  			Addresses: []string{"10.1.1.2"},
  1908  		}}
  1909  		eps.Ports = []discovery.EndpointPort{{
  1910  			Name:     ptr.To("p12"),
  1911  			Port:     ptr.To[int32](12),
  1912  			Protocol: ptr.To(v1.ProtocolUDP),
  1913  		}, {
  1914  			Name:     ptr.To("p122"),
  1915  			Port:     ptr.To[int32](122),
  1916  			Protocol: ptr.To(v1.ProtocolUDP),
  1917  		}}
  1918  	}
  1919  	complexSubset7 := func(eps *discovery.EndpointSlice) {
  1920  		eps.AddressType = discovery.AddressTypeIPv4
  1921  		eps.Endpoints = []discovery.Endpoint{{
  1922  			Addresses: []string{"10.3.3.3"},
  1923  		}}
  1924  		eps.Ports = []discovery.EndpointPort{{
  1925  			Name:     ptr.To("p33"),
  1926  			Port:     ptr.To[int32](33),
  1927  			Protocol: ptr.To(v1.ProtocolUDP),
  1928  		}}
  1929  	}
  1930  	complexSubset8 := func(eps *discovery.EndpointSlice) {
  1931  		eps.AddressType = discovery.AddressTypeIPv4
  1932  		eps.Endpoints = []discovery.Endpoint{{
  1933  			Addresses: []string{"10.4.4.4"},
  1934  			NodeName:  ptr.To(testHostname),
  1935  		}}
  1936  		eps.Ports = []discovery.EndpointPort{{
  1937  			Name:     ptr.To("p44"),
  1938  			Port:     ptr.To[int32](44),
  1939  			Protocol: ptr.To(v1.ProtocolUDP),
  1940  		}}
  1941  	}
  1942  	complexBefore := []*discovery.EndpointSlice{
  1943  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  1944  		nil,
  1945  		makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
  1946  		makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
  1947  		nil,
  1948  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
  1949  		makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
  1950  	}
  1951  	complexAfter := []*discovery.EndpointSlice{
  1952  		makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
  1953  		makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
  1954  		nil,
  1955  		nil,
  1956  		makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
  1957  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
  1958  		nil,
  1959  	}
  1960  
  1961  	testCases := []struct {
  1962  		// previousEndpoints and currentEndpoints are used to call appropriate
  1963  		// handlers OnEndpoints* (based on whether corresponding values are nil
  1964  		// or non-nil) and must be of equal length.
  1965  		name                           string
  1966  		previousEndpoints              []*discovery.EndpointSlice
  1967  		currentEndpoints               []*discovery.EndpointSlice
  1968  		oldEndpoints                   map[proxy.ServicePortName][]endpointExpectation
  1969  		expectedResult                 map[proxy.ServicePortName][]endpointExpectation
  1970  		expectedDeletedUDPEndpoints    []proxy.ServiceEndpoint
  1971  		expectedNewlyActiveUDPServices map[proxy.ServicePortName]bool
  1972  		expectedLocalEndpoints         map[types.NamespacedName]int
  1973  	}{{
  1974  		// Case[0]: nothing
  1975  		name:                           "nothing",
  1976  		oldEndpoints:                   map[proxy.ServicePortName][]endpointExpectation{},
  1977  		expectedResult:                 map[proxy.ServicePortName][]endpointExpectation{},
  1978  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  1979  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  1980  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  1981  	}, {
  1982  		// Case[1]: no change, named port, local
  1983  		name:              "no change, named port, local",
  1984  		previousEndpoints: namedPortLocal,
  1985  		currentEndpoints:  namedPortLocal,
  1986  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  1987  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  1988  				{endpoint: "10.1.1.1:11", isLocal: true},
  1989  			},
  1990  		},
  1991  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  1992  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  1993  				{endpoint: "10.1.1.1:11", isLocal: true},
  1994  			},
  1995  		},
  1996  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  1997  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  1998  		expectedLocalEndpoints: map[types.NamespacedName]int{
  1999  			makeNSN("ns1", "ep1"): 1,
  2000  		},
  2001  	}, {
  2002  		// Case[2]: no change, multiple subsets
  2003  		name:              "no change, multiple subsets",
  2004  		previousEndpoints: multipleSubsets,
  2005  		currentEndpoints:  multipleSubsets,
  2006  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2007  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2008  				{endpoint: "10.1.1.1:11", isLocal: false},
  2009  			},
  2010  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2011  				{endpoint: "10.1.1.2:12", isLocal: false},
  2012  			},
  2013  		},
  2014  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2015  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2016  				{endpoint: "10.1.1.1:11", isLocal: false},
  2017  			},
  2018  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2019  				{endpoint: "10.1.1.2:12", isLocal: false},
  2020  			},
  2021  		},
  2022  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  2023  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2024  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2025  	}, {
  2026  		// Case[3]: no change, multiple subsets, multiple ports, local
  2027  		name:              "no change, multiple subsets, multiple ports, local",
  2028  		previousEndpoints: multipleSubsetsMultiplePortsLocal,
  2029  		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
  2030  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2031  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2032  				{endpoint: "10.1.1.1:11", isLocal: true},
  2033  			},
  2034  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2035  				{endpoint: "10.1.1.1:12", isLocal: true},
  2036  			},
  2037  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2038  				{endpoint: "10.1.1.3:13", isLocal: false},
  2039  			},
  2040  		},
  2041  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2042  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2043  				{endpoint: "10.1.1.1:11", isLocal: true},
  2044  			},
  2045  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2046  				{endpoint: "10.1.1.1:12", isLocal: true},
  2047  			},
  2048  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2049  				{endpoint: "10.1.1.3:13", isLocal: false},
  2050  			},
  2051  		},
  2052  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  2053  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2054  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2055  			makeNSN("ns1", "ep1"): 1,
  2056  		},
  2057  	}, {
  2058  		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
  2059  		name:              "no change, multiple endpoints, subsets, IPs, and ports",
  2060  		previousEndpoints: multipleSubsetsIPsPorts,
  2061  		currentEndpoints:  multipleSubsetsIPsPorts,
  2062  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2063  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2064  				{endpoint: "10.1.1.1:11", isLocal: false},
  2065  				{endpoint: "10.1.1.2:11", isLocal: true},
  2066  			},
  2067  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2068  				{endpoint: "10.1.1.1:12", isLocal: false},
  2069  				{endpoint: "10.1.1.2:12", isLocal: true},
  2070  			},
  2071  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2072  				{endpoint: "10.1.1.3:13", isLocal: false},
  2073  				{endpoint: "10.1.1.4:13", isLocal: true},
  2074  			},
  2075  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  2076  				{endpoint: "10.1.1.3:14", isLocal: false},
  2077  				{endpoint: "10.1.1.4:14", isLocal: true},
  2078  			},
  2079  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  2080  				{endpoint: "10.2.2.1:21", isLocal: false},
  2081  				{endpoint: "10.2.2.2:21", isLocal: true},
  2082  			},
  2083  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  2084  				{endpoint: "10.2.2.1:22", isLocal: false},
  2085  				{endpoint: "10.2.2.2:22", isLocal: true},
  2086  			},
  2087  		},
  2088  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2089  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2090  				{endpoint: "10.1.1.1:11", isLocal: false},
  2091  				{endpoint: "10.1.1.2:11", isLocal: true},
  2092  			},
  2093  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2094  				{endpoint: "10.1.1.1:12", isLocal: false},
  2095  				{endpoint: "10.1.1.2:12", isLocal: true},
  2096  			},
  2097  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  2098  				{endpoint: "10.1.1.3:13", isLocal: false},
  2099  				{endpoint: "10.1.1.4:13", isLocal: true},
  2100  			},
  2101  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  2102  				{endpoint: "10.1.1.3:14", isLocal: false},
  2103  				{endpoint: "10.1.1.4:14", isLocal: true},
  2104  			},
  2105  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  2106  				{endpoint: "10.2.2.1:21", isLocal: false},
  2107  				{endpoint: "10.2.2.2:21", isLocal: true},
  2108  			},
  2109  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  2110  				{endpoint: "10.2.2.1:22", isLocal: false},
  2111  				{endpoint: "10.2.2.2:22", isLocal: true},
  2112  			},
  2113  		},
  2114  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  2115  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2116  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2117  			makeNSN("ns1", "ep1"): 2,
  2118  			makeNSN("ns2", "ep2"): 1,
  2119  		},
  2120  	}, {
  2121  		// Case[5]: add an Endpoints
  2122  		name:              "add an Endpoints",
  2123  		previousEndpoints: []*discovery.EndpointSlice{nil},
  2124  		currentEndpoints:  namedPortLocal,
  2125  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  2126  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2127  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2128  				{endpoint: "10.1.1.1:11", isLocal: true},
  2129  			},
  2130  		},
  2131  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2132  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2133  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  2134  		},
  2135  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2136  			makeNSN("ns1", "ep1"): 1,
  2137  		},
  2138  	}, {
  2139  		// Case[6]: remove an Endpoints
  2140  		name:              "remove an Endpoints",
  2141  		previousEndpoints: namedPortLocal,
  2142  		currentEndpoints:  []*discovery.EndpointSlice{nil},
  2143  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2144  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2145  				{endpoint: "10.1.1.1:11", isLocal: true},
  2146  			},
  2147  		},
  2148  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{},
  2149  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2150  			Endpoint:        "10.1.1.1:11",
  2151  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2152  		}},
  2153  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2154  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2155  	}, {
  2156  		// Case[7]: add an IP and port
  2157  		name:              "add an IP and port",
  2158  		previousEndpoints: namedPort,
  2159  		currentEndpoints:  namedPortsLocalNoLocal,
  2160  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2161  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2162  				{endpoint: "10.1.1.1:11", isLocal: false},
  2163  			},
  2164  		},
  2165  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2166  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2167  				{endpoint: "10.1.1.1:11", isLocal: false},
  2168  				{endpoint: "10.1.1.2:11", isLocal: true},
  2169  			},
  2170  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2171  				{endpoint: "10.1.1.1:12", isLocal: false},
  2172  				{endpoint: "10.1.1.2:12", isLocal: true},
  2173  			},
  2174  		},
  2175  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2176  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2177  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  2178  		},
  2179  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2180  			makeNSN("ns1", "ep1"): 1,
  2181  		},
  2182  	}, {
  2183  		// Case[8]: remove an IP and port
  2184  		name:              "remove an IP and port",
  2185  		previousEndpoints: namedPortsLocalNoLocal,
  2186  		currentEndpoints:  namedPort,
  2187  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2188  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2189  				{endpoint: "10.1.1.1:11", isLocal: false},
  2190  				{endpoint: "10.1.1.2:11", isLocal: true},
  2191  			},
  2192  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2193  				{endpoint: "10.1.1.1:12", isLocal: false},
  2194  				{endpoint: "10.1.1.2:12", isLocal: true},
  2195  			},
  2196  		},
  2197  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2198  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2199  				{endpoint: "10.1.1.1:11", isLocal: false},
  2200  			},
  2201  		},
  2202  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2203  			Endpoint:        "10.1.1.2:11",
  2204  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2205  		}, {
  2206  			Endpoint:        "10.1.1.1:12",
  2207  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  2208  		}, {
  2209  			Endpoint:        "10.1.1.2:12",
  2210  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  2211  		}},
  2212  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2213  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2214  	}, {
  2215  		// Case[9]: add a subset
  2216  		name:              "add a subset",
  2217  		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
  2218  		currentEndpoints:  multipleSubsetsWithLocal,
  2219  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2220  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2221  				{endpoint: "10.1.1.1:11", isLocal: false},
  2222  			},
  2223  		},
  2224  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2225  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2226  				{endpoint: "10.1.1.1:11", isLocal: false},
  2227  			},
  2228  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2229  				{endpoint: "10.1.1.2:12", isLocal: true},
  2230  			},
  2231  		},
  2232  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2233  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2234  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  2235  		},
  2236  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2237  			makeNSN("ns1", "ep1"): 1,
  2238  		},
  2239  	}, {
  2240  		// Case[10]: remove a subset
  2241  		name:              "remove a subset",
  2242  		previousEndpoints: multipleSubsets,
  2243  		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
  2244  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2245  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2246  				{endpoint: "10.1.1.1:11", isLocal: false},
  2247  			},
  2248  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2249  				{endpoint: "10.1.1.2:12", isLocal: false},
  2250  			},
  2251  		},
  2252  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2253  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2254  				{endpoint: "10.1.1.1:11", isLocal: false},
  2255  			},
  2256  		},
  2257  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2258  			Endpoint:        "10.1.1.2:12",
  2259  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  2260  		}},
  2261  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2262  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2263  	}, {
  2264  		// Case[11]: rename a port
  2265  		name:              "rename a port",
  2266  		previousEndpoints: namedPort,
  2267  		currentEndpoints:  namedPortRenamed,
  2268  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2269  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2270  				{endpoint: "10.1.1.1:11", isLocal: false},
  2271  			},
  2272  		},
  2273  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2274  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
  2275  				{endpoint: "10.1.1.1:11", isLocal: false},
  2276  			},
  2277  		},
  2278  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2279  			Endpoint:        "10.1.1.1:11",
  2280  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2281  		}},
  2282  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2283  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
  2284  		},
  2285  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  2286  	}, {
  2287  		// Case[12]: renumber a port
  2288  		name:              "renumber a port",
  2289  		previousEndpoints: namedPort,
  2290  		currentEndpoints:  namedPortRenumbered,
  2291  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2292  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2293  				{endpoint: "10.1.1.1:11", isLocal: false},
  2294  			},
  2295  		},
  2296  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2297  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2298  				{endpoint: "10.1.1.1:22", isLocal: false},
  2299  			},
  2300  		},
  2301  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2302  			Endpoint:        "10.1.1.1:11",
  2303  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  2304  		}},
  2305  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  2306  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  2307  	}, {
  2308  		// Case[13]: complex add and remove
  2309  		name:              "complex add and remove",
  2310  		previousEndpoints: complexBefore,
  2311  		currentEndpoints:  complexAfter,
  2312  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  2313  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2314  				{endpoint: "10.1.1.1:11", isLocal: false},
  2315  			},
  2316  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  2317  				{endpoint: "10.2.2.22:22", isLocal: true},
  2318  				{endpoint: "10.2.2.2:22", isLocal: true},
  2319  			},
  2320  			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
  2321  				{endpoint: "10.2.2.3:23", isLocal: true},
  2322  			},
  2323  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  2324  				{endpoint: "10.4.4.4:44", isLocal: true},
  2325  				{endpoint: "10.4.4.5:44", isLocal: true},
  2326  			},
  2327  			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
  2328  				{endpoint: "10.4.4.6:45", isLocal: true},
  2329  			},
  2330  		},
  2331  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2332  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2333  				{endpoint: "10.1.1.11:11", isLocal: false},
  2334  				{endpoint: "10.1.1.1:11", isLocal: false},
  2335  			},
  2336  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  2337  				{endpoint: "10.1.1.2:12", isLocal: false},
  2338  			},
  2339  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
  2340  				{endpoint: "10.1.1.2:122", isLocal: false},
  2341  			},
  2342  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
  2343  				{endpoint: "10.3.3.3:33", isLocal: false},
  2344  			},
  2345  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  2346  				{endpoint: "10.4.4.4:44", isLocal: true},
  2347  			},
  2348  		},
  2349  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  2350  			Endpoint:        "10.2.2.2:22",
  2351  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  2352  		}, {
  2353  			Endpoint:        "10.2.2.22:22",
  2354  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  2355  		}, {
  2356  			Endpoint:        "10.2.2.3:23",
  2357  			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
  2358  		}, {
  2359  			Endpoint:        "10.4.4.5:44",
  2360  			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
  2361  		}, {
  2362  			Endpoint:        "10.4.4.6:45",
  2363  			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
  2364  		}},
  2365  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2366  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
  2367  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
  2368  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
  2369  		},
  2370  		expectedLocalEndpoints: map[types.NamespacedName]int{
  2371  			makeNSN("ns4", "ep4"): 1,
  2372  		},
  2373  	}, {
  2374  		// Case[14]: change from 0 endpoint address to 1 unnamed port
  2375  		name:              "change from 0 endpoint address to 1 unnamed port",
  2376  		previousEndpoints: emptyEndpointSlices,
  2377  		currentEndpoints:  namedPort,
  2378  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  2379  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  2380  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  2381  				{endpoint: "10.1.1.1:11", isLocal: false},
  2382  			},
  2383  		},
  2384  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  2385  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  2386  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  2387  		},
  2388  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  2389  	},
  2390  	}
  2391  
  2392  	for tci, tc := range testCases {
  2393  		t.Run(tc.name, func(t *testing.T) {
  2394  			_, fp := NewFakeProxier(v1.IPv4Protocol)
  2395  			fp.hostname = testHostname
  2396  
  2397  			// First check that after adding all previous versions of endpoints,
  2398  			// the fp.oldEndpoints is as we expect.
  2399  			for i := range tc.previousEndpoints {
  2400  				if tc.previousEndpoints[i] != nil {
  2401  					fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
  2402  				}
  2403  			}
  2404  			fp.endpointsMap.Update(fp.endpointsChanges)
  2405  			checkEndpointExpectations(t, tci, fp.endpointsMap, tc.oldEndpoints)
  2406  
  2407  			// Now let's call appropriate handlers to get to state we want to be.
  2408  			if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
  2409  				t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
  2410  			}
  2411  
  2412  			for i := range tc.previousEndpoints {
  2413  				prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
  2414  				switch {
  2415  				case prev == nil:
  2416  					fp.OnEndpointSliceAdd(curr)
  2417  				case curr == nil:
  2418  					fp.OnEndpointSliceDelete(prev)
  2419  				default:
  2420  					fp.OnEndpointSliceUpdate(prev, curr)
  2421  				}
  2422  			}
  2423  			result := fp.endpointsMap.Update(fp.endpointsChanges)
  2424  			newMap := fp.endpointsMap
  2425  			checkEndpointExpectations(t, tci, newMap, tc.expectedResult)
  2426  			if len(result.DeletedUDPEndpoints) != len(tc.expectedDeletedUDPEndpoints) {
  2427  				t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedDeletedUDPEndpoints), len(result.DeletedUDPEndpoints), result.DeletedUDPEndpoints)
  2428  			}
  2429  			for _, x := range tc.expectedDeletedUDPEndpoints {
  2430  				found := false
  2431  				for _, stale := range result.DeletedUDPEndpoints {
  2432  					if stale == x {
  2433  						found = true
  2434  						break
  2435  					}
  2436  				}
  2437  				if !found {
  2438  					t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.DeletedUDPEndpoints)
  2439  				}
  2440  			}
  2441  			if len(result.NewlyActiveUDPServices) != len(tc.expectedNewlyActiveUDPServices) {
  2442  				t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedNewlyActiveUDPServices), len(result.NewlyActiveUDPServices), result.NewlyActiveUDPServices)
  2443  			}
  2444  			for svcName := range tc.expectedNewlyActiveUDPServices {
  2445  				found := false
  2446  				for _, stale := range result.NewlyActiveUDPServices {
  2447  					if stale == svcName {
  2448  						found = true
  2449  					}
  2450  				}
  2451  				if !found {
  2452  					t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.NewlyActiveUDPServices)
  2453  				}
  2454  			}
  2455  			localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  2456  			if !reflect.DeepEqual(localReadyEndpoints, tc.expectedLocalEndpoints) {
  2457  				t.Errorf("[%d] expected local endpoints %v, got %v", tci, tc.expectedLocalEndpoints, localReadyEndpoints)
  2458  			}
  2459  		})
  2460  	}
  2461  }
  2462  
  2463  // TestHealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
  2464  func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
  2465  	_, fp := NewFakeProxier(v1.IPv4Protocol)
  2466  	fp.OnServiceSynced()
  2467  	fp.OnEndpointSlicesSynced()
  2468  
  2469  	serviceName := "svc1"
  2470  	namespaceName := "ns1"
  2471  
  2472  	fp.OnServiceAdd(&v1.Service{
  2473  		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  2474  		Spec: v1.ServiceSpec{
  2475  			ClusterIP: "172.30.1.1",
  2476  			Selector:  map[string]string{"foo": "bar"},
  2477  			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP}},
  2478  		},
  2479  	})
  2480  
  2481  	endpointSlice := &discovery.EndpointSlice{
  2482  		ObjectMeta: metav1.ObjectMeta{
  2483  			Name:      fmt.Sprintf("%s-1", serviceName),
  2484  			Namespace: namespaceName,
  2485  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  2486  		},
  2487  		Ports: []discovery.EndpointPort{{
  2488  			Name:     ptr.To(""),
  2489  			Port:     ptr.To[int32](80),
  2490  			Protocol: ptr.To(v1.ProtocolTCP),
  2491  		}},
  2492  		AddressType: discovery.AddressTypeIPv4,
  2493  		Endpoints: []discovery.Endpoint{{
  2494  			Addresses:  []string{"10.0.1.1"},
  2495  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  2496  			NodeName:   ptr.To(testHostname),
  2497  		}, {
  2498  			Addresses:  []string{"10.0.1.2"},
  2499  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  2500  			NodeName:   ptr.To(testHostname),
  2501  		}, {
  2502  			Addresses:  []string{"10.0.1.3"},
  2503  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  2504  			NodeName:   ptr.To(testHostname),
  2505  		}, { // not ready endpoints should be ignored
  2506  			Addresses:  []string{"10.0.1.4"},
  2507  			Conditions: discovery.EndpointConditions{Ready: ptr.To(false)},
  2508  			NodeName:   ptr.To(testHostname),
  2509  		}},
  2510  	}
  2511  
  2512  	fp.OnEndpointSliceAdd(endpointSlice)
  2513  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  2514  	localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  2515  	if len(localReadyEndpoints) != 1 {
  2516  		t.Errorf("unexpected number of local ready endpoints, expected 1 but got: %d", len(localReadyEndpoints))
  2517  	}
  2518  
  2519  	// set all endpoints to terminating
  2520  	endpointSliceTerminating := &discovery.EndpointSlice{
  2521  		ObjectMeta: metav1.ObjectMeta{
  2522  			Name:      fmt.Sprintf("%s-1", serviceName),
  2523  			Namespace: namespaceName,
  2524  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  2525  		},
  2526  		Ports: []discovery.EndpointPort{{
  2527  			Name:     ptr.To(""),
  2528  			Port:     ptr.To[int32](80),
  2529  			Protocol: ptr.To(v1.ProtocolTCP),
  2530  		}},
  2531  		AddressType: discovery.AddressTypeIPv4,
  2532  		Endpoints: []discovery.Endpoint{{
  2533  			Addresses: []string{"10.0.1.1"},
  2534  			Conditions: discovery.EndpointConditions{
  2535  				Ready:       ptr.To(false),
  2536  				Serving:     ptr.To(true),
  2537  				Terminating: ptr.To(false),
  2538  			},
  2539  			NodeName: ptr.To(testHostname),
  2540  		}, {
  2541  			Addresses: []string{"10.0.1.2"},
  2542  			Conditions: discovery.EndpointConditions{
  2543  				Ready:       ptr.To(false),
  2544  				Serving:     ptr.To(true),
  2545  				Terminating: ptr.To(true),
  2546  			},
  2547  			NodeName: ptr.To(testHostname),
  2548  		}, {
  2549  			Addresses: []string{"10.0.1.3"},
  2550  			Conditions: discovery.EndpointConditions{
  2551  				Ready:       ptr.To(false),
  2552  				Serving:     ptr.To(true),
  2553  				Terminating: ptr.To(true),
  2554  			},
  2555  			NodeName: ptr.To(testHostname),
  2556  		}, { // not ready endpoints should be ignored
  2557  			Addresses: []string{"10.0.1.4"},
  2558  			Conditions: discovery.EndpointConditions{
  2559  				Ready:       ptr.To(false),
  2560  				Serving:     ptr.To(false),
  2561  				Terminating: ptr.To(true),
  2562  			},
  2563  			NodeName: ptr.To(testHostname),
  2564  		}},
  2565  	}
  2566  
  2567  	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
  2568  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  2569  	localReadyEndpoints = fp.endpointsMap.LocalReadyEndpoints()
  2570  	if len(localReadyEndpoints) != 0 {
  2571  		t.Errorf("unexpected number of local ready endpoints, expected 0 but got: %d", len(localReadyEndpoints))
  2572  	}
  2573  }
  2574  
  2575  // TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
  2576  
// This test ensures that the nftables proxier supports translating Endpoints to
// nftables output when internalTrafficPolicy is specified.
func TestInternalTrafficPolicy(t *testing.T) {
	// endpoint pairs an endpoint IP with the node it is scheduled on, so the
	// cases below can control which endpoints count as "local" to testHostname.
	type endpoint struct {
		ip       string
		hostname string
	}

	testCases := []struct {
		name                  string
		line                  string // source line of the case (via getLine), shown by runPacketFlowTests on failure
		internalTrafficPolicy *v1.ServiceInternalTrafficPolicy
		endpoints             []endpoint
		flowTests             []packetFlowTest
	}{
		{
			name:                  "internalTrafficPolicy is cluster",
			line:                  getLine(),
			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster),
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			flowTests: []packetFlowTest{
				{
					name:     "pod to ClusterIP hits all endpoints",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.3:80",
					masq:     false,
				},
			},
		},
		{
			name:                  "internalTrafficPolicy is local and there is one local endpoint",
			line:                  getLine(),
			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			flowTests: []packetFlowTest{
				{
					name:     "pod to ClusterIP hits only local endpoint",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80",
					masq:     false,
				},
			},
		},
		{
			name:                  "internalTrafficPolicy is local and there are multiple local endpoints",
			line:                  getLine(),
			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", testHostname},
				{"10.0.1.3", "host2"},
			},
			flowTests: []packetFlowTest{
				{
					name:     "pod to ClusterIP hits all local endpoints",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80",
					masq:     false,
				},
			},
		},
		{
			// With Local policy and no local endpoints, internal traffic is
			// dropped rather than rejected (the service still has endpoints,
			// just none usable from this node).
			name:                  "internalTrafficPolicy is local and there are no local endpoints",
			line:                  getLine(),
			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			flowTests: []packetFlowTest{
				{
					name:     "no endpoints",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "DROP",
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			nft, fp := NewFakeProxier(v1.IPv4Protocol)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			serviceName := "svc1"
			namespaceName := "ns1"

			// One ClusterIP service on TCP/80; the internal traffic policy
			// under test is applied below only when the case specifies one.
			svc := &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
				Spec: v1.ServiceSpec{
					ClusterIP: "172.30.1.1",
					Selector:  map[string]string{"foo": "bar"},
					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
				},
			}
			if tc.internalTrafficPolicy != nil {
				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
			}

			fp.OnServiceAdd(svc)

			// Build one EndpointSlice containing the case's endpoints, all
			// Ready, each pinned to its configured node.
			endpointSlice := &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", serviceName),
					Namespace: namespaceName,
					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
				},
				Ports: []discovery.EndpointPort{{
					Name:     ptr.To(""),
					Port:     ptr.To[int32](80),
					Protocol: ptr.To(v1.ProtocolTCP),
				}},
				AddressType: discovery.AddressTypeIPv4,
			}
			for _, ep := range tc.endpoints {
				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
					Addresses:  []string{ep.ip},
					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
					NodeName:   ptr.To(ep.hostname),
				})
			}

			fp.OnEndpointSliceAdd(endpointSlice)
			fp.syncProxyRules()
			runPacketFlowTests(t, tc.line, nft, testNodeIPs, tc.flowTests)

			// After the slice is deleted the service has no endpoints at all,
			// so traffic to the ClusterIP must be rejected regardless of policy.
			fp.OnEndpointSliceDelete(endpointSlice)
			fp.syncProxyRules()
			runPacketFlowTests(t, tc.line, nft, testNodeIPs, []packetFlowTest{
				{
					name:     "endpoints deleted",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "REJECT",
				},
			})
		})
	}
}
  2735  
  2736  // TestTerminatingEndpointsTrafficPolicyLocal tests that when there are local ready and
  2737  // ready + terminating endpoints, only the ready endpoints are used.
  2738  func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
  2739  	service := &v1.Service{
  2740  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  2741  		Spec: v1.ServiceSpec{
  2742  			ClusterIP:             "172.30.1.1",
  2743  			Type:                  v1.ServiceTypeLoadBalancer,
  2744  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  2745  			Ports: []v1.ServicePort{
  2746  				{
  2747  					Name:       "",
  2748  					TargetPort: intstr.FromInt32(80),
  2749  					Port:       80,
  2750  					Protocol:   v1.ProtocolTCP,
  2751  				},
  2752  			},
  2753  			HealthCheckNodePort: 30000,
  2754  		},
  2755  		Status: v1.ServiceStatus{
  2756  			LoadBalancer: v1.LoadBalancerStatus{
  2757  				Ingress: []v1.LoadBalancerIngress{
  2758  					{IP: "1.2.3.4"},
  2759  				},
  2760  			},
  2761  		},
  2762  	}
  2763  
  2764  	testcases := []struct {
  2765  		name          string
  2766  		line          string
  2767  		endpointslice *discovery.EndpointSlice
  2768  		flowTests     []packetFlowTest
  2769  	}{
  2770  		{
  2771  			name: "ready endpoints exist",
  2772  			line: getLine(),
  2773  			endpointslice: &discovery.EndpointSlice{
  2774  				ObjectMeta: metav1.ObjectMeta{
  2775  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2776  					Namespace: "ns1",
  2777  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2778  				},
  2779  				Ports: []discovery.EndpointPort{{
  2780  					Name:     ptr.To(""),
  2781  					Port:     ptr.To[int32](80),
  2782  					Protocol: ptr.To(v1.ProtocolTCP),
  2783  				}},
  2784  				AddressType: discovery.AddressTypeIPv4,
  2785  				Endpoints: []discovery.Endpoint{
  2786  					{
  2787  						Addresses: []string{"10.0.1.1"},
  2788  						Conditions: discovery.EndpointConditions{
  2789  							Ready:       ptr.To(true),
  2790  							Serving:     ptr.To(true),
  2791  							Terminating: ptr.To(false),
  2792  						},
  2793  						NodeName: ptr.To(testHostname),
  2794  					},
  2795  					{
  2796  						Addresses: []string{"10.0.1.2"},
  2797  						Conditions: discovery.EndpointConditions{
  2798  							Ready:       ptr.To(true),
  2799  							Serving:     ptr.To(true),
  2800  							Terminating: ptr.To(false),
  2801  						},
  2802  						NodeName: ptr.To(testHostname),
  2803  					},
  2804  					{
  2805  						// this endpoint should be ignored for external since there are ready non-terminating endpoints
  2806  						Addresses: []string{"10.0.1.3"},
  2807  						Conditions: discovery.EndpointConditions{
  2808  							Ready:       ptr.To(false),
  2809  							Serving:     ptr.To(true),
  2810  							Terminating: ptr.To(true),
  2811  						},
  2812  						NodeName: ptr.To(testHostname),
  2813  					},
  2814  					{
  2815  						// this endpoint should be ignored for external since there are ready non-terminating endpoints
  2816  						Addresses: []string{"10.0.1.4"},
  2817  						Conditions: discovery.EndpointConditions{
  2818  							Ready:       ptr.To(false),
  2819  							Serving:     ptr.To(false),
  2820  							Terminating: ptr.To(true),
  2821  						},
  2822  						NodeName: ptr.To(testHostname),
  2823  					},
  2824  					{
  2825  						// this endpoint should be ignored for external since it's not local
  2826  						Addresses: []string{"10.0.1.5"},
  2827  						Conditions: discovery.EndpointConditions{
  2828  							Ready:       ptr.To(true),
  2829  							Serving:     ptr.To(true),
  2830  							Terminating: ptr.To(false),
  2831  						},
  2832  						NodeName: ptr.To("host-1"),
  2833  					},
  2834  				},
  2835  			},
  2836  			flowTests: []packetFlowTest{
  2837  				{
  2838  					name:     "pod to clusterIP",
  2839  					sourceIP: "10.0.0.2",
  2840  					destIP:   "172.30.1.1",
  2841  					destPort: 80,
  2842  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  2843  					masq:     false,
  2844  				},
  2845  				{
  2846  					name:     "external to LB",
  2847  					sourceIP: testExternalClient,
  2848  					destIP:   "1.2.3.4",
  2849  					destPort: 80,
  2850  					output:   "10.0.1.1:80, 10.0.1.2:80",
  2851  					masq:     false,
  2852  				},
  2853  			},
  2854  		},
  2855  		{
  2856  			name: "only terminating endpoints exist",
  2857  			line: getLine(),
  2858  			endpointslice: &discovery.EndpointSlice{
  2859  				ObjectMeta: metav1.ObjectMeta{
  2860  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2861  					Namespace: "ns1",
  2862  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2863  				},
  2864  				Ports: []discovery.EndpointPort{{
  2865  					Name:     ptr.To(""),
  2866  					Port:     ptr.To[int32](80),
  2867  					Protocol: ptr.To(v1.ProtocolTCP),
  2868  				}},
  2869  				AddressType: discovery.AddressTypeIPv4,
  2870  				Endpoints: []discovery.Endpoint{
  2871  					{
  2872  						// this endpoint should be used since there are only ready terminating endpoints
  2873  						Addresses: []string{"10.0.1.2"},
  2874  						Conditions: discovery.EndpointConditions{
  2875  							Ready:       ptr.To(false),
  2876  							Serving:     ptr.To(true),
  2877  							Terminating: ptr.To(true),
  2878  						},
  2879  						NodeName: ptr.To(testHostname),
  2880  					},
  2881  					{
  2882  						// this endpoint should be used since there are only ready terminating endpoints
  2883  						Addresses: []string{"10.0.1.3"},
  2884  						Conditions: discovery.EndpointConditions{
  2885  							Ready:       ptr.To(false),
  2886  							Serving:     ptr.To(true),
  2887  							Terminating: ptr.To(true),
  2888  						},
  2889  						NodeName: ptr.To(testHostname),
  2890  					},
  2891  					{
  2892  						// this endpoint should not be used since it is both terminating and not ready.
  2893  						Addresses: []string{"10.0.1.4"},
  2894  						Conditions: discovery.EndpointConditions{
  2895  							Ready:       ptr.To(false),
  2896  							Serving:     ptr.To(false),
  2897  							Terminating: ptr.To(true),
  2898  						},
  2899  						NodeName: ptr.To(testHostname),
  2900  					},
  2901  					{
  2902  						// this endpoint should be ignored for external since it's not local
  2903  						Addresses: []string{"10.0.1.5"},
  2904  						Conditions: discovery.EndpointConditions{
  2905  							Ready:       ptr.To(true),
  2906  							Serving:     ptr.To(true),
  2907  							Terminating: ptr.To(false),
  2908  						},
  2909  						NodeName: ptr.To("host-1"),
  2910  					},
  2911  				},
  2912  			},
  2913  			flowTests: []packetFlowTest{
  2914  				{
  2915  					name:     "pod to clusterIP",
  2916  					sourceIP: "10.0.0.2",
  2917  					destIP:   "172.30.1.1",
  2918  					destPort: 80,
  2919  					output:   "10.0.1.5:80",
  2920  					masq:     false,
  2921  				},
  2922  				{
  2923  					name:     "external to LB",
  2924  					sourceIP: testExternalClient,
  2925  					destIP:   "1.2.3.4",
  2926  					destPort: 80,
  2927  					output:   "10.0.1.2:80, 10.0.1.3:80",
  2928  					masq:     false,
  2929  				},
  2930  			},
  2931  		},
  2932  		{
  2933  			name: "terminating endpoints on remote node",
  2934  			line: getLine(),
  2935  			endpointslice: &discovery.EndpointSlice{
  2936  				ObjectMeta: metav1.ObjectMeta{
  2937  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2938  					Namespace: "ns1",
  2939  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2940  				},
  2941  				Ports: []discovery.EndpointPort{{
  2942  					Name:     ptr.To(""),
  2943  					Port:     ptr.To[int32](80),
  2944  					Protocol: ptr.To(v1.ProtocolTCP),
  2945  				}},
  2946  				AddressType: discovery.AddressTypeIPv4,
  2947  				Endpoints: []discovery.Endpoint{
  2948  					{
  2949  						// this endpoint won't be used because it's not local,
  2950  						// but it will prevent a REJECT rule from being created
  2951  						Addresses: []string{"10.0.1.5"},
  2952  						Conditions: discovery.EndpointConditions{
  2953  							Ready:       ptr.To(false),
  2954  							Serving:     ptr.To(true),
  2955  							Terminating: ptr.To(true),
  2956  						},
  2957  						NodeName: ptr.To("host-1"),
  2958  					},
  2959  				},
  2960  			},
  2961  			flowTests: []packetFlowTest{
  2962  				{
  2963  					name:     "pod to clusterIP",
  2964  					sourceIP: "10.0.0.2",
  2965  					destIP:   "172.30.1.1",
  2966  					destPort: 80,
  2967  					output:   "10.0.1.5:80",
  2968  				},
  2969  				{
  2970  					name:     "external to LB, no locally-usable endpoints",
  2971  					sourceIP: testExternalClient,
  2972  					destIP:   "1.2.3.4",
  2973  					destPort: 80,
  2974  					output:   "DROP",
  2975  				},
  2976  			},
  2977  		},
  2978  		{
  2979  			name: "no usable endpoints on any node",
  2980  			line: getLine(),
  2981  			endpointslice: &discovery.EndpointSlice{
  2982  				ObjectMeta: metav1.ObjectMeta{
  2983  					Name:      fmt.Sprintf("%s-1", "svc1"),
  2984  					Namespace: "ns1",
  2985  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  2986  				},
  2987  				Ports: []discovery.EndpointPort{{
  2988  					Name:     ptr.To(""),
  2989  					Port:     ptr.To[int32](80),
  2990  					Protocol: ptr.To(v1.ProtocolTCP),
  2991  				}},
  2992  				AddressType: discovery.AddressTypeIPv4,
  2993  				Endpoints: []discovery.Endpoint{
  2994  					{
  2995  						// Local but not ready or serving
  2996  						Addresses: []string{"10.0.1.5"},
  2997  						Conditions: discovery.EndpointConditions{
  2998  							Ready:       ptr.To(false),
  2999  							Serving:     ptr.To(false),
  3000  							Terminating: ptr.To(true),
  3001  						},
  3002  						NodeName: ptr.To(testHostname),
  3003  					},
  3004  					{
  3005  						// Remote and not ready or serving
  3006  						Addresses: []string{"10.0.1.5"},
  3007  						Conditions: discovery.EndpointConditions{
  3008  							Ready:       ptr.To(false),
  3009  							Serving:     ptr.To(false),
  3010  							Terminating: ptr.To(true),
  3011  						},
  3012  						NodeName: ptr.To("host-1"),
  3013  					},
  3014  				},
  3015  			},
  3016  			flowTests: []packetFlowTest{
  3017  				{
  3018  					name:     "pod to clusterIP, no usable endpoints",
  3019  					sourceIP: "10.0.0.2",
  3020  					destIP:   "172.30.1.1",
  3021  					destPort: 80,
  3022  					output:   "REJECT",
  3023  				},
  3024  				{
  3025  					name:     "external to LB, no usable endpoints",
  3026  					sourceIP: testExternalClient,
  3027  					destIP:   "1.2.3.4",
  3028  					destPort: 80,
  3029  					output:   "REJECT",
  3030  				},
  3031  			},
  3032  		},
  3033  	}
  3034  
  3035  	for _, testcase := range testcases {
  3036  		t.Run(testcase.name, func(t *testing.T) {
  3037  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  3038  			fp.OnServiceSynced()
  3039  			fp.OnEndpointSlicesSynced()
  3040  
  3041  			fp.OnServiceAdd(service)
  3042  
  3043  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  3044  			fp.syncProxyRules()
  3045  			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, testcase.flowTests)
  3046  
  3047  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  3048  			fp.syncProxyRules()
  3049  			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, []packetFlowTest{
  3050  				{
  3051  					name:     "pod to clusterIP after endpoints deleted",
  3052  					sourceIP: "10.0.0.2",
  3053  					destIP:   "172.30.1.1",
  3054  					destPort: 80,
  3055  					output:   "REJECT",
  3056  				},
  3057  				{
  3058  					name:     "external to LB after endpoints deleted",
  3059  					sourceIP: testExternalClient,
  3060  					destIP:   "1.2.3.4",
  3061  					destPort: 80,
  3062  					output:   "REJECT",
  3063  				},
  3064  			})
  3065  		})
  3066  	}
  3067  }
  3068  
// TestTerminatingEndpointsTrafficPolicyCluster tests that when there are cluster-wide
// ready and ready + terminating endpoints, only the ready endpoints are used.
// (If no endpoint is ready, serving+terminating endpoints are used as a fallback;
// if no endpoint is even serving, traffic is rejected.)
func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
	// LoadBalancer service with the default ExternalTrafficPolicy (Cluster),
	// so external traffic may be delivered to endpoints on any node.
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.30.1.1",
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster,
			Ports: []v1.ServicePort{
				{
					Name:       "",
					TargetPort: intstr.FromInt32(80),
					Port:       80,
					Protocol:   v1.ProtocolTCP,
				},
			},
			HealthCheckNodePort: 30000,
		},
		Status: v1.ServiceStatus{
			LoadBalancer: v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "1.2.3.4"},
				},
			},
		},
	}

	// Each testcase installs one EndpointSlice for the service above and then
	// verifies packet delivery. "line" records the source line of the testcase
	// so that runPacketFlowTests failures point back to the right table entry.
	testcases := []struct {
		name          string
		line          string
		endpointslice *discovery.EndpointSlice
		flowTests     []packetFlowTest
	}{
		{
			name: "ready endpoints exist",
			line: getLine(),
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     ptr.To(""),
					Port:     ptr.To[int32](80),
					Protocol: ptr.To(v1.ProtocolTCP),
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(true),
							Serving:     ptr.To(true),
							Terminating: ptr.To(false),
						},
						NodeName: ptr.To(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(true),
							Serving:     ptr.To(true),
							Terminating: ptr.To(false),
						},
						NodeName: ptr.To(testHostname),
					},
					{
						// this endpoint should be ignored since there are ready non-terminating endpoints
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(true),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To("another-host"),
					},
					{
						// this endpoint should be ignored since it is not "serving"
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(false),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To("another-host"),
					},
					{
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(true),
							Serving:     ptr.To(true),
							Terminating: ptr.To(false),
						},
						NodeName: ptr.To("another-host"),
					},
				},
			},
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     true,
				},
			},
		},
		{
			name: "only terminating endpoints exist",
			line: getLine(),
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     ptr.To(""),
					Port:     ptr.To[int32](80),
					Protocol: ptr.To(v1.ProtocolTCP),
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// this endpoint should be used since there are only ready terminating endpoints
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(true),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To(testHostname),
					},
					{
						// this endpoint should be used since there are only ready terminating endpoints
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(true),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To(testHostname),
					},
					{
						// this endpoint should not be used since it is both terminating and not ready.
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(false),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To("another-host"),
					},
					{
						// this endpoint should be used since there are only ready terminating endpoints
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(true),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To("another-host"),
					},
				},
			},
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
					masq:     true,
				},
			},
		},
		{
			name: "terminating endpoints on remote node",
			line: getLine(),
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     ptr.To(""),
					Port:     ptr.To[int32](80),
					Protocol: ptr.To(v1.ProtocolTCP),
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// With eTP:Cluster, a serving+terminating endpoint is
						// usable even though it is on a remote node.
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(true),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To("host-1"),
					},
				},
			},
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.5:80",
					masq:     true,
				},
			},
		},
		{
			name: "no usable endpoints on any node",
			line: getLine(),
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     ptr.To(""),
					Port:     ptr.To[int32](80),
					Protocol: ptr.To(v1.ProtocolTCP),
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// Local, not ready or serving
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(false),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To(testHostname),
					},
					{
						// Remote, not ready or serving
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       ptr.To(false),
							Serving:     ptr.To(false),
							Terminating: ptr.To(true),
						},
						NodeName: ptr.To("host-1"),
					},
				},
			},
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "REJECT",
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "REJECT",
				},
			},
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {

			nft, fp := NewFakeProxier(v1.IPv4Protocol)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			fp.OnServiceAdd(service)

			// Install the testcase's endpoints and verify the expected flows.
			fp.OnEndpointSliceAdd(testcase.endpointslice)
			fp.syncProxyRules()
			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, testcase.flowTests)

			// After the slice is deleted, all traffic to the service must be
			// rejected regardless of the testcase.
			fp.OnEndpointSliceDelete(testcase.endpointslice)
			fp.syncProxyRules()
			runPacketFlowTests(t, testcase.line, nft, testNodeIPs, []packetFlowTest{
				{
					name:     "pod to clusterIP after endpoints deleted",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "REJECT",
				},
				{
					name:     "external to LB after endpoints deleted",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "REJECT",
				},
			})
		})
	}
}
  3401  
  3402  func TestInternalExternalMasquerade(t *testing.T) {
  3403  	// (Put the test setup code in an internal function so we can have it here at the
  3404  	// top, before the test cases that will be run against it.)
  3405  	setupTest := func(fp *Proxier) {
  3406  		makeServiceMap(fp,
  3407  			makeTestService("ns1", "svc1", func(svc *v1.Service) {
  3408  				svc.Spec.Type = "LoadBalancer"
  3409  				svc.Spec.ClusterIP = "172.30.0.41"
  3410  				svc.Spec.Ports = []v1.ServicePort{{
  3411  					Name:     "p80",
  3412  					Port:     80,
  3413  					Protocol: v1.ProtocolTCP,
  3414  					NodePort: int32(3001),
  3415  				}}
  3416  				svc.Spec.HealthCheckNodePort = 30001
  3417  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  3418  					IP: "1.2.3.4",
  3419  				}}
  3420  			}),
  3421  			makeTestService("ns2", "svc2", func(svc *v1.Service) {
  3422  				svc.Spec.Type = "LoadBalancer"
  3423  				svc.Spec.ClusterIP = "172.30.0.42"
  3424  				svc.Spec.Ports = []v1.ServicePort{{
  3425  					Name:     "p80",
  3426  					Port:     80,
  3427  					Protocol: v1.ProtocolTCP,
  3428  					NodePort: int32(3002),
  3429  				}}
  3430  				svc.Spec.HealthCheckNodePort = 30002
  3431  				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  3432  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  3433  					IP: "5.6.7.8",
  3434  				}}
  3435  			}),
  3436  			makeTestService("ns3", "svc3", func(svc *v1.Service) {
  3437  				svc.Spec.Type = "LoadBalancer"
  3438  				svc.Spec.ClusterIP = "172.30.0.43"
  3439  				svc.Spec.Ports = []v1.ServicePort{{
  3440  					Name:     "p80",
  3441  					Port:     80,
  3442  					Protocol: v1.ProtocolTCP,
  3443  					NodePort: int32(3003),
  3444  				}}
  3445  				svc.Spec.HealthCheckNodePort = 30003
  3446  				svc.Spec.InternalTrafficPolicy = ptr.To(v1.ServiceInternalTrafficPolicyLocal)
  3447  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  3448  					IP: "9.10.11.12",
  3449  				}}
  3450  			}),
  3451  		)
  3452  
  3453  		populateEndpointSlices(fp,
  3454  			makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  3455  				eps.AddressType = discovery.AddressTypeIPv4
  3456  				eps.Endpoints = []discovery.Endpoint{
  3457  					{
  3458  						Addresses: []string{"10.180.0.1"},
  3459  						NodeName:  ptr.To(testHostname),
  3460  					},
  3461  					{
  3462  						Addresses: []string{"10.180.1.1"},
  3463  						NodeName:  ptr.To("remote"),
  3464  					},
  3465  				}
  3466  				eps.Ports = []discovery.EndpointPort{{
  3467  					Name:     ptr.To("p80"),
  3468  					Port:     ptr.To[int32](80),
  3469  					Protocol: ptr.To(v1.ProtocolTCP),
  3470  				}}
  3471  			}),
  3472  			makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  3473  				eps.AddressType = discovery.AddressTypeIPv4
  3474  				eps.Endpoints = []discovery.Endpoint{
  3475  					{
  3476  						Addresses: []string{"10.180.0.2"},
  3477  						NodeName:  ptr.To(testHostname),
  3478  					},
  3479  					{
  3480  						Addresses: []string{"10.180.1.2"},
  3481  						NodeName:  ptr.To("remote"),
  3482  					},
  3483  				}
  3484  				eps.Ports = []discovery.EndpointPort{{
  3485  					Name:     ptr.To("p80"),
  3486  					Port:     ptr.To[int32](80),
  3487  					Protocol: ptr.To(v1.ProtocolTCP),
  3488  				}}
  3489  			}),
  3490  			makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  3491  				eps.AddressType = discovery.AddressTypeIPv4
  3492  				eps.Endpoints = []discovery.Endpoint{
  3493  					{
  3494  						Addresses: []string{"10.180.0.3"},
  3495  						NodeName:  ptr.To(testHostname),
  3496  					},
  3497  					{
  3498  						Addresses: []string{"10.180.1.3"},
  3499  						NodeName:  ptr.To("remote"),
  3500  					},
  3501  				}
  3502  				eps.Ports = []discovery.EndpointPort{{
  3503  					Name:     ptr.To("p80"),
  3504  					Port:     ptr.To[int32](80),
  3505  					Protocol: ptr.To(v1.ProtocolTCP),
  3506  				}}
  3507  			}),
  3508  		)
  3509  
  3510  		fp.syncProxyRules()
  3511  	}
  3512  
  3513  	// We use the same flowTests for all of the testCases. The "output" and "masq"
  3514  	// values here represent the normal case (working localDetector, no masqueradeAll)
  3515  	flowTests := []packetFlowTest{
  3516  		{
  3517  			name:     "pod to ClusterIP",
  3518  			sourceIP: "10.0.0.2",
  3519  			destIP:   "172.30.0.41",
  3520  			destPort: 80,
  3521  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3522  			masq:     false,
  3523  		},
  3524  		{
  3525  			name:     "pod to NodePort",
  3526  			sourceIP: "10.0.0.2",
  3527  			destIP:   testNodeIP,
  3528  			destPort: 3001,
  3529  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3530  			masq:     true,
  3531  		},
  3532  		{
  3533  			name:     "pod to LB",
  3534  			sourceIP: "10.0.0.2",
  3535  			destIP:   "1.2.3.4",
  3536  			destPort: 80,
  3537  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3538  			masq:     true,
  3539  		},
  3540  		{
  3541  			name:     "node to ClusterIP",
  3542  			sourceIP: testNodeIP,
  3543  			destIP:   "172.30.0.41",
  3544  			destPort: 80,
  3545  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3546  			masq:     true,
  3547  		},
  3548  		{
  3549  			name:     "node to NodePort",
  3550  			sourceIP: testNodeIP,
  3551  			destIP:   testNodeIP,
  3552  			destPort: 3001,
  3553  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3554  			masq:     true,
  3555  		},
  3556  		{
  3557  			name:     "node to LB",
  3558  			sourceIP: testNodeIP,
  3559  			destIP:   "1.2.3.4",
  3560  			destPort: 80,
  3561  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3562  			masq:     true,
  3563  		},
  3564  		{
  3565  			name:     "external to ClusterIP",
  3566  			sourceIP: testExternalClient,
  3567  			destIP:   "172.30.0.41",
  3568  			destPort: 80,
  3569  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3570  			masq:     true,
  3571  		},
  3572  		{
  3573  			name:     "external to NodePort",
  3574  			sourceIP: testExternalClient,
  3575  			destIP:   testNodeIP,
  3576  			destPort: 3001,
  3577  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3578  			masq:     true,
  3579  		},
  3580  		{
  3581  			name:     "external to LB",
  3582  			sourceIP: testExternalClient,
  3583  			destIP:   "1.2.3.4",
  3584  			destPort: 80,
  3585  			output:   "10.180.0.1:80, 10.180.1.1:80",
  3586  			masq:     true,
  3587  		},
  3588  		{
  3589  			name:     "pod to ClusterIP with eTP:Local",
  3590  			sourceIP: "10.0.0.2",
  3591  			destIP:   "172.30.0.42",
  3592  			destPort: 80,
  3593  
  3594  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  3595  			// as "Pod to ClusterIP"
  3596  			output: "10.180.0.2:80, 10.180.1.2:80",
  3597  			masq:   false,
  3598  		},
  3599  		{
  3600  			name:     "pod to NodePort with eTP:Local",
  3601  			sourceIP: "10.0.0.2",
  3602  			destIP:   testNodeIP,
  3603  			destPort: 3002,
  3604  
  3605  			// See the comment below in the "pod to LB with eTP:Local" case.
  3606  			// It doesn't actually make sense to short-circuit here, since if
  3607  			// you connect directly to a NodePort from outside the cluster,
  3608  			// you only get the local endpoints. But it's simpler for us and
  3609  			// slightly more convenient for users to have this case get
  3610  			// short-circuited too.
  3611  			output: "10.180.0.2:80, 10.180.1.2:80",
  3612  			masq:   false,
  3613  		},
  3614  		{
  3615  			name:     "pod to LB with eTP:Local",
  3616  			sourceIP: "10.0.0.2",
  3617  			destIP:   "5.6.7.8",
  3618  			destPort: 80,
  3619  
  3620  			// The short-circuit rule is supposed to make this behave the same
  3621  			// way it would if the packet actually went out to the LB and then
  3622  			// came back into the cluster. So it gets routed to all endpoints,
  3623  			// not just local ones. In reality, if the packet actually left
  3624  			// the cluster, it would have to get masqueraded, but since we can
  3625  			// avoid doing that in the short-circuit case, and not masquerading
  3626  			// is more useful, we avoid masquerading.
  3627  			output: "10.180.0.2:80, 10.180.1.2:80",
  3628  			masq:   false,
  3629  		},
  3630  		{
  3631  			name:     "node to ClusterIP with eTP:Local",
  3632  			sourceIP: testNodeIP,
  3633  			destIP:   "172.30.0.42",
  3634  			destPort: 80,
  3635  
  3636  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  3637  			// as "node to ClusterIP"
  3638  			output: "10.180.0.2:80, 10.180.1.2:80",
  3639  			masq:   true,
  3640  		},
  3641  		{
  3642  			name:     "node to NodePort with eTP:Local",
  3643  			sourceIP: testNodeIP,
  3644  			destIP:   testNodeIP,
  3645  			destPort: 3001,
  3646  
  3647  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  3648  			// same as "node to NodePort" above.
  3649  			output: "10.180.0.1:80, 10.180.1.1:80",
  3650  			masq:   true,
  3651  		},
  3652  		{
  3653  			name:     "node to LB with eTP:Local",
  3654  			sourceIP: testNodeIP,
  3655  			destIP:   "5.6.7.8",
  3656  			destPort: 80,
  3657  
  3658  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  3659  			// same as "node to LB" above.
  3660  			output: "10.180.0.2:80, 10.180.1.2:80",
  3661  			masq:   true,
  3662  		},
  3663  		{
  3664  			name:     "external to ClusterIP with eTP:Local",
  3665  			sourceIP: testExternalClient,
  3666  			destIP:   "172.30.0.42",
  3667  			destPort: 80,
  3668  
  3669  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  3670  			// as "external to ClusterIP" above.
  3671  			output: "10.180.0.2:80, 10.180.1.2:80",
  3672  			masq:   true,
  3673  		},
  3674  		{
  3675  			name:     "external to NodePort with eTP:Local",
  3676  			sourceIP: testExternalClient,
  3677  			destIP:   testNodeIP,
  3678  			destPort: 3002,
  3679  
  3680  			// externalTrafficPolicy applies; only the local endpoint is
  3681  			// selected, and we don't masquerade.
  3682  			output: "10.180.0.2:80",
  3683  			masq:   false,
  3684  		},
  3685  		{
  3686  			name:     "external to LB with eTP:Local",
  3687  			sourceIP: testExternalClient,
  3688  			destIP:   "5.6.7.8",
  3689  			destPort: 80,
  3690  
  3691  			// externalTrafficPolicy applies; only the local endpoint is
  3692  			// selected, and we don't masquerade.
  3693  			output: "10.180.0.2:80",
  3694  			masq:   false,
  3695  		},
  3696  		{
  3697  			name:     "pod to ClusterIP with iTP:Local",
  3698  			sourceIP: "10.0.0.2",
  3699  			destIP:   "172.30.0.43",
  3700  			destPort: 80,
  3701  
  3702  			// internalTrafficPolicy applies; only the local endpoint is
  3703  			// selected.
  3704  			output: "10.180.0.3:80",
  3705  			masq:   false,
  3706  		},
  3707  		{
  3708  			name:     "pod to NodePort with iTP:Local",
  3709  			sourceIP: "10.0.0.2",
  3710  			destIP:   testNodeIP,
  3711  			destPort: 3003,
  3712  
  3713  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  3714  			// "pod to NodePort" above.
  3715  			output: "10.180.0.3:80, 10.180.1.3:80",
  3716  			masq:   true,
  3717  		},
  3718  		{
  3719  			name:     "pod to LB with iTP:Local",
  3720  			sourceIP: "10.0.0.2",
  3721  			destIP:   "9.10.11.12",
  3722  			destPort: 80,
  3723  
  3724  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  3725  			// same as "pod to LB" above.
  3726  			output: "10.180.0.3:80, 10.180.1.3:80",
  3727  			masq:   true,
  3728  		},
  3729  		{
  3730  			name:     "node to ClusterIP with iTP:Local",
  3731  			sourceIP: testNodeIP,
  3732  			destIP:   "172.30.0.43",
  3733  			destPort: 80,
  3734  
  3735  			// internalTrafficPolicy applies; only the local endpoint is selected.
  3736  			// Traffic is masqueraded as in the "node to ClusterIP" case because
  3737  			// internalTrafficPolicy does not affect masquerading.
  3738  			output: "10.180.0.3:80",
  3739  			masq:   true,
  3740  		},
  3741  		{
  3742  			name:     "node to NodePort with iTP:Local",
  3743  			sourceIP: testNodeIP,
  3744  			destIP:   testNodeIP,
  3745  			destPort: 3003,
  3746  
  3747  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  3748  			// "node to NodePort" above.
  3749  			output: "10.180.0.3:80, 10.180.1.3:80",
  3750  			masq:   true,
  3751  		},
  3752  		{
  3753  			name:     "node to LB with iTP:Local",
  3754  			sourceIP: testNodeIP,
  3755  			destIP:   "9.10.11.12",
  3756  			destPort: 80,
  3757  
  3758  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  3759  			// same as "node to LB" above.
  3760  			output: "10.180.0.3:80, 10.180.1.3:80",
  3761  			masq:   true,
  3762  		},
  3763  		{
  3764  			name:     "external to ClusterIP with iTP:Local",
  3765  			sourceIP: testExternalClient,
  3766  			destIP:   "172.30.0.43",
  3767  			destPort: 80,
  3768  
  3769  			// internalTrafficPolicy applies; only the local endpoint is selected.
  3770  			// Traffic is masqueraded as in the "external to ClusterIP" case
  3771  			// because internalTrafficPolicy does not affect masquerading.
  3772  			output: "10.180.0.3:80",
  3773  			masq:   true,
  3774  		},
  3775  		{
  3776  			name:     "external to NodePort with iTP:Local",
  3777  			sourceIP: testExternalClient,
  3778  			destIP:   testNodeIP,
  3779  			destPort: 3003,
  3780  
  3781  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  3782  			// "external to NodePort" above.
  3783  			output: "10.180.0.3:80, 10.180.1.3:80",
  3784  			masq:   true,
  3785  		},
  3786  		{
  3787  			name:     "external to LB with iTP:Local",
  3788  			sourceIP: testExternalClient,
  3789  			destIP:   "9.10.11.12",
  3790  			destPort: 80,
  3791  
  3792  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  3793  			// same as "external to LB" above.
  3794  			output: "10.180.0.3:80, 10.180.1.3:80",
  3795  			masq:   true,
  3796  		},
  3797  	}
  3798  
  3799  	type packetFlowTestOverride struct {
  3800  		output *string
  3801  		masq   *bool
  3802  	}
  3803  
  3804  	testCases := []struct {
  3805  		name          string
  3806  		line          string
  3807  		masqueradeAll bool
  3808  		localDetector bool
  3809  		overrides     map[string]packetFlowTestOverride
  3810  	}{
  3811  		{
  3812  			name:          "base",
  3813  			line:          getLine(),
  3814  			masqueradeAll: false,
  3815  			localDetector: true,
  3816  			overrides:     nil,
  3817  		},
  3818  		{
  3819  			name:          "no LocalTrafficDetector",
  3820  			line:          getLine(),
  3821  			masqueradeAll: false,
  3822  			localDetector: false,
  3823  			overrides: map[string]packetFlowTestOverride{
  3824  				// With no LocalTrafficDetector, all traffic to a
  3825  				// ClusterIP is assumed to be from a pod, and thus to not
  3826  				// require masquerading.
  3827  				"node to ClusterIP": {
  3828  					masq: ptr.To(false),
  3829  				},
  3830  				"node to ClusterIP with eTP:Local": {
  3831  					masq: ptr.To(false),
  3832  				},
  3833  				"node to ClusterIP with iTP:Local": {
  3834  					masq: ptr.To(false),
  3835  				},
  3836  				"external to ClusterIP": {
  3837  					masq: ptr.To(false),
  3838  				},
  3839  				"external to ClusterIP with eTP:Local": {
  3840  					masq: ptr.To(false),
  3841  				},
  3842  				"external to ClusterIP with iTP:Local": {
  3843  					masq: ptr.To(false),
  3844  				},
  3845  
  3846  				// And there's no eTP:Local short-circuit for pod traffic,
  3847  				// so pods get only the local endpoints.
  3848  				"pod to NodePort with eTP:Local": {
  3849  					output: ptr.To("10.180.0.2:80"),
  3850  				},
  3851  				"pod to LB with eTP:Local": {
  3852  					output: ptr.To("10.180.0.2:80"),
  3853  				},
  3854  			},
  3855  		},
  3856  		{
  3857  			name:          "masqueradeAll",
  3858  			line:          getLine(),
  3859  			masqueradeAll: true,
  3860  			localDetector: true,
  3861  			overrides: map[string]packetFlowTestOverride{
  3862  				// All "to ClusterIP" traffic gets masqueraded when using
  3863  				// --masquerade-all.
  3864  				"pod to ClusterIP": {
  3865  					masq: ptr.To(true),
  3866  				},
  3867  				"pod to ClusterIP with eTP:Local": {
  3868  					masq: ptr.To(true),
  3869  				},
  3870  				"pod to ClusterIP with iTP:Local": {
  3871  					masq: ptr.To(true),
  3872  				},
  3873  			},
  3874  		},
  3875  		{
  3876  			name:          "masqueradeAll, no LocalTrafficDetector",
  3877  			line:          getLine(),
  3878  			masqueradeAll: true,
  3879  			localDetector: false,
  3880  			overrides: map[string]packetFlowTestOverride{
  3881  				// As in "masqueradeAll"
  3882  				"pod to ClusterIP": {
  3883  					masq: ptr.To(true),
  3884  				},
  3885  				"pod to ClusterIP with eTP:Local": {
  3886  					masq: ptr.To(true),
  3887  				},
  3888  				"pod to ClusterIP with iTP:Local": {
  3889  					masq: ptr.To(true),
  3890  				},
  3891  
  3892  				// As in "no LocalTrafficDetector"
  3893  				"pod to NodePort with eTP:Local": {
  3894  					output: ptr.To("10.180.0.2:80"),
  3895  				},
  3896  				"pod to LB with eTP:Local": {
  3897  					output: ptr.To("10.180.0.2:80"),
  3898  				},
  3899  			},
  3900  		},
  3901  	}
  3902  
  3903  	for _, tc := range testCases {
  3904  		t.Run(tc.name, func(t *testing.T) {
  3905  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  3906  			fp.masqueradeAll = tc.masqueradeAll
  3907  			if !tc.localDetector {
  3908  				fp.localDetector = proxyutil.NewNoOpLocalDetector()
  3909  			}
  3910  			setupTest(fp)
  3911  
  3912  			// Merge base flowTests with per-test-case overrides
  3913  			tcFlowTests := make([]packetFlowTest, len(flowTests))
  3914  			overridesApplied := 0
  3915  			for i := range flowTests {
  3916  				tcFlowTests[i] = flowTests[i]
  3917  				if overrides, set := tc.overrides[flowTests[i].name]; set {
  3918  					overridesApplied++
  3919  					if overrides.masq != nil {
  3920  						if tcFlowTests[i].masq == *overrides.masq {
  3921  							t.Errorf("%q override value for masq is same as base value", flowTests[i].name)
  3922  						}
  3923  						tcFlowTests[i].masq = *overrides.masq
  3924  					}
  3925  					if overrides.output != nil {
  3926  						if tcFlowTests[i].output == *overrides.output {
  3927  							t.Errorf("%q override value for output is same as base value", flowTests[i].name)
  3928  						}
  3929  						tcFlowTests[i].output = *overrides.output
  3930  					}
  3931  				}
  3932  			}
  3933  			if overridesApplied != len(tc.overrides) {
  3934  				t.Errorf("%d overrides did not match any test case name!", len(tc.overrides)-overridesApplied)
  3935  			}
  3936  			runPacketFlowTests(t, tc.line, nft, testNodeIPs, tcFlowTests)
  3937  		})
  3938  	}
  3939  }
  3940  
// TestSyncProxyRulesRepeated verifies that calling syncProxyRules() repeatedly, with
// various service and endpoint changes between calls, produces the expected nftables
// state at each step (including flushing and later deletion of stale chains).
  3942  func TestSyncProxyRulesRepeated(t *testing.T) {
  3943  	nft, fp := NewFakeProxier(v1.IPv4Protocol)
  3944  
  3945  	baseRules := dedent.Dedent(`
  3946  		add table ip kube-proxy { comment "rules for kube-proxy" ; }
  3947  
  3948  		add chain ip kube-proxy cluster-ips-check
  3949  		add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
  3950  		add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
  3951  		add chain ip kube-proxy filter-input { type filter hook input priority -110 ; }
  3952  		add chain ip kube-proxy filter-output { type filter hook output priority -110 ; }
  3953  		add chain ip kube-proxy filter-output-post-dnat { type filter hook output priority -90 ; }
  3954  		add chain ip kube-proxy firewall-check
  3955  		add chain ip kube-proxy mark-for-masquerade
  3956  		add chain ip kube-proxy masquerading
  3957  		add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
  3958  		add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
  3959  		add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
  3960  		add chain ip kube-proxy nodeport-endpoints-check
  3961  		add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
  3962  		add chain ip kube-proxy services
  3963  		add chain ip kube-proxy service-endpoints-check
  3964  
  3965  		add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
  3966  		add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"
  3967  		add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
  3968  		add rule ip kube-proxy filter-forward ct state new jump service-endpoints-check
  3969  		add rule ip kube-proxy filter-forward ct state new jump cluster-ips-check
  3970  		add rule ip kube-proxy filter-input ct state new jump nodeport-endpoints-check
  3971  		add rule ip kube-proxy filter-input ct state new jump service-endpoints-check
  3972  		add rule ip kube-proxy filter-output ct state new jump service-endpoints-check
  3973  		add rule ip kube-proxy filter-output ct state new jump firewall-check
  3974  		add rule ip kube-proxy filter-output-post-dnat ct state new jump cluster-ips-check
  3975  		add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
  3976  		add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
  3977  		add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
  3978  		add rule ip kube-proxy masquerading mark set mark xor 0x4000
  3979  		add rule ip kube-proxy masquerading masquerade fully-random
  3980  		add rule ip kube-proxy nat-output jump services
  3981  		add rule ip kube-proxy nat-postrouting jump masquerading
  3982  		add rule ip kube-proxy nat-prerouting jump services
  3983  		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
  3984  		add rule ip kube-proxy reject-chain reject
  3985  		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
  3986  		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
  3987  		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
  3988  		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
  3989  		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
  3990  		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
  3991  
  3992  		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
  3993  		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
  3994  		add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
  3995  		add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
  3996  		add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
  3997  		`)
  3998  
  3999  	// Helper function to make it look like time has passed (from the point of view of
  4000  	// the stale-chain-deletion code).
  4001  	ageStaleChains := func() {
  4002  		for chain, t := range fp.staleChains {
  4003  			fp.staleChains[chain] = t.Add(-2 * time.Second)
  4004  		}
  4005  	}
  4006  
  4007  	// Create initial state
  4008  	var svc2 *v1.Service
  4009  
  4010  	makeServiceMap(fp,
  4011  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  4012  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4013  			svc.Spec.ClusterIP = "172.30.0.41"
  4014  			svc.Spec.Ports = []v1.ServicePort{{
  4015  				Name:     "p80",
  4016  				Port:     80,
  4017  				Protocol: v1.ProtocolTCP,
  4018  			}}
  4019  		}),
  4020  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  4021  			svc2 = svc
  4022  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4023  			svc.Spec.ClusterIP = "172.30.0.42"
  4024  			svc.Spec.Ports = []v1.ServicePort{{
  4025  				Name:     "p8080",
  4026  				Port:     8080,
  4027  				Protocol: v1.ProtocolTCP,
  4028  			}}
  4029  		}),
  4030  	)
  4031  
  4032  	populateEndpointSlices(fp,
  4033  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  4034  			eps.AddressType = discovery.AddressTypeIPv4
  4035  			eps.Endpoints = []discovery.Endpoint{{
  4036  				Addresses: []string{"10.0.1.1"},
  4037  			}}
  4038  			eps.Ports = []discovery.EndpointPort{{
  4039  				Name:     ptr.To("p80"),
  4040  				Port:     ptr.To[int32](80),
  4041  				Protocol: ptr.To(v1.ProtocolTCP),
  4042  			}}
  4043  		}),
  4044  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  4045  			eps.AddressType = discovery.AddressTypeIPv4
  4046  			eps.Endpoints = []discovery.Endpoint{{
  4047  				Addresses: []string{"10.0.2.1"},
  4048  			}}
  4049  			eps.Ports = []discovery.EndpointPort{{
  4050  				Name:     ptr.To("p8080"),
  4051  				Port:     ptr.To[int32](8080),
  4052  				Protocol: ptr.To(v1.ProtocolTCP),
  4053  			}}
  4054  		}),
  4055  	)
  4056  
  4057  	fp.syncProxyRules()
  4058  
  4059  	expected := baseRules + dedent.Dedent(`
  4060  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4061  		add element ip kube-proxy cluster-ips { 172.30.0.42 }
  4062  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4063  		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
  4064  
  4065  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4066  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4067  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4068  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4069  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4070  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4071  
  4072  		add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
  4073  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 tcp dport 8080 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4074  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
  4075  		add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
  4076  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 jump mark-for-masquerade
  4077  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
  4078                  `)
  4079  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4080  
  4081  	// Add a new service and its endpoints
  4082  	makeServiceMap(fp,
  4083  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  4084  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4085  			svc.Spec.ClusterIP = "172.30.0.43"
  4086  			svc.Spec.Ports = []v1.ServicePort{{
  4087  				Name:     "p80",
  4088  				Port:     80,
  4089  				Protocol: v1.ProtocolTCP,
  4090  			}}
  4091  		}),
  4092  	)
  4093  	var eps3 *discovery.EndpointSlice
  4094  	populateEndpointSlices(fp,
  4095  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  4096  			eps3 = eps
  4097  			eps.AddressType = discovery.AddressTypeIPv4
  4098  			eps.Endpoints = []discovery.Endpoint{{
  4099  				Addresses: []string{"10.0.3.1"},
  4100  			}}
  4101  			eps.Ports = []discovery.EndpointPort{{
  4102  				Name:     ptr.To("p80"),
  4103  				Port:     ptr.To[int32](80),
  4104  				Protocol: ptr.To(v1.ProtocolTCP),
  4105  			}}
  4106  		}),
  4107  	)
  4108  	fp.syncProxyRules()
  4109  
  4110  	expected = baseRules + dedent.Dedent(`
  4111  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4112  		add element ip kube-proxy cluster-ips { 172.30.0.42 }
  4113  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4114  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4115  		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
  4116  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4117  
  4118  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4119  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4120  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4121  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4122  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4123  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4124  
  4125  		add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
  4126  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 ip daddr 172.30.0.42 tcp dport 8080 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4127  		add rule ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080 numgen random mod 1 vmap { 0 : goto endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 }
  4128  		add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
  4129  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 ip saddr 10.0.2.1 jump mark-for-masquerade
  4130  		add rule ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080 meta l4proto tcp dnat to 10.0.2.1:8080
  4131  
  4132  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4133  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4134  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4135  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4136  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4137  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4138  		`)
  4139  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4140  
  4141  	// Delete a service; its chains will be flushed, but not immediately deleted.
  4142  	fp.OnServiceDelete(svc2)
  4143  	fp.syncProxyRules()
  4144  	expected = baseRules + dedent.Dedent(`
  4145  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4146  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4147  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4148  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4149  
  4150  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4151  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4152  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4153  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4154  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4155  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4156  
  4157  		add chain ip kube-proxy service-MHHHYRWA-ns2/svc2/tcp/p8080
  4158  		add chain ip kube-proxy endpoint-7RVP4LUQ-ns2/svc2/tcp/p8080__10.0.2.1/8080
  4159  
  4160  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4161  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4162  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4163  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4164  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4165  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4166  		`)
  4167  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4168  
  4169  	// Fake the passage of time and confirm that the stale chains get deleted.
  4170  	ageStaleChains()
  4171  	fp.syncProxyRules()
  4172  	expected = baseRules + dedent.Dedent(`
  4173  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4174  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4175  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4176  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4177  
  4178  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4179  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4180  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4181  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4182  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4183  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4184  
  4185  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4186  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4187  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4188  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4189  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4190  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4191  		`)
  4192  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4193  
  4194  	// Add a service, sync, then add its endpoints.
  4195  	makeServiceMap(fp,
  4196  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  4197  			svc.Spec.Type = v1.ServiceTypeClusterIP
  4198  			svc.Spec.ClusterIP = "172.30.0.44"
  4199  			svc.Spec.Ports = []v1.ServicePort{{
  4200  				Name:     "p80",
  4201  				Port:     80,
  4202  				Protocol: v1.ProtocolTCP,
  4203  			}}
  4204  		}),
  4205  	)
  4206  	fp.syncProxyRules()
  4207  	expected = baseRules + dedent.Dedent(`
  4208  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4209  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4210  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4211  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4212  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4213  
  4214  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4215  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4216  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4217  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4218  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4219  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4220  
  4221  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4222  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4223  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4224  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4225  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4226  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4227  
  4228  		add element ip kube-proxy no-endpoint-services { 172.30.0.44 . tcp . 80 comment "ns4/svc4:p80" : goto reject-chain }
  4229  		`)
  4230  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4231  
  4232  	populateEndpointSlices(fp,
  4233  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  4234  			eps.AddressType = discovery.AddressTypeIPv4
  4235  			eps.Endpoints = []discovery.Endpoint{{
  4236  				Addresses: []string{"10.0.4.1"},
  4237  			}}
  4238  			eps.Ports = []discovery.EndpointPort{{
  4239  				Name:     ptr.To("p80"),
  4240  				Port:     ptr.To[int32](80),
  4241  				Protocol: ptr.To(v1.ProtocolTCP),
  4242  			}}
  4243  		}),
  4244  	)
  4245  	fp.syncProxyRules()
  4246  	expected = baseRules + dedent.Dedent(`
  4247  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4248  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4249  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4250  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4251  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4252  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4253  
  4254  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4255  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4256  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4257  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4258  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4259  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4260  
  4261  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4262  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4263  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 }
  4264  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4265  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 ip saddr 10.0.3.1 jump mark-for-masquerade
  4266  		add rule ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80 meta l4proto tcp dnat to 10.0.3.1:80
  4267  
  4268  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4269  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4270  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4271  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4272  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4273  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4274  		`)
  4275  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4276  
  4277  	// Change an endpoint of an existing service.
  4278  	eps3update := eps3.DeepCopy()
  4279  	eps3update.Endpoints[0].Addresses[0] = "10.0.3.2"
  4280  	fp.OnEndpointSliceUpdate(eps3, eps3update)
  4281  	fp.syncProxyRules()
  4282  
  4283  	// The old endpoint chain (for 10.0.3.1) will not be deleted yet.
  4284  	expected = baseRules + dedent.Dedent(`
  4285  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4286  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4287  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4288  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4289  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4290  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4291  
  4292  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4293  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4294  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4295  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4296  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4297  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4298  
  4299  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4300  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4301  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 }
  4302  		add chain ip kube-proxy endpoint-2OCDJSZQ-ns3/svc3/tcp/p80__10.0.3.1/80
  4303  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4304  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 jump mark-for-masquerade
  4305  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
  4306  
  4307  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4308  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4309  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4310  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4311  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4312  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4313  		`)
  4314  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4315  
  4316  	// (Ensure the old svc3 chain gets deleted in the next sync.)
  4317  	ageStaleChains()
  4318  
  4319  	// Add an endpoint to a service.
  4320  	eps3update2 := eps3update.DeepCopy()
  4321  	eps3update2.Endpoints = append(eps3update2.Endpoints, discovery.Endpoint{Addresses: []string{"10.0.3.3"}})
  4322  	fp.OnEndpointSliceUpdate(eps3update, eps3update2)
  4323  	fp.syncProxyRules()
  4324  
  4325  	expected = baseRules + dedent.Dedent(`
  4326  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4327  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4328  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4329  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4330  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4331  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4332  
  4333  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4334  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4335  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4336  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4337  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4338  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4339  
  4340  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4341  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4342  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 , 1 : goto endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 }
  4343  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4344  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 jump mark-for-masquerade
  4345  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
  4346  		add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
  4347  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 ip saddr 10.0.3.3 jump mark-for-masquerade
  4348  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 meta l4proto tcp dnat to 10.0.3.3:80
  4349  
  4350  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4351  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4352  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4353  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4354  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4355  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4356  		`)
  4357  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4358  
  4359  	// Empty a service's endpoints; its chains will be flushed, but not immediately deleted.
  4360  	eps3update3 := eps3update2.DeepCopy()
  4361  	eps3update3.Endpoints = []discovery.Endpoint{}
  4362  	fp.OnEndpointSliceUpdate(eps3update2, eps3update3)
  4363  	fp.syncProxyRules()
  4364  	expected = baseRules + dedent.Dedent(`
  4365  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4366  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4367  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4368  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4369  		add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 80 comment "ns3/svc3:p80" : goto reject-chain }
  4370  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4371  
  4372  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4373  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4374  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4375  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4376  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4377  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4378  
  4379  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4380  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4381  		add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
  4382  
  4383  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4384  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4385  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4386  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4387  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4388  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4389  		`)
  4390  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4391  	expectedStaleChains := sets.NewString("service-4AT6LBPK-ns3/svc3/tcp/p80", "endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80", "endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80")
  4392  	gotStaleChains := sets.StringKeySet(fp.staleChains)
  4393  	if !expectedStaleChains.Equal(gotStaleChains) {
  4394  		t.Errorf("expected stale chains %v, got %v", expectedStaleChains, gotStaleChains)
  4395  	}
  4396  	// Restore endpoints to non-empty immediately; its chains will be restored, and deleted from staleChains.
  4397  	fp.OnEndpointSliceUpdate(eps3update3, eps3update2)
  4398  	fp.syncProxyRules()
  4399  	expected = baseRules + dedent.Dedent(`
  4400  		add element ip kube-proxy cluster-ips { 172.30.0.41 }
  4401  		add element ip kube-proxy cluster-ips { 172.30.0.43 }
  4402  		add element ip kube-proxy cluster-ips { 172.30.0.44 }
  4403  		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
  4404  		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
  4405  		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
  4406  
  4407  		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
  4408  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4409  		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 }
  4410  		add chain ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80
  4411  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 ip saddr 10.0.1.1 jump mark-for-masquerade
  4412  		add rule ip kube-proxy endpoint-5TPGNJF2-ns1/svc1/tcp/p80__10.0.1.1/80 meta l4proto tcp dnat to 10.0.1.1:80
  4413  
  4414  		add chain ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80
  4415  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 ip daddr 172.30.0.43 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4416  		add rule ip kube-proxy service-4AT6LBPK-ns3/svc3/tcp/p80 numgen random mod 2 vmap { 0 : goto endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 , 1 : goto endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 }
  4417  		add chain ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80
  4418  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 ip saddr 10.0.3.2 jump mark-for-masquerade
  4419  		add rule ip kube-proxy endpoint-SWWHDC7X-ns3/svc3/tcp/p80__10.0.3.2/80 meta l4proto tcp dnat to 10.0.3.2:80
  4420  		add chain ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80
  4421  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 ip saddr 10.0.3.3 jump mark-for-masquerade
  4422  		add rule ip kube-proxy endpoint-TQ2QKHCZ-ns3/svc3/tcp/p80__10.0.3.3/80 meta l4proto tcp dnat to 10.0.3.3:80
  4423  
  4424  		add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
  4425  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 ip daddr 172.30.0.44 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
  4426  		add rule ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 }
  4427  		add chain ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80
  4428  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 ip saddr 10.0.4.1 jump mark-for-masquerade
  4429  		add rule ip kube-proxy endpoint-WAHRBT2B-ns4/svc4/tcp/p80__10.0.4.1/80 meta l4proto tcp dnat to 10.0.4.1:80
  4430  		`)
  4431  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4432  	if len(fp.staleChains) != 0 {
  4433  		t.Errorf("unexpected stale chains: %v", fp.staleChains)
  4434  	}
  4435  
  4436  	// Empty a service's endpoints and restore it after stale chains age.
  4437  	// - its chains will be flushed, but not immediately deleted in the first sync.
  4438  	// - its chains will be deleted first, then recreated in the second sync.
  4439  	fp.OnEndpointSliceUpdate(eps3update2, eps3update3)
  4440  	fp.syncProxyRules()
  4441  	ageStaleChains()
  4442  	fp.OnEndpointSliceUpdate(eps3update3, eps3update2)
  4443  	fp.syncProxyRules()
  4444  	// The second change counteracts the first one, so same expected rules as last time
  4445  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4446  
  4447  	// Sync with no new changes, so same expected rules as last time
  4448  	fp.syncProxyRules()
  4449  	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
  4450  }
  4451  
  4452  func TestNoEndpointsMetric(t *testing.T) {
  4453  	type endpoint struct {
  4454  		ip       string
  4455  		hostname string
  4456  	}
  4457  
  4458  	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeNFTables)
  4459  	testCases := []struct {
  4460  		name                                                string
  4461  		internalTrafficPolicy                               *v1.ServiceInternalTrafficPolicy
  4462  		externalTrafficPolicy                               v1.ServiceExternalTrafficPolicy
  4463  		endpoints                                           []endpoint
  4464  		expectedSyncProxyRulesNoLocalEndpointsTotalInternal int
  4465  		expectedSyncProxyRulesNoLocalEndpointsTotalExternal int
  4466  	}{
  4467  		{
  4468  			name:                  "internalTrafficPolicy is set and there are local endpoints",
  4469  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4470  			endpoints: []endpoint{
  4471  				{"10.0.1.1", testHostname},
  4472  				{"10.0.1.2", "host1"},
  4473  				{"10.0.1.3", "host2"},
  4474  			},
  4475  		},
  4476  		{
  4477  			name:                  "externalTrafficPolicy is set and there are local endpoints",
  4478  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4479  			endpoints: []endpoint{
  4480  				{"10.0.1.1", testHostname},
  4481  				{"10.0.1.2", "host1"},
  4482  				{"10.0.1.3", "host2"},
  4483  			},
  4484  		},
  4485  		{
  4486  			name:                  "both policies are set and there are local endpoints",
  4487  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4488  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4489  			endpoints: []endpoint{
  4490  				{"10.0.1.1", testHostname},
  4491  				{"10.0.1.2", "host1"},
  4492  				{"10.0.1.3", "host2"},
  4493  			},
  4494  		},
  4495  		{
  4496  			name:                  "internalTrafficPolicy is set and there are no local endpoints",
  4497  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4498  			endpoints: []endpoint{
  4499  				{"10.0.1.1", "host0"},
  4500  				{"10.0.1.2", "host1"},
  4501  				{"10.0.1.3", "host2"},
  4502  			},
  4503  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  4504  		},
  4505  		{
  4506  			name:                  "externalTrafficPolicy is set and there are no local endpoints",
  4507  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4508  			endpoints: []endpoint{
  4509  				{"10.0.1.1", "host0"},
  4510  				{"10.0.1.2", "host1"},
  4511  				{"10.0.1.3", "host2"},
  4512  			},
  4513  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  4514  		},
  4515  		{
  4516  			name:                  "both policies are set and there are no local endpoints",
  4517  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4518  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4519  			endpoints: []endpoint{
  4520  				{"10.0.1.1", "host0"},
  4521  				{"10.0.1.2", "host1"},
  4522  				{"10.0.1.3", "host2"},
  4523  			},
  4524  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  4525  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  4526  		},
  4527  		{
  4528  			name:                  "both policies are set and there are no endpoints at all",
  4529  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4530  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4531  			endpoints:             []endpoint{},
  4532  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0,
  4533  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0,
  4534  		},
  4535  	}
  4536  
  4537  	for _, tc := range testCases {
  4538  		t.Run(tc.name, func(t *testing.T) {
  4539  			_, fp := NewFakeProxier(v1.IPv4Protocol)
  4540  			fp.OnServiceSynced()
  4541  			fp.OnEndpointSlicesSynced()
  4542  
  4543  			serviceName := "svc1"
  4544  			namespaceName := "ns1"
  4545  
  4546  			svc := &v1.Service{
  4547  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4548  				Spec: v1.ServiceSpec{
  4549  					ClusterIP: "172.30.1.1",
  4550  					Selector:  map[string]string{"foo": "bar"},
  4551  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP, NodePort: 123}},
  4552  				},
  4553  			}
  4554  			if tc.internalTrafficPolicy != nil {
  4555  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  4556  			}
  4557  			if tc.externalTrafficPolicy != "" {
  4558  				svc.Spec.Type = v1.ServiceTypeNodePort
  4559  				svc.Spec.ExternalTrafficPolicy = tc.externalTrafficPolicy
  4560  			}
  4561  
  4562  			fp.OnServiceAdd(svc)
  4563  
  4564  			endpointSlice := &discovery.EndpointSlice{
  4565  				ObjectMeta: metav1.ObjectMeta{
  4566  					Name:      fmt.Sprintf("%s-1", serviceName),
  4567  					Namespace: namespaceName,
  4568  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4569  				},
  4570  				Ports: []discovery.EndpointPort{{
  4571  					Name:     ptr.To(""),
  4572  					Port:     ptr.To[int32](80),
  4573  					Protocol: ptr.To(v1.ProtocolTCP),
  4574  				}},
  4575  				AddressType: discovery.AddressTypeIPv4,
  4576  			}
  4577  			for _, ep := range tc.endpoints {
  4578  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  4579  					Addresses:  []string{ep.ip},
  4580  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4581  					NodeName:   ptr.To(ep.hostname),
  4582  				})
  4583  			}
  4584  
  4585  			fp.OnEndpointSliceAdd(endpointSlice)
  4586  			fp.syncProxyRules()
  4587  			syncProxyRulesNoLocalEndpointsTotalInternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal"))
  4588  			if err != nil {
  4589  				t.Errorf("failed to get %s value, err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  4590  			}
  4591  
  4592  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal != int(syncProxyRulesNoLocalEndpointsTotalInternal) {
  4593  				t.Errorf("sync_proxy_rules_no_endpoints_total metric mismatch(internal): got=%d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalInternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal)
  4594  			}
  4595  
  4596  			syncProxyRulesNoLocalEndpointsTotalExternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external"))
  4597  			if err != nil {
  4598  				t.Errorf("failed to get %s value(external), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  4599  			}
  4600  
  4601  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal != int(syncProxyRulesNoLocalEndpointsTotalExternal) {
  4602  				t.Errorf("sync_proxy_rules_no_endpoints_total metric mismatch(internal): got=%d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalExternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal)
  4603  			}
  4604  		})
  4605  	}
  4606  }
  4607  
  4608  func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
  4609  	testCases := []struct {
  4610  		name          string
  4611  		ipModeEnabled bool
  4612  		svcIP         string
  4613  		svcLBIP       string
  4614  		ipMode        *v1.LoadBalancerIPMode
  4615  		expectedRule  bool
  4616  	}{
  4617  		/* LoadBalancerIPMode disabled */
  4618  		{
  4619  			name:          "LoadBalancerIPMode disabled, ipMode Proxy",
  4620  			ipModeEnabled: false,
  4621  			svcIP:         "10.20.30.41",
  4622  			svcLBIP:       "1.2.3.4",
  4623  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  4624  			expectedRule:  true,
  4625  		},
  4626  		{
  4627  			name:          "LoadBalancerIPMode disabled, ipMode VIP",
  4628  			ipModeEnabled: false,
  4629  			svcIP:         "10.20.30.42",
  4630  			svcLBIP:       "1.2.3.5",
  4631  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  4632  			expectedRule:  true,
  4633  		},
  4634  		{
  4635  			name:          "LoadBalancerIPMode disabled, ipMode nil",
  4636  			ipModeEnabled: false,
  4637  			svcIP:         "10.20.30.43",
  4638  			svcLBIP:       "1.2.3.6",
  4639  			ipMode:        nil,
  4640  			expectedRule:  true,
  4641  		},
  4642  		/* LoadBalancerIPMode enabled */
  4643  		{
  4644  			name:          "LoadBalancerIPMode enabled, ipMode Proxy",
  4645  			ipModeEnabled: true,
  4646  			svcIP:         "10.20.30.41",
  4647  			svcLBIP:       "1.2.3.4",
  4648  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  4649  			expectedRule:  false,
  4650  		},
  4651  		{
  4652  			name:          "LoadBalancerIPMode enabled, ipMode VIP",
  4653  			ipModeEnabled: true,
  4654  			svcIP:         "10.20.30.42",
  4655  			svcLBIP:       "1.2.3.5",
  4656  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  4657  			expectedRule:  true,
  4658  		},
  4659  		{
  4660  			name:          "LoadBalancerIPMode enabled, ipMode nil",
  4661  			ipModeEnabled: true,
  4662  			svcIP:         "10.20.30.43",
  4663  			svcLBIP:       "1.2.3.6",
  4664  			ipMode:        nil,
  4665  			expectedRule:  true,
  4666  		},
  4667  	}
  4668  
  4669  	svcPort := 80
  4670  	svcNodePort := 3001
  4671  	svcPortName := proxy.ServicePortName{
  4672  		NamespacedName: makeNSN("ns1", "svc1"),
  4673  		Port:           "p80",
  4674  		Protocol:       v1.ProtocolTCP,
  4675  	}
  4676  
  4677  	for _, testCase := range testCases {
  4678  		t.Run(testCase.name, func(t *testing.T) {
  4679  			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, testCase.ipModeEnabled)
  4680  			nft, fp := NewFakeProxier(v1.IPv4Protocol)
  4681  			makeServiceMap(fp,
  4682  				makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  4683  					svc.Spec.Type = "LoadBalancer"
  4684  					svc.Spec.ClusterIP = testCase.svcIP
  4685  					svc.Spec.Ports = []v1.ServicePort{{
  4686  						Name:     svcPortName.Port,
  4687  						Port:     int32(svcPort),
  4688  						Protocol: v1.ProtocolTCP,
  4689  						NodePort: int32(svcNodePort),
  4690  					}}
  4691  					svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  4692  						IP:     testCase.svcLBIP,
  4693  						IPMode: testCase.ipMode,
  4694  					}}
  4695  				}),
  4696  			)
  4697  
  4698  			populateEndpointSlices(fp,
  4699  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  4700  					eps.AddressType = discovery.AddressTypeIPv4
  4701  					eps.Endpoints = []discovery.Endpoint{{
  4702  						Addresses: []string{"10.180.0.1"},
  4703  					}}
  4704  					eps.Ports = []discovery.EndpointPort{{
  4705  						Name:     ptr.To("p80"),
  4706  						Port:     ptr.To[int32](80),
  4707  						Protocol: ptr.To(v1.ProtocolTCP),
  4708  					}}
  4709  				}),
  4710  			)
  4711  
  4712  			fp.syncProxyRules()
  4713  
  4714  			element := nft.Table.Maps["service-ips"].FindElement(testCase.svcLBIP, "tcp", "80")
  4715  			ruleExists := element != nil
  4716  			if ruleExists != testCase.expectedRule {
  4717  				t.Errorf("unexpected rule for %s", testCase.svcLBIP)
  4718  			}
  4719  		})
  4720  	}
  4721  }
  4722  
  4723  func Test_servicePortChainNameBase(t *testing.T) {
  4724  	testCases := []struct {
  4725  		name     string
  4726  		spn      proxy.ServicePortName
  4727  		protocol string
  4728  		expected string
  4729  	}{
  4730  		{
  4731  			name: "simple",
  4732  			spn: proxy.ServicePortName{
  4733  				NamespacedName: types.NamespacedName{
  4734  					Namespace: "testing",
  4735  					Name:      "service",
  4736  				},
  4737  				Port: "http",
  4738  			},
  4739  			protocol: "tcp",
  4740  			expected: "P4ZYZVCF-testing/service/tcp/http",
  4741  		},
  4742  		{
  4743  			name: "different port, different hash",
  4744  			spn: proxy.ServicePortName{
  4745  				NamespacedName: types.NamespacedName{
  4746  					Namespace: "testing",
  4747  					Name:      "service",
  4748  				},
  4749  				Port: "https",
  4750  			},
  4751  			protocol: "tcp",
  4752  			expected: "LZBRENCP-testing/service/tcp/https",
  4753  		},
  4754  		{
  4755  			name: "max length",
  4756  			spn: proxy.ServicePortName{
  4757  				NamespacedName: types.NamespacedName{
  4758  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4759  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4760  				},
  4761  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4762  			},
  4763  			protocol: "sctp",
  4764  			expected: "KR6NACJP-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4765  		},
  4766  	}
  4767  
  4768  	for _, tc := range testCases {
  4769  		t.Run(tc.name, func(t *testing.T) {
  4770  			name := servicePortChainNameBase(&tc.spn, tc.protocol)
  4771  			if name != tc.expected {
  4772  				t.Errorf("expected %q, got %q", tc.expected, name)
  4773  			}
  4774  		})
  4775  	}
  4776  }
  4777  
  4778  func Test_servicePortEndpointChainNameBase(t *testing.T) {
  4779  	testCases := []struct {
  4780  		name     string
  4781  		spn      proxy.ServicePortName
  4782  		protocol string
  4783  		endpoint string
  4784  		expected string
  4785  	}{
  4786  		{
  4787  			name: "simple",
  4788  			spn: proxy.ServicePortName{
  4789  				NamespacedName: types.NamespacedName{
  4790  					Namespace: "testing",
  4791  					Name:      "service",
  4792  				},
  4793  				Port: "http",
  4794  			},
  4795  			protocol: "tcp",
  4796  			endpoint: "10.180.0.1:80",
  4797  			expected: "JO2XBXZR-testing/service/tcp/http__10.180.0.1/80",
  4798  		},
  4799  		{
  4800  			name: "different endpoint, different hash",
  4801  			spn: proxy.ServicePortName{
  4802  				NamespacedName: types.NamespacedName{
  4803  					Namespace: "testing",
  4804  					Name:      "service",
  4805  				},
  4806  				Port: "http",
  4807  			},
  4808  			protocol: "tcp",
  4809  			endpoint: "10.180.0.2:80",
  4810  			expected: "5S6H3H22-testing/service/tcp/http__10.180.0.2/80",
  4811  		},
  4812  		{
  4813  			name: "ipv6",
  4814  			spn: proxy.ServicePortName{
  4815  				NamespacedName: types.NamespacedName{
  4816  					Namespace: "testing",
  4817  					Name:      "service",
  4818  				},
  4819  				Port: "http",
  4820  			},
  4821  			protocol: "tcp",
  4822  			endpoint: "[fd80:abcd:12::a1b2:c3d4:e5f6:9999]:80",
  4823  			expected: "U7E2ET36-testing/service/tcp/http__fd80.abcd.12..a1b2.c3d4.e5f6.9999/80",
  4824  		},
  4825  		{
  4826  			name: "max length without truncation",
  4827  			spn: proxy.ServicePortName{
  4828  				NamespacedName: types.NamespacedName{
  4829  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4830  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4831  				},
  4832  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4833  			},
  4834  			protocol: "sctp",
  4835  			endpoint: "[1234:5678:9abc:def0::abc:1234]:443",
  4836  			expected: "5YS7AFEA-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls__1234.5678.9abc.def0..abc.1234/443",
  4837  		},
  4838  		{
  4839  			name: "truncated, 1",
  4840  			spn: proxy.ServicePortName{
  4841  				NamespacedName: types.NamespacedName{
  4842  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4843  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4844  				},
  4845  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4846  			},
  4847  			protocol: "sctp",
  4848  			endpoint: "[1234:5678:9abc:def0::abcd:1234:5678]:443",
  4849  			expected: "CI6C53Q3-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls__1234.5678.9abc.def0..abcd.1234...",
  4850  		},
  4851  		{
  4852  			name: "truncated, 2 (different IP, which is not visible in the result)",
  4853  			spn: proxy.ServicePortName{
  4854  				NamespacedName: types.NamespacedName{
  4855  					Namespace: "very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx",
  4856  					Name:      "very-long-service-name-why-would-you-even-do-this-i-mean-really",
  4857  				},
  4858  				Port: "port-443-providing-the-hypertext-transmission-protocol-with-tls",
  4859  			},
  4860  			protocol: "sctp",
  4861  			endpoint: "[1234:5678:9abc:def0::abcd:1234:8765]:443",
  4862  			expected: "2FLXFK6X-very-long-namespace-name-abcdefghijklmnopqrstuvwxyz0123456789xx/very-long-service-name-why-would-you-even-do-this-i-mean-really/sctp/port-443-providing-the-hypertext-transmission-protocol-with-tls__1234.5678.9abc.def0..abcd.1234...",
  4863  		},
  4864  	}
  4865  
  4866  	for _, tc := range testCases {
  4867  		t.Run(tc.name, func(t *testing.T) {
  4868  			name := servicePortEndpointChainNameBase(&tc.spn, tc.protocol, tc.endpoint)
  4869  			if name != tc.expected {
  4870  				t.Errorf("expected %q, got %q", tc.expected, name)
  4871  			}
  4872  		})
  4873  	}
  4874  }
  4875  
  4876  func TestProxier_OnServiceCIDRsChanged(t *testing.T) {
  4877  	var proxier *Proxier
  4878  
  4879  	proxier = &Proxier{ipFamily: v1.IPv4Protocol}
  4880  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "fd00:10:96::/112"})
  4881  	assert.Equal(t, proxier.serviceCIDRs, "172.30.0.0/16")
  4882  
  4883  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
  4884  	assert.Equal(t, proxier.serviceCIDRs, "172.30.0.0/16,172.50.0.0/16")
  4885  
  4886  	proxier = &Proxier{ipFamily: v1.IPv6Protocol}
  4887  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "fd00:10:96::/112"})
  4888  	assert.Equal(t, proxier.serviceCIDRs, "fd00:10:96::/112")
  4889  
  4890  	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
  4891  	assert.Equal(t, proxier.serviceCIDRs, "fd00:10:96::/112,fd00:172:30::/112")
  4892  }