k8s.io/kubernetes@v1.29.3/pkg/proxy/iptables/proxier_test.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package iptables

import (
	"bytes"
	"fmt"
	"net"
	"reflect"
	"regexp"
	stdruntime "runtime"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/lithammer/dedent"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/component-base/metrics/legacyregistry"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/conntrack"
	"k8s.io/kubernetes/pkg/proxy/metrics"

	"k8s.io/kubernetes/pkg/proxy/healthcheck"
	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
	proxyutiltest "k8s.io/kubernetes/pkg/proxy/util/testing"
	"k8s.io/kubernetes/pkg/util/async"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
	netutils "k8s.io/utils/net"
	"k8s.io/utils/ptr"
)

func TestDeleteEndpointConnections(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string // IP:port endpoint
		simulatedErr string
	}{
		{
			description: "V4 UDP",
			svcName:     "v4-udp",
			svcIP:       "172.30.1.1",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "10.240.0.3:80",
		},
		{
			description: "V4 TCP",
			svcName:     "v4-tcp",
			svcIP:       "172.30.2.2",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "10.240.0.4:80",
		},
		{
			description: "V4 SCTP",
			svcName:     "v4-sctp",
			svcIP:       "172.30.3.3",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "10.240.0.5:80",
		},
		{
			description:  "V4 UDP, nothing to delete, benign error",
			svcName:      "v4-udp-nothing-to-delete",
			svcIP:        "172.30.4.4",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.6:80",
			simulatedErr: conntrack.NoConnectionToDelete,
		},
		{
			description:  "V4 UDP, unexpected error, should be glogged",
			svcName:      "v4-udp-simulated-error",
			svcIP:        "172.30.5.5",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.7:80",
			simulatedErr: "simulated error",
		},
		{
			description: "V6 UDP",
			svcName:     "v6-udp",
			svcIP:       "fd00:1234::20",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "[2001:db8::2]:80",
		},
		{
			description: "V6 TCP",
			svcName:     "v6-tcp",
			svcIP:       "fd00:1234::30",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "[2001:db8::3]:80",
		},
		{
			description: "V6 SCTP",
			svcName:     "v6-sctp",
			svcIP:       "fd00:1234::40",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "[2001:db8::4]:80",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			priorGlogErrs := klog.Stats.Error.Lines()

			// Create a fake executor for the conntrack utility.
			fcmd := fakeexec.FakeCmd{}
			fexec := &fakeexec.FakeExec{
				LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
			}
			execFunc := func(cmd string, args ...string) exec.Cmd {
				return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
			}

			if tc.protocol == UDP {
				cmdOutput := "1 flow entries have been deleted"
				var simErr error

				// First call outputs cmdOutput and succeeds
				fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript,
					func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, nil },
				)
				fexec.CommandScript = append(fexec.CommandScript, execFunc)

				// Second call may succeed or fail
				if tc.simulatedErr != "" {
					cmdOutput = ""
					simErr = fmt.Errorf("%s", tc.simulatedErr) // pass the message as an argument; a non-constant format string trips "go vet"
				}
				fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript,
					func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr },
				)
				fexec.CommandScript = append(fexec.CommandScript, execFunc)
			}

			endpointIP := proxyutil.IPPart(tc.endpoint)
			isIPv6 := netutils.IsIPv6String(endpointIP)

			var ipt utiliptables.Interface
			if isIPv6 {
				ipt = iptablestest.NewIPv6Fake()
			} else {
				ipt = iptablestest.NewFake()
			}
			fp := NewFakeProxier(ipt)
			fp.exec = fexec

			makeServiceMap(fp,
				makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
					svc.Spec.ClusterIP = tc.svcIP
					svc.Spec.Ports = []v1.ServicePort{{
						Name:     "p80",
						Port:     tc.svcPort,
						Protocol: tc.protocol,
					}}
					svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
				}),
			)
			fp.svcPortMap.Update(fp.serviceChanges)

			slice := makeTestEndpointSlice("ns1", tc.svcName, 1, func(eps *discovery.EndpointSlice) {
				if isIPv6 {
					eps.AddressType = discovery.AddressTypeIPv6
				} else {
					eps.AddressType = discovery.AddressTypeIPv4
				}
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{endpointIP},
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     ptr.To("p80"),
					Port:     ptr.To[int32](80),
					Protocol: ptr.To(tc.protocol),
				}}
			})

			// Add and then remove the endpoint slice
			fp.OnEndpointSliceAdd(slice)
			fp.syncProxyRules()
			fp.OnEndpointSliceDelete(slice)
			fp.syncProxyRules()

			// Check the executed conntrack command
			if tc.protocol == UDP {
				if fexec.CommandCalls != 2 {
					t.Fatalf("Expected conntrack to be executed 2 times, but got %d", fexec.CommandCalls)
				}

				// First clear conntrack entries for the clusterIP when the
				// endpoint is first added.
				expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s -p udp", tc.svcIP)
				if isIPv6 {
					expectCommand += " -f ipv6"
				}
				actualCommand := strings.Join(fcmd.CombinedOutputLog[0], " ")
				if actualCommand != expectCommand {
					t.Errorf("Expected command: %s, but executed %s", expectCommand, actualCommand)
				}

				// Then clear conntrack entries for the endpoint when it is
				// deleted.
				expectCommand = fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p udp", tc.svcIP, endpointIP)
				if isIPv6 {
					expectCommand += " -f ipv6"
				}
				actualCommand = strings.Join(fcmd.CombinedOutputLog[1], " ")
				if actualCommand != expectCommand {
					t.Errorf("Expected command: %s, but executed %s", expectCommand, actualCommand)
				}
			} else if fexec.CommandCalls != 0 {
				t.Fatalf("Expected conntrack to be executed 0 times, but got %d", fexec.CommandCalls)
			}

			// Check the number of new glog errors
			var expGlogErrs int64
			if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
				expGlogErrs = 1
			}
			glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
			if glogErrs != expGlogErrs {
				t.Errorf("Expected %d glogged errors, but got %d", expGlogErrs, glogErrs)
			}
		})
	}
}

// Conventions for tests using NewFakeProxier:
//
// Pod IPs:             10.0.0.0/8
// Service ClusterIPs:  172.30.0.0/16
// Node IPs:            192.168.0.0/24
// Local Node IP:       192.168.0.2
// Service ExternalIPs: 192.168.99.0/24
// LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
// Non-cluster IPs:     203.0.113.0/24
// LB Source Range:     203.0.113.0/25

const testHostname = "test-hostname"
const testNodeIP = "192.168.0.2"
const testNodeIPAlt = "192.168.1.2"
const testExternalIP = "192.168.99.11"
const testNodeIPv6 = "2001:db8::1"
const testNodeIPv6Alt = "2001:db8:1::2"
const testExternalClient = "203.0.113.2"
const testExternalClientBlocked = "203.0.113.130"

var testNodeIPs = []string{testNodeIP, testNodeIPAlt, testExternalIP, testNodeIPv6, testNodeIPv6Alt}

func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
	// TODO: Call NewProxier after refactoring out the goroutine
	// invocation into a Run() method.
	ipfamily := v1.IPv4Protocol
	podCIDR := "10.0.0.0/8"
	if ipt.IsIPv6() {
		ipfamily = v1.IPv6Protocol
		podCIDR = "fd00:10::/64"
	}
	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR(podCIDR)

	networkInterfacer := proxyutiltest.NewFakeNetwork()
	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{
		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
		&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}, // a bare IP, not "::1/128": ParseIPSloppy returns nil for CIDR strings
	}
	networkInterfacer.AddInterfaceAddr(&itf, addrs)
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{
		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)},
		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPAlt), Mask: net.CIDRMask(24, 32)},
		&net.IPNet{IP: netutils.ParseIPSloppy(testExternalIP), Mask: net.CIDRMask(24, 32)},
		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6), Mask: net.CIDRMask(64, 128)},
		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6Alt), Mask: net.CIDRMask(64, 128)},
	}
	networkInterfacer.AddInterfaceAddr(&itf1, addrs1)

	p := &Proxier{
		exec:                     &fakeexec.FakeExec{},
		svcPortMap:               make(proxy.ServicePortMap),
		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
		endpointsMap:             make(proxy.EndpointsMap),
		endpointsChanges:         proxy.NewEndpointsChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil),
		needFullSync:             true,
		iptables:                 ipt,
		masqueradeMark:           "0x4000",
		localDetector:            detectLocal,
		hostname:                 testHostname,
		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		existingFilterChainsData: bytes.NewBuffer(nil),
		filterChains:             proxyutil.NewLineBuffer(),
		filterRules:              proxyutil.NewLineBuffer(),
		natChains:                proxyutil.NewLineBuffer(),
		natRules:                 proxyutil.NewLineBuffer(),
		nodeIP:                   netutils.ParseIPSloppy(testNodeIP),
		localhostNodePorts:       true,
		nodePortAddresses:        proxyutil.NewNodePortAddresses(ipfamily, nil),
		networkInterfacer:        networkInterfacer,
	}
	p.setInitialized(true)
	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
	return p
}

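// exampleFakeProxierUsage is an illustrative sketch only, not part of the
// upstream suite: it shows the typical lifecycle of these fixtures -- build a
// fake iptables implementation, wrap it in a fake Proxier, feed in a Service
// using the IP conventions documented above, and inspect the iptables-restore
// input that syncProxyRules produced. The function name and the assertion are
// invented for illustration.
func exampleFakeProxierUsage(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	makeServiceMap(fp,
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.ClusterIP = "172.30.0.41" // from the ClusterIP range above
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	fp.syncProxyRules()

	// fp.iptablesData holds the iptables-restore input from the last sync.
	if !strings.Contains(fp.iptablesData.String(), "172.30.0.41") {
		t.Errorf("expected rules mentioning the cluster IP")
	}
}
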
// parseIPTablesData takes iptables-save output and returns a map of table name to array of lines.
func parseIPTablesData(ruleData string) (map[string][]string, error) {
	// Split ruleData at the "COMMIT" lines; given valid input, this will result in
	// one element for each table plus an extra empty element (since the ruleData
	// should end with a "COMMIT" line).
	rawTables := strings.Split(strings.TrimPrefix(ruleData, "\n"), "COMMIT\n")
	nTables := len(rawTables) - 1
	if nTables < 2 || rawTables[nTables] != "" {
		return nil, fmt.Errorf("bad ruleData (%d tables)\n%s", nTables, ruleData)
	}

	tables := make(map[string][]string, nTables)
	for i, table := range rawTables[:nTables] {
		lines := strings.Split(strings.Trim(table, "\n"), "\n")
		// The first line should be, eg, "*nat" or "*filter"
		if lines[0][0] != '*' {
			return nil, fmt.Errorf("bad ruleData (table %d starts with %q)", i+1, lines[0])
		}
		// add back the "COMMIT" line that got eaten by the strings.Split above
		lines = append(lines, "COMMIT")
		tables[lines[0][1:]] = lines
	}

	if tables["nat"] == nil {
		return nil, fmt.Errorf("bad ruleData (no %q table)", "nat")
	}
	if tables["filter"] == nil {
		return nil, fmt.Errorf("bad ruleData (no %q table)", "filter")
	}
	return tables, nil
}

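// exampleParseIPTablesData is an illustrative sketch, not part of the upstream
// suite, showing the shape of parseIPTablesData's output: one map entry per
// table, keyed by table name, each value holding that table's raw lines
// including the trailing "COMMIT". The input below is invented but minimal.
func exampleParseIPTablesData() {
	tables, err := parseIPTablesData(dedent.Dedent(`
		*filter
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		COMMIT
		*nat
		:KUBE-SERVICES - [0:0]
		COMMIT
		`))
	if err != nil {
		panic(err)
	}
	fmt.Println(tables["filter"][0]) // "*filter"
	fmt.Println(len(tables["nat"]))  // 3: "*nat", the chain declaration, and "COMMIT"
}
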
func TestParseIPTablesData(t *testing.T) {
	for _, tc := range []struct {
		name   string
		input  string
		output map[string][]string
		error  string
	}{
		{
			name: "basic test",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-NODEPORTS - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				COMMIT
				`),
			output: map[string][]string{
				"filter": {
					`*filter`,
					`:KUBE-SERVICES - [0:0]`,
					`:KUBE-EXTERNAL-SERVICES - [0:0]`,
					`:KUBE-FORWARD - [0:0]`,
					`:KUBE-NODEPORTS - [0:0]`,
					`-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
					`-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP`,
					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT`,
					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT`,
					`COMMIT`,
				},
				"nat": {
					`*nat`,
					`:KUBE-SERVICES - [0:0]`,
					`:KUBE-NODEPORTS - [0:0]`,
					`:KUBE-POSTROUTING - [0:0]`,
					`:KUBE-MARK-MASQ - [0:0]`,
					`:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]`,
					`:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]`,
					`-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN`,
					`-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000`,
					`-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE`,
					`-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000`,
					`-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O`,
					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`,
					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ`,
					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80`,
					`-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS`,
					`COMMIT`,
				},
			},
		},
		{
			name: "not enough tables",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: "bad ruleData (1 tables)",
		},
		{
			name: "trailing junk",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				junk
				`),
			error: "bad ruleData (2 tables)",
		},
		{
			name: "bad start line",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: `bad ruleData (table 2 starts with ":KUBE-SERVICES - [0:0]")`,
		},
		{
			name: "no nat",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*mangle
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: `bad ruleData (no "nat" table)`,
		},
		{
			name: "no filter",
			input: dedent.Dedent(`
				*mangle
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: `bad ruleData (no "filter" table)`,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			out, err := parseIPTablesData(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				} else {
					assert.Equal(t, tc.output, out)
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

// countRules returns the total number of rules in the named table of ruleData
// (iptables-save format), or -1 if ruleData cannot be parsed.
func countRules(tableName utiliptables.Table, ruleData string) int {
	dump, err := iptablestest.ParseIPTablesDump(ruleData)
	if err != nil {
		klog.ErrorS(err, "error parsing iptables rules")
		return -1
	}

	rules := 0
	table, err := dump.GetTable(tableName)
	if err != nil {
		klog.ErrorS(err, "can't find table", "table", tableName)
		return -1
	}

	for _, c := range table.Chains {
		rules += len(c.Rules)
	}
	return rules
}

// countRulesFromMetric returns the current value of the
// metrics.IptablesRulesTotal gauge for the named table, or -1 if the metric
// cannot be read (eg, because metrics were never registered).
func countRulesFromMetric(tableName utiliptables.Table) int {
	numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(tableName)))
	if err != nil {
		klog.ErrorS(err, "metrics are not registered?")
		return -1
	}
	return int(numRulesFloat)
}

// countRulesFromLastSyncMetric returns the current value of the
// metrics.IptablesRulesLastSync gauge for the named table, or -1 if the
// metric cannot be read.
func countRulesFromLastSyncMetric(tableName utiliptables.Table) int {
	numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesLastSync.WithLabelValues(string(tableName)))
	if err != nil {
		klog.ErrorS(err, "metrics are not registered?")
		return -1
	}
	return int(numRulesFloat)
}

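// exampleCountRulesCrossCheck is an illustrative sketch, not part of the
// upstream suite, of how the two counting paths relate: countRules parses the
// most recently generated rules directly, while countRulesFromMetric reads
// the gauge that syncProxyRules published for the same table. Assuming proxy
// metrics have been registered with the legacy registry (as other tests in
// this suite arrange), the two should agree after a sync.
func exampleCountRulesCrossCheck(t *testing.T, fp *Proxier) {
	fp.syncProxyRules()
	fromRules := countRules(utiliptables.TableNAT, fp.iptablesData.String())
	fromMetric := countRulesFromMetric(utiliptables.TableNAT)
	if fromRules != fromMetric {
		t.Errorf("nat rule counts disagree: parsed %d, metric reports %d", fromRules, fromMetric)
	}
}
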
// findAllMatches takes an array of lines and a pattern with one parenthesized group, and
// returns a sorted array of all of the unique matches of the parenthesized group.
func findAllMatches(lines []string, pattern string) []string {
	regex := regexp.MustCompile(pattern)
	allMatches := sets.New[string]()
	for _, line := range lines {
		match := regex.FindStringSubmatch(line)
		if len(match) == 2 {
			allMatches.Insert(match[1])
		}
	}
	return sets.List(allMatches)
}

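// exampleFindAllMatches is illustrative only (the rules and chain names are
// invented): extracting the jump targets from a few rules dedupes and sorts
// them, which is what the consistency checks below rely on.
func exampleFindAllMatches() {
	lines := []string{
		"-A KUBE-SERVICES -d 172.30.0.41 -j KUBE-SVC-XPGD46QRK7WJZT7O",
		"-A KUBE-SERVICES -d 172.30.0.42 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT",
		"-A KUBE-SERVICES -d 172.30.0.43 -j KUBE-SVC-XPGD46QRK7WJZT7O",
	}
	fmt.Println(findAllMatches(lines, `-j ([^ ]*)`))
	// sorted and unique: [KUBE-SVC-GNZBNJ2PO5MGZ6GT KUBE-SVC-XPGD46QRK7WJZT7O]
}
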
// checkIPTablesRuleJumps checks that every `-j` in the given rules jumps to a chain
// that we created and added rules to
func checkIPTablesRuleJumps(ruleData string) error {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		return err
	}

	for tableName, lines := range tables {
		// Find all of the lines like ":KUBE-SERVICES", indicating chains that
		// iptables-restore would create when loading the data.
		createdChains := sets.New[string](findAllMatches(lines, `^:([^ ]*)`)...)
		// Find all of the lines like "-X KUBE-SERVICES ..." indicating chains
		// that we are deleting because they are no longer used, and remove
		// those chains from createdChains.
		createdChains = createdChains.Delete(findAllMatches(lines, `-X ([^ ]*)`)...)

		// Find all of the lines like "-A KUBE-SERVICES ..." indicating chains
		// that we are adding at least one rule to.
		filledChains := sets.New[string](findAllMatches(lines, `-A ([^ ]*)`)...)

		// Find all of the chains that are jumped to by some rule so we can make
		// sure we only jump to valid chains.
		jumpedChains := sets.New[string](findAllMatches(lines, `-j ([^ ]*)`)...)
		// Ignore jumps to chains that we expect to exist even if kube-proxy
		// didn't create them itself.
		jumpedChains.Delete("ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE")

		// Find cases where we have "-A FOO ... -j BAR" but no ":BAR", meaning
		// that we are jumping to a chain that was not created.
		missingChains := jumpedChains.Difference(createdChains)
		missingChains = missingChains.Union(filledChains.Difference(createdChains))
		if len(missingChains) > 0 {
			return fmt.Errorf("some chains in %s are used but were not created: %v", tableName, missingChains.UnsortedList())
		}

		// Find cases where we have "-A FOO ... -j BAR", but no "-A BAR ...",
		// meaning that we are jumping to a chain that we didn't write out any
		// rules for, which is normally a bug. (Except that KUBE-SERVICES always
		// jumps to KUBE-NODEPORTS, even when there are no NodePort rules.)
		emptyChains := jumpedChains.Difference(filledChains)
		emptyChains.Delete(string(kubeNodePortsChain))
		if len(emptyChains) > 0 {
			return fmt.Errorf("some chains in %s are jumped to but have no rules: %v", tableName, emptyChains.UnsortedList())
		}

		// Find cases where we have ":BAR" but no "-A FOO ... -j BAR", meaning
		// that we are creating an empty chain but not using it for anything.
		extraChains := createdChains.Difference(jumpedChains)
		extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(kubeMarkMasqChain), string(kubeProxyFirewallChain), string(kubeletFirewallChain))
		if len(extraChains) > 0 {
			return fmt.Errorf("some chains in %s are created but not used: %v", tableName, extraChains.UnsortedList())
		}
	}

	return nil
}

func TestCheckIPTablesRuleJumps(t *testing.T) {
	for _, tc := range []struct {
		name  string
		input string
		error string
	}{
		{
			name: "valid",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
				COMMIT
				`),
			error: "",
		},
		{
			name: "can't jump to chain that wasn't created",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				COMMIT
				`),
			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't jump to chain that has no rules",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				COMMIT
				`),
			error: "some chains in nat are jumped to but have no rules: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't add rules to a chain that wasn't created",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SERVICES - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				COMMIT
				`),
			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't create chain and then not use it",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
				COMMIT
				`),
			error: "some chains in nat are created but not used: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := checkIPTablesRuleJumps(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

// orderByCommentServiceName is a helper function that orders two IPTables rules
// based on the service name in their comment. (If either rule has no comment
// then the two rules compare as equal; the function just returns false.)
func orderByCommentServiceName(rule1, rule2 *iptablestest.Rule) bool {
	if rule1.Comment == nil || rule2.Comment == nil {
		return false
	}
	name1, name2 := rule1.Comment.Value, rule2.Comment.Value

	// The service name is the comment up to the first space or colon
	i := strings.IndexAny(name1, " :")
	if i != -1 {
		name1 = name1[:i]
	}
	i = strings.IndexAny(name2, " :")
	if i != -1 {
		name2 = name2[:i]
	}

	return name1 < name2
}

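// exampleOrderByCommentServiceName is an illustrative sketch, not part of the
// upstream suite, constructing bare iptablestest.Rule values by hand (assuming
// the exported Rule and IPTablesValue shapes from the iptablestest package):
// the comparison keys on the "ns/name" prefix of each comment, so ns1/svc1
// sorts before ns2/svc2 regardless of the text that follows.
func exampleOrderByCommentServiceName() {
	rule1 := &iptablestest.Rule{Comment: &iptablestest.IPTablesValue{Value: "ns1/svc1:p80 cluster IP"}}
	rule2 := &iptablestest.Rule{Comment: &iptablestest.IPTablesValue{Value: "ns2/svc2:p80"}}
	fmt.Println(orderByCommentServiceName(rule1, rule2)) // true: "ns1/svc1" < "ns2/svc2"
	fmt.Println(orderByCommentServiceName(rule2, rule1)) // false
}
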
// sortIPTablesRules sorts `iptables-restore` output so as to not depend on the order that
// Services get processed in, while preserving the relative ordering of related rules.
func sortIPTablesRules(ruleData string) (string, error) {
	dump, err := iptablestest.ParseIPTablesDump(ruleData)
	if err != nil {
		return "", err
	}

	// Sort tables
	sort.Slice(dump.Tables, func(i, j int) bool {
		return dump.Tables[i].Name < dump.Tables[j].Name
	})

	// Sort chains
	for t := range dump.Tables {
		table := &dump.Tables[t]
		sort.Slice(table.Chains, func(i, j int) bool {
			switch {
			case table.Chains[i].Name == kubeNodePortsChain:
				// KUBE-NODEPORTS comes before anything
				return true
			case table.Chains[j].Name == kubeNodePortsChain:
				// anything goes after KUBE-NODEPORTS
				return false
			case table.Chains[i].Name == kubeServicesChain:
				// KUBE-SERVICES comes before anything (except KUBE-NODEPORTS)
				return true
			case table.Chains[j].Name == kubeServicesChain:
				// anything (except KUBE-NODEPORTS) goes after KUBE-SERVICES
				return false
			case strings.HasPrefix(string(table.Chains[i].Name), "KUBE-") && !strings.HasPrefix(string(table.Chains[j].Name), "KUBE-"):
				// KUBE-* comes before non-KUBE-*
				return true
			case !strings.HasPrefix(string(table.Chains[i].Name), "KUBE-") && strings.HasPrefix(string(table.Chains[j].Name), "KUBE-"):
				// non-KUBE-* goes after KUBE-*
				return false
			default:
				// We have two KUBE-* chains or two non-KUBE-* chains; either
				// way they sort alphabetically
				return table.Chains[i].Name < table.Chains[j].Name
			}
		})
	}

	// Sort KUBE-NODEPORTS chains by service name
	chain, _ := dump.GetChain(utiliptables.TableFilter, kubeNodePortsChain)
	if chain != nil {
		sort.SliceStable(chain.Rules, func(i, j int) bool {
			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
		})
	}
	chain, _ = dump.GetChain(utiliptables.TableNAT, kubeNodePortsChain)
	if chain != nil {
		sort.SliceStable(chain.Rules, func(i, j int) bool {
			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
		})
	}

	// Sort KUBE-SERVICES chains by service name (but keeping the "must be the last
	// rule" rule in the "nat" table's KUBE-SERVICES chain last).
	chain, _ = dump.GetChain(utiliptables.TableFilter, kubeServicesChain)
	if chain != nil {
		sort.SliceStable(chain.Rules, func(i, j int) bool {
			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
		})
	}
	chain, _ = dump.GetChain(utiliptables.TableNAT, kubeServicesChain)
	if chain != nil {
		sort.SliceStable(chain.Rules, func(i, j int) bool {
			if chain.Rules[i].Comment != nil && strings.Contains(chain.Rules[i].Comment.Value, "must be the last rule") {
				return false
			} else if chain.Rules[j].Comment != nil && strings.Contains(chain.Rules[j].Comment.Value, "must be the last rule") {
				return true
			}
			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
		})
	}

	return dump.String(), nil
}

func TestSortIPTablesRules(t *testing.T) {
	for _, tc := range []struct {
		name   string
		input  string
		output string
		error  string
	}{
		{
			name: "basic test using each match type",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FIREWALL - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				:KUBE-PROXY-FIREWALL - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
				-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-NODEPORTS - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
				:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
				:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
				:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
				:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
				:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
				:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
				:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
				:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
				:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "other traffic to ns2/svc2:p80 will be dropped by KUBE-PROXY-FIREWALL"
				-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
				-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				COMMIT
				`),
			output: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FIREWALL - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-PROXY-FIREWALL - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
				-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
				:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
				:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
				:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
				:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
				:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
				:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
				-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
				-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "other traffic to ns2/svc2:p80 will be dropped by KUBE-PROXY-FIREWALL"
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
				-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
				COMMIT
				`),
		},
		{
			name: "extra tables",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*mangle
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			output: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1153  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1154  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1155  				COMMIT
  1156  				*mangle
  1157  				:KUBE-NODEPORTS - [0:0]
  1158  				:KUBE-SERVICES - [0:0]
  1159  				:KUBE-EXTERNAL-SERVICES - [0:0]
  1160  				:KUBE-FORWARD - [0:0]
  1161  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1162  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1163  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1164  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1165  				COMMIT
  1166  				*nat
  1167  				:KUBE-NODEPORTS - [0:0]
  1168  				:KUBE-SERVICES - [0:0]
  1169  				:KUBE-EXTERNAL-SERVICES - [0:0]
  1170  				:KUBE-FORWARD - [0:0]
  1171  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1172  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1173  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1174  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1175  				COMMIT
  1176  				`),
  1177  		},
  1178  		{
  1179  			name: "correctly match same service name in different styles of comments",
  1180  			input: dedent.Dedent(`
  1181  				*filter
  1182  				COMMIT
  1183  				*nat
  1184  				:KUBE-SERVICES - [0:0]
  1185  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1186  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1187  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1188  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
  1189  				-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
  1190  				-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
  1191  				-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
  1192  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
  1193  				-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
  1194  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
  1195  				-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
  1196  				-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
  1197  				COMMIT
  1198  				`),
  1199  			output: dedent.Dedent(`
  1200  				*filter
  1201  				COMMIT
  1202  				*nat
  1203  				:KUBE-SERVICES - [0:0]
  1204  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
  1205  				-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
  1206  				-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
  1207  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1208  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1209  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1210  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
  1211  				-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
  1212  				-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
  1213  				-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
  1214  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
  1215  				-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
  1216  				COMMIT
  1217  				`),
  1218  		},
  1219  		{
  1220  			name: "unexpected junk lines are preserved",
  1221  			input: dedent.Dedent(`
  1222  				*filter
  1223  				COMMIT
  1224  				*nat
  1225  				:KUBE-SERVICES - [0:0]
  1226  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1227  				:KUBE-AAAAA - [0:0]
  1228  				:KUBE-ZZZZZ - [0:0]
  1229  				:WHY-IS-THIS-CHAIN-HERE - [0:0]
  1230  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1231  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1232  				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
  1233  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1234  				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
  1235  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1236  				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
  1237  				COMMIT
  1238  				`),
  1239  			output: dedent.Dedent(`
  1240  				*filter
  1241  				COMMIT
  1242  				*nat
  1243  				:KUBE-SERVICES - [0:0]
  1244  				:KUBE-AAAAA - [0:0]
  1245  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1246  				:KUBE-ZZZZZ - [0:0]
  1247  				:WHY-IS-THIS-CHAIN-HERE - [0:0]
  1248  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1249  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1250  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1251  				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
  1252  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1253  				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
  1254  				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
  1255  				COMMIT
  1256  				`),
  1257  		},
  1258  	} {
  1259  		t.Run(tc.name, func(t *testing.T) {
  1260  			out, err := sortIPTablesRules(tc.input)
  1261  			if err == nil {
  1262  				if tc.error != "" {
  1263  					t.Errorf("unexpectedly did not get error")
  1264  				} else {
  1265  					assert.Equal(t, strings.TrimPrefix(tc.output, "\n"), out)
  1266  				}
  1267  			} else {
  1268  				if tc.error == "" {
  1269  					t.Errorf("got unexpected error: %v", err)
  1270  				} else if !strings.HasPrefix(err.Error(), tc.error) {
  1271  					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
  1272  				}
  1273  			}
  1274  		})
  1275  	}
  1276  }
  1277  
  1278  // getLine returns the line number of the caller, if possible. This is useful in
  1279  // tests with a large number of cases: when something goes wrong, it makes it
  1280  // easier to find which case failed.
  1281  func getLine() int {
  1282  	_, _, line, ok := stdruntime.Caller(1)
  1283  	if ok {
  1284  		return line
  1285  	}
  1286  	return 0
  1287  }
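
        // For example, a caller captures its own line number and threads it through
        // a helper, so a failure deep inside the helper points back at the test
        // case that triggered it:
        //
        //	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, testCases)
        //
        // (testCases here stands for the caller's []packetFlowTest slice.)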
  1288  
  1289  // assertIPTablesRulesEqual asserts that the generated rules in result match the rules in
  1290  // expected, ignoring irrelevant ordering differences. By default it also checks the
  1291  // rules for consistency (e.g., no jumps to chains that aren't defined); that can be
  1292  // disabled by passing false for checkConsistency when checking a partial set of rules.
  1293  func assertIPTablesRulesEqual(t *testing.T, line int, checkConsistency bool, expected, result string) {
  1294  	expected = strings.TrimLeft(expected, " \t\n")
  1295  
  1296  	result, err := sortIPTablesRules(strings.TrimLeft(result, " \t\n"))
  1297  	if err != nil {
  1298  		t.Fatalf("%s", err)
  1299  	}
  1300  
  1301  	lineStr := ""
  1302  	if line != 0 {
  1303  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1304  	}
  1305  	if diff := cmp.Diff(expected, result); diff != "" {
  1306  		t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result)
  1307  	}
  1308  
  1309  	if checkConsistency {
  1310  		err = checkIPTablesRuleJumps(expected)
  1311  		if err != nil {
  1312  			t.Fatalf("%s%s", err, lineStr)
  1313  		}
  1314  	}
  1315  }
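
        // A typical call (mirroring the tests below) checks the proxier's complete
        // output, where expected is a dedented iptables-save-style dump and fp is
        // the fake Proxier under test:
        //
        //	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())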
  1316  
  1317  // assertIPTablesChainEqual asserts that the indicated chain in the indicated table in
  1318  // result contains exactly the rules in expected (in that order).
  1319  func assertIPTablesChainEqual(t *testing.T, line int, table utiliptables.Table, chain utiliptables.Chain, expected, result string) {
  1320  	expected = strings.TrimLeft(expected, " \t\n")
  1321  
  1322  	dump, err := iptablestest.ParseIPTablesDump(strings.TrimLeft(result, " \t\n"))
  1323  	if err != nil {
  1324  		t.Fatalf("%s", err)
  1325  	}
  1326  
  1327  	result = ""
  1328  	if ch, _ := dump.GetChain(table, chain); ch != nil {
  1329  		for _, rule := range ch.Rules {
  1330  			result += rule.Raw + "\n"
  1331  		}
  1332  	}
  1333  
  1334  	lineStr := ""
  1335  	if line != 0 {
  1336  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1337  	}
  1338  	if diff := cmp.Diff(expected, result); diff != "" {
  1339  		t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result)
  1340  	}
  1341  }
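
        // A minimal sketch of a single-chain check; expectedRules is a placeholder
        // for a dedented string of "-A ..." lines for the chain in question:
        //
        //	assertIPTablesChainEqual(t, getLine(), utiliptables.TableNAT, kubeServicesChain,
        //		expectedRules, fp.iptablesData.String())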
  1342  
  1343  // addressMatches helps test whether an iptables rule such as "! -s 192.168.0.0/16" matches
  1344  // ipStr. address.Value is either an IP address ("1.2.3.4") or a CIDR string
  1345  // ("1.2.3.0/24").
  1346  func addressMatches(t *testing.T, address *iptablestest.IPTablesValue, ipStr string) bool {
  1347  	ip := netutils.ParseIPSloppy(ipStr)
  1348  	if ip == nil {
  1349  		t.Fatalf("Bad IP in test case: %s", ipStr)
  1350  	}
  1351  
  1352  	var matches bool
  1353  	if strings.Contains(address.Value, "/") {
  1354  		_, cidr, err := netutils.ParseCIDRSloppy(address.Value)
  1355  		if err != nil {
  1356  			t.Errorf("Bad CIDR in kube-proxy output: %v", err)
  1357  		}
  1358  		matches = cidr.Contains(ip)
  1359  	} else {
  1360  		ip2 := netutils.ParseIPSloppy(address.Value)
  1361  		if ip2 == nil {
  1362  			t.Errorf("Bad IP/CIDR in kube-proxy output: %s", address.Value)
  1363  		}
  1364  		matches = ip.Equal(ip2)
  1365  	}
  1366  	return (!address.Negated && matches) || (address.Negated && !matches)
  1367  }
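
        // For example, with the negated match "! -s 192.168.0.0/16" (Negated=true,
        // Value="192.168.0.0/16"), addressMatches reports true for "10.0.0.1"
        // (outside the CIDR, so the negation succeeds) and false for "192.168.1.1".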
  1368  
  1369  // iptablesTracer holds data used while virtually tracing a packet through a set of
  1370  // iptables rules
  1371  type iptablesTracer struct {
  1372  	ipt      *iptablestest.FakeIPTables
  1373  	localIPs sets.Set[string]
  1374  	t        *testing.T
  1375  
  1376  	// matches accumulates the list of rules that were matched, for debugging purposes.
  1377  	matches []string
  1378  
  1379  	// outputs accumulates the list of matched terminal rule targets (endpoint
  1380  	// IP:ports, or a special target like "REJECT") and is eventually used to generate
  1381  	// the return value of tracePacket.
  1382  	outputs []string
  1383  
  1384  	// markMasq tracks whether the packet has been marked for masquerading
  1385  	markMasq bool
  1386  }
  1387  
  1388  // newIPTablesTracer creates an iptablesTracer. nodeIPs are the IPs to treat as local
  1389  // node IPs (for determining whether rules with "--src-type LOCAL" or "--dst-type LOCAL"
  1390  // match).
  1391  func newIPTablesTracer(t *testing.T, ipt *iptablestest.FakeIPTables, nodeIPs []string) *iptablesTracer {
  1392  	localIPs := sets.New("127.0.0.1", "::1")
  1393  	localIPs.Insert(nodeIPs...)
  1394  
  1395  	return &iptablesTracer{
  1396  		ipt:      ipt,
  1397  		localIPs: localIPs,
  1398  		t:        t,
  1399  	}
  1400  }
  1401  
  1402  // ruleMatches checks whether the given iptables rule matches (at least probabilistically) a
  1403  // packet with the given sourceIP, protocol, destIP, and destPort.
  1404  func (tracer *iptablesTracer) ruleMatches(rule *iptablestest.Rule, sourceIP, protocol, destIP, destPort string) bool {
  1405  	// The sub-rules within an iptables rule are ANDed together, so the rule
  1406  	// matches only if all of them match. Go through the sub-rules, and fail if
  1407  	// any of them DOESN'T match.
  1408  
  1409  	if rule.SourceAddress != nil && !addressMatches(tracer.t, rule.SourceAddress, sourceIP) {
  1410  		return false
  1411  	}
  1412  	if rule.SourceType != nil {
  1413  		addrtype := "not-matched"
  1414  		if tracer.localIPs.Has(sourceIP) {
  1415  			addrtype = "LOCAL"
  1416  		}
  1417  		if !rule.SourceType.Matches(addrtype) {
  1418  			return false
  1419  		}
  1420  	}
  1421  
  1422  	if rule.Protocol != nil && !rule.Protocol.Matches(protocol) {
  1423  		return false
  1424  	}
  1425  
  1426  	if rule.DestinationAddress != nil && !addressMatches(tracer.t, rule.DestinationAddress, destIP) {
  1427  		return false
  1428  	}
  1429  	if rule.DestinationType != nil {
  1430  		addrtype := "not-matched"
  1431  		if tracer.localIPs.Has(destIP) {
  1432  			addrtype = "LOCAL"
  1433  		}
  1434  		if !rule.DestinationType.Matches(addrtype) {
  1435  			return false
  1436  		}
  1437  	}
  1438  	if rule.DestinationPort != nil && !rule.DestinationPort.Matches(destPort) {
  1439  		return false
  1440  	}
  1441  
  1442  	// Any rule that checks for past state/history does not match
  1443  	if rule.AffinityCheck != nil || rule.MarkCheck != nil || rule.CTStateCheck != nil {
  1444  		return false
  1445  	}
  1446  
  1447  	// Anything else is assumed to match
  1448  	return true
  1449  }
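
        // As a concrete example, the masquerade rule in a KUBE-SEP chain,
        // "-s 10.180.0.2 -j KUBE-MARK-MASQ", matches any packet whose sourceIP is
        // 10.180.0.2, whereas a session-affinity rule using "-m recent ... --rcheck"
        // never matches here, since AffinityCheck counts as a history check.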
  1450  
  1451  // runChain runs the given packet through the rules in the given table and chain, updating
  1452  // tracer's internal state accordingly. It returns true if it hits a terminal action.
  1453  func (tracer *iptablesTracer) runChain(table utiliptables.Table, chain utiliptables.Chain, sourceIP, protocol, destIP, destPort string) bool {
  1454  	c, _ := tracer.ipt.Dump.GetChain(table, chain)
  1455  	if c == nil {
  1456  		return false
  1457  	}
  1458  
  1459  	for _, rule := range c.Rules {
  1460  		if rule.Jump == nil {
  1461  			continue
  1462  		}
  1463  
  1464  		if !tracer.ruleMatches(rule, sourceIP, protocol, destIP, destPort) {
  1465  			continue
  1466  		}
  1467  		// record the matched rule for debugging purposes
  1468  		tracer.matches = append(tracer.matches, rule.Raw)
  1469  
  1470  		switch rule.Jump.Value {
  1471  		case "KUBE-MARK-MASQ":
  1472  			tracer.markMasq = true
  1473  			continue
  1474  
  1475  		case "ACCEPT", "REJECT", "DROP":
  1476  			// (only valid in filter)
  1477  			tracer.outputs = append(tracer.outputs, rule.Jump.Value)
  1478  			return true
  1479  
  1480  		case "DNAT":
  1481  			// (only valid in nat)
  1482  			tracer.outputs = append(tracer.outputs, rule.DNATDestination.Value)
  1483  			return true
  1484  
  1485  		default:
  1486  			// We got a "-j KUBE-SOMETHING", so process that chain
  1487  			terminated := tracer.runChain(table, utiliptables.Chain(rule.Jump.Value), sourceIP, protocol, destIP, destPort)
  1488  
  1489  			// If the subchain hit a terminal rule AND the rule that sent us
  1490  			// to that chain was non-probabilistic, then this chain terminates
  1491  			// as well. But if we went there because of a --probability rule,
  1492  			// then we want to keep accumulating further matches against this
  1493  			// chain.
  1494  			if terminated && rule.Probability == nil {
  1495  				return true
  1496  			}
  1497  		}
  1498  	}
  1499  
  1500  	return false
  1501  }
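
        // For example, in a two-endpoint service chain like KUBE-SVC-4SW47YFZTEDKD3PK,
        // the first endpoint jump carries "--probability 0.5000000000"; even though
        // the KUBE-SEP subchain it reaches ends in a terminal DNAT, the trace keeps
        // going and records the second endpoint jump too, so both possible
        // destinations end up in outputs.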
  1502  
  1503  // tracePacket determines what would happen to a packet with the given sourceIP, protocol,
  1504  // destIP, and destPort, given the indicated iptables ruleData. nodeIP is the local node
  1505  // IP (for rules matching "LOCAL"). (The protocol value should be lowercase as in iptables
  1506  // rules, not uppercase as in corev1.)
  1507  //
  1508  // The return values are: an array of matched rules (for debugging), the final packet
  1509  // destinations (a comma-separated list of IPs, or one of the special targets "ACCEPT",
  1510  // "DROP", or "REJECT"), and whether the packet would be masqueraded.
  1511  func tracePacket(t *testing.T, ipt *iptablestest.FakeIPTables, sourceIP, protocol, destIP, destPort string, nodeIPs []string) ([]string, string, bool) {
  1512  	tracer := newIPTablesTracer(t, ipt, nodeIPs)
  1513  
  1514  	// nat:PREROUTING goes first
  1515  	tracer.runChain(utiliptables.TableNAT, utiliptables.ChainPrerouting, sourceIP, protocol, destIP, destPort)
  1516  
  1517  	// After the PREROUTING rules run, pending DNATs are processed (which would affect
  1518  	// the destination IP that later rules match against).
  1519  	if len(tracer.outputs) != 0 {
  1520  		destIP = strings.Split(tracer.outputs[0], ":")[0]
  1521  	}
  1522  
  1523  	// Now the filter rules get run; exactly which ones run depends on whether this
  1524  	// is an inbound, outbound, or intra-host packet, which we don't know. So we just
  1525  	// run the interesting chains manually. (Theoretically this could cause conflicts
  1526  	// in the future, in which case we'd have to do something more complicated.)
  1527  	tracer.runChain(utiliptables.TableFilter, kubeServicesChain, sourceIP, protocol, destIP, destPort)
  1528  	tracer.runChain(utiliptables.TableFilter, kubeExternalServicesChain, sourceIP, protocol, destIP, destPort)
  1529  	tracer.runChain(utiliptables.TableFilter, kubeNodePortsChain, sourceIP, protocol, destIP, destPort)
  1530  	tracer.runChain(utiliptables.TableFilter, kubeProxyFirewallChain, sourceIP, protocol, destIP, destPort)
  1531  
  1532  	// Finally, the nat:POSTROUTING rules run, but the only interesting thing that
  1533  	// happens there is that the masquerade mark gets turned into actual masquerading.
  1534  
  1535  	return tracer.matches, strings.Join(tracer.outputs, ", "), tracer.markMasq
  1536  }
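
        // A usage sketch, assuming ipt was populated via RestoreAll as in
        // TestTracePackets below:
        //
        //	matches, output, masq := tracePacket(t, ipt, "10.0.0.2", "tcp", "172.30.0.41", "80", testNodeIPs)
        //
        // Against those static rules this yields output "10.180.0.1:80" with
        // masq == false, since pod-to-clusterIP traffic is DNATted without
        // being masqueraded.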
  1537  
  1538  type packetFlowTest struct {
  1539  	name     string
  1540  	sourceIP string
  1541  	protocol v1.Protocol
  1542  	destIP   string
  1543  	destPort int
  1544  	output   string
  1545  	masq     bool
  1546  }
  1547  
  1548  func runPacketFlowTests(t *testing.T, line int, ipt *iptablestest.FakeIPTables, nodeIPs []string, testCases []packetFlowTest) {
  1549  	lineStr := ""
  1550  	if line != 0 {
  1551  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1552  	}
  1553  	for _, tc := range testCases {
  1554  		t.Run(tc.name, func(t *testing.T) {
  1555  			protocol := strings.ToLower(string(tc.protocol))
  1556  			if protocol == "" {
  1557  				protocol = "tcp"
  1558  			}
  1559  			matches, output, masq := tracePacket(t, ipt, tc.sourceIP, protocol, tc.destIP, fmt.Sprintf("%d", tc.destPort), nodeIPs)
  1560  			var errors []string
  1561  			if output != tc.output {
  1562  				errors = append(errors, fmt.Sprintf("wrong output: expected %q got %q", tc.output, output))
  1563  			}
  1564  			if masq != tc.masq {
  1565  				errors = append(errors, fmt.Sprintf("wrong masq: expected %v got %v", tc.masq, masq))
  1566  			}
  1567  			if errors != nil {
  1568  				t.Errorf("Test %q of a %s packet from %s to %s:%d%s got result:\n%s\n\nBy matching:\n%s\n\n",
  1569  					tc.name, protocol, tc.sourceIP, tc.destIP, tc.destPort, lineStr, strings.Join(errors, "\n"), strings.Join(matches, "\n"))
  1570  			}
  1571  		})
  1572  	}
  1573  }
  1574  
  1575  // This tests tracePacket against static data, just to make sure we match things in the
  1576  // way we expect to.
  1577  func TestTracePackets(t *testing.T) {
  1578  	rules := dedent.Dedent(`
  1579  		*filter
  1580  		:INPUT - [0:0]
  1581  		:FORWARD - [0:0]
  1582  		:OUTPUT - [0:0]
  1583  		:KUBE-EXTERNAL-SERVICES - [0:0]
  1584  		:KUBE-FIREWALL - [0:0]
  1585  		:KUBE-FORWARD - [0:0]
  1586  		:KUBE-NODEPORTS - [0:0]
  1587  		:KUBE-SERVICES - [0:0]
  1588  		:KUBE-PROXY-FIREWALL - [0:0]
  1589  		-A INPUT -m comment --comment kubernetes health check service ports -j KUBE-NODEPORTS
  1590  		-A INPUT -m conntrack --ctstate NEW -m comment --comment kubernetes externally-visible service portals -j KUBE-EXTERNAL-SERVICES
  1591  		-A FORWARD -m comment --comment kubernetes forwarding rules -j KUBE-FORWARD
  1592  		-A FORWARD -m conntrack --ctstate NEW -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1593  		-A FORWARD -m conntrack --ctstate NEW -m comment --comment kubernetes externally-visible service portals -j KUBE-EXTERNAL-SERVICES
  1594  		-A OUTPUT -m conntrack --ctstate NEW -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1595  		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1596  		-A KUBE-SERVICES -m comment --comment "ns6/svc6:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.46 --dport 80 -j REJECT
  1597  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
  1598  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
  1599  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
  1600  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  1601  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1602  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1603  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1604  		-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
  1605  		COMMIT
  1606  		*nat
  1607  		:PREROUTING - [0:0]
  1608  		:INPUT - [0:0]
  1609  		:OUTPUT - [0:0]
  1610  		:POSTROUTING - [0:0]
  1611  		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
  1612  		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
  1613  		:KUBE-EXT-NUKIZ6OKUXPJNT4C - [0:0]
  1614  		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
  1615  		:KUBE-FW-NUKIZ6OKUXPJNT4C - [0:0]
  1616  		:KUBE-MARK-MASQ - [0:0]
  1617  		:KUBE-NODEPORTS - [0:0]
  1618  		:KUBE-POSTROUTING - [0:0]
  1619  		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
  1620  		:KUBE-SEP-I77PXRDZVX7PMWMN - [0:0]
  1621  		:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
  1622  		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1623  		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
  1624  		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
  1625  		:KUBE-SERVICES - [0:0]
  1626  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  1627  		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
  1628  		:KUBE-SVC-NUKIZ6OKUXPJNT4C - [0:0]
  1629  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  1630  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  1631  		-A PREROUTING -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1632  		-A OUTPUT -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1633  		-A POSTROUTING -m comment --comment kubernetes postrouting rules -j KUBE-POSTROUTING
  1634  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  1635  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  1636  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  1637  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  1638  		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1639  		-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-EXT-X27LE4BHSL4DOUIK
  1640  		-A KUBE-NODEPORTS -m comment --comment ns5/svc5:p80 -m tcp -p tcp --dport 3002 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1641  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  1642  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1643  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1644  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1645  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  1646  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  1647  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
  1648  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1649  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-NUKIZ6OKUXPJNT4C
  1650  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  1651  		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
  1652  		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
  1653  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1654  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
  1655  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1656  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -m comment --comment "masquerade traffic for ns5/svc5:p80 external destinations" -j KUBE-MARK-MASQ
  1657  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1658  		-A KUBE-EXT-X27LE4BHSL4DOUIK -m comment --comment "masquerade traffic for ns3/svc3:p80 external destinations" -j KUBE-MARK-MASQ
  1659  		-A KUBE-EXT-X27LE4BHSL4DOUIK -j KUBE-SVC-X27LE4BHSL4DOUIK
  1660  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1661  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "other traffic to ns5/svc5:p80 will be dropped by KUBE-PROXY-FIREWALL"
  1662  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
  1663  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
  1664  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1665  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1666  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1667  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1668  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
  1669  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1670  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
  1671  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
  1672  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
  1673  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
  1674  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1675  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
  1676  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
  1677  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1678  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
  1679  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1680  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -j KUBE-SEP-I77PXRDZVX7PMWMN
  1681  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1682  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP
  1683  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1684  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
  1685  		COMMIT
  1686  		`)
  1687  
  1688  	ipt := iptablestest.NewFake()
  1689  	err := ipt.RestoreAll([]byte(rules), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
  1690  	if err != nil {
  1691  		t.Fatalf("Restore of test data failed: %v", err)
  1692  	}
  1693  
  1694  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  1695  		{
  1696  			name:     "no match",
  1697  			sourceIP: "10.0.0.2",
  1698  			destIP:   "10.0.0.3",
  1699  			destPort: 80,
  1700  			output:   "",
  1701  		},
  1702  		{
  1703  			name:     "single endpoint",
  1704  			sourceIP: "10.0.0.2",
  1705  			destIP:   "172.30.0.41",
  1706  			destPort: 80,
  1707  			output:   "10.180.0.1:80",
  1708  		},
  1709  		{
  1710  			name:     "multiple endpoints",
  1711  			sourceIP: "10.0.0.2",
  1712  			destIP:   "172.30.0.44",
  1713  			destPort: 80,
  1714  			output:   "10.180.0.4:80, 10.180.0.5:80",
  1715  		},
  1716  		{
  1717  			name:     "LOCAL, KUBE-MARK-MASQ",
  1718  			sourceIP: testNodeIP,
  1719  			destIP:   "192.168.99.22",
  1720  			destPort: 80,
  1721  			output:   "10.180.0.2:80",
  1722  			masq:     true,
  1723  		},
  1724  		{
  1725  			name:     "DROP",
  1726  			sourceIP: testExternalClient,
  1727  			destIP:   "192.168.99.22",
  1728  			destPort: 80,
  1729  			output:   "DROP",
  1730  		},
  1731  		{
  1732  			name:     "ACCEPT (NodePortHealthCheck)",
  1733  			sourceIP: testNodeIP,
  1734  			destIP:   testNodeIP,
  1735  			destPort: 30000,
  1736  			output:   "ACCEPT",
  1737  		},
  1738  		{
  1739  			name:     "REJECT",
  1740  			sourceIP: "10.0.0.2",
  1741  			destIP:   "172.30.0.46",
  1742  			destPort: 80,
  1743  			output:   "REJECT",
  1744  		},
  1745  	})
  1746  }
  1747  
  1748  // TestOverallIPTablesRules creates a variety of services and verifies that the generated
  1749  // rules are exactly as expected.
  1750  func TestOverallIPTablesRules(t *testing.T) {
  1751  	ipt := iptablestest.NewFake()
  1752  	fp := NewFakeProxier(ipt)
  1753  	metrics.RegisterMetrics()
  1754  
  1755  	makeServiceMap(fp,
  1756  		// create ClusterIP service
  1757  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  1758  			svc.Spec.ClusterIP = "172.30.0.41"
  1759  			svc.Spec.Ports = []v1.ServicePort{{
  1760  				Name:     "p80",
  1761  				Port:     80,
  1762  				Protocol: v1.ProtocolTCP,
  1763  			}}
  1764  		}),
  1765  		// create LoadBalancer service with Local traffic policy
  1766  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  1767  			svc.Spec.Type = "LoadBalancer"
  1768  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1769  			svc.Spec.ClusterIP = "172.30.0.42"
  1770  			svc.Spec.Ports = []v1.ServicePort{{
  1771  				Name:     "p80",
  1772  				Port:     80,
  1773  				Protocol: v1.ProtocolTCP,
  1774  				NodePort: 3001,
  1775  			}}
  1776  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1777  				IP: "1.2.3.4",
  1778  			}}
  1779  			svc.Spec.ExternalIPs = []string{"192.168.99.22"}
  1780  			svc.Spec.HealthCheckNodePort = 30000
  1781  		}),
  1782  		// create NodePort service
  1783  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  1784  			svc.Spec.Type = "NodePort"
  1785  			svc.Spec.ClusterIP = "172.30.0.43"
  1786  			svc.Spec.Ports = []v1.ServicePort{{
  1787  				Name:     "p80",
  1788  				Port:     80,
  1789  				Protocol: v1.ProtocolTCP,
  1790  				NodePort: 3003,
  1791  			}}
  1792  		}),
  1793  		// create ExternalIP service
  1794  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  1795  			svc.Spec.Type = "NodePort"
  1796  			svc.Spec.ClusterIP = "172.30.0.44"
  1797  			svc.Spec.ExternalIPs = []string{"192.168.99.33"}
  1798  			svc.Spec.Ports = []v1.ServicePort{{
  1799  				Name:       "p80",
  1800  				Port:       80,
  1801  				Protocol:   v1.ProtocolTCP,
  1802  				TargetPort: intstr.FromInt32(80),
  1803  			}}
  1804  		}),
  1805  		// create LoadBalancer service with Cluster traffic policy, source ranges,
  1806  		// and session affinity
  1807  		makeTestService("ns5", "svc5", func(svc *v1.Service) {
  1808  			svc.Spec.Type = "LoadBalancer"
  1809  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
  1810  			svc.Spec.ClusterIP = "172.30.0.45"
  1811  			svc.Spec.Ports = []v1.ServicePort{{
  1812  				Name:     "p80",
  1813  				Port:     80,
  1814  				Protocol: v1.ProtocolTCP,
  1815  				NodePort: 3002,
  1816  			}}
  1817  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1818  				IP: "5.6.7.8",
  1819  			}}
  1820  			svc.Spec.HealthCheckNodePort = 30000
  1821  			// Extra whitespace to ensure that an invalid value does not
  1822  			// result in a crash, for backward compatibility.
  1823  			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}
  1824  
  1825  			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  1826  			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
  1827  				ClientIP: &v1.ClientIPConfig{
  1828  					TimeoutSeconds: ptr.To[int32](10800),
  1829  				},
  1830  			}
  1831  		}),
  1832  		// create ClusterIP service with no endpoints
  1833  		makeTestService("ns6", "svc6", func(svc *v1.Service) {
  1834  			svc.Spec.Type = "ClusterIP"
  1835  			svc.Spec.ClusterIP = "172.30.0.46"
  1836  			svc.Spec.Ports = []v1.ServicePort{{
  1837  				Name:       "p80",
  1838  				Port:       80,
  1839  				Protocol:   v1.ProtocolTCP,
  1840  				TargetPort: intstr.FromInt32(80),
  1841  			}}
  1842  		}),
  1843  	)
  1844  	populateEndpointSlices(fp,
  1845  		// create ClusterIP service endpoints
  1846  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  1847  			eps.AddressType = discovery.AddressTypeIPv4
  1848  			eps.Endpoints = []discovery.Endpoint{{
  1849  				Addresses: []string{"10.180.0.1"},
  1850  			}}
  1851  			eps.Ports = []discovery.EndpointPort{{
  1852  				Name:     ptr.To("p80"),
  1853  				Port:     ptr.To[int32](80),
  1854  				Protocol: ptr.To(v1.ProtocolTCP),
  1855  			}}
  1856  		}),
  1857  		// create Local LoadBalancer endpoints. Note that since we don't set the
  1858  		// endpoint's NodeName, this endpoint will be considered non-local and ignored.
  1859  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  1860  			eps.AddressType = discovery.AddressTypeIPv4
  1861  			eps.Endpoints = []discovery.Endpoint{{
  1862  				Addresses: []string{"10.180.0.2"},
  1863  			}}
  1864  			eps.Ports = []discovery.EndpointPort{{
  1865  				Name:     ptr.To("p80"),
  1866  				Port:     ptr.To[int32](80),
  1867  				Protocol: ptr.To(v1.ProtocolTCP),
  1868  			}}
  1869  		}),
  1870  		// create NodePort service endpoints
  1871  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  1872  			eps.AddressType = discovery.AddressTypeIPv4
  1873  			eps.Endpoints = []discovery.Endpoint{{
  1874  				Addresses: []string{"10.180.0.3"},
  1875  			}}
  1876  			eps.Ports = []discovery.EndpointPort{{
  1877  				Name:     ptr.To("p80"),
  1878  				Port:     ptr.To[int32](80),
  1879  				Protocol: ptr.To(v1.ProtocolTCP),
  1880  			}}
  1881  		}),
  1882  		// create ExternalIP service endpoints
  1883  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  1884  			eps.AddressType = discovery.AddressTypeIPv4
  1885  			eps.Endpoints = []discovery.Endpoint{{
  1886  				Addresses: []string{"10.180.0.4"},
  1887  			}, {
  1888  				Addresses: []string{"10.180.0.5"},
  1889  				NodeName:  ptr.To(testHostname),
  1890  			}}
  1891  			eps.Ports = []discovery.EndpointPort{{
  1892  				Name:     ptr.To("p80"),
  1893  				Port:     ptr.To[int32](80),
  1894  				Protocol: ptr.To(v1.ProtocolTCP),
  1895  			}}
  1896  		}),
  1897  		// create Cluster LoadBalancer endpoints
  1898  		makeTestEndpointSlice("ns5", "svc5", 1, func(eps *discovery.EndpointSlice) {
  1899  			eps.AddressType = discovery.AddressTypeIPv4
  1900  			eps.Endpoints = []discovery.Endpoint{{
  1901  				Addresses: []string{"10.180.0.3"},
  1902  			}}
  1903  			eps.Ports = []discovery.EndpointPort{{
  1904  				Name:     ptr.To("p80"),
  1905  				Port:     ptr.To[int32](80),
  1906  				Protocol: ptr.To(v1.ProtocolTCP),
  1907  			}}
  1908  		}),
  1909  	)
  1910  
  1911  	fp.syncProxyRules()
  1912  
  1913  	expected := dedent.Dedent(`
  1914  		*filter
  1915  		:KUBE-NODEPORTS - [0:0]
  1916  		:KUBE-SERVICES - [0:0]
  1917  		:KUBE-EXTERNAL-SERVICES - [0:0]
  1918  		:KUBE-FIREWALL - [0:0]
  1919  		:KUBE-FORWARD - [0:0]
  1920  		:KUBE-PROXY-FIREWALL - [0:0]
  1921  		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1922  		-A KUBE-SERVICES -m comment --comment "ns6/svc6:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.46 --dport 80 -j REJECT
  1923  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
  1924  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
  1925  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
  1926  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  1927  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1928  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1929  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1930  		-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
  1931  		COMMIT
  1932  		*nat
  1933  		:KUBE-NODEPORTS - [0:0]
  1934  		:KUBE-SERVICES - [0:0]
  1935  		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
  1936  		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
  1937  		:KUBE-EXT-NUKIZ6OKUXPJNT4C - [0:0]
  1938  		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
  1939  		:KUBE-FW-NUKIZ6OKUXPJNT4C - [0:0]
  1940  		:KUBE-MARK-MASQ - [0:0]
  1941  		:KUBE-POSTROUTING - [0:0]
  1942  		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
  1943  		:KUBE-SEP-I77PXRDZVX7PMWMN - [0:0]
  1944  		:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
  1945  		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1946  		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
  1947  		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
  1948  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  1949  		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
  1950  		:KUBE-SVC-NUKIZ6OKUXPJNT4C - [0:0]
  1951  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  1952  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  1953  		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1954  		-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-EXT-X27LE4BHSL4DOUIK
  1955  		-A KUBE-NODEPORTS -m comment --comment ns5/svc5:p80 -m tcp -p tcp --dport 3002 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1956  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  1957  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1958  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1959  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1960  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  1961  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  1962  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
  1963  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1964  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-NUKIZ6OKUXPJNT4C
  1965  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  1966  		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
  1967  		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
  1968  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1969  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
  1970  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1971  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -m comment --comment "masquerade traffic for ns5/svc5:p80 external destinations" -j KUBE-MARK-MASQ
  1972  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1973  		-A KUBE-EXT-X27LE4BHSL4DOUIK -m comment --comment "masquerade traffic for ns3/svc3:p80 external destinations" -j KUBE-MARK-MASQ
  1974  		-A KUBE-EXT-X27LE4BHSL4DOUIK -j KUBE-SVC-X27LE4BHSL4DOUIK
  1975  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1976  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "other traffic to ns5/svc5:p80 will be dropped by KUBE-PROXY-FIREWALL"
  1977  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  1978  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  1979  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  1980  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  1981  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
  1982  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
  1983  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1984  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --set -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1985  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1986  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1987  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
  1988  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1989  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
  1990  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
  1991  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
  1992  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
  1993  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1994  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
  1995  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
  1996  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1997  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
  1998  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1999  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --rcheck --seconds 10800 --reap -j KUBE-SEP-I77PXRDZVX7PMWMN
  2000  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -j KUBE-SEP-I77PXRDZVX7PMWMN
  2001  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  2002  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP
  2003  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  2004  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
  2005  		COMMIT
  2006  		`)
  2007  
  2008  	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
  2009  
  2010  	nNatRules := countRulesFromMetric(utiliptables.TableNAT)
  2011  	expectedNatRules := countRules(utiliptables.TableNAT, fp.iptablesData.String())
  2012  
  2013  	if nNatRules != expectedNatRules {
  2014  		t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules)
  2015  	}
  2016  }
  2017  
  2018  // TestNoEndpointsReject tests that a service with no endpoints rejects connections to
  2019  // its ClusterIP, ExternalIPs, NodePort, and LoadBalancer IP.
  2020  func TestNoEndpointsReject(t *testing.T) {
  2021  	ipt := iptablestest.NewFake()
  2022  	fp := NewFakeProxier(ipt)
  2023  	svcIP := "172.30.0.41"
  2024  	svcPort := 80
  2025  	svcNodePort := 3001
  2026  	svcExternalIPs := "192.168.99.11"
  2027  	svcLBIP := "1.2.3.4"
  2028  	svcPortName := proxy.ServicePortName{
  2029  		NamespacedName: makeNSN("ns1", "svc1"),
  2030  		Port:           "p80",
  2031  	}
  2032  
  2033  	makeServiceMap(fp,
  2034  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2035  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2036  			svc.Spec.ClusterIP = svcIP
  2037  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  2038  			svc.Spec.Ports = []v1.ServicePort{{
  2039  				Name:     svcPortName.Port,
  2040  				Protocol: v1.ProtocolTCP,
  2041  				Port:     int32(svcPort),
  2042  				NodePort: int32(svcNodePort),
  2043  			}}
  2044  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  2045  				IP: svcLBIP,
  2046  			}}
  2047  		}),
  2048  	)
  2049  	fp.syncProxyRules()
  2050  
  2051  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2052  		{
  2053  			name:     "pod to cluster IP with no endpoints",
  2054  			sourceIP: "10.0.0.2",
  2055  			destIP:   svcIP,
  2056  			destPort: svcPort,
  2057  			output:   "REJECT",
  2058  		},
  2059  		{
  2060  			name:     "external to external IP with no endpoints",
  2061  			sourceIP: testExternalClient,
  2062  			destIP:   svcExternalIPs,
  2063  			destPort: svcPort,
  2064  			output:   "REJECT",
  2065  		},
  2066  		{
  2067  			name:     "pod to NodePort with no endpoints",
  2068  			sourceIP: "10.0.0.2",
  2069  			destIP:   testNodeIP,
  2070  			destPort: svcNodePort,
  2071  			output:   "REJECT",
  2072  		},
  2073  		{
  2074  			name:     "external to NodePort with no endpoints",
  2075  			sourceIP: testExternalClient,
  2076  			destIP:   testNodeIP,
  2077  			destPort: svcNodePort,
  2078  			output:   "REJECT",
  2079  		},
  2080  		{
  2081  			name:     "pod to LoadBalancer IP with no endpoints",
  2082  			sourceIP: "10.0.0.2",
  2083  			destIP:   svcLBIP,
  2084  			destPort: svcPort,
  2085  			output:   "REJECT",
  2086  		},
  2087  		{
  2088  			name:     "external to LoadBalancer IP with no endpoints",
  2089  			sourceIP: testExternalClient,
  2090  			destIP:   svcLBIP,
  2091  			destPort: svcPort,
  2092  			output:   "REJECT",
  2093  		},
  2094  	})
  2095  }
  2096  
  2097  // TestClusterIPGeneral tests various basic features of a ClusterIP service.
  2098  func TestClusterIPGeneral(t *testing.T) {
  2099  	ipt := iptablestest.NewFake()
  2100  	fp := NewFakeProxier(ipt)
  2101  
  2102  	makeServiceMap(fp,
  2103  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  2104  			svc.Spec.ClusterIP = "172.30.0.41"
  2105  			svc.Spec.Ports = []v1.ServicePort{{
  2106  				Name:     "http",
  2107  				Port:     80,
  2108  				Protocol: v1.ProtocolTCP,
  2109  			}}
  2110  		}),
  2111  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  2112  			svc.Spec.ClusterIP = "172.30.0.42"
  2113  			svc.Spec.Ports = []v1.ServicePort{
  2114  				{
  2115  					Name:     "http",
  2116  					Port:     80,
  2117  					Protocol: v1.ProtocolTCP,
  2118  				},
  2119  				{
  2120  					Name:       "https",
  2121  					Port:       443,
  2122  					Protocol:   v1.ProtocolTCP,
  2123  					TargetPort: intstr.FromInt32(8443),
  2124  				},
  2125  				{
  2126  					// Of course this should really be UDP, but if we
  2127  					// create a service with UDP ports, the Proxier will
  2128  					// try to do conntrack cleanup and we'd have to set
  2129  					// the FakeExec up to be able to deal with that...
  2130  					Name:     "dns-sctp",
  2131  					Port:     53,
  2132  					Protocol: v1.ProtocolSCTP,
  2133  				},
  2134  				{
  2135  					Name:     "dns-tcp",
  2136  					Port:     53,
  2137  					Protocol: v1.ProtocolTCP,
  2138  					// We use TargetPort on TCP but not SCTP to help
  2139  					// disambiguate the output.
  2140  					TargetPort: intstr.FromInt32(5353),
  2141  				},
  2142  			}
  2143  		}),
  2144  	)
  2145  
  2146  	populateEndpointSlices(fp,
  2147  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  2148  			eps.AddressType = discovery.AddressTypeIPv4
  2149  			eps.Endpoints = []discovery.Endpoint{{
  2150  				Addresses: []string{"10.180.0.1"},
  2151  				NodeName:  ptr.To(testHostname),
  2152  			}}
  2153  			eps.Ports = []discovery.EndpointPort{{
  2154  				Name:     ptr.To("http"),
  2155  				Port:     ptr.To[int32](80),
  2156  				Protocol: ptr.To(v1.ProtocolTCP),
  2157  			}}
  2158  		}),
  2159  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  2160  			eps.AddressType = discovery.AddressTypeIPv4
  2161  			eps.Endpoints = []discovery.Endpoint{
  2162  				{
  2163  					Addresses: []string{"10.180.0.1"},
  2164  					NodeName:  ptr.To(testHostname),
  2165  				},
  2166  				{
  2167  					Addresses: []string{"10.180.2.1"},
  2168  					NodeName:  ptr.To("host2"),
  2169  				},
  2170  			}
  2171  			eps.Ports = []discovery.EndpointPort{
  2172  				{
  2173  					Name:     ptr.To("http"),
  2174  					Port:     ptr.To[int32](80),
  2175  					Protocol: ptr.To(v1.ProtocolTCP),
  2176  				},
  2177  				{
  2178  					Name:     ptr.To("https"),
  2179  					Port:     ptr.To[int32](8443),
  2180  					Protocol: ptr.To(v1.ProtocolTCP),
  2181  				},
  2182  				{
  2183  					Name:     ptr.To("dns-sctp"),
  2184  					Port:     ptr.To[int32](53),
  2185  					Protocol: ptr.To(v1.ProtocolSCTP),
  2186  				},
  2187  				{
  2188  					Name:     ptr.To("dns-tcp"),
  2189  					Port:     ptr.To[int32](5353),
  2190  					Protocol: ptr.To(v1.ProtocolTCP),
  2191  				},
  2192  			}
  2193  		}),
  2194  	)
  2195  
  2196  	fp.syncProxyRules()
  2197  
  2198  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2199  		{
  2200  			name:     "simple clusterIP",
  2201  			sourceIP: "10.180.0.2",
  2202  			destIP:   "172.30.0.41",
  2203  			destPort: 80,
  2204  			output:   "10.180.0.1:80",
  2205  			masq:     false,
  2206  		},
  2207  		{
  2208  			name:     "hairpin to cluster IP",
  2209  			sourceIP: "10.180.0.1",
  2210  			destIP:   "172.30.0.41",
  2211  			destPort: 80,
  2212  			output:   "10.180.0.1:80",
  2213  			masq:     true,
  2214  		},
  2215  		{
  2216  			name:     "clusterIP with multiple endpoints",
  2217  			sourceIP: "10.180.0.2",
  2218  			destIP:   "172.30.0.42",
  2219  			destPort: 80,
  2220  			output:   "10.180.0.1:80, 10.180.2.1:80",
  2221  			masq:     false,
  2222  		},
  2223  		{
  2224  			name:     "clusterIP with TargetPort",
  2225  			sourceIP: "10.180.0.2",
  2226  			destIP:   "172.30.0.42",
  2227  			destPort: 443,
  2228  			output:   "10.180.0.1:8443, 10.180.2.1:8443",
  2229  			masq:     false,
  2230  		},
  2231  		{
  2232  			name:     "clusterIP with TCP and SCTP on same port (TCP)",
  2233  			sourceIP: "10.180.0.2",
  2234  			protocol: v1.ProtocolTCP,
  2235  			destIP:   "172.30.0.42",
  2236  			destPort: 53,
  2237  			output:   "10.180.0.1:5353, 10.180.2.1:5353",
  2238  			masq:     false,
  2239  		},
  2240  		{
  2241  			name:     "clusterIP with TCP and SCTP on same port (SCTP)",
  2242  			sourceIP: "10.180.0.2",
  2243  			protocol: v1.ProtocolSCTP,
  2244  			destIP:   "172.30.0.42",
  2245  			destPort: 53,
  2246  			output:   "10.180.0.1:53, 10.180.2.1:53",
  2247  			masq:     false,
  2248  		},
  2249  		{
  2250  			name:     "TCP-only port does not match UDP traffic",
  2251  			sourceIP: "10.180.0.2",
  2252  			protocol: v1.ProtocolUDP,
  2253  			destIP:   "172.30.0.42",
  2254  			destPort: 80,
  2255  			output:   "",
  2256  		},
  2257  		{
  2258  			name:     "svc1 does not accept svc2's ports",
  2259  			sourceIP: "10.180.0.2",
  2260  			destIP:   "172.30.0.41",
  2261  			destPort: 443,
  2262  			output:   "",
  2263  		},
  2264  	})
  2265  }
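
        // A note on the "hairpin to cluster IP" case above: the source pod is itself
        // the endpoint the service DNATs to, so without SNAT the endpoint would see
        // its own IP as the client and reply to itself directly, bypassing the
        // reverse NAT. kube-proxy therefore masquerades hairpin traffic, which is
        // why that case (and only that case) expects masq: true.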
  2266  
  2267  func TestLoadBalancer(t *testing.T) {
  2268  	ipt := iptablestest.NewFake()
  2269  	fp := NewFakeProxier(ipt)
  2270  	svcIP := "172.30.0.41"
  2271  	svcPort := 80
  2272  	svcNodePort := 3001
  2273  	svcLBIP1 := "1.2.3.4"
  2274  	svcLBIP2 := "5.6.7.8"
  2275  	svcPortName := proxy.ServicePortName{
  2276  		NamespacedName: makeNSN("ns1", "svc1"),
  2277  		Port:           "p80",
  2278  		Protocol:       v1.ProtocolTCP,
  2279  	}
  2280  
  2281  	makeServiceMap(fp,
  2282  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2283  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2284  			svc.Spec.ClusterIP = svcIP
  2285  			svc.Spec.Ports = []v1.ServicePort{{
  2286  				Name:     svcPortName.Port,
  2287  				Port:     int32(svcPort),
  2288  				Protocol: v1.ProtocolTCP,
  2289  				NodePort: int32(svcNodePort),
  2290  			}}
  2291  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
  2292  				{IP: svcLBIP1},
  2293  				{IP: svcLBIP2},
  2294  			}
  2295  			svc.Spec.LoadBalancerSourceRanges = []string{
  2296  				"192.168.0.0/24",
  2297  
  2298  				// Regression test that excess whitespace gets ignored
  2299  				" 203.0.113.0/25",
  2300  			}
  2301  		}),
  2302  	)
  2303  
  2304  	epIP := "10.180.0.1"
  2305  	populateEndpointSlices(fp,
  2306  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2307  			eps.AddressType = discovery.AddressTypeIPv4
  2308  			eps.Endpoints = []discovery.Endpoint{{
  2309  				Addresses: []string{epIP},
  2310  			}}
  2311  			eps.Ports = []discovery.EndpointPort{{
  2312  				Name:     ptr.To(svcPortName.Port),
  2313  				Port:     ptr.To(int32(svcPort)),
  2314  				Protocol: ptr.To(v1.ProtocolTCP),
  2315  			}}
  2316  		}),
  2317  	)
  2318  
  2319  	fp.syncProxyRules()
  2320  
  2321  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2322  		{
  2323  			name:     "pod to cluster IP",
  2324  			sourceIP: "10.0.0.2",
  2325  			destIP:   svcIP,
  2326  			destPort: svcPort,
  2327  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2328  			masq:     false,
  2329  		},
  2330  		{
  2331  			name:     "external to nodePort",
  2332  			sourceIP: testExternalClient,
  2333  			destIP:   testNodeIP,
  2334  			destPort: svcNodePort,
  2335  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2336  			masq:     true,
  2337  		},
  2338  		{
  2339  			name:     "nodePort bypasses LoadBalancerSourceRanges",
  2340  			sourceIP: testExternalClientBlocked,
  2341  			destIP:   testNodeIP,
  2342  			destPort: svcNodePort,
  2343  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2344  			masq:     true,
  2345  		},
  2346  		{
  2347  			name:     "accepted external to LB1",
  2348  			sourceIP: testExternalClient,
  2349  			destIP:   svcLBIP1,
  2350  			destPort: svcPort,
  2351  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2352  			masq:     true,
  2353  		},
  2354  		{
  2355  			name:     "accepted external to LB2",
  2356  			sourceIP: testExternalClient,
  2357  			destIP:   svcLBIP2,
  2358  			destPort: svcPort,
  2359  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2360  			masq:     true,
  2361  		},
  2362  		{
  2363  			name:     "blocked external to LB1",
  2364  			sourceIP: testExternalClientBlocked,
  2365  			destIP:   svcLBIP1,
  2366  			destPort: svcPort,
  2367  			output:   "DROP",
  2368  		},
  2369  		{
  2370  			name:     "blocked external to LB2",
  2371  			sourceIP: testExternalClientBlocked,
  2372  			destIP:   svcLBIP2,
  2373  			destPort: svcPort,
  2374  			output:   "DROP",
  2375  		},
  2376  		{
  2377  			name:     "pod to LB1 (blocked by LoadBalancerSourceRanges)",
  2378  			sourceIP: "10.0.0.2",
  2379  			destIP:   svcLBIP1,
  2380  			destPort: svcPort,
  2381  			output:   "DROP",
  2382  		},
  2383  		{
  2384  			name:     "pod to LB2 (blocked by LoadBalancerSourceRanges)",
  2385  			sourceIP: "10.0.0.2",
  2386  			destIP:   svcLBIP2,
  2387  			destPort: svcPort,
  2388  			output:   "DROP",
  2389  		},
  2390  		{
  2391  			name:     "node to LB1 (allowed by LoadBalancerSourceRanges)",
  2392  			sourceIP: testNodeIP,
  2393  			destIP:   svcLBIP1,
  2394  			destPort: svcPort,
  2395  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2396  			masq:     true,
  2397  		},
  2398  		{
  2399  			name:     "node to LB2 (allowed by LoadBalancerSourceRanges)",
  2400  			sourceIP: testNodeIP,
  2401  			destIP:   svcLBIP2,
  2402  			destPort: svcPort,
  2403  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2404  			masq:     true,
  2405  		},
  2406  
  2407  		// The LB rules assume that when you connect from a node to a LB IP,
  2408  		// something external to kube-proxy will cause the connection to be
  2409  		// SNATted to the LB IP, so if the LoadBalancerSourceRanges include the
  2410  		// node IP, then we add a rule allowing traffic from the LB IP as well...
  2411  		{
  2412  			name:     "same node to LB1, SNATted to LB1 (implicitly allowed)",
  2413  			sourceIP: svcLBIP1,
  2414  			destIP:   svcLBIP1,
  2415  			destPort: svcPort,
  2416  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2417  			masq:     true,
  2418  		},
  2419  		{
  2420  			name:     "same node to LB2, SNATted to LB2 (implicitly allowed)",
  2421  			sourceIP: svcLBIP2,
  2422  			destIP:   svcLBIP2,
  2423  			destPort: svcPort,
  2424  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2425  			masq:     true,
  2426  		},
  2427  	})
  2428  }
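
        // For reference, the LoadBalancerSourceRanges filtering exercised above is
        // implemented with a per-service firewall chain. A hedged sketch of its
        // shape (KUBE-FW-HASH / KUBE-EXT-HASH are hypothetical placeholders, not
        // the hashes this service would really produce):
        //
        //	-A KUBE-FW-HASH ... -s 192.168.0.0/24 -j KUBE-EXT-HASH
        //	-A KUBE-FW-HASH ... -s 203.0.113.0/25 -j KUBE-EXT-HASH
        //
        // Traffic matching no allowed range never reaches the service's endpoint
        // chains and is ultimately dropped, which is what the "DROP" outputs assert.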
  2429  
  2430  // TestNodePorts tests NodePort services under various combinations of the
  2431  // --nodeport-addresses and --localhost-nodeports flags.
  2432  func TestNodePorts(t *testing.T) {
  2433  	testCases := []struct {
  2434  		name string
  2435  
  2436  		family             v1.IPFamily
  2437  		localhostNodePorts bool
  2438  		nodePortAddresses  []string
  2439  
  2440  		// allowAltNodeIP is true if we expect NodePort traffic on the alternate
  2441  		// node IP to be accepted
  2442  		allowAltNodeIP bool
  2443  
  2444  		// expectFirewall is true if we expect KUBE-FIREWALL to be filled in with
  2445  		// an anti-martian-packet rule
  2446  		expectFirewall bool
  2447  	}{
  2448  		{
  2449  			name: "ipv4, localhost-nodeports enabled",
  2450  
  2451  			family:             v1.IPv4Protocol,
  2452  			localhostNodePorts: true,
  2453  			nodePortAddresses:  nil,
  2454  
  2455  			allowAltNodeIP: true,
  2456  			expectFirewall: true,
  2457  		},
  2458  		{
  2459  			name: "ipv4, localhost-nodeports disabled",
  2460  
  2461  			family:             v1.IPv4Protocol,
  2462  			localhostNodePorts: false,
  2463  			nodePortAddresses:  nil,
  2464  
  2465  			allowAltNodeIP: true,
  2466  			expectFirewall: false,
  2467  		},
  2468  		{
  2469  			name: "ipv4, localhost-nodeports disabled, localhost in nodeport-addresses",
  2470  
  2471  			family:             v1.IPv4Protocol,
  2472  			localhostNodePorts: false,
  2473  			nodePortAddresses:  []string{"192.168.0.0/24", "127.0.0.1/32"},
  2474  
  2475  			allowAltNodeIP: false,
  2476  			expectFirewall: false,
  2477  		},
  2478  		{
  2479  			name: "ipv4, localhost-nodeports disabled, multiple nodeport-addresses",
  2480  
  2481  			family:             v1.IPv4Protocol,
  2482  			localhostNodePorts: false,
  2483  			nodePortAddresses:  []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
  2484  
  2485  			allowAltNodeIP: true,
  2486  			expectFirewall: false,
  2487  		},
  2488  		{
  2489  			name: "ipv6, localhost-nodeports enabled",
  2490  
  2491  			family:             v1.IPv6Protocol,
  2492  			localhostNodePorts: true,
  2493  			nodePortAddresses:  nil,
  2494  
  2495  			allowAltNodeIP: true,
  2496  			expectFirewall: false,
  2497  		},
  2498  		{
  2499  			name: "ipv6, localhost-nodeports disabled",
  2500  
  2501  			family:             v1.IPv6Protocol,
  2502  			localhostNodePorts: false,
  2503  			nodePortAddresses:  nil,
  2504  
  2505  			allowAltNodeIP: true,
  2506  			expectFirewall: false,
  2507  		},
  2508  		{
  2509  			name: "ipv6, localhost-nodeports disabled, multiple nodeport-addresses",
  2510  
  2511  			family:             v1.IPv6Protocol,
  2512  			localhostNodePorts: false,
  2513  			nodePortAddresses:  []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
  2514  
  2515  			allowAltNodeIP: false,
  2516  			expectFirewall: false,
  2517  		},
  2518  	}
  2519  
  2520  	for _, tc := range testCases {
  2521  		t.Run(tc.name, func(t *testing.T) {
  2522  			var ipt *iptablestest.FakeIPTables
  2523  			var svcIP, epIP1, epIP2 string
  2524  			if tc.family == v1.IPv4Protocol {
  2525  				ipt = iptablestest.NewFake()
  2526  				svcIP = "172.30.0.41"
  2527  				epIP1 = "10.180.0.1"
  2528  				epIP2 = "10.180.2.1"
  2529  			} else {
  2530  				ipt = iptablestest.NewIPv6Fake()
  2531  				svcIP = "fd00:172:30::41"
  2532  				epIP1 = "fd00:10:180::1"
  2533  				epIP2 = "fd00:10:180::2:1"
  2534  			}
  2535  			fp := NewFakeProxier(ipt)
  2536  			fp.localhostNodePorts = tc.localhostNodePorts
  2537  			if tc.nodePortAddresses != nil {
  2538  				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
  2539  			}
  2540  
  2541  			makeServiceMap(fp,
  2542  				makeTestService("ns1", "svc1", func(svc *v1.Service) {
  2543  					svc.Spec.Type = v1.ServiceTypeNodePort
  2544  					svc.Spec.ClusterIP = svcIP
  2545  					svc.Spec.Ports = []v1.ServicePort{{
  2546  						Name:     "p80",
  2547  						Port:     80,
  2548  						Protocol: v1.ProtocolTCP,
  2549  						NodePort: 3001,
  2550  					}}
  2551  				}),
  2552  			)
  2553  
  2554  			populateEndpointSlices(fp,
  2555  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  2556  					if tc.family == v1.IPv4Protocol {
  2557  						eps.AddressType = discovery.AddressTypeIPv4
  2558  					} else {
  2559  						eps.AddressType = discovery.AddressTypeIPv6
  2560  					}
  2561  					eps.Endpoints = []discovery.Endpoint{{
  2562  						Addresses: []string{epIP1},
  2563  						NodeName:  nil,
  2564  					}, {
  2565  						Addresses: []string{epIP2},
  2566  						NodeName:  ptr.To(testHostname),
  2567  					}}
  2568  					eps.Ports = []discovery.EndpointPort{{
  2569  						Name:     ptr.To("p80"),
  2570  						Port:     ptr.To[int32](80),
  2571  						Protocol: ptr.To(v1.ProtocolTCP),
  2572  					}}
  2573  				}),
  2574  			)
  2575  
  2576  			fp.syncProxyRules()
  2577  
  2578  			var podIP, externalClientIP, nodeIP, altNodeIP, localhostIP string
  2579  			if tc.family == v1.IPv4Protocol {
  2580  				podIP = "10.0.0.2"
  2581  				externalClientIP = testExternalClient
  2582  				nodeIP = testNodeIP
  2583  				altNodeIP = testNodeIPAlt
  2584  				localhostIP = "127.0.0.1"
  2585  			} else {
  2586  				podIP = "fd00:10::2"
  2587  				externalClientIP = "2600:5200::1"
  2588  				nodeIP = testNodeIPv6
  2589  				altNodeIP = testNodeIPv6Alt
  2590  				localhostIP = "::1"
  2591  			}
  2592  			output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80")
  2593  
  2594  			// Basic tests are the same for all cases
  2595  			runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2596  				{
  2597  					name:     "pod to cluster IP",
  2598  					sourceIP: podIP,
  2599  					destIP:   svcIP,
  2600  					destPort: 80,
  2601  					output:   output,
  2602  					masq:     false,
  2603  				},
  2604  				{
  2605  					name:     "external to nodePort",
  2606  					sourceIP: externalClientIP,
  2607  					destIP:   nodeIP,
  2608  					destPort: 3001,
  2609  					output:   output,
  2610  					masq:     true,
  2611  				},
  2612  				{
  2613  					name:     "node to nodePort",
  2614  					sourceIP: nodeIP,
  2615  					destIP:   nodeIP,
  2616  					destPort: 3001,
  2617  					output:   output,
  2618  					masq:     true,
  2619  				},
  2620  			})
  2621  
  2622  			// localhost to NodePort is only allowed in IPv4, and only if not disabled
  2623  			if tc.family == v1.IPv4Protocol && tc.localhostNodePorts {
  2624  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2625  					{
  2626  						name:     "localhost to nodePort gets masqueraded",
  2627  						sourceIP: localhostIP,
  2628  						destIP:   localhostIP,
  2629  						destPort: 3001,
  2630  						output:   output,
  2631  						masq:     true,
  2632  					},
  2633  				})
  2634  			} else {
  2635  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2636  					{
  2637  						name:     "localhost to nodePort is ignored",
  2638  						sourceIP: localhostIP,
  2639  						destIP:   localhostIP,
  2640  						destPort: 3001,
  2641  						output:   "",
  2642  					},
  2643  				})
  2644  			}
  2645  
  2646  			// NodePort on altNodeIP should be allowed, unless
  2647  			// nodePortAddresses excludes altNodeIP
  2648  			if tc.allowAltNodeIP {
  2649  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2650  					{
  2651  						name:     "external to nodePort on secondary IP",
  2652  						sourceIP: externalClientIP,
  2653  						destIP:   altNodeIP,
  2654  						destPort: 3001,
  2655  						output:   output,
  2656  						masq:     true,
  2657  					},
  2658  				})
  2659  			} else {
  2660  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2661  					{
  2662  						name:     "secondary nodeIP ignores NodePorts",
  2663  						sourceIP: externalClientIP,
  2664  						destIP:   altNodeIP,
  2665  						destPort: 3001,
  2666  						output:   "",
  2667  					},
  2668  				})
  2669  			}
  2670  
  2671  			// We have to check the firewall rule manually rather than via
  2672  			// runPacketFlowTests(), because the packet tracer doesn't
  2673  			// implement conntrack states.
  2674  			var expected string
  2675  			if tc.expectFirewall {
  2676  				expected = "-A KUBE-FIREWALL -m comment --comment \"block incoming localnet connections\" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP\n"
  2677  			}
  2678  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeletFirewallChain, expected, fp.iptablesData.String())
  2679  		})
  2680  	}
  2681  }
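
        // Background for the expectFirewall cases above: serving NodePorts on
        // 127.0.0.1 (IPv4 only) requires the route_localnet sysctl, which makes
        // 127.0.0.0/8 routable and therefore spoofable from off-node. kube-proxy
        // compensates with the anti-martian KUBE-FIREWALL rule asserted at the end
        // of each case. IPv6 has no route_localnet, so no rule is expected there.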
  2682  
  2683  func TestHealthCheckNodePort(t *testing.T) {
  2684  	ipt := iptablestest.NewFake()
  2685  	fp := NewFakeProxier(ipt)
  2686  	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
  2687  
  2688  	svcIP := "172.30.0.42"
  2689  	svcPort := 80
  2690  	svcNodePort := 3001
  2691  	svcHealthCheckNodePort := 30000
  2692  	svcPortName := proxy.ServicePortName{
  2693  		NamespacedName: makeNSN("ns1", "svc1"),
  2694  		Port:           "p80",
  2695  		Protocol:       v1.ProtocolTCP,
  2696  	}
  2697  
  2698  	svc := makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2699  		svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2700  		svc.Spec.ClusterIP = svcIP
  2701  		svc.Spec.Ports = []v1.ServicePort{{
  2702  			Name:     svcPortName.Port,
  2703  			Port:     int32(svcPort),
  2704  			Protocol: v1.ProtocolTCP,
  2705  			NodePort: int32(svcNodePort),
  2706  		}}
  2707  		svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  2708  		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2709  	})
  2710  	makeServiceMap(fp, svc)
  2711  	fp.syncProxyRules()
  2712  
  2713  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2714  		{
  2715  			name:     "firewall accepts HealthCheckNodePort",
  2716  			sourceIP: "1.2.3.4",
  2717  			destIP:   testNodeIP,
  2718  			destPort: svcHealthCheckNodePort,
  2719  			output:   "ACCEPT",
  2720  			masq:     false,
  2721  		},
  2722  	})
  2723  
  2724  	fp.OnServiceDelete(svc)
  2725  	fp.syncProxyRules()
  2726  
  2727  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2728  		{
  2729  			name:     "HealthCheckNodePort no longer has any rule",
  2730  			sourceIP: "1.2.3.4",
  2731  			destIP:   testNodeIP,
  2732  			destPort: svcHealthCheckNodePort,
  2733  			output:   "",
  2734  		},
  2735  	})
  2736  }
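
        // HealthCheckNodePort only exists for Local external traffic policy:
        // kube-proxy itself serves an HTTP health endpoint on that port reporting
        // the service's number of local endpoints, and the filter-table ACCEPT
        // asserted above keeps the port reachable on the node IP even though
        // nodePortAddresses is deliberately restricted to 127.0.0.0/8 here.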
  2737  
  2738  func TestDropInvalidRule(t *testing.T) {
  2739  	for _, tcpLiberal := range []bool{false, true} {
  2740  		t.Run(fmt.Sprintf("tcpLiberal %t", tcpLiberal), func(t *testing.T) {
  2741  			ipt := iptablestest.NewFake()
  2742  			fp := NewFakeProxier(ipt)
  2743  			fp.conntrackTCPLiberal = tcpLiberal
  2744  			fp.syncProxyRules()
  2745  
  2746  			var expected string
  2747  			if !tcpLiberal {
  2748  				expected = "-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP"
  2749  			}
  2750  			expected += dedent.Dedent(`
  2751  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  2752  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  2753  				`)
  2754  
  2755  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeForwardChain, expected, fp.iptablesData.String())
  2756  		})
  2757  	}
  2758  }
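
        // conntrackTCPLiberal mirrors the kernel's nf_conntrack_tcp_be_liberal
        // sysctl: when it is set, out-of-window TCP packets are no longer marked
        // INVALID, so the "--ctstate INVALID -j DROP" rule would match nothing and
        // is omitted. On a real node the knob is, e.g.:
        //
        //	sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1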
  2759  
  2760  func TestMasqueradeRule(t *testing.T) {
  2761  	for _, randomFully := range []bool{false, true} {
  2762  		t.Run(fmt.Sprintf("randomFully %t", randomFully), func(t *testing.T) {
  2763  			ipt := iptablestest.NewFake().SetHasRandomFully(randomFully)
  2764  			fp := NewFakeProxier(ipt)
  2765  			fp.syncProxyRules()
  2766  
  2767  			expectedFmt := dedent.Dedent(`
  2768  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  2769  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  2770  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE%s
  2771  				`)
  2772  			var expected string
  2773  			if randomFully {
  2774  				expected = fmt.Sprintf(expectedFmt, " --random-fully")
  2775  			} else {
  2776  				expected = fmt.Sprintf(expectedFmt, "")
  2777  			}
  2778  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableNAT, kubePostroutingChain, expected, fp.iptablesData.String())
  2779  		})
  2780  	}
  2781  }
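
        // The --random-fully suffix asks the kernel to fully randomize source-port
        // allocation when masquerading, avoiding port-collision races when many
        // connections are SNATted behind one address. Since not every iptables or
        // kernel supports it, the proxier appends the flag only when the underlying
        // iptables reports the capability -- which SetHasRandomFully simulates here.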
  2782  
  2783  // TestExternalTrafficPolicyLocal tests that traffic to externally-facing IPs does not get
  2784  // masqueraded when using Local traffic policy. A consequence is that traffic from
  2785  // external sources can only be routed to local endpoints, while traffic from
  2786  // internal sources is still routed to all endpoints.
  2787  func TestExternalTrafficPolicyLocal(t *testing.T) {
  2788  	ipt := iptablestest.NewFake()
  2789  	fp := NewFakeProxier(ipt)
  2790  
  2791  	svcIP := "172.30.0.41"
  2792  	svcPort := 80
  2793  	svcNodePort := 3001
  2794  	svcHealthCheckNodePort := 30000
  2795  	svcExternalIPs := "192.168.99.11"
  2796  	svcLBIP := "1.2.3.4"
  2797  	svcPortName := proxy.ServicePortName{
  2798  		NamespacedName: makeNSN("ns1", "svc1"),
  2799  		Port:           "p80",
  2800  	}
  2801  
  2802  	makeServiceMap(fp,
  2803  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2804  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2805  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2806  			svc.Spec.ClusterIP = svcIP
  2807  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  2808  			svc.Spec.Ports = []v1.ServicePort{{
  2809  				Name:       svcPortName.Port,
  2810  				Port:       int32(svcPort),
  2811  				Protocol:   v1.ProtocolTCP,
  2812  				NodePort:   int32(svcNodePort),
  2813  				TargetPort: intstr.FromInt32(int32(svcPort)),
  2814  			}}
  2815  			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  2816  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  2817  				IP: svcLBIP,
  2818  			}}
  2819  		}),
  2820  	)
  2821  
  2822  	epIP1 := "10.180.0.1"
  2823  	epIP2 := "10.180.2.1"
  2824  	populateEndpointSlices(fp,
  2825  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2826  			eps.AddressType = discovery.AddressTypeIPv4
  2827  			eps.Endpoints = []discovery.Endpoint{{
  2828  				Addresses: []string{epIP1},
  2829  			}, {
  2830  				Addresses: []string{epIP2},
  2831  				NodeName:  ptr.To(testHostname),
  2832  			}}
  2833  			eps.Ports = []discovery.EndpointPort{{
  2834  				Name:     ptr.To(svcPortName.Port),
  2835  				Port:     ptr.To(int32(svcPort)),
  2836  				Protocol: ptr.To(v1.ProtocolTCP),
  2837  			}}
  2838  		}),
  2839  	)
  2840  
  2841  	fp.syncProxyRules()
  2842  
  2843  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2844  		{
  2845  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  2846  			sourceIP: "10.0.0.2",
  2847  			destIP:   svcIP,
  2848  			destPort: svcPort,
  2849  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2850  			masq:     false,
  2851  		},
  2852  		{
  2853  			name:     "pod to external IP hits both endpoints, unmasqueraded",
  2854  			sourceIP: "10.0.0.2",
  2855  			destIP:   svcExternalIPs,
  2856  			destPort: svcPort,
  2857  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2858  			masq:     false,
  2859  		},
  2860  		{
  2861  			name:     "external to external IP hits only local endpoint, unmasqueraded",
  2862  			sourceIP: testExternalClient,
  2863  			destIP:   svcExternalIPs,
  2864  			destPort: svcPort,
  2865  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2866  			masq:     false,
  2867  		},
  2868  		{
  2869  			name:     "pod to LB IP hits both endpoints, unmasqueraded",
  2870  			sourceIP: "10.0.0.2",
  2871  			destIP:   svcLBIP,
  2872  			destPort: svcPort,
  2873  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2874  			masq:     false,
  2875  		},
  2876  		{
  2877  			name:     "external to LB IP hits only local endpoint, unmasqueraded",
  2878  			sourceIP: testExternalClient,
  2879  			destIP:   svcLBIP,
  2880  			destPort: svcPort,
  2881  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2882  			masq:     false,
  2883  		},
  2884  		{
  2885  			name:     "pod to NodePort hits both endpoints, unmasqueraded",
  2886  			sourceIP: "10.0.0.2",
  2887  			destIP:   testNodeIP,
  2888  			destPort: svcNodePort,
  2889  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2890  			masq:     false,
  2891  		},
  2892  		{
  2893  			name:     "external to NodePort hits only local endpoint, unmasqueraded",
  2894  			sourceIP: testExternalClient,
  2895  			destIP:   testNodeIP,
  2896  			destPort: svcNodePort,
  2897  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2898  			masq:     false,
  2899  		},
  2900  	})
  2901  }
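
        // Under the hood (a hedged sketch; the chain hashes are hypothetical), the
        // split seen above comes from the proxier programming two service chains:
        // KUBE-SVC-HASH listing all endpoints for internal traffic, and
        // KUBE-SVL-HASH listing only local endpoints for external traffic, roughly:
        //
        //	-A KUBE-EXT-HASH -s 10.0.0.0/8 ... -j KUBE-SVC-HASH   # cluster-originated
        //	-A KUBE-EXT-HASH -j KUBE-SVL-HASH                     # everything else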
  2902  
  2903  // TestExternalTrafficPolicyCluster tests that traffic to an externally-facing IP gets
  2904  // masqueraded when using Cluster traffic policy.
  2905  func TestExternalTrafficPolicyCluster(t *testing.T) {
  2906  	ipt := iptablestest.NewFake()
  2907  	fp := NewFakeProxier(ipt)
  2908  
  2909  	svcIP := "172.30.0.41"
  2910  	svcPort := 80
  2911  	svcNodePort := 3001
  2912  	svcExternalIPs := "192.168.99.11"
  2913  	svcLBIP := "1.2.3.4"
  2914  	svcPortName := proxy.ServicePortName{
  2915  		NamespacedName: makeNSN("ns1", "svc1"),
  2916  		Port:           "p80",
  2917  	}
  2918  
  2919  	makeServiceMap(fp,
  2920  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2921  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2922  			svc.Spec.ClusterIP = svcIP
  2923  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  2924  			svc.Spec.Ports = []v1.ServicePort{{
  2925  				Name:       svcPortName.Port,
  2926  				Port:       int32(svcPort),
  2927  				Protocol:   v1.ProtocolTCP,
  2928  				NodePort:   int32(svcNodePort),
  2929  				TargetPort: intstr.FromInt32(int32(svcPort)),
  2930  			}}
  2931  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  2932  				IP: svcLBIP,
  2933  			}}
  2934  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
  2935  		}),
  2936  	)
  2937  
  2938  	epIP1 := "10.180.0.1"
  2939  	epIP2 := "10.180.2.1"
  2940  	populateEndpointSlices(fp,
  2941  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2942  			eps.AddressType = discovery.AddressTypeIPv4
  2943  			eps.Endpoints = []discovery.Endpoint{{
  2944  				Addresses: []string{epIP1},
  2945  				NodeName:  nil,
  2946  			}, {
  2947  				Addresses: []string{epIP2},
  2948  				NodeName:  ptr.To(testHostname),
  2949  			}}
  2950  			eps.Ports = []discovery.EndpointPort{{
  2951  				Name:     ptr.To(svcPortName.Port),
  2952  				Port:     ptr.To(int32(svcPort)),
  2953  				Protocol: ptr.To(v1.ProtocolTCP),
  2954  			}}
  2955  		}),
  2956  	)
  2957  
  2958  	fp.syncProxyRules()
  2959  
  2960  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2961  		{
  2962  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  2963  			sourceIP: "10.0.0.2",
  2964  			destIP:   svcIP,
  2965  			destPort: svcPort,
  2966  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2967  			masq:     false,
  2968  		},
  2969  		{
  2970  			name:     "pod to external IP hits both endpoints, masqueraded",
  2971  			sourceIP: "10.0.0.2",
  2972  			destIP:   svcExternalIPs,
  2973  			destPort: svcPort,
  2974  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2975  			masq:     true,
  2976  		},
  2977  		{
  2978  			name:     "external to external IP hits both endpoints, masqueraded",
  2979  			sourceIP: testExternalClient,
  2980  			destIP:   svcExternalIPs,
  2981  			destPort: svcPort,
  2982  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2983  			masq:     true,
  2984  		},
  2985  		{
  2986  			name:     "pod to LB IP hits both endpoints, masqueraded",
  2987  			sourceIP: "10.0.0.2",
  2988  			destIP:   svcLBIP,
  2989  			destPort: svcPort,
  2990  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2991  			masq:     true,
  2992  		},
  2993  		{
  2994  			name:     "external to LB IP hits both endpoints, masqueraded",
  2995  			sourceIP: testExternalClient,
  2996  			destIP:   svcLBIP,
  2997  			destPort: svcPort,
  2998  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2999  			masq:     true,
  3000  		},
  3001  		{
  3002  			name:     "pod to NodePort hits both endpoints, masqueraded",
  3003  			sourceIP: "10.0.0.2",
  3004  			destIP:   testNodeIP,
  3005  			destPort: svcNodePort,
  3006  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  3007  			masq:     true,
  3008  		},
  3009  		{
  3010  			name:     "external to NodePort hits both endpoints, masqueraded",
  3011  			sourceIP: testExternalClient,
  3012  			destIP:   testNodeIP,
  3013  			destPort: svcNodePort,
  3014  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  3015  			masq:     true,
  3016  		},
  3017  	})
  3018  }
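
        // The consistent masq: true for external destinations above is what makes
        // Cluster policy work: the chosen endpoint may be on another node, so the
        // packet is SNATted to this node's address to guarantee replies come back
        // through it for un-NATting -- at the cost of hiding the client source IP,
        // which is exactly what the Local policy in the previous test preserves.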
  3019  
  3020  func TestComputeProbability(t *testing.T) {
  3021  	expectedProbabilities := map[int]string{
  3022  		1:      "1.0000000000",
  3023  		2:      "0.5000000000",
  3024  		10:     "0.1000000000",
  3025  		100:    "0.0100000000",
  3026  		1000:   "0.0010000000",
  3027  		10000:  "0.0001000000",
  3028  		100000: "0.0000100000",
  3029  		100001: "0.0000099999",
  3030  	}
  3031  
  3032  	for num, expected := range expectedProbabilities {
  3033  		actual := computeProbability(num)
  3034  		if actual != expected {
  3035  			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", num, expected, actual)
  3036  		}
  3037  	}
  3038  
  3039  	prevProbability := float64(0)
  3040  	for i := 100000; i > 1; i-- {
  3041  		currProbability, err := strconv.ParseFloat(computeProbability(i), 64)
  3042  		if err != nil {
  3043  			t.Fatalf("Error parsing float probability for %d: %v", i, err)
  3044  		}
  3045  		if currProbability <= prevProbability {
  3046  			t.Fatalf("Probability for %d unexpectedly <= previous probability (%0.10f <= %0.10f)", i, currProbability, prevProbability)
  3047  		}
  3048  		prevProbability = currProbability
  3049  	}
  3050  }
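
        // computeProbabilityRef is a hedged reference sketch (not the production
        // implementation, which lives in proxier.go) of the behavior the table above
        // pins down: 1/n rendered with ten decimal places. These strings feed
        // iptables "-m statistic --mode random --probability P" rules: the first of
        // k endpoints is picked with probability 1/k, the next with 1/(k-1), and so
        // on, with the final rule unconditional, so endpoints are chosen uniformly.
        func computeProbabilityRef(n int) string {
        	return fmt.Sprintf("%0.10f", 1.0/float64(n))
        }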
  3051  
  3052  func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
  3053  	svc := &v1.Service{
  3054  		ObjectMeta: metav1.ObjectMeta{
  3055  			Name:        name,
  3056  			Namespace:   namespace,
  3057  			Annotations: map[string]string{},
  3058  		},
  3059  		Spec:   v1.ServiceSpec{},
  3060  		Status: v1.ServiceStatus{},
  3061  	}
  3062  	svcFunc(svc)
  3063  	return svc
  3064  }
  3065  
  3066  func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
  3067  	svcPort := v1.ServicePort{
  3068  		Name:       name,
  3069  		Protocol:   protocol,
  3070  		Port:       port,
  3071  		NodePort:   nodeport,
  3072  		TargetPort: intstr.FromInt32(int32(targetPort)),
  3073  	}
  3074  	return append(array, svcPort)
  3075  }
  3076  
  3077  func TestBuildServiceMapAddRemove(t *testing.T) {
  3078  	ipt := iptablestest.NewFake()
  3079  	fp := NewFakeProxier(ipt)
  3080  
  3081  	services := []*v1.Service{
  3082  		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  3083  			svc.Spec.Type = v1.ServiceTypeClusterIP
  3084  			svc.Spec.ClusterIP = "172.30.55.4"
  3085  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  3086  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  3087  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
  3088  		}),
  3089  		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
  3090  			svc.Spec.Type = v1.ServiceTypeNodePort
  3091  			svc.Spec.ClusterIP = "172.30.55.10"
  3092  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
  3093  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
  3094  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
  3095  		}),
  3096  		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
  3097  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  3098  			svc.Spec.ClusterIP = "172.30.55.11"
  3099  			svc.Spec.LoadBalancerIP = "1.2.3.4"
  3100  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
  3101  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
  3102  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  3103  				Ingress: []v1.LoadBalancerIngress{
  3104  					{IP: "1.2.3.4"},
  3105  				},
  3106  			}
  3107  		}),
  3108  		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
  3109  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  3110  			svc.Spec.ClusterIP = "172.30.55.12"
  3111  			svc.Spec.LoadBalancerIP = "5.6.7.8"
  3112  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
  3113  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
  3114  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  3115  				Ingress: []v1.LoadBalancerIngress{
  3116  					{IP: "5.6.7.8"},
  3117  				},
  3118  			}
  3119  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  3120  			svc.Spec.HealthCheckNodePort = 345
  3121  		}),
  3122  	}
  3123  
  3124  	for i := range services {
  3125  		fp.OnServiceAdd(services[i])
  3126  	}
  3127  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3128  	if len(fp.svcPortMap) != 10 {
  3129  		t.Errorf("expected service map length 10, got %v", fp.svcPortMap)
  3130  	}
  3131  
  3132  	if len(result.DeletedUDPClusterIPs) != 0 {
  3133  		// Services only added, so nothing stale yet
  3134  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3135  	}
  3136  
  3137  	// Only the only-local-load-balancer service gets a healthcheck port added
  3138  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3139  	if len(healthCheckNodePorts) != 1 {
  3140  		t.Errorf("expected 1 healthcheck port, got %v", healthCheckNodePorts)
  3141  	} else {
  3142  		nsn := makeNSN("somewhere", "only-local-load-balancer")
  3143  		if port, found := healthCheckNodePorts[nsn]; !found || port != 345 {
  3144  			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, healthCheckNodePorts)
  3145  		}
  3146  	}
  3147  
  3148  	// Remove some stuff
  3149  	// oneService is a modification of services[0] with its first port removed.
  3150  	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  3151  		svc.Spec.Type = v1.ServiceTypeClusterIP
  3152  		svc.Spec.ClusterIP = "172.30.55.4"
  3153  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  3154  	})
  3155  
  3156  	fp.OnServiceUpdate(services[0], oneService)
  3157  	fp.OnServiceDelete(services[1])
  3158  	fp.OnServiceDelete(services[2])
  3159  	fp.OnServiceDelete(services[3])
  3160  
  3161  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3162  	if len(fp.svcPortMap) != 1 {
  3163  		t.Errorf("expected service map length 1, got %v", fp.svcPortMap)
  3164  	}
  3165  
  3166  	// All services but one were deleted. While you'd expect only the ClusterIPs
  3167  	// from the three deleted services here, we still have the ClusterIP for
  3168  	// the not-deleted service, because one of its UDP ServicePorts was deleted.
  3169  	expectedStaleUDPServices := []string{"172.30.55.10", "172.30.55.4", "172.30.55.11", "172.30.55.12"}
  3170  	if len(result.DeletedUDPClusterIPs) != len(expectedStaleUDPServices) {
  3171  		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.DeletedUDPClusterIPs.UnsortedList())
  3172  	}
  3173  	for _, ip := range expectedStaleUDPServices {
  3174  		if !result.DeletedUDPClusterIPs.Has(ip) {
  3175  			t.Errorf("expected stale UDP service %s", ip)
  3176  		}
  3177  	}
  3178  
  3179  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3180  	if len(healthCheckNodePorts) != 0 {
  3181  		t.Errorf("expected 0 healthcheck ports, got %v", healthCheckNodePorts)
  3182  	}
  3183  }
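
        // Why only UDP cluster IPs are tracked as stale: TCP and SCTP connections
        // to a removed service terminate on their own, but UDP conntrack entries
        // linger and keep steering datagrams toward endpoints that no longer exist.
        // The proxier therefore flushes conntrack state for each address in
        // DeletedUDPClusterIPs, conceptually equivalent to (illustrative invocation):
        //
        //	conntrack -D --orig-dst 172.30.55.10 -p udp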
  3184  
  3185  func TestBuildServiceMapServiceHeadless(t *testing.T) {
  3186  	ipt := iptablestest.NewFake()
  3187  	fp := NewFakeProxier(ipt)
  3188  
  3189  	makeServiceMap(fp,
  3190  		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
  3191  			svc.Spec.Type = v1.ServiceTypeClusterIP
  3192  			svc.Spec.ClusterIP = v1.ClusterIPNone
  3193  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
  3194  		}),
  3195  		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
  3196  			svc.Spec.Type = v1.ServiceTypeClusterIP
  3197  			svc.Spec.ClusterIP = v1.ClusterIPNone
  3198  		}),
  3199  	)
  3200  
  3201  	// Headless service should be ignored
  3202  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3203  	if len(fp.svcPortMap) != 0 {
  3204  		t.Errorf("expected service map length 0, got %d", len(fp.svcPortMap))
  3205  	}
  3206  
  3207  	if len(result.DeletedUDPClusterIPs) != 0 {
  3208  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3209  	}
  3210  
  3211  	// No proxied services, so no healthchecks
  3212  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3213  	if len(healthCheckNodePorts) != 0 {
  3214  		t.Errorf("expected healthcheck ports length 0, got %d", len(healthCheckNodePorts))
  3215  	}
  3216  }
  3217  
  3218  func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
  3219  	ipt := iptablestest.NewFake()
  3220  	fp := NewFakeProxier(ipt)
  3221  
  3222  	makeServiceMap(fp,
  3223  		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
  3224  			svc.Spec.Type = v1.ServiceTypeExternalName
  3225  			svc.Spec.ClusterIP = "172.30.55.4" // Should be ignored
  3226  			svc.Spec.ExternalName = "foo2.bar.com"
  3227  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
  3228  		}),
  3229  	)
  3230  
  3231  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3232  	if len(fp.svcPortMap) != 0 {
  3233  		t.Errorf("expected service map length 0, got %v", fp.svcPortMap)
  3234  	}
  3235  	if len(result.DeletedUDPClusterIPs) != 0 {
  3236  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs)
  3237  	}
  3238  	// No proxied services, so no healthchecks
  3239  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3240  	if len(healthCheckNodePorts) != 0 {
  3241  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3242  	}
  3243  }
  3244  
  3245  func TestBuildServiceMapServiceUpdate(t *testing.T) {
  3246  	ipt := iptablestest.NewFake()
  3247  	fp := NewFakeProxier(ipt)
  3248  
  3249  	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  3250  		svc.Spec.Type = v1.ServiceTypeClusterIP
  3251  		svc.Spec.ClusterIP = "172.30.55.4"
  3252  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  3253  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
  3254  	})
  3255  	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  3256  		svc.Spec.Type = v1.ServiceTypeLoadBalancer
  3257  		svc.Spec.ClusterIP = "172.30.55.4"
  3258  		svc.Spec.LoadBalancerIP = "1.2.3.4"
  3259  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
  3260  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
  3261  		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  3262  			Ingress: []v1.LoadBalancerIngress{
  3263  				{IP: "1.2.3.4"},
  3264  			},
  3265  		}
  3266  		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  3267  		svc.Spec.HealthCheckNodePort = 345
  3268  	})
  3269  
  3270  	fp.OnServiceAdd(servicev1)
  3271  
  3272  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3273  	if len(fp.svcPortMap) != 2 {
  3274  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3275  	}
  3276  	if len(result.DeletedUDPClusterIPs) != 0 {
  3277  		// Services only added, so nothing stale yet
  3278  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3279  	}
  3280  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3281  	if len(healthCheckNodePorts) != 0 {
  3282  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3283  	}
  3284  
  3285  	// Change service to load-balancer
  3286  	fp.OnServiceUpdate(servicev1, servicev2)
  3287  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3288  	if len(fp.svcPortMap) != 2 {
  3289  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3290  	}
  3291  	if len(result.DeletedUDPClusterIPs) != 0 {
  3292  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  3293  	}
  3294  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3295  	if len(healthCheckNodePorts) != 1 {
  3296  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  3297  	}
  3298  
  3299  	// No change; make sure the service map stays the same and there are
  3300  	// no health-check changes
  3301  	fp.OnServiceUpdate(servicev2, servicev2)
  3302  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3303  	if len(fp.svcPortMap) != 2 {
  3304  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3305  	}
  3306  	if len(result.DeletedUDPClusterIPs) != 0 {
  3307  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  3308  	}
  3309  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3310  	if len(healthCheckNodePorts) != 1 {
  3311  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  3312  	}
  3313  
  3314  	// And back to ClusterIP
  3315  	fp.OnServiceUpdate(servicev2, servicev1)
  3316  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3317  	if len(fp.svcPortMap) != 2 {
  3318  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3319  	}
  3320  	if len(result.DeletedUDPClusterIPs) != 0 {
  3321  		// The service's ClusterIP and UDP port are unchanged, so nothing is stale
  3322  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3323  	}
  3324  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3325  	if len(healthCheckNodePorts) != 0 {
  3326  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3327  	}
  3328  }
  3329  
  3330  func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
  3331  	for i := range allEndpointSlices {
  3332  		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
  3333  	}
  3334  }
  3335  
  3336  func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
  3337  	eps := &discovery.EndpointSlice{
  3338  		ObjectMeta: metav1.ObjectMeta{
  3339  			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
  3340  			Namespace: namespace,
  3341  			Labels:    map[string]string{discovery.LabelServiceName: name},
  3342  		},
  3343  	}
  3344  	epsFunc(eps)
  3345  	return eps
  3346  }
  3347  
  3348  func makeNSN(namespace, name string) types.NamespacedName {
  3349  	return types.NamespacedName{Namespace: namespace, Name: name}
  3350  }
  3351  
  3352  func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
  3353  	return proxy.ServicePortName{
  3354  		NamespacedName: makeNSN(ns, name),
  3355  		Port:           port,
  3356  		Protocol:       protocol,
  3357  	}
  3358  }
  3359  
  3360  func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
  3361  	for i := range allServices {
  3362  		proxier.OnServiceAdd(allServices[i])
  3363  	}
  3364  
  3365  	proxier.mu.Lock()
  3366  	defer proxier.mu.Unlock()
  3367  	proxier.servicesSynced = true
  3368  }
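
        // Setting servicesSynced under the proxier's mutex stands in for the real
        // OnServiceSynced callback: the proxier only considers itself initialized
        // (and thus actually programs rules in syncProxyRules) once both the service
        // and EndpointSlice informers have reported their initial sync.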
  3369  
  3370  type endpointExpectation struct {
  3371  	endpoint string
  3372  	isLocal  bool
  3373  }
  3374  
  3375  func checkEndpointExpectations(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]endpointExpectation) {
  3376  	if len(newMap) != len(expected) {
  3377  		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
  3378  	}
  3379  	for x := range expected {
  3380  		if len(newMap[x]) != len(expected[x]) {
  3381  			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
  3382  		} else {
  3383  			for i := range expected[x] {
  3384  				newEp := newMap[x][i]
  3385  				if newEp.String() != expected[x][i].endpoint ||
  3386  					newEp.IsLocal() != expected[x][i].isLocal {
  3387  					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
  3388  				}
  3389  			}
  3390  		}
  3391  	}
  3392  }
  3393  
  3394  func TestUpdateEndpointsMap(t *testing.T) {
  3395  	emptyEndpointSlices := []*discovery.EndpointSlice{
  3396  		makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
  3397  	}
  3398  	subset1 := func(eps *discovery.EndpointSlice) {
  3399  		eps.AddressType = discovery.AddressTypeIPv4
  3400  		eps.Endpoints = []discovery.Endpoint{{
  3401  			Addresses: []string{"10.1.1.1"},
  3402  		}}
  3403  		eps.Ports = []discovery.EndpointPort{{
  3404  			Name:     ptr.To("p11"),
  3405  			Port:     ptr.To[int32](11),
  3406  			Protocol: ptr.To(v1.ProtocolUDP),
  3407  		}}
  3408  	}
  3409  	subset2 := func(eps *discovery.EndpointSlice) {
  3410  		eps.AddressType = discovery.AddressTypeIPv4
  3411  		eps.Endpoints = []discovery.Endpoint{{
  3412  			Addresses: []string{"10.1.1.2"},
  3413  		}}
  3414  		eps.Ports = []discovery.EndpointPort{{
  3415  			Name:     ptr.To("p12"),
  3416  			Port:     ptr.To[int32](12),
  3417  			Protocol: ptr.To(v1.ProtocolUDP),
  3418  		}}
  3419  	}
  3420  	namedPortLocal := []*discovery.EndpointSlice{
  3421  		makeTestEndpointSlice("ns1", "ep1", 1,
  3422  			func(eps *discovery.EndpointSlice) {
  3423  				eps.AddressType = discovery.AddressTypeIPv4
  3424  				eps.Endpoints = []discovery.Endpoint{{
  3425  					Addresses: []string{"10.1.1.1"},
  3426  					NodeName:  ptr.To(testHostname),
  3427  				}}
  3428  				eps.Ports = []discovery.EndpointPort{{
  3429  					Name:     ptr.To("p11"),
  3430  					Port:     ptr.To[int32](11),
  3431  					Protocol: ptr.To(v1.ProtocolUDP),
  3432  				}}
  3433  			}),
  3434  	}
  3435  	namedPort := []*discovery.EndpointSlice{
  3436  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3437  	}
  3438  	namedPortRenamed := []*discovery.EndpointSlice{
  3439  		makeTestEndpointSlice("ns1", "ep1", 1,
  3440  			func(eps *discovery.EndpointSlice) {
  3441  				eps.AddressType = discovery.AddressTypeIPv4
  3442  				eps.Endpoints = []discovery.Endpoint{{
  3443  					Addresses: []string{"10.1.1.1"},
  3444  				}}
  3445  				eps.Ports = []discovery.EndpointPort{{
  3446  					Name:     ptr.To("p11-2"),
  3447  					Port:     ptr.To[int32](11),
  3448  					Protocol: ptr.To(v1.ProtocolUDP),
  3449  				}}
  3450  			}),
  3451  	}
  3452  	namedPortRenumbered := []*discovery.EndpointSlice{
  3453  		makeTestEndpointSlice("ns1", "ep1", 1,
  3454  			func(eps *discovery.EndpointSlice) {
  3455  				eps.AddressType = discovery.AddressTypeIPv4
  3456  				eps.Endpoints = []discovery.Endpoint{{
  3457  					Addresses: []string{"10.1.1.1"},
  3458  				}}
  3459  				eps.Ports = []discovery.EndpointPort{{
  3460  					Name:     ptr.To("p11"),
  3461  					Port:     ptr.To[int32](22),
  3462  					Protocol: ptr.To(v1.ProtocolUDP),
  3463  				}}
  3464  			}),
  3465  	}
  3466  	namedPortsLocalNoLocal := []*discovery.EndpointSlice{
  3467  		makeTestEndpointSlice("ns1", "ep1", 1,
  3468  			func(eps *discovery.EndpointSlice) {
  3469  				eps.AddressType = discovery.AddressTypeIPv4
  3470  				eps.Endpoints = []discovery.Endpoint{{
  3471  					Addresses: []string{"10.1.1.1"},
  3472  				}, {
  3473  					Addresses: []string{"10.1.1.2"},
  3474  					NodeName:  ptr.To(testHostname),
  3475  				}}
  3476  				eps.Ports = []discovery.EndpointPort{{
  3477  					Name:     ptr.To("p11"),
  3478  					Port:     ptr.To[int32](11),
  3479  					Protocol: ptr.To(v1.ProtocolUDP),
  3480  				}, {
  3481  					Name:     ptr.To("p12"),
  3482  					Port:     ptr.To[int32](12),
  3483  					Protocol: ptr.To(v1.ProtocolUDP),
  3484  				}}
  3485  			}),
  3486  	}
  3487  	multipleSubsets := []*discovery.EndpointSlice{
  3488  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3489  		makeTestEndpointSlice("ns1", "ep1", 2, subset2),
  3490  	}
  3491  	subsetLocal := func(eps *discovery.EndpointSlice) {
  3492  		eps.AddressType = discovery.AddressTypeIPv4
  3493  		eps.Endpoints = []discovery.Endpoint{{
  3494  			Addresses: []string{"10.1.1.2"},
  3495  			NodeName:  ptr.To(testHostname),
  3496  		}}
  3497  		eps.Ports = []discovery.EndpointPort{{
  3498  			Name:     ptr.To("p12"),
  3499  			Port:     ptr.To[int32](12),
  3500  			Protocol: ptr.To(v1.ProtocolUDP),
  3501  		}}
  3502  	}
  3503  	multipleSubsetsWithLocal := []*discovery.EndpointSlice{
  3504  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3505  		makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
  3506  	}
  3507  	subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
  3508  		eps.AddressType = discovery.AddressTypeIPv4
  3509  		eps.Endpoints = []discovery.Endpoint{{
  3510  			Addresses: []string{"10.1.1.1"},
  3511  			NodeName:  ptr.To(testHostname),
  3512  		}}
  3513  		eps.Ports = []discovery.EndpointPort{{
  3514  			Name:     ptr.To("p11"),
  3515  			Port:     ptr.To[int32](11),
  3516  			Protocol: ptr.To(v1.ProtocolUDP),
  3517  		}, {
  3518  			Name:     ptr.To("p12"),
  3519  			Port:     ptr.To[int32](12),
  3520  			Protocol: ptr.To(v1.ProtocolUDP),
  3521  		}}
  3522  	}
  3523  	subset3 := func(eps *discovery.EndpointSlice) {
  3524  		eps.AddressType = discovery.AddressTypeIPv4
  3525  		eps.Endpoints = []discovery.Endpoint{{
  3526  			Addresses: []string{"10.1.1.3"},
  3527  		}}
  3528  		eps.Ports = []discovery.EndpointPort{{
  3529  			Name:     ptr.To("p13"),
  3530  			Port:     ptr.To[int32](13),
  3531  			Protocol: ptr.To(v1.ProtocolUDP),
  3532  		}}
  3533  	}
  3534  	multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
  3535  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
  3536  		makeTestEndpointSlice("ns1", "ep1", 2, subset3),
  3537  	}
  3538  	subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
  3539  		eps.AddressType = discovery.AddressTypeIPv4
  3540  		eps.Endpoints = []discovery.Endpoint{{
  3541  			Addresses: []string{"10.1.1.1"},
  3542  		}, {
  3543  			Addresses: []string{"10.1.1.2"},
  3544  			NodeName:  ptr.To(testHostname),
  3545  		}}
  3546  		eps.Ports = []discovery.EndpointPort{{
  3547  			Name:     ptr.To("p11"),
  3548  			Port:     ptr.To[int32](11),
  3549  			Protocol: ptr.To(v1.ProtocolUDP),
  3550  		}, {
  3551  			Name:     ptr.To("p12"),
  3552  			Port:     ptr.To[int32](12),
  3553  			Protocol: ptr.To(v1.ProtocolUDP),
  3554  		}}
  3555  	}
  3556  	subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
  3557  		eps.AddressType = discovery.AddressTypeIPv4
  3558  		eps.Endpoints = []discovery.Endpoint{{
  3559  			Addresses: []string{"10.1.1.3"},
  3560  		}, {
  3561  			Addresses: []string{"10.1.1.4"},
  3562  			NodeName:  ptr.To(testHostname),
  3563  		}}
  3564  		eps.Ports = []discovery.EndpointPort{{
  3565  			Name:     ptr.To("p13"),
  3566  			Port:     ptr.To[int32](13),
  3567  			Protocol: ptr.To(v1.ProtocolUDP),
  3568  		}, {
  3569  			Name:     ptr.To("p14"),
  3570  			Port:     ptr.To[int32](14),
  3571  			Protocol: ptr.To(v1.ProtocolUDP),
  3572  		}}
  3573  	}
  3574  	subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
  3575  		eps.AddressType = discovery.AddressTypeIPv4
  3576  		eps.Endpoints = []discovery.Endpoint{{
  3577  			Addresses: []string{"10.2.2.1"},
  3578  		}, {
  3579  			Addresses: []string{"10.2.2.2"},
  3580  			NodeName:  ptr.To(testHostname),
  3581  		}}
  3582  		eps.Ports = []discovery.EndpointPort{{
  3583  			Name:     ptr.To("p21"),
  3584  			Port:     ptr.To[int32](21),
  3585  			Protocol: ptr.To(v1.ProtocolUDP),
  3586  		}, {
  3587  			Name:     ptr.To("p22"),
  3588  			Port:     ptr.To[int32](22),
  3589  			Protocol: ptr.To(v1.ProtocolUDP),
  3590  		}}
  3591  	}
  3592  	multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
  3593  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
  3594  		makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
  3595  		makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
  3596  	}
  3597  	complexSubset1 := func(eps *discovery.EndpointSlice) {
  3598  		eps.AddressType = discovery.AddressTypeIPv4
  3599  		eps.Endpoints = []discovery.Endpoint{{
  3600  			Addresses: []string{"10.2.2.2"},
  3601  			NodeName:  ptr.To(testHostname),
  3602  		}, {
  3603  			Addresses: []string{"10.2.2.22"},
  3604  			NodeName:  ptr.To(testHostname),
  3605  		}}
  3606  		eps.Ports = []discovery.EndpointPort{{
  3607  			Name:     ptr.To("p22"),
  3608  			Port:     ptr.To[int32](22),
  3609  			Protocol: ptr.To(v1.ProtocolUDP),
  3610  		}}
  3611  	}
  3612  	complexSubset2 := func(eps *discovery.EndpointSlice) {
  3613  		eps.AddressType = discovery.AddressTypeIPv4
  3614  		eps.Endpoints = []discovery.Endpoint{{
  3615  			Addresses: []string{"10.2.2.3"},
  3616  			NodeName:  ptr.To(testHostname),
  3617  		}}
  3618  		eps.Ports = []discovery.EndpointPort{{
  3619  			Name:     ptr.To("p23"),
  3620  			Port:     ptr.To[int32](23),
  3621  			Protocol: ptr.To(v1.ProtocolUDP),
  3622  		}}
  3623  	}
  3624  	complexSubset3 := func(eps *discovery.EndpointSlice) {
  3625  		eps.AddressType = discovery.AddressTypeIPv4
  3626  		eps.Endpoints = []discovery.Endpoint{{
  3627  			Addresses: []string{"10.4.4.4"},
  3628  			NodeName:  ptr.To(testHostname),
  3629  		}, {
  3630  			Addresses: []string{"10.4.4.5"},
  3631  			NodeName:  ptr.To(testHostname),
  3632  		}}
  3633  		eps.Ports = []discovery.EndpointPort{{
  3634  			Name:     ptr.To("p44"),
  3635  			Port:     ptr.To[int32](44),
  3636  			Protocol: ptr.To(v1.ProtocolUDP),
  3637  		}}
  3638  	}
  3639  	complexSubset4 := func(eps *discovery.EndpointSlice) {
  3640  		eps.AddressType = discovery.AddressTypeIPv4
  3641  		eps.Endpoints = []discovery.Endpoint{{
  3642  			Addresses: []string{"10.4.4.6"},
  3643  			NodeName:  ptr.To(testHostname),
  3644  		}}
  3645  		eps.Ports = []discovery.EndpointPort{{
  3646  			Name:     ptr.To("p45"),
  3647  			Port:     ptr.To[int32](45),
  3648  			Protocol: ptr.To(v1.ProtocolUDP),
  3649  		}}
  3650  	}
  3651  	complexSubset5 := func(eps *discovery.EndpointSlice) {
  3652  		eps.AddressType = discovery.AddressTypeIPv4
  3653  		eps.Endpoints = []discovery.Endpoint{{
  3654  			Addresses: []string{"10.1.1.1"},
  3655  		}, {
  3656  			Addresses: []string{"10.1.1.11"},
  3657  		}}
  3658  		eps.Ports = []discovery.EndpointPort{{
  3659  			Name:     ptr.To("p11"),
  3660  			Port:     ptr.To[int32](11),
  3661  			Protocol: ptr.To(v1.ProtocolUDP),
  3662  		}}
  3663  	}
  3664  	complexSubset6 := func(eps *discovery.EndpointSlice) {
  3665  		eps.AddressType = discovery.AddressTypeIPv4
  3666  		eps.Endpoints = []discovery.Endpoint{{
  3667  			Addresses: []string{"10.1.1.2"},
  3668  		}}
  3669  		eps.Ports = []discovery.EndpointPort{{
  3670  			Name:     ptr.To("p12"),
  3671  			Port:     ptr.To[int32](12),
  3672  			Protocol: ptr.To(v1.ProtocolUDP),
  3673  		}, {
  3674  			Name:     ptr.To("p122"),
  3675  			Port:     ptr.To[int32](122),
  3676  			Protocol: ptr.To(v1.ProtocolUDP),
  3677  		}}
  3678  	}
  3679  	complexSubset7 := func(eps *discovery.EndpointSlice) {
  3680  		eps.AddressType = discovery.AddressTypeIPv4
  3681  		eps.Endpoints = []discovery.Endpoint{{
  3682  			Addresses: []string{"10.3.3.3"},
  3683  		}}
  3684  		eps.Ports = []discovery.EndpointPort{{
  3685  			Name:     ptr.To("p33"),
  3686  			Port:     ptr.To[int32](33),
  3687  			Protocol: ptr.To(v1.ProtocolUDP),
  3688  		}}
  3689  	}
  3690  	complexSubset8 := func(eps *discovery.EndpointSlice) {
  3691  		eps.AddressType = discovery.AddressTypeIPv4
  3692  		eps.Endpoints = []discovery.Endpoint{{
  3693  			Addresses: []string{"10.4.4.4"},
  3694  			NodeName:  ptr.To(testHostname),
  3695  		}}
  3696  		eps.Ports = []discovery.EndpointPort{{
  3697  			Name:     ptr.To("p44"),
  3698  			Port:     ptr.To[int32](44),
  3699  			Protocol: ptr.To(v1.ProtocolUDP),
  3700  		}}
  3701  	}
  3702  	complexBefore := []*discovery.EndpointSlice{
  3703  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3704  		nil,
  3705  		makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
  3706  		makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
  3707  		nil,
  3708  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
  3709  		makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
  3710  	}
  3711  	complexAfter := []*discovery.EndpointSlice{
  3712  		makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
  3713  		makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
  3714  		nil,
  3715  		nil,
  3716  		makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
  3717  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
  3718  		nil,
  3719  	}
  3720  
  3721  	testCases := []struct {
  3722  		// previousEndpoints and currentEndpoints are used to call the appropriate
  3723  		// OnEndpointSlice* handlers (based on whether the corresponding values are
  3724  		// nil or non-nil) and must be of equal length.
  3725  		name                           string
  3726  		previousEndpoints              []*discovery.EndpointSlice
  3727  		currentEndpoints               []*discovery.EndpointSlice
  3728  		oldEndpoints                   map[proxy.ServicePortName][]endpointExpectation
  3729  		expectedResult                 map[proxy.ServicePortName][]endpointExpectation
  3730  		expectedDeletedUDPEndpoints    []proxy.ServiceEndpoint
  3731  		expectedNewlyActiveUDPServices map[proxy.ServicePortName]bool
  3732  		expectedLocalEndpoints         map[types.NamespacedName]int
  3733  	}{{
  3734  		// Case[0]: nothing
  3735  		name:                           "nothing",
  3736  		oldEndpoints:                   map[proxy.ServicePortName][]endpointExpectation{},
  3737  		expectedResult:                 map[proxy.ServicePortName][]endpointExpectation{},
  3738  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3739  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3740  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3741  	}, {
  3742  		// Case[1]: no change, named port, local
  3743  		name:              "no change, named port, local",
  3744  		previousEndpoints: namedPortLocal,
  3745  		currentEndpoints:  namedPortLocal,
  3746  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3747  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3748  				{endpoint: "10.1.1.1:11", isLocal: true},
  3749  			},
  3750  		},
  3751  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3752  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3753  				{endpoint: "10.1.1.1:11", isLocal: true},
  3754  			},
  3755  		},
  3756  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3757  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3758  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3759  			makeNSN("ns1", "ep1"): 1,
  3760  		},
  3761  	}, {
  3762  		// Case[2]: no change, multiple subsets
  3763  		name:              "no change, multiple subsets",
  3764  		previousEndpoints: multipleSubsets,
  3765  		currentEndpoints:  multipleSubsets,
  3766  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3767  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3768  				{endpoint: "10.1.1.1:11", isLocal: false},
  3769  			},
  3770  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3771  				{endpoint: "10.1.1.2:12", isLocal: false},
  3772  			},
  3773  		},
  3774  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3775  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3776  				{endpoint: "10.1.1.1:11", isLocal: false},
  3777  			},
  3778  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3779  				{endpoint: "10.1.1.2:12", isLocal: false},
  3780  			},
  3781  		},
  3782  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3783  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3784  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3785  	}, {
  3786  		// Case[3]: no change, multiple subsets, multiple ports, local
  3787  		name:              "no change, multiple subsets, multiple ports, local",
  3788  		previousEndpoints: multipleSubsetsMultiplePortsLocal,
  3789  		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
  3790  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3791  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3792  				{endpoint: "10.1.1.1:11", isLocal: true},
  3793  			},
  3794  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3795  				{endpoint: "10.1.1.1:12", isLocal: true},
  3796  			},
  3797  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3798  				{endpoint: "10.1.1.3:13", isLocal: false},
  3799  			},
  3800  		},
  3801  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3802  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3803  				{endpoint: "10.1.1.1:11", isLocal: true},
  3804  			},
  3805  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3806  				{endpoint: "10.1.1.1:12", isLocal: true},
  3807  			},
  3808  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3809  				{endpoint: "10.1.1.3:13", isLocal: false},
  3810  			},
  3811  		},
  3812  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3813  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3814  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3815  			makeNSN("ns1", "ep1"): 1,
  3816  		},
  3817  	}, {
  3818  		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
  3819  		name:              "no change, multiple endpoints, subsets, IPs, and ports",
  3820  		previousEndpoints: multipleSubsetsIPsPorts,
  3821  		currentEndpoints:  multipleSubsetsIPsPorts,
  3822  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3823  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3824  				{endpoint: "10.1.1.1:11", isLocal: false},
  3825  				{endpoint: "10.1.1.2:11", isLocal: true},
  3826  			},
  3827  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3828  				{endpoint: "10.1.1.1:12", isLocal: false},
  3829  				{endpoint: "10.1.1.2:12", isLocal: true},
  3830  			},
  3831  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3832  				{endpoint: "10.1.1.3:13", isLocal: false},
  3833  				{endpoint: "10.1.1.4:13", isLocal: true},
  3834  			},
  3835  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  3836  				{endpoint: "10.1.1.3:14", isLocal: false},
  3837  				{endpoint: "10.1.1.4:14", isLocal: true},
  3838  			},
  3839  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  3840  				{endpoint: "10.2.2.1:21", isLocal: false},
  3841  				{endpoint: "10.2.2.2:21", isLocal: true},
  3842  			},
  3843  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3844  				{endpoint: "10.2.2.1:22", isLocal: false},
  3845  				{endpoint: "10.2.2.2:22", isLocal: true},
  3846  			},
  3847  		},
  3848  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3849  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3850  				{endpoint: "10.1.1.1:11", isLocal: false},
  3851  				{endpoint: "10.1.1.2:11", isLocal: true},
  3852  			},
  3853  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3854  				{endpoint: "10.1.1.1:12", isLocal: false},
  3855  				{endpoint: "10.1.1.2:12", isLocal: true},
  3856  			},
  3857  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3858  				{endpoint: "10.1.1.3:13", isLocal: false},
  3859  				{endpoint: "10.1.1.4:13", isLocal: true},
  3860  			},
  3861  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  3862  				{endpoint: "10.1.1.3:14", isLocal: false},
  3863  				{endpoint: "10.1.1.4:14", isLocal: true},
  3864  			},
  3865  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  3866  				{endpoint: "10.2.2.1:21", isLocal: false},
  3867  				{endpoint: "10.2.2.2:21", isLocal: true},
  3868  			},
  3869  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3870  				{endpoint: "10.2.2.1:22", isLocal: false},
  3871  				{endpoint: "10.2.2.2:22", isLocal: true},
  3872  			},
  3873  		},
  3874  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3875  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3876  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3877  			makeNSN("ns1", "ep1"): 2,
  3878  			makeNSN("ns2", "ep2"): 1,
  3879  		},
  3880  	}, {
  3881  		// Case[5]: add an Endpoints
  3882  		name:              "add an Endpoints",
  3883  		previousEndpoints: []*discovery.EndpointSlice{nil},
  3884  		currentEndpoints:  namedPortLocal,
  3885  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  3886  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3887  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3888  				{endpoint: "10.1.1.1:11", isLocal: true},
  3889  			},
  3890  		},
  3891  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3892  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3893  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  3894  		},
  3895  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3896  			makeNSN("ns1", "ep1"): 1,
  3897  		},
  3898  	}, {
  3899  		// Case[6]: remove an Endpoints
  3900  		name:              "remove an Endpoints",
  3901  		previousEndpoints: namedPortLocal,
  3902  		currentEndpoints:  []*discovery.EndpointSlice{nil},
  3903  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3904  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3905  				{endpoint: "10.1.1.1:11", isLocal: true},
  3906  			},
  3907  		},
  3908  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{},
  3909  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3910  			Endpoint:        "10.1.1.1:11",
  3911  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3912  		}},
  3913  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3914  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3915  	}, {
  3916  		// Case[7]: add an IP and port
  3917  		name:              "add an IP and port",
  3918  		previousEndpoints: namedPort,
  3919  		currentEndpoints:  namedPortsLocalNoLocal,
  3920  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3921  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3922  				{endpoint: "10.1.1.1:11", isLocal: false},
  3923  			},
  3924  		},
  3925  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3926  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3927  				{endpoint: "10.1.1.1:11", isLocal: false},
  3928  				{endpoint: "10.1.1.2:11", isLocal: true},
  3929  			},
  3930  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3931  				{endpoint: "10.1.1.1:12", isLocal: false},
  3932  				{endpoint: "10.1.1.2:12", isLocal: true},
  3933  			},
  3934  		},
  3935  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3936  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3937  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  3938  		},
  3939  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3940  			makeNSN("ns1", "ep1"): 1,
  3941  		},
  3942  	}, {
  3943  		// Case[8]: remove an IP and port
  3944  		name:              "remove an IP and port",
  3945  		previousEndpoints: namedPortsLocalNoLocal,
  3946  		currentEndpoints:  namedPort,
  3947  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3948  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3949  				{endpoint: "10.1.1.1:11", isLocal: false},
  3950  				{endpoint: "10.1.1.2:11", isLocal: true},
  3951  			},
  3952  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3953  				{endpoint: "10.1.1.1:12", isLocal: false},
  3954  				{endpoint: "10.1.1.2:12", isLocal: true},
  3955  			},
  3956  		},
  3957  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3958  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3959  				{endpoint: "10.1.1.1:11", isLocal: false},
  3960  			},
  3961  		},
  3962  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3963  			Endpoint:        "10.1.1.2:11",
  3964  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3965  		}, {
  3966  			Endpoint:        "10.1.1.1:12",
  3967  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3968  		}, {
  3969  			Endpoint:        "10.1.1.2:12",
  3970  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3971  		}},
  3972  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3973  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3974  	}, {
  3975  		// Case[9]: add a subset
  3976  		name:              "add a subset",
  3977  		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
  3978  		currentEndpoints:  multipleSubsetsWithLocal,
  3979  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3980  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3981  				{endpoint: "10.1.1.1:11", isLocal: false},
  3982  			},
  3983  		},
  3984  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3985  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3986  				{endpoint: "10.1.1.1:11", isLocal: false},
  3987  			},
  3988  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3989  				{endpoint: "10.1.1.2:12", isLocal: true},
  3990  			},
  3991  		},
  3992  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3993  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3994  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  3995  		},
  3996  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3997  			makeNSN("ns1", "ep1"): 1,
  3998  		},
  3999  	}, {
  4000  		// Case[10]: remove a subset
  4001  		name:              "remove a subset",
  4002  		previousEndpoints: multipleSubsets,
  4003  		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
  4004  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  4005  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4006  				{endpoint: "10.1.1.1:11", isLocal: false},
  4007  			},
  4008  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  4009  				{endpoint: "10.1.1.2:12", isLocal: false},
  4010  			},
  4011  		},
  4012  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  4013  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4014  				{endpoint: "10.1.1.1:11", isLocal: false},
  4015  			},
  4016  		},
  4017  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  4018  			Endpoint:        "10.1.1.2:12",
  4019  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  4020  		}},
  4021  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  4022  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  4023  	}, {
  4024  		// Case[11]: rename a port
  4025  		name:              "rename a port",
  4026  		previousEndpoints: namedPort,
  4027  		currentEndpoints:  namedPortRenamed,
  4028  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  4029  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4030  				{endpoint: "10.1.1.1:11", isLocal: false},
  4031  			},
  4032  		},
  4033  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  4034  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
  4035  				{endpoint: "10.1.1.1:11", isLocal: false},
  4036  			},
  4037  		},
  4038  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  4039  			Endpoint:        "10.1.1.1:11",
  4040  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  4041  		}},
  4042  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  4043  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
  4044  		},
  4045  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  4046  	}, {
  4047  		// Case[12]: renumber a port
  4048  		name:              "renumber a port",
  4049  		previousEndpoints: namedPort,
  4050  		currentEndpoints:  namedPortRenumbered,
  4051  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  4052  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4053  				{endpoint: "10.1.1.1:11", isLocal: false},
  4054  			},
  4055  		},
  4056  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  4057  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4058  				{endpoint: "10.1.1.1:22", isLocal: false},
  4059  			},
  4060  		},
  4061  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  4062  			Endpoint:        "10.1.1.1:11",
  4063  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  4064  		}},
  4065  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  4066  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  4067  	}, {
  4068  		// Case[13]: complex add and remove
  4069  		name:              "complex add and remove",
  4070  		previousEndpoints: complexBefore,
  4071  		currentEndpoints:  complexAfter,
  4072  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  4073  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4074  				{endpoint: "10.1.1.1:11", isLocal: false},
  4075  			},
  4076  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  4077  				{endpoint: "10.2.2.22:22", isLocal: true},
  4078  				{endpoint: "10.2.2.2:22", isLocal: true},
  4079  			},
  4080  			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
  4081  				{endpoint: "10.2.2.3:23", isLocal: true},
  4082  			},
  4083  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  4084  				{endpoint: "10.4.4.4:44", isLocal: true},
  4085  				{endpoint: "10.4.4.5:44", isLocal: true},
  4086  			},
  4087  			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
  4088  				{endpoint: "10.4.4.6:45", isLocal: true},
  4089  			},
  4090  		},
  4091  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  4092  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4093  				{endpoint: "10.1.1.11:11", isLocal: false},
  4094  				{endpoint: "10.1.1.1:11", isLocal: false},
  4095  			},
  4096  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  4097  				{endpoint: "10.1.1.2:12", isLocal: false},
  4098  			},
  4099  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
  4100  				{endpoint: "10.1.1.2:122", isLocal: false},
  4101  			},
  4102  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
  4103  				{endpoint: "10.3.3.3:33", isLocal: false},
  4104  			},
  4105  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  4106  				{endpoint: "10.4.4.4:44", isLocal: true},
  4107  			},
  4108  		},
  4109  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  4110  			Endpoint:        "10.2.2.2:22",
  4111  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  4112  		}, {
  4113  			Endpoint:        "10.2.2.22:22",
  4114  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  4115  		}, {
  4116  			Endpoint:        "10.2.2.3:23",
  4117  			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
  4118  		}, {
  4119  			Endpoint:        "10.4.4.5:44",
  4120  			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
  4121  		}, {
  4122  			Endpoint:        "10.4.4.6:45",
  4123  			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
  4124  		}},
  4125  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  4126  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
  4127  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
  4128  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
  4129  		},
  4130  		expectedLocalEndpoints: map[types.NamespacedName]int{
  4131  			makeNSN("ns4", "ep4"): 1,
  4132  		},
  4133  	}, {
  4134  		// Case[14]: change from 0 endpoint addresses to 1 named port
  4135  		name:              "change from 0 endpoint addresses to 1 named port",
  4136  		previousEndpoints: emptyEndpointSlices,
  4137  		currentEndpoints:  namedPort,
  4138  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  4139  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  4140  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  4141  				{endpoint: "10.1.1.1:11", isLocal: false},
  4142  			},
  4143  		},
  4144  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  4145  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  4146  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  4147  		},
  4148  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  4149  	},
  4150  	}
  4151  
  4152  	for tci, tc := range testCases {
  4153  		t.Run(tc.name, func(t *testing.T) {
  4154  			ipt := iptablestest.NewFake()
  4155  			fp := NewFakeProxier(ipt)
  4156  			fp.hostname = testHostname
  4157  
  4158  			// First check that after adding all previous versions of endpoints,
  4159  			// the fp.oldEndpoints is as we expect.
  4160  			for i := range tc.previousEndpoints {
  4161  				if tc.previousEndpoints[i] != nil {
  4162  					fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
  4163  				}
  4164  			}
  4165  			fp.endpointsMap.Update(fp.endpointsChanges)
  4166  			checkEndpointExpectations(t, tci, fp.endpointsMap, tc.oldEndpoints)
  4167  
  4168  			// Now let's call appropriate handlers to get to state we want to be.
  4169  			if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
  4170  				t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
  4171  			}
  4172  
  4173  			for i := range tc.previousEndpoints {
  4174  				prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
  4175  				switch {
  4176  				case prev == nil:
  4177  					fp.OnEndpointSliceAdd(curr)
  4178  				case curr == nil:
  4179  					fp.OnEndpointSliceDelete(prev)
  4180  				default:
  4181  					fp.OnEndpointSliceUpdate(prev, curr)
  4182  				}
  4183  			}
  4184  			result := fp.endpointsMap.Update(fp.endpointsChanges)
  4185  			newMap := fp.endpointsMap
  4186  			checkEndpointExpectations(t, tci, newMap, tc.expectedResult)
  4187  			if len(result.DeletedUDPEndpoints) != len(tc.expectedDeletedUDPEndpoints) {
  4188  				t.Errorf("[%d] expected %d deleted UDP endpoints, got %d: %v", tci, len(tc.expectedDeletedUDPEndpoints), len(result.DeletedUDPEndpoints), result.DeletedUDPEndpoints)
  4189  			}
  4190  			for _, x := range tc.expectedDeletedUDPEndpoints {
  4191  				found := false
  4192  				for _, deleted := range result.DeletedUDPEndpoints {
  4193  					if deleted == x {
  4194  						found = true
  4195  						break
  4196  					}
  4197  				}
  4198  				if !found {
  4199  					t.Errorf("[%d] expected deleted UDP endpoint %v, but didn't find it: %v", tci, x, result.DeletedUDPEndpoints)
  4200  				}
  4201  			}
  4202  			if len(result.NewlyActiveUDPServices) != len(tc.expectedNewlyActiveUDPServices) {
  4203  				t.Errorf("[%d] expected %d newly-active UDP services, got %d: %v", tci, len(tc.expectedNewlyActiveUDPServices), len(result.NewlyActiveUDPServices), result.NewlyActiveUDPServices)
  4204  			}
  4205  			for svcName := range tc.expectedNewlyActiveUDPServices {
  4206  				found := false
  4207  				for _, active := range result.NewlyActiveUDPServices {
  4208  					if active == svcName {
  4209  						found = true
  4210  					}
  4211  				}
  4212  				if !found {
  4213  					t.Errorf("[%d] expected newly-active UDP service %v, but didn't find it: %v", tci, svcName, result.NewlyActiveUDPServices)
  4214  				}
  4215  			}
  4216  			localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  4217  			if !reflect.DeepEqual(localReadyEndpoints, tc.expectedLocalEndpoints) {
  4218  				t.Errorf("[%d] expected local endpoints %v, got %v", tci, tc.expectedLocalEndpoints, localReadyEndpoints)
  4219  			}
  4220  		})
  4221  	}
  4222  }
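
        // The prev/curr switch inside the loop above is the heart of this
        // table-driven test: a nil previous slice means Add, a nil current
        // slice means Delete, and anything else is an Update. As a minimal
        // illustrative sketch (a hypothetical helper, not used by the test
        // above, assuming fp is the *Proxier returned by NewFakeProxier),
        // applying one previous/current pair to a proxier would look like this:
        func applyEndpointSliceChange(fp *Proxier, prev, curr *discovery.EndpointSlice) {
        	switch {
        	case prev == nil && curr == nil:
        		// empty slot in the test table; nothing to apply
        	case prev == nil:
        		fp.OnEndpointSliceAdd(curr) // slice newly created
        	case curr == nil:
        		fp.OnEndpointSliceDelete(prev) // slice removed
        	default:
        		fp.OnEndpointSliceUpdate(prev, curr) // slice modified in place
        	}
        }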
  4223  
  4224  // TestHealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
  4225  func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
  4226  	ipt := iptablestest.NewFake()
  4227  	fp := NewFakeProxier(ipt)
  4228  	fp.OnServiceSynced()
  4229  	fp.OnEndpointSlicesSynced()
  4230  
  4231  	serviceName := "svc1"
  4232  	namespaceName := "ns1"
  4233  
  4234  	fp.OnServiceAdd(&v1.Service{
  4235  		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4236  		Spec: v1.ServiceSpec{
  4237  			ClusterIP: "172.30.1.1",
  4238  			Selector:  map[string]string{"foo": "bar"},
  4239  			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP}},
  4240  		},
  4241  	})
  4242  
  4243  	endpointSlice := &discovery.EndpointSlice{
  4244  		ObjectMeta: metav1.ObjectMeta{
  4245  			Name:      fmt.Sprintf("%s-1", serviceName),
  4246  			Namespace: namespaceName,
  4247  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4248  		},
  4249  		Ports: []discovery.EndpointPort{{
  4250  			Name:     ptr.To(""),
  4251  			Port:     ptr.To[int32](80),
  4252  			Protocol: ptr.To(v1.ProtocolTCP),
  4253  		}},
  4254  		AddressType: discovery.AddressTypeIPv4,
  4255  		Endpoints: []discovery.Endpoint{{
  4256  			Addresses:  []string{"10.0.1.1"},
  4257  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4258  			NodeName:   ptr.To(testHostname),
  4259  		}, {
  4260  			Addresses:  []string{"10.0.1.2"},
  4261  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4262  			NodeName:   ptr.To(testHostname),
  4263  		}, {
  4264  			Addresses:  []string{"10.0.1.3"},
  4265  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4266  			NodeName:   ptr.To(testHostname),
  4267  		}, { // not ready endpoints should be ignored
  4268  			Addresses:  []string{"10.0.1.4"},
  4269  			Conditions: discovery.EndpointConditions{Ready: ptr.To(false)},
  4270  			NodeName:   ptr.To(testHostname),
  4271  		}},
  4272  	}
  4273  
  4274  	fp.OnEndpointSliceAdd(endpointSlice)
  4275  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  4276  	localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  4277  	if len(localReadyEndpoints) != 1 {
  4278  		t.Errorf("unexpected number of local ready endpoints, expected 1 but got: %d", len(localReadyEndpoints))
  4279  	}
  4280  
  4281  	// update the slice so that no endpoint is ready (all but the first are terminating)
  4282  	endpointSliceTerminating := &discovery.EndpointSlice{
  4283  		ObjectMeta: metav1.ObjectMeta{
  4284  			Name:      fmt.Sprintf("%s-1", serviceName),
  4285  			Namespace: namespaceName,
  4286  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4287  		},
  4288  		Ports: []discovery.EndpointPort{{
  4289  			Name:     ptr.To(""),
  4290  			Port:     ptr.To[int32](80),
  4291  			Protocol: ptr.To(v1.ProtocolTCP),
  4292  		}},
  4293  		AddressType: discovery.AddressTypeIPv4,
  4294  		Endpoints: []discovery.Endpoint{{
  4295  			Addresses: []string{"10.0.1.1"},
  4296  			Conditions: discovery.EndpointConditions{
  4297  				Ready:       ptr.To(false),
  4298  				Serving:     ptr.To(true),
  4299  				Terminating: ptr.To(false),
  4300  			},
  4301  			NodeName: ptr.To(testHostname),
  4302  		}, {
  4303  			Addresses: []string{"10.0.1.2"},
  4304  			Conditions: discovery.EndpointConditions{
  4305  				Ready:       ptr.To(false),
  4306  				Serving:     ptr.To(true),
  4307  				Terminating: ptr.To(true),
  4308  			},
  4309  			NodeName: ptr.To(testHostname),
  4310  		}, {
  4311  			Addresses: []string{"10.0.1.3"},
  4312  			Conditions: discovery.EndpointConditions{
  4313  				Ready:       ptr.To(false),
  4314  				Serving:     ptr.To(true),
  4315  				Terminating: ptr.To(true),
  4316  			},
  4317  			NodeName: ptr.To(testHostname),
  4318  		}, { // endpoints that are neither ready nor serving should be ignored
  4319  			Addresses: []string{"10.0.1.4"},
  4320  			Conditions: discovery.EndpointConditions{
  4321  				Ready:       ptr.To(false),
  4322  				Serving:     ptr.To(false),
  4323  				Terminating: ptr.To(true),
  4324  			},
  4325  			NodeName: ptr.To(testHostname),
  4326  		}},
  4327  	}
  4328  
  4329  	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
  4330  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  4331  	localReadyEndpoints = fp.endpointsMap.LocalReadyEndpoints()
  4332  	if len(localReadyEndpoints) != 0 {
  4333  		t.Errorf("unexpected number of local ready endpoints, expected 0 but got: %d", len(localReadyEndpoints))
  4334  	}
  4335  }
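
        // The assertions above hinge on LocalReadyEndpoints counting only
        // endpoints that are both Ready and on this node. A hedged,
        // self-contained sketch of that condition (a hypothetical helper that
        // mirrors, but does not replace, the real EndpointsMap bookkeeping):
        func countLocalReadyEndpoints(eps *discovery.EndpointSlice, hostname string) int {
        	count := 0
        	for _, ep := range eps.Endpoints {
        		isLocal := ep.NodeName != nil && *ep.NodeName == hostname
        		isReady := ep.Conditions.Ready != nil && *ep.Conditions.Ready
        		if isLocal && isReady {
        			count++
        		}
        	}
        	return count
        }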
  4336  
  4337  func TestProxierDeleteNodePortStaleUDP(t *testing.T) {
  4338  	fcmd := fakeexec.FakeCmd{}
  4339  	fexec := &fakeexec.FakeExec{
  4340  		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
  4341  	}
  4342  	execFunc := func(cmd string, args ...string) exec.Cmd {
  4343  		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
  4344  	}
  4345  	cmdOutput := "1 flow entries have been deleted"
  4346  	cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, nil }
  4347  
  4348  	// Delete ClusterIP entries
  4349  	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
  4350  	fexec.CommandScript = append(fexec.CommandScript, execFunc)
  4351  	// Delete ExternalIP entries
  4352  	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
  4353  	fexec.CommandScript = append(fexec.CommandScript, execFunc)
  4354  	// Delete LoadBalancerIP entries
  4355  	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
  4356  	fexec.CommandScript = append(fexec.CommandScript, execFunc)
  4357  	// Delete NodePort entries
  4358  	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
  4359  	fexec.CommandScript = append(fexec.CommandScript, execFunc)
  4360  
  4361  	ipt := iptablestest.NewFake()
  4362  	fp := NewFakeProxier(ipt)
  4363  	fp.exec = fexec
  4364  
  4365  	svcIP := "172.30.0.41"
  4366  	extIP := "192.168.99.11"
  4367  	lbIngressIP := "1.2.3.4"
  4368  	svcPort := 80
  4369  	nodePort := 31201
  4370  	svcPortName := proxy.ServicePortName{
  4371  		NamespacedName: makeNSN("ns1", "svc1"),
  4372  		Port:           "p80",
  4373  		Protocol:       v1.ProtocolUDP,
  4374  	}
  4375  
  4376  	makeServiceMap(fp,
  4377  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  4378  			svc.Spec.ClusterIP = svcIP
  4379  			svc.Spec.ExternalIPs = []string{extIP}
  4380  			svc.Spec.Type = "LoadBalancer"
  4381  			svc.Spec.Ports = []v1.ServicePort{{
  4382  				Name:     svcPortName.Port,
  4383  				Port:     int32(svcPort),
  4384  				Protocol: v1.ProtocolUDP,
  4385  				NodePort: int32(nodePort),
  4386  			}}
  4387  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  4388  				IP: lbIngressIP,
  4389  			}}
  4390  		}),
  4391  	)
  4392  
  4393  	fp.syncProxyRules()
  4394  	if fexec.CommandCalls != 0 {
  4395  		t.Fatalf("Creating a service without endpoints must not clear conntrack entries")
  4396  	}
  4397  
  4398  	epIP := "10.180.0.1"
  4399  	populateEndpointSlices(fp,
  4400  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  4401  			eps.AddressType = discovery.AddressTypeIPv4
  4402  			eps.Endpoints = []discovery.Endpoint{{
  4403  				Addresses: []string{epIP},
  4404  				Conditions: discovery.EndpointConditions{
  4405  					Serving: ptr.To(false),
  4406  				},
  4407  			}}
  4408  			eps.Ports = []discovery.EndpointPort{{
  4409  				Name:     ptr.To(svcPortName.Port),
  4410  				Port:     ptr.To(int32(svcPort)),
  4411  				Protocol: ptr.To(v1.ProtocolUDP),
  4412  			}}
  4413  		}),
  4414  	)
  4415  
  4416  	fp.syncProxyRules()
  4417  
  4418  	if fexec.CommandCalls != 0 {
  4419  		t.Fatalf("Updating a UDP service with not-ready endpoints must not clear conntrack entries")
  4420  	}
  4421  
  4422  	populateEndpointSlices(fp,
  4423  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  4424  			eps.AddressType = discovery.AddressTypeIPv4
  4425  			eps.Endpoints = []discovery.Endpoint{{
  4426  				Addresses: []string{epIP},
  4427  				Conditions: discovery.EndpointConditions{
  4428  					Serving: ptr.To(true),
  4429  				},
  4430  			}}
  4431  			eps.Ports = []discovery.EndpointPort{{
  4432  				Name:     ptr.To(svcPortName.Port),
  4433  				Port:     ptr.To(int32(svcPort)),
  4434  				Protocol: ptr.To(v1.ProtocolUDP),
  4435  			}}
  4436  		}),
  4437  	)
  4438  
  4439  	fp.syncProxyRules()
  4440  
  4441  	if fexec.CommandCalls != 4 {
  4442  		t.Fatalf("Updating a UDP service with new ready endpoints must clear conntrack entries 4 times: ClusterIP, ExternalIP, LoadBalancerIP and NodePort")
  4443  	}
  4444  
  4445  	// the order of the cleanup calls is not guaranteed, so compare the sorted command strings
  4446  	expectedCommands := []string{
  4447  		// Delete ClusterIP Conntrack entries
  4448  		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", svcIP, strings.ToLower(string(v1.ProtocolUDP))),
  4449  		// Delete ExternalIP Conntrack entries
  4450  		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", extIP, strings.ToLower(string(v1.ProtocolUDP))),
  4451  		// Delete LoadBalancerIP Conntrack entries
  4452  		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", lbIngressIP, strings.ToLower(string(v1.ProtocolUDP))),
  4453  		// Delete NodePort Conntrack entries
  4454  		fmt.Sprintf("conntrack -D -p %s --dport %d", strings.ToLower(string(v1.ProtocolUDP)), nodePort),
  4455  	}
  4456  	actualCommands := []string{
  4457  		strings.Join(fcmd.CombinedOutputLog[0], " "),
  4458  		strings.Join(fcmd.CombinedOutputLog[1], " "),
  4459  		strings.Join(fcmd.CombinedOutputLog[2], " "),
  4460  		strings.Join(fcmd.CombinedOutputLog[3], " "),
  4461  	}
  4462  	sort.Strings(expectedCommands)
  4463  	sort.Strings(actualCommands)
  4464  
  4465  	if !reflect.DeepEqual(expectedCommands, actualCommands) {
  4466  		t.Errorf("Expected commands: %v, but executed %v", expectedCommands, actualCommands)
  4467  	}
  4468  }
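
        // The four expected invocations above follow a simple pattern: entries
        // for a service IP (ClusterIP, ExternalIP, LoadBalancerIP) are deleted
        // by original destination address, while NodePort entries are deleted
        // by destination port. A hypothetical sketch of that construction
        // (illustrative only; the real cleanup happens during syncProxyRules):
        func expectedUDPConntrackCommands(svcIP, extIP, lbIP string, nodePort int) []string {
        	proto := strings.ToLower(string(v1.ProtocolUDP))
        	cmds := []string{
        		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", svcIP, proto),
        		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", extIP, proto),
        		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", lbIP, proto),
        		fmt.Sprintf("conntrack -D -p %s --dport %d", proto, nodePort),
        	}
        	sort.Strings(cmds) // execution order is not guaranteed, so compare sorted
        	return cmds
        }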
  4469  
  4470  func TestProxierMetricsIptablesTotalRules(t *testing.T) {
  4471  	ipt := iptablestest.NewFake()
  4472  	fp := NewFakeProxier(ipt)
  4473  
  4474  	metrics.RegisterMetrics()
  4475  
  4476  	svcIP := "172.30.0.41"
  4477  	svcPort := 80
  4478  	nodePort := 31201
  4479  	svcPortName := proxy.ServicePortName{
  4480  		NamespacedName: makeNSN("ns1", "svc1"),
  4481  		Port:           "p80",
  4482  		Protocol:       v1.ProtocolTCP,
  4483  	}
  4484  
  4485  	makeServiceMap(fp,
  4486  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  4487  			svc.Spec.ClusterIP = svcIP
  4488  			svc.Spec.Ports = []v1.ServicePort{{
  4489  				Name:     svcPortName.Port,
  4490  				Port:     int32(svcPort),
  4491  				Protocol: v1.ProtocolTCP,
  4492  				NodePort: int32(nodePort),
  4493  			}}
  4494  		}),
  4495  	)
  4496  	fp.syncProxyRules()
  4497  	iptablesData := fp.iptablesData.String()
  4498  
  4499  	nFilterRules := countRulesFromMetric(utiliptables.TableFilter)
  4500  	expectedFilterRules := countRules(utiliptables.TableFilter, iptablesData)
  4501  
  4502  	if nFilterRules != expectedFilterRules {
  4503  		t.Fatalf("Wrong number of filter rules: expected %d, got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
  4504  	}
  4505  
  4506  	nNatRules := countRulesFromMetric(utiliptables.TableNAT)
  4507  	expectedNatRules := countRules(utiliptables.TableNAT, iptablesData)
  4508  
  4509  	if nNatRules != expectedNatRules {
  4510  		t.Fatalf("Wrong number of nat rules: expected %d, got %d\n%s", expectedNatRules, nNatRules, iptablesData)
  4511  	}
  4512  
  4513  	populateEndpointSlices(fp,
  4514  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  4515  			eps.AddressType = discovery.AddressTypeIPv4
  4516  			eps.Endpoints = []discovery.Endpoint{{
  4517  				Addresses: []string{"10.0.0.2"},
  4518  			}, {
  4519  				Addresses: []string{"10.0.0.5"},
  4520  			}}
  4521  			eps.Ports = []discovery.EndpointPort{{
  4522  				Name:     ptr.To(svcPortName.Port),
  4523  				Port:     ptr.To(int32(svcPort)),
  4524  				Protocol: ptr.To(v1.ProtocolTCP),
  4525  			}}
  4526  		}),
  4527  	)
  4528  
  4529  	fp.syncProxyRules()
  4530  	iptablesData = fp.iptablesData.String()
  4531  
  4532  	nFilterRules = countRulesFromMetric(utiliptables.TableFilter)
  4533  	expectedFilterRules = countRules(utiliptables.TableFilter, iptablesData)
  4534  
  4535  	if nFilterRules != expectedFilterRules {
  4536  		t.Fatalf("Wrong number of filter rules: expected %d, got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
  4537  	}
  4538  
  4539  	nNatRules = countRulesFromMetric(utiliptables.TableNAT)
  4540  	expectedNatRules = countRules(utiliptables.TableNAT, iptablesData)
  4541  
  4542  	if nNatRules != expectedNatRules {
  4543  		t.Fatalf("Wrong number of nat rules: expected %d, got %d\n%s", expectedNatRules, nNatRules, iptablesData)
  4544  	}
  4545  }
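
        // countRules and countRulesFromMetric are compared above to keep the
        // proxier's per-table rule-count metric honest. As a rough sketch of
        // the parsing side (a hypothetical re-implementation, assuming
        // iptables-save format where a "*table" line opens a table and "-A "
        // lines append rules; the file's real countRules helper is authoritative):
        func countRulesInSaveOutput(table utiliptables.Table, save string) int {
        	rules := 0
        	inTable := false
        	for _, line := range strings.Split(save, "\n") {
        		switch {
        		case strings.HasPrefix(line, "*"):
        			inTable = strings.TrimPrefix(line, "*") == string(table)
        		case strings.HasPrefix(line, "COMMIT"):
        			inTable = false
        		case inTable && strings.HasPrefix(line, "-A "):
        			rules++
        		}
        	}
        	return rules
        }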
  4546  
  4547  // TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
  4548  
  4549  // This test ensures that the iptables proxier supports translating EndpointSlices to
  4550  // iptables output when internalTrafficPolicy is specified.
  4551  func TestInternalTrafficPolicy(t *testing.T) {
  4552  	type endpoint struct {
  4553  		ip       string
  4554  		hostname string
  4555  	}
  4556  
  4557  	testCases := []struct {
  4558  		name                  string
  4559  		line                  int
  4560  		internalTrafficPolicy *v1.ServiceInternalTrafficPolicy
  4561  		endpoints             []endpoint
  4562  		flowTests             []packetFlowTest
  4563  	}{
  4564  		{
  4565  			name:                  "internalTrafficPolicy is cluster",
  4566  			line:                  getLine(),
  4567  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster),
  4568  			endpoints: []endpoint{
  4569  				{"10.0.1.1", testHostname},
  4570  				{"10.0.1.2", "host1"},
  4571  				{"10.0.1.3", "host2"},
  4572  			},
  4573  			flowTests: []packetFlowTest{
  4574  				{
  4575  					name:     "pod to ClusterIP hits all endpoints",
  4576  					sourceIP: "10.0.0.2",
  4577  					destIP:   "172.30.1.1",
  4578  					destPort: 80,
  4579  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.3:80",
  4580  					masq:     false,
  4581  				},
  4582  			},
  4583  		},
  4584  		{
  4585  			name:                  "internalTrafficPolicy is local and there is one local endpoint",
  4586  			line:                  getLine(),
  4587  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4588  			endpoints: []endpoint{
  4589  				{"10.0.1.1", testHostname},
  4590  				{"10.0.1.2", "host1"},
  4591  				{"10.0.1.3", "host2"},
  4592  			},
  4593  			flowTests: []packetFlowTest{
  4594  				{
  4595  					name:     "pod to ClusterIP hits only local endpoint",
  4596  					sourceIP: "10.0.0.2",
  4597  					destIP:   "172.30.1.1",
  4598  					destPort: 80,
  4599  					output:   "10.0.1.1:80",
  4600  					masq:     false,
  4601  				},
  4602  			},
  4603  		},
  4604  		{
  4605  			name:                  "internalTrafficPolicy is local and there are multiple local endpoints",
  4606  			line:                  getLine(),
  4607  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4608  			endpoints: []endpoint{
  4609  				{"10.0.1.1", testHostname},
  4610  				{"10.0.1.2", testHostname},
  4611  				{"10.0.1.3", "host2"},
  4612  			},
  4613  			flowTests: []packetFlowTest{
  4614  				{
  4615  					name:     "pod to ClusterIP hits all local endpoints",
  4616  					sourceIP: "10.0.0.2",
  4617  					destIP:   "172.30.1.1",
  4618  					destPort: 80,
  4619  					output:   "10.0.1.1:80, 10.0.1.2:80",
  4620  					masq:     false,
  4621  				},
  4622  			},
  4623  		},
  4624  		{
  4625  			name:                  "internalTrafficPolicy is local and there are no local endpoints",
  4626  			line:                  getLine(),
  4627  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4628  			endpoints: []endpoint{
  4629  				{"10.0.1.1", "host0"},
  4630  				{"10.0.1.2", "host1"},
  4631  				{"10.0.1.3", "host2"},
  4632  			},
  4633  			flowTests: []packetFlowTest{
  4634  				{
  4635  					name:     "no endpoints",
  4636  					sourceIP: "10.0.0.2",
  4637  					destIP:   "172.30.1.1",
  4638  					destPort: 80,
  4639  					output:   "DROP",
  4640  				},
  4641  			},
  4642  		},
  4643  	}
  4644  
  4645  	for _, tc := range testCases {
  4646  		t.Run(tc.name, func(t *testing.T) {
  4647  			ipt := iptablestest.NewFake()
  4648  			fp := NewFakeProxier(ipt)
  4649  			fp.OnServiceSynced()
  4650  			fp.OnEndpointSlicesSynced()
  4651  
  4652  			serviceName := "svc1"
  4653  			namespaceName := "ns1"
  4654  
  4655  			svc := &v1.Service{
  4656  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4657  				Spec: v1.ServiceSpec{
  4658  					ClusterIP: "172.30.1.1",
  4659  					Selector:  map[string]string{"foo": "bar"},
  4660  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
  4661  				},
  4662  			}
  4663  			if tc.internalTrafficPolicy != nil {
  4664  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  4665  			}
  4666  
  4667  			fp.OnServiceAdd(svc)
  4668  
  4669  			endpointSlice := &discovery.EndpointSlice{
  4670  				ObjectMeta: metav1.ObjectMeta{
  4671  					Name:      fmt.Sprintf("%s-1", serviceName),
  4672  					Namespace: namespaceName,
  4673  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4674  				},
  4675  				Ports: []discovery.EndpointPort{{
  4676  					Name:     ptr.To(""),
  4677  					Port:     ptr.To[int32](80),
  4678  					Protocol: ptr.To(v1.ProtocolTCP),
  4679  				}},
  4680  				AddressType: discovery.AddressTypeIPv4,
  4681  			}
  4682  			for _, ep := range tc.endpoints {
  4683  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  4684  					Addresses:  []string{ep.ip},
  4685  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4686  					NodeName:   ptr.To(ep.hostname),
  4687  				})
  4688  			}
  4689  
  4690  			fp.OnEndpointSliceAdd(endpointSlice)
  4691  			fp.syncProxyRules()
  4692  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tc.flowTests)
  4693  
  4694  			fp.OnEndpointSliceDelete(endpointSlice)
  4695  			fp.syncProxyRules()
  4696  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, []packetFlowTest{
  4697  				{
  4698  					name:     "endpoints deleted",
  4699  					sourceIP: "10.0.0.2",
  4700  					destIP:   "172.30.1.1",
  4701  					destPort: 80,
  4702  					output:   "REJECT",
  4703  				},
  4704  			})
  4705  		})
  4706  	}
  4707  }
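
        // The three policy cases above reduce to one selection rule: with
        // internalTrafficPolicy set to Local, only endpoints whose NodeName
        // matches this host receive ClusterIP traffic, and an empty selection
        // yields DROP. A hypothetical sketch of that selection (illustrative,
        // not the proxier's actual code path):
        func selectInternalEndpoints(policy v1.ServiceInternalTrafficPolicy, eps []discovery.Endpoint, hostname string) []discovery.Endpoint {
        	if policy != v1.ServiceInternalTrafficPolicyLocal {
        		return eps // Cluster (the default): every endpoint is eligible
        	}
        	var local []discovery.Endpoint
        	for _, ep := range eps {
        		if ep.NodeName != nil && *ep.NodeName == hostname {
        			local = append(local, ep)
        		}
        	}
        	return local // an empty result corresponds to the "DROP" flow test above
        }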
  4708  
  4709  // TestTerminatingEndpointsTrafficPolicyLocal tests that when a service has both local
  4710  // ready endpoints and local serving-but-terminating endpoints, only the ready endpoints are used.
  4711  func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
  4712  	service := &v1.Service{
  4713  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  4714  		Spec: v1.ServiceSpec{
  4715  			ClusterIP:             "172.30.1.1",
  4716  			Type:                  v1.ServiceTypeLoadBalancer,
  4717  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4718  			Ports: []v1.ServicePort{
  4719  				{
  4720  					Name:       "",
  4721  					TargetPort: intstr.FromInt32(80),
  4722  					Port:       80,
  4723  					Protocol:   v1.ProtocolTCP,
  4724  				},
  4725  			},
  4726  			HealthCheckNodePort: 30000,
  4727  		},
  4728  		Status: v1.ServiceStatus{
  4729  			LoadBalancer: v1.LoadBalancerStatus{
  4730  				Ingress: []v1.LoadBalancerIngress{
  4731  					{IP: "1.2.3.4"},
  4732  				},
  4733  			},
  4734  		},
  4735  	}
  4736  
  4737  	testcases := []struct {
  4738  		name          string
  4739  		line          int
  4740  		endpointslice *discovery.EndpointSlice
  4741  		flowTests     []packetFlowTest
  4742  	}{
  4743  		{
  4744  			name: "ready endpoints exist",
  4745  			line: getLine(),
  4746  			endpointslice: &discovery.EndpointSlice{
  4747  				ObjectMeta: metav1.ObjectMeta{
  4748  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4749  					Namespace: "ns1",
  4750  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4751  				},
  4752  				Ports: []discovery.EndpointPort{{
  4753  					Name:     ptr.To(""),
  4754  					Port:     ptr.To[int32](80),
  4755  					Protocol: ptr.To(v1.ProtocolTCP),
  4756  				}},
  4757  				AddressType: discovery.AddressTypeIPv4,
  4758  				Endpoints: []discovery.Endpoint{
  4759  					{
  4760  						Addresses: []string{"10.0.1.1"},
  4761  						Conditions: discovery.EndpointConditions{
  4762  							Ready:       ptr.To(true),
  4763  							Serving:     ptr.To(true),
  4764  							Terminating: ptr.To(false),
  4765  						},
  4766  						NodeName: ptr.To(testHostname),
  4767  					},
  4768  					{
  4769  						Addresses: []string{"10.0.1.2"},
  4770  						Conditions: discovery.EndpointConditions{
  4771  							Ready:       ptr.To(true),
  4772  							Serving:     ptr.To(true),
  4773  							Terminating: ptr.To(false),
  4774  						},
  4775  						NodeName: ptr.To(testHostname),
  4776  					},
  4777  					{
  4778  						// this endpoint should be ignored for external since there are ready non-terminating endpoints
  4779  						Addresses: []string{"10.0.1.3"},
  4780  						Conditions: discovery.EndpointConditions{
  4781  							Ready:       ptr.To(false),
  4782  							Serving:     ptr.To(true),
  4783  							Terminating: ptr.To(true),
  4784  						},
  4785  						NodeName: ptr.To(testHostname),
  4786  					},
  4787  					{
  4788  						// this endpoint should never be used since it is neither ready nor serving
  4789  						Addresses: []string{"10.0.1.4"},
  4790  						Conditions: discovery.EndpointConditions{
  4791  							Ready:       ptr.To(false),
  4792  							Serving:     ptr.To(false),
  4793  							Terminating: ptr.To(true),
  4794  						},
  4795  						NodeName: ptr.To(testHostname),
  4796  					},
  4797  					{
  4798  						// this endpoint should be ignored for external since it's not local
  4799  						Addresses: []string{"10.0.1.5"},
  4800  						Conditions: discovery.EndpointConditions{
  4801  							Ready:       ptr.To(true),
  4802  							Serving:     ptr.To(true),
  4803  							Terminating: ptr.To(false),
  4804  						},
  4805  						NodeName: ptr.To("host-1"),
  4806  					},
  4807  				},
  4808  			},
  4809  			flowTests: []packetFlowTest{
  4810  				{
  4811  					name:     "pod to clusterIP",
  4812  					sourceIP: "10.0.0.2",
  4813  					destIP:   "172.30.1.1",
  4814  					destPort: 80,
  4815  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  4816  					masq:     false,
  4817  				},
  4818  				{
  4819  					name:     "external to LB",
  4820  					sourceIP: testExternalClient,
  4821  					destIP:   "1.2.3.4",
  4822  					destPort: 80,
  4823  					output:   "10.0.1.1:80, 10.0.1.2:80",
  4824  					masq:     false,
  4825  				},
  4826  			},
  4827  		},
  4828  		{
  4829  			name: "only terminating endpoints exist",
  4830  			line: getLine(),
  4831  			endpointslice: &discovery.EndpointSlice{
  4832  				ObjectMeta: metav1.ObjectMeta{
  4833  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4834  					Namespace: "ns1",
  4835  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4836  				},
  4837  				Ports: []discovery.EndpointPort{{
  4838  					Name:     ptr.To(""),
  4839  					Port:     ptr.To[int32](80),
  4840  					Protocol: ptr.To(v1.ProtocolTCP),
  4841  				}},
  4842  				AddressType: discovery.AddressTypeIPv4,
  4843  				Endpoints: []discovery.Endpoint{
  4844  					{
  4845  						// this endpoint should be used since only serving-but-terminating local endpoints exist
  4846  						Addresses: []string{"10.0.1.2"},
  4847  						Conditions: discovery.EndpointConditions{
  4848  							Ready:       ptr.To(false),
  4849  							Serving:     ptr.To(true),
  4850  							Terminating: ptr.To(true),
  4851  						},
  4852  						NodeName: ptr.To(testHostname),
  4853  					},
  4854  					{
  4855  						// this endpoint should be used since only serving-but-terminating local endpoints exist
  4856  						Addresses: []string{"10.0.1.3"},
  4857  						Conditions: discovery.EndpointConditions{
  4858  							Ready:       ptr.To(false),
  4859  							Serving:     ptr.To(true),
  4860  							Terminating: ptr.To(true),
  4861  						},
  4862  						NodeName: ptr.To(testHostname),
  4863  					},
  4864  					{
  4865  						// this endpoint should not be used since it is terminating and not serving.
  4866  						Addresses: []string{"10.0.1.4"},
  4867  						Conditions: discovery.EndpointConditions{
  4868  							Ready:       ptr.To(false),
  4869  							Serving:     ptr.To(false),
  4870  							Terminating: ptr.To(true),
  4871  						},
  4872  						NodeName: ptr.To(testHostname),
  4873  					},
  4874  					{
  4875  						// this endpoint should be ignored for external since it's not local
  4876  						Addresses: []string{"10.0.1.5"},
  4877  						Conditions: discovery.EndpointConditions{
  4878  							Ready:       ptr.To(true),
  4879  							Serving:     ptr.To(true),
  4880  							Terminating: ptr.To(false),
  4881  						},
  4882  						NodeName: ptr.To("host-1"),
  4883  					},
  4884  				},
  4885  			},
  4886  			flowTests: []packetFlowTest{
  4887  				{
  4888  					name:     "pod to clusterIP",
  4889  					sourceIP: "10.0.0.2",
  4890  					destIP:   "172.30.1.1",
  4891  					destPort: 80,
  4892  					output:   "10.0.1.5:80",
  4893  					masq:     false,
  4894  				},
  4895  				{
  4896  					name:     "external to LB",
  4897  					sourceIP: testExternalClient,
  4898  					destIP:   "1.2.3.4",
  4899  					destPort: 80,
  4900  					output:   "10.0.1.2:80, 10.0.1.3:80",
  4901  					masq:     false,
  4902  				},
  4903  			},
  4904  		},
  4905  		{
  4906  			name: "terminating endpoints on remote node",
  4907  			line: getLine(),
  4908  			endpointslice: &discovery.EndpointSlice{
  4909  				ObjectMeta: metav1.ObjectMeta{
  4910  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4911  					Namespace: "ns1",
  4912  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4913  				},
  4914  				Ports: []discovery.EndpointPort{{
  4915  					Name:     ptr.To(""),
  4916  					Port:     ptr.To[int32](80),
  4917  					Protocol: ptr.To(v1.ProtocolTCP),
  4918  				}},
  4919  				AddressType: discovery.AddressTypeIPv4,
  4920  				Endpoints: []discovery.Endpoint{
  4921  					{
  4922  						// this endpoint won't be used because it's not local,
  4923  						// but it will prevent a REJECT rule from being created
  4924  						Addresses: []string{"10.0.1.5"},
  4925  						Conditions: discovery.EndpointConditions{
  4926  							Ready:       ptr.To(false),
  4927  							Serving:     ptr.To(true),
  4928  							Terminating: ptr.To(true),
  4929  						},
  4930  						NodeName: ptr.To("host-1"),
  4931  					},
  4932  				},
  4933  			},
  4934  			flowTests: []packetFlowTest{
  4935  				{
  4936  					name:     "pod to clusterIP",
  4937  					sourceIP: "10.0.0.2",
  4938  					destIP:   "172.30.1.1",
  4939  					destPort: 80,
  4940  					output:   "10.0.1.5:80",
  4941  				},
  4942  				{
  4943  					name:     "external to LB, no locally-usable endpoints",
  4944  					sourceIP: testExternalClient,
  4945  					destIP:   "1.2.3.4",
  4946  					destPort: 80,
  4947  					output:   "DROP",
  4948  				},
  4949  			},
  4950  		},
  4951  		{
  4952  			name: "no usable endpoints on any node",
  4953  			line: getLine(),
  4954  			endpointslice: &discovery.EndpointSlice{
  4955  				ObjectMeta: metav1.ObjectMeta{
  4956  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4957  					Namespace: "ns1",
  4958  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4959  				},
  4960  				Ports: []discovery.EndpointPort{{
  4961  					Name:     ptr.To(""),
  4962  					Port:     ptr.To[int32](80),
  4963  					Protocol: ptr.To(v1.ProtocolTCP),
  4964  				}},
  4965  				AddressType: discovery.AddressTypeIPv4,
  4966  				Endpoints: []discovery.Endpoint{
  4967  					{
  4968  						// Local but not ready or serving
  4969  						Addresses: []string{"10.0.1.5"},
  4970  						Conditions: discovery.EndpointConditions{
  4971  							Ready:       ptr.To(false),
  4972  							Serving:     ptr.To(false),
  4973  							Terminating: ptr.To(true),
  4974  						},
  4975  						NodeName: ptr.To(testHostname),
  4976  					},
  4977  					{
  4978  						// Remote and not ready or serving
  4979  						Addresses: []string{"10.0.1.5"},
  4980  						Conditions: discovery.EndpointConditions{
  4981  							Ready:       ptr.To(false),
  4982  							Serving:     ptr.To(false),
  4983  							Terminating: ptr.To(true),
  4984  						},
  4985  						NodeName: ptr.To("host-1"),
  4986  					},
  4987  				},
  4988  			},
  4989  			flowTests: []packetFlowTest{
  4990  				{
  4991  					name:     "pod to clusterIP, no usable endpoints",
  4992  					sourceIP: "10.0.0.2",
  4993  					destIP:   "172.30.1.1",
  4994  					destPort: 80,
  4995  					output:   "REJECT",
  4996  				},
  4997  				{
  4998  					name:     "external to LB, no usable endpoints",
  4999  					sourceIP: testExternalClient,
  5000  					destIP:   "1.2.3.4",
  5001  					destPort: 80,
  5002  					output:   "REJECT",
  5003  				},
  5004  			},
  5005  		},
  5006  	}
  5007  
  5008  	for _, testcase := range testcases {
  5009  		t.Run(testcase.name, func(t *testing.T) {
  5010  			ipt := iptablestest.NewFake()
  5011  			fp := NewFakeProxier(ipt)
  5012  			fp.OnServiceSynced()
  5013  			fp.OnEndpointSlicesSynced()
  5014  
  5015  			fp.OnServiceAdd(service)
  5016  
  5017  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  5018  			fp.syncProxyRules()
  5019  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests)
  5020  
  5021  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  5022  			fp.syncProxyRules()
  5023  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{
  5024  				{
  5025  					name:     "pod to clusterIP after endpoints deleted",
  5026  					sourceIP: "10.0.0.2",
  5027  					destIP:   "172.30.1.1",
  5028  					destPort: 80,
  5029  					output:   "REJECT",
  5030  				},
  5031  				{
  5032  					name:     "external to LB after endpoints deleted",
  5033  					sourceIP: testExternalClient,
  5034  					destIP:   "1.2.3.4",
  5035  					destPort: 80,
  5036  					output:   "REJECT",
  5037  				},
  5038  			})
  5039  		})
  5040  	}
  5041  }
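
        // The DROP-vs-REJECT expectations above encode the following rule choice for
        // an externalTrafficPolicy: Local service. This is a hedged sketch under that
        // reading -- the helper is hypothetical, not the proxier's real code: usable
        // local endpoints get DNAT; endpoints that are usable but only remote make
        // external traffic fail closed with DROP; no usable endpoints anywhere yields
        // REJECT.
        func externalVerdictSketch(localUsable, clusterUsable int) string {
        	switch {
        	case localUsable > 0:
        		return "DNAT" // deliver to local endpoints, preserving the client IP
        	case clusterUsable > 0:
        		return "DROP" // endpoints exist, just not on this node; drop, don't reject
        	default:
        		return "REJECT" // no usable endpoints at all
        	}
        }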
  5042  
  5043  // TestTerminatingEndpointsTrafficPolicyCluster tests that ready endpoints are preferred
  5044  // cluster-wide, with serving+terminating endpoints used only when no endpoints are ready.
  5045  func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
  5046  	service := &v1.Service{
  5047  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  5048  		Spec: v1.ServiceSpec{
  5049  			ClusterIP:             "172.30.1.1",
  5050  			Type:                  v1.ServiceTypeLoadBalancer,
  5051  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster,
  5052  			Ports: []v1.ServicePort{
  5053  				{
  5054  					Name:       "",
  5055  					TargetPort: intstr.FromInt32(80),
  5056  					Port:       80,
  5057  					Protocol:   v1.ProtocolTCP,
  5058  				},
  5059  			},
  5060  			HealthCheckNodePort: 30000,
  5061  		},
  5062  		Status: v1.ServiceStatus{
  5063  			LoadBalancer: v1.LoadBalancerStatus{
  5064  				Ingress: []v1.LoadBalancerIngress{
  5065  					{IP: "1.2.3.4"},
  5066  				},
  5067  			},
  5068  		},
  5069  	}
  5070  
  5071  	testcases := []struct {
  5072  		name          string
  5073  		line          int
  5074  		endpointslice *discovery.EndpointSlice
  5075  		flowTests     []packetFlowTest
  5076  	}{
  5077  		{
  5078  			name: "ready endpoints exist",
  5079  			line: getLine(),
  5080  			endpointslice: &discovery.EndpointSlice{
  5081  				ObjectMeta: metav1.ObjectMeta{
  5082  					Name:      fmt.Sprintf("%s-1", "svc1"),
  5083  					Namespace: "ns1",
  5084  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  5085  				},
  5086  				Ports: []discovery.EndpointPort{{
  5087  					Name:     ptr.To(""),
  5088  					Port:     ptr.To[int32](80),
  5089  					Protocol: ptr.To(v1.ProtocolTCP),
  5090  				}},
  5091  				AddressType: discovery.AddressTypeIPv4,
  5092  				Endpoints: []discovery.Endpoint{
  5093  					{
  5094  						Addresses: []string{"10.0.1.1"},
  5095  						Conditions: discovery.EndpointConditions{
  5096  							Ready:       ptr.To(true),
  5097  							Serving:     ptr.To(true),
  5098  							Terminating: ptr.To(false),
  5099  						},
  5100  						NodeName: ptr.To(testHostname),
  5101  					},
  5102  					{
  5103  						Addresses: []string{"10.0.1.2"},
  5104  						Conditions: discovery.EndpointConditions{
  5105  							Ready:       ptr.To(true),
  5106  							Serving:     ptr.To(true),
  5107  							Terminating: ptr.To(false),
  5108  						},
  5109  						NodeName: ptr.To(testHostname),
  5110  					},
  5111  					{
  5112  						// this endpoint should be ignored since there are ready non-terminating endpoints
  5113  						Addresses: []string{"10.0.1.3"},
  5114  						Conditions: discovery.EndpointConditions{
  5115  							Ready:       ptr.To(false),
  5116  							Serving:     ptr.To(true),
  5117  							Terminating: ptr.To(true),
  5118  						},
  5119  						NodeName: ptr.To("another-host"),
  5120  					},
  5121  					{
  5122  						// this endpoint should be ignored since it is not "serving"
  5123  						Addresses: []string{"10.0.1.4"},
  5124  						Conditions: discovery.EndpointConditions{
  5125  							Ready:       ptr.To(false),
  5126  							Serving:     ptr.To(false),
  5127  							Terminating: ptr.To(true),
  5128  						},
  5129  						NodeName: ptr.To("another-host"),
  5130  					},
  5131  					{
  5132  						Addresses: []string{"10.0.1.5"},
  5133  						Conditions: discovery.EndpointConditions{
  5134  							Ready:       ptr.To(true),
  5135  							Serving:     ptr.To(true),
  5136  							Terminating: ptr.To(false),
  5137  						},
  5138  						NodeName: ptr.To("another-host"),
  5139  					},
  5140  				},
  5141  			},
  5142  			flowTests: []packetFlowTest{
  5143  				{
  5144  					name:     "pod to clusterIP",
  5145  					sourceIP: "10.0.0.2",
  5146  					destIP:   "172.30.1.1",
  5147  					destPort: 80,
  5148  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  5149  					masq:     false,
  5150  				},
  5151  				{
  5152  					name:     "external to LB",
  5153  					sourceIP: testExternalClient,
  5154  					destIP:   "1.2.3.4",
  5155  					destPort: 80,
  5156  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  5157  					masq:     true,
  5158  				},
  5159  			},
  5160  		},
  5161  		{
  5162  			name: "only terminating endpoints exist",
  5163  			line: getLine(),
  5164  			endpointslice: &discovery.EndpointSlice{
  5165  				ObjectMeta: metav1.ObjectMeta{
  5166  					Name:      fmt.Sprintf("%s-1", "svc1"),
  5167  					Namespace: "ns1",
  5168  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  5169  				},
  5170  				Ports: []discovery.EndpointPort{{
  5171  					Name:     ptr.To(""),
  5172  					Port:     ptr.To[int32](80),
  5173  					Protocol: ptr.To(v1.ProtocolTCP),
  5174  				}},
  5175  				AddressType: discovery.AddressTypeIPv4,
  5176  				Endpoints: []discovery.Endpoint{
  5177  					{
  5178  						// this endpoint should be used since there are only ready terminating endpoints
  5179  						Addresses: []string{"10.0.1.2"},
  5180  						Conditions: discovery.EndpointConditions{
  5181  							Ready:       ptr.To(false),
  5182  							Serving:     ptr.To(true),
  5183  							Terminating: ptr.To(true),
  5184  						},
  5185  						NodeName: ptr.To(testHostname),
  5186  					},
  5187  					{
  5188  						// this endpoint should be used since there are only ready terminating endpoints
  5189  						Addresses: []string{"10.0.1.3"},
  5190  						Conditions: discovery.EndpointConditions{
  5191  							Ready:       ptr.To(false),
  5192  							Serving:     ptr.To(true),
  5193  							Terminating: ptr.To(true),
  5194  						},
  5195  						NodeName: ptr.To(testHostname),
  5196  					},
  5197  					{
  5198  						// this endpoint should not be used since it is not "serving"
  5199  						Addresses: []string{"10.0.1.4"},
  5200  						Conditions: discovery.EndpointConditions{
  5201  							Ready:       ptr.To(false),
  5202  							Serving:     ptr.To(false),
  5203  							Terminating: ptr.To(true),
  5204  						},
  5205  						NodeName: ptr.To("another-host"),
  5206  					},
  5207  					{
  5208  						// this endpoint should be used since there are only ready terminating endpoints
  5209  						Addresses: []string{"10.0.1.5"},
  5210  						Conditions: discovery.EndpointConditions{
  5211  							Ready:       ptr.To(false),
  5212  							Serving:     ptr.To(true),
  5213  							Terminating: ptr.To(true),
  5214  						},
  5215  						NodeName: ptr.To("another-host"),
  5216  					},
  5217  				},
  5218  			},
  5219  			flowTests: []packetFlowTest{
  5220  				{
  5221  					name:     "pod to clusterIP",
  5222  					sourceIP: "10.0.0.2",
  5223  					destIP:   "172.30.1.1",
  5224  					destPort: 80,
  5225  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  5226  					masq:     false,
  5227  				},
  5228  				{
  5229  					name:     "external to LB",
  5230  					sourceIP: testExternalClient,
  5231  					destIP:   "1.2.3.4",
  5232  					destPort: 80,
  5233  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  5234  					masq:     true,
  5235  				},
  5236  			},
  5237  		},
  5238  		{
  5239  			name: "terminating endpoints on remote node",
  5240  			line: getLine(),
  5241  			endpointslice: &discovery.EndpointSlice{
  5242  				ObjectMeta: metav1.ObjectMeta{
  5243  					Name:      fmt.Sprintf("%s-1", "svc1"),
  5244  					Namespace: "ns1",
  5245  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  5246  				},
  5247  				Ports: []discovery.EndpointPort{{
  5248  					Name:     ptr.To(""),
  5249  					Port:     ptr.To[int32](80),
  5250  					Protocol: ptr.To(v1.ProtocolTCP),
  5251  				}},
  5252  				AddressType: discovery.AddressTypeIPv4,
  5253  				Endpoints: []discovery.Endpoint{
  5254  					{
  5255  						Addresses: []string{"10.0.1.5"},
  5256  						Conditions: discovery.EndpointConditions{
  5257  							Ready:       ptr.To(false),
  5258  							Serving:     ptr.To(true),
  5259  							Terminating: ptr.To(true),
  5260  						},
  5261  						NodeName: ptr.To("host-1"),
  5262  					},
  5263  				},
  5264  			},
  5265  			flowTests: []packetFlowTest{
  5266  				{
  5267  					name:     "pod to clusterIP",
  5268  					sourceIP: "10.0.0.2",
  5269  					destIP:   "172.30.1.1",
  5270  					destPort: 80,
  5271  					output:   "10.0.1.5:80",
  5272  					masq:     false,
  5273  				},
  5274  				{
  5275  					name:     "external to LB",
  5276  					sourceIP: testExternalClient,
  5277  					destIP:   "1.2.3.4",
  5278  					destPort: 80,
  5279  					output:   "10.0.1.5:80",
  5280  					masq:     true,
  5281  				},
  5282  			},
  5283  		},
  5284  		{
  5285  			name: "no usable endpoints on any node",
  5286  			line: getLine(),
  5287  			endpointslice: &discovery.EndpointSlice{
  5288  				ObjectMeta: metav1.ObjectMeta{
  5289  					Name:      fmt.Sprintf("%s-1", "svc1"),
  5290  					Namespace: "ns1",
  5291  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  5292  				},
  5293  				Ports: []discovery.EndpointPort{{
  5294  					Name:     ptr.To(""),
  5295  					Port:     ptr.To[int32](80),
  5296  					Protocol: ptr.To(v1.ProtocolTCP),
  5297  				}},
  5298  				AddressType: discovery.AddressTypeIPv4,
  5299  				Endpoints: []discovery.Endpoint{
  5300  					{
  5301  						// Local, not ready or serving
  5302  						Addresses: []string{"10.0.1.5"},
  5303  						Conditions: discovery.EndpointConditions{
  5304  							Ready:       ptr.To(false),
  5305  							Serving:     ptr.To(false),
  5306  							Terminating: ptr.To(true),
  5307  						},
  5308  						NodeName: ptr.To(testHostname),
  5309  					},
  5310  					{
  5311  						// Remote, not ready or serving
  5312  						Addresses: []string{"10.0.1.5"},
  5313  						Conditions: discovery.EndpointConditions{
  5314  							Ready:       ptr.To(false),
  5315  							Serving:     ptr.To(false),
  5316  							Terminating: ptr.To(true),
  5317  						},
  5318  						NodeName: ptr.To("host-1"),
  5319  					},
  5320  				},
  5321  			},
  5322  			flowTests: []packetFlowTest{
  5323  				{
  5324  					name:     "pod to clusterIP",
  5325  					sourceIP: "10.0.0.2",
  5326  					destIP:   "172.30.1.1",
  5327  					destPort: 80,
  5328  					output:   "REJECT",
  5329  				},
  5330  				{
  5331  					name:     "external to LB",
  5332  					sourceIP: testExternalClient,
  5333  					destIP:   "1.2.3.4",
  5334  					destPort: 80,
  5335  					output:   "REJECT",
  5336  				},
  5337  			},
  5338  		},
  5339  	}
  5340  
  5341  	for _, testcase := range testcases {
  5342  		t.Run(testcase.name, func(t *testing.T) {
  5343  
  5344  			ipt := iptablestest.NewFake()
  5345  			fp := NewFakeProxier(ipt)
  5346  			fp.OnServiceSynced()
  5347  			fp.OnEndpointSlicesSynced()
  5348  
  5349  			fp.OnServiceAdd(service)
  5350  
  5351  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  5352  			fp.syncProxyRules()
  5353  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests)
  5354  
  5355  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  5356  			fp.syncProxyRules()
  5357  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{
  5358  				{
  5359  					name:     "pod to clusterIP after endpoints deleted",
  5360  					sourceIP: "10.0.0.2",
  5361  					destIP:   "172.30.1.1",
  5362  					destPort: 80,
  5363  					output:   "REJECT",
  5364  				},
  5365  				{
  5366  					name:     "external to LB after endpoints deleted",
  5367  					sourceIP: testExternalClient,
  5368  					destIP:   "1.2.3.4",
  5369  					destPort: 80,
  5370  					output:   "REJECT",
  5371  				},
  5372  			})
  5373  		})
  5374  	}
  5375  }
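
        // A minimal sketch of the endpoint-selection rule that both terminating-endpoint
        // tests above exercise (the struct and helper are hypothetical, not kube-proxy's
        // actual implementation): ready endpoints are always preferred; endpoints that
        // are still serving while terminating are used only as a fallback when nothing
        // is ready; endpoints that are neither ready nor serving are never used.
        type epStateSketch struct {
        	addr                        string
        	ready, serving, terminating bool
        }

        func usableEndpointsSketch(eps []epStateSketch) []string {
        	var ready, servingTerminating []string
        	for _, ep := range eps {
        		switch {
        		case ep.ready:
        			ready = append(ready, ep.addr)
        		case ep.serving && ep.terminating:
        			servingTerminating = append(servingTerminating, ep.addr)
        		}
        	}
        	if len(ready) > 0 {
        		return ready
        	}
        	return servingTerminating // may be empty, which produces a REJECT rule
        }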
  5376  
  5377  func TestInternalExternalMasquerade(t *testing.T) {
  5378  	// (Put the test setup code in an internal function so we can have it here at the
  5379  	// top, before the test cases that will be run against it.)
  5380  	setupTest := func(fp *Proxier) {
  5381  		makeServiceMap(fp,
  5382  			makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5383  				svc.Spec.Type = "LoadBalancer"
  5384  				svc.Spec.ClusterIP = "172.30.0.41"
  5385  				svc.Spec.Ports = []v1.ServicePort{{
  5386  					Name:     "p80",
  5387  					Port:     80,
  5388  					Protocol: v1.ProtocolTCP,
  5389  					NodePort: int32(3001),
  5390  				}}
  5391  				svc.Spec.HealthCheckNodePort = 30001
  5392  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5393  					IP: "1.2.3.4",
  5394  				}}
  5395  			}),
  5396  			makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5397  				svc.Spec.Type = "LoadBalancer"
  5398  				svc.Spec.ClusterIP = "172.30.0.42"
  5399  				svc.Spec.Ports = []v1.ServicePort{{
  5400  					Name:     "p80",
  5401  					Port:     80,
  5402  					Protocol: v1.ProtocolTCP,
  5403  					NodePort: int32(3002),
  5404  				}}
  5405  				svc.Spec.HealthCheckNodePort = 30002
  5406  				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  5407  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5408  					IP: "5.6.7.8",
  5409  				}}
  5410  			}),
  5411  			makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5412  				svc.Spec.Type = "LoadBalancer"
  5413  				svc.Spec.ClusterIP = "172.30.0.43"
  5414  				svc.Spec.Ports = []v1.ServicePort{{
  5415  					Name:     "p80",
  5416  					Port:     80,
  5417  					Protocol: v1.ProtocolTCP,
  5418  					NodePort: int32(3003),
  5419  				}}
  5420  				svc.Spec.HealthCheckNodePort = 30003
  5421  				svc.Spec.InternalTrafficPolicy = ptr.To(v1.ServiceInternalTrafficPolicyLocal)
  5422  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5423  					IP: "9.10.11.12",
  5424  				}}
  5425  			}),
  5426  		)
  5427  
  5428  		populateEndpointSlices(fp,
  5429  			makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  5430  				eps.AddressType = discovery.AddressTypeIPv4
  5431  				eps.Endpoints = []discovery.Endpoint{
  5432  					{
  5433  						Addresses: []string{"10.180.0.1"},
  5434  						NodeName:  ptr.To(testHostname),
  5435  					},
  5436  					{
  5437  						Addresses: []string{"10.180.1.1"},
  5438  						NodeName:  ptr.To("remote"),
  5439  					},
  5440  				}
  5441  				eps.Ports = []discovery.EndpointPort{{
  5442  					Name:     ptr.To("p80"),
  5443  					Port:     ptr.To[int32](80),
  5444  					Protocol: ptr.To(v1.ProtocolTCP),
  5445  				}}
  5446  			}),
  5447  			makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  5448  				eps.AddressType = discovery.AddressTypeIPv4
  5449  				eps.Endpoints = []discovery.Endpoint{
  5450  					{
  5451  						Addresses: []string{"10.180.0.2"},
  5452  						NodeName:  ptr.To(testHostname),
  5453  					},
  5454  					{
  5455  						Addresses: []string{"10.180.1.2"},
  5456  						NodeName:  ptr.To("remote"),
  5457  					},
  5458  				}
  5459  				eps.Ports = []discovery.EndpointPort{{
  5460  					Name:     ptr.To("p80"),
  5461  					Port:     ptr.To[int32](80),
  5462  					Protocol: ptr.To(v1.ProtocolTCP),
  5463  				}}
  5464  			}),
  5465  			makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  5466  				eps.AddressType = discovery.AddressTypeIPv4
  5467  				eps.Endpoints = []discovery.Endpoint{
  5468  					{
  5469  						Addresses: []string{"10.180.0.3"},
  5470  						NodeName:  ptr.To(testHostname),
  5471  					},
  5472  					{
  5473  						Addresses: []string{"10.180.1.3"},
  5474  						NodeName:  ptr.To("remote"),
  5475  					},
  5476  				}
  5477  				eps.Ports = []discovery.EndpointPort{{
  5478  					Name:     ptr.To("p80"),
  5479  					Port:     ptr.To[int32](80),
  5480  					Protocol: ptr.To(v1.ProtocolTCP),
  5481  				}}
  5482  			}),
  5483  		)
  5484  
  5485  		fp.syncProxyRules()
  5486  	}
  5487  
  5488  	// We use the same flowTests for all of the testCases; "output" and "masq" give the
  5489  	// normal case (working localDetector, no masqueradeAll). Hedged sketches of this logic follow the test.
  5490  	flowTests := []packetFlowTest{
  5491  		{
  5492  			name:     "pod to ClusterIP",
  5493  			sourceIP: "10.0.0.2",
  5494  			destIP:   "172.30.0.41",
  5495  			destPort: 80,
  5496  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5497  			masq:     false,
  5498  		},
  5499  		{
  5500  			name:     "pod to NodePort",
  5501  			sourceIP: "10.0.0.2",
  5502  			destIP:   testNodeIP,
  5503  			destPort: 3001,
  5504  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5505  			masq:     true,
  5506  		},
  5507  		{
  5508  			name:     "pod to LB",
  5509  			sourceIP: "10.0.0.2",
  5510  			destIP:   "1.2.3.4",
  5511  			destPort: 80,
  5512  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5513  			masq:     true,
  5514  		},
  5515  		{
  5516  			name:     "node to ClusterIP",
  5517  			sourceIP: testNodeIP,
  5518  			destIP:   "172.30.0.41",
  5519  			destPort: 80,
  5520  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5521  			masq:     true,
  5522  		},
  5523  		{
  5524  			name:     "node to NodePort",
  5525  			sourceIP: testNodeIP,
  5526  			destIP:   testNodeIP,
  5527  			destPort: 3001,
  5528  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5529  			masq:     true,
  5530  		},
  5531  		{
  5532  			name:     "localhost to NodePort",
  5533  			sourceIP: "127.0.0.1",
  5534  			destIP:   "127.0.0.1",
  5535  			destPort: 3001,
  5536  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5537  			masq:     true,
  5538  		},
  5539  		{
  5540  			name:     "node to LB",
  5541  			sourceIP: testNodeIP,
  5542  			destIP:   "1.2.3.4",
  5543  			destPort: 80,
  5544  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5545  			masq:     true,
  5546  		},
  5547  		{
  5548  			name:     "external to ClusterIP",
  5549  			sourceIP: testExternalClient,
  5550  			destIP:   "172.30.0.41",
  5551  			destPort: 80,
  5552  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5553  			masq:     true,
  5554  		},
  5555  		{
  5556  			name:     "external to NodePort",
  5557  			sourceIP: testExternalClient,
  5558  			destIP:   testNodeIP,
  5559  			destPort: 3001,
  5560  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5561  			masq:     true,
  5562  		},
  5563  		{
  5564  			name:     "external to LB",
  5565  			sourceIP: testExternalClient,
  5566  			destIP:   "1.2.3.4",
  5567  			destPort: 80,
  5568  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5569  			masq:     true,
  5570  		},
  5571  		{
  5572  			name:     "pod to ClusterIP with eTP:Local",
  5573  			sourceIP: "10.0.0.2",
  5574  			destIP:   "172.30.0.42",
  5575  			destPort: 80,
  5576  
  5577  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5578  			// as "Pod to ClusterIP"
  5579  			output: "10.180.0.2:80, 10.180.1.2:80",
  5580  			masq:   false,
  5581  		},
  5582  		{
  5583  			name:     "pod to NodePort with eTP:Local",
  5584  			sourceIP: "10.0.0.2",
  5585  			destIP:   testNodeIP,
  5586  			destPort: 3002,
  5587  
  5588  			// See the comment below in the "pod to LB with eTP:Local" case.
  5589  			// It doesn't actually make sense to short-circuit here, since if
  5590  			// you connect directly to a NodePort from outside the cluster,
  5591  			// you only get the local endpoints. But it's simpler for us and
  5592  			// slightly more convenient for users to have this case get
  5593  			// short-circuited too.
  5594  			output: "10.180.0.2:80, 10.180.1.2:80",
  5595  			masq:   false,
  5596  		},
  5597  		{
  5598  			name:     "pod to LB with eTP:Local",
  5599  			sourceIP: "10.0.0.2",
  5600  			destIP:   "5.6.7.8",
  5601  			destPort: 80,
  5602  
  5603  			// The short-circuit rule is supposed to make this behave the same
  5604  			// way it would if the packet actually went out to the LB and then
  5605  			// came back into the cluster. So it gets routed to all endpoints,
  5606  			// not just local ones. In reality, if the packet actually left
  5607  			// the cluster, it would have to get masqueraded, but since we can
  5608  			// avoid doing that in the short-circuit case, and not masquerading
  5609  			// is more useful, we avoid masquerading.
  5610  			output: "10.180.0.2:80, 10.180.1.2:80",
  5611  			masq:   false,
  5612  		},
  5613  		{
  5614  			name:     "node to ClusterIP with eTP:Local",
  5615  			sourceIP: testNodeIP,
  5616  			destIP:   "172.30.0.42",
  5617  			destPort: 80,
  5618  
  5619  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5620  			// as "node to ClusterIP"
  5621  			output: "10.180.0.2:80, 10.180.1.2:80",
  5622  			masq:   true,
  5623  		},
  5624  		{
  5625  			name:     "node to NodePort with eTP:Local",
  5626  			sourceIP: testNodeIP,
  5627  			destIP:   testNodeIP,
  5628  			destPort: 3002,
  5629  
  5630  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5631  			// same as "node to NodePort" above.
  5632  			output: "10.180.0.2:80, 10.180.1.2:80",
  5633  			masq:   true,
  5634  		},
  5635  		{
  5636  			name:     "localhost to NodePort with eTP:Local",
  5637  			sourceIP: "127.0.0.1",
  5638  			destIP:   "127.0.0.1",
  5639  			destPort: 3002,
  5640  
  5641  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5642  			// same as "localhost to NodePort" above.
  5643  			output: "10.180.0.2:80, 10.180.1.2:80",
  5644  			masq:   true,
  5645  		},
  5646  		{
  5647  			name:     "node to LB with eTP:Local",
  5648  			sourceIP: testNodeIP,
  5649  			destIP:   "5.6.7.8",
  5650  			destPort: 80,
  5651  
  5652  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5653  			// same as "node to LB" above.
  5654  			output: "10.180.0.2:80, 10.180.1.2:80",
  5655  			masq:   true,
  5656  		},
  5657  		{
  5658  			name:     "external to ClusterIP with eTP:Local",
  5659  			sourceIP: testExternalClient,
  5660  			destIP:   "172.30.0.42",
  5661  			destPort: 80,
  5662  
  5663  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5664  			// as "external to ClusterIP" above.
  5665  			output: "10.180.0.2:80, 10.180.1.2:80",
  5666  			masq:   true,
  5667  		},
  5668  		{
  5669  			name:     "external to NodePort with eTP:Local",
  5670  			sourceIP: testExternalClient,
  5671  			destIP:   testNodeIP,
  5672  			destPort: 3002,
  5673  
  5674  			// externalTrafficPolicy applies; only the local endpoint is
  5675  			// selected, and we don't masquerade.
  5676  			output: "10.180.0.2:80",
  5677  			masq:   false,
  5678  		},
  5679  		{
  5680  			name:     "external to LB with eTP:Local",
  5681  			sourceIP: testExternalClient,
  5682  			destIP:   "5.6.7.8",
  5683  			destPort: 80,
  5684  
  5685  			// externalTrafficPolicy applies; only the local endpoint is
  5686  			// selected, and we don't masquerade.
  5687  			output: "10.180.0.2:80",
  5688  			masq:   false,
  5689  		},
  5690  		{
  5691  			name:     "pod to ClusterIP with iTP:Local",
  5692  			sourceIP: "10.0.0.2",
  5693  			destIP:   "172.30.0.43",
  5694  			destPort: 80,
  5695  
  5696  			// internalTrafficPolicy applies; only the local endpoint is
  5697  			// selected.
  5698  			output: "10.180.0.3:80",
  5699  			masq:   false,
  5700  		},
  5701  		{
  5702  			name:     "pod to NodePort with iTP:Local",
  5703  			sourceIP: "10.0.0.2",
  5704  			destIP:   testNodeIP,
  5705  			destPort: 3003,
  5706  
  5707  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5708  			// "pod to NodePort" above.
  5709  			output: "10.180.0.3:80, 10.180.1.3:80",
  5710  			masq:   true,
  5711  		},
  5712  		{
  5713  			name:     "pod to LB with iTP:Local",
  5714  			sourceIP: "10.0.0.2",
  5715  			destIP:   "9.10.11.12",
  5716  			destPort: 80,
  5717  
  5718  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5719  			// same as "pod to LB" above.
  5720  			output: "10.180.0.3:80, 10.180.1.3:80",
  5721  			masq:   true,
  5722  		},
  5723  		{
  5724  			name:     "node to ClusterIP with iTP:Local",
  5725  			sourceIP: testNodeIP,
  5726  			destIP:   "172.30.0.43",
  5727  			destPort: 80,
  5728  
  5729  			// internalTrafficPolicy applies; only the local endpoint is selected.
  5730  			// Traffic is masqueraded as in the "node to ClusterIP" case because
  5731  			// internalTrafficPolicy does not affect masquerading.
  5732  			output: "10.180.0.3:80",
  5733  			masq:   true,
  5734  		},
  5735  		{
  5736  			name:     "node to NodePort with iTP:Local",
  5737  			sourceIP: testNodeIP,
  5738  			destIP:   testNodeIP,
  5739  			destPort: 3003,
  5740  
  5741  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5742  			// "node to NodePort" above.
  5743  			output: "10.180.0.3:80, 10.180.1.3:80",
  5744  			masq:   true,
  5745  		},
  5746  		{
  5747  			name:     "localhost to NodePort with iTP:Local",
  5748  			sourceIP: "127.0.0.1",
  5749  			destIP:   "127.0.0.1",
  5750  			destPort: 3003,
  5751  
  5752  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5753  			// "localhost to NodePort" above.
  5754  			output: "10.180.0.3:80, 10.180.1.3:80",
  5755  			masq:   true,
  5756  		},
  5757  		{
  5758  			name:     "node to LB with iTP:Local",
  5759  			sourceIP: testNodeIP,
  5760  			destIP:   "9.10.11.12",
  5761  			destPort: 80,
  5762  
  5763  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5764  			// same as "node to LB" above.
  5765  			output: "10.180.0.3:80, 10.180.1.3:80",
  5766  			masq:   true,
  5767  		},
  5768  		{
  5769  			name:     "external to ClusterIP with iTP:Local",
  5770  			sourceIP: testExternalClient,
  5771  			destIP:   "172.30.0.43",
  5772  			destPort: 80,
  5773  
  5774  			// internalTrafficPolicy applies; only the local endpoint is selected.
  5775  			// Traffic is masqueraded as in the "external to ClusterIP" case
  5776  			// because internalTrafficPolicy does not affect masquerading.
  5777  			output: "10.180.0.3:80",
  5778  			masq:   true,
  5779  		},
  5780  		{
  5781  			name:     "external to NodePort with iTP:Local",
  5782  			sourceIP: testExternalClient,
  5783  			destIP:   testNodeIP,
  5784  			destPort: 3003,
  5785  
  5786  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5787  			// "external to NodePort" above.
  5788  			output: "10.180.0.3:80, 10.180.1.3:80",
  5789  			masq:   true,
  5790  		},
  5791  		{
  5792  			name:     "external to LB with iTP:Local",
  5793  			sourceIP: testExternalClient,
  5794  			destIP:   "9.10.11.12",
  5795  			destPort: 80,
  5796  
  5797  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5798  			// same as "external to LB" above.
  5799  			output: "10.180.0.3:80, 10.180.1.3:80",
  5800  			masq:   true,
  5801  		},
  5802  	}
  5803  
  5804  	type packetFlowTestOverride struct {
  5805  		output *string
  5806  		masq   *bool
  5807  	}
  5808  
  5809  	testCases := []struct {
  5810  		name          string
  5811  		line          int
  5812  		masqueradeAll bool
  5813  		localDetector bool
  5814  		overrides     map[string]packetFlowTestOverride
  5815  	}{
  5816  		{
  5817  			name:          "base",
  5818  			line:          getLine(),
  5819  			masqueradeAll: false,
  5820  			localDetector: true,
  5821  			overrides:     nil,
  5822  		},
  5823  		{
  5824  			name:          "no LocalTrafficDetector",
  5825  			line:          getLine(),
  5826  			masqueradeAll: false,
  5827  			localDetector: false,
  5828  			overrides: map[string]packetFlowTestOverride{
  5829  				// With no LocalTrafficDetector, all traffic to a
  5830  				// ClusterIP is assumed to be from a pod, and thus to not
  5831  				// require masquerading.
  5832  				"node to ClusterIP": {
  5833  					masq: ptr.To(false),
  5834  				},
  5835  				"node to ClusterIP with eTP:Local": {
  5836  					masq: ptr.To(false),
  5837  				},
  5838  				"node to ClusterIP with iTP:Local": {
  5839  					masq: ptr.To(false),
  5840  				},
  5841  				"external to ClusterIP": {
  5842  					masq: ptr.To(false),
  5843  				},
  5844  				"external to ClusterIP with eTP:Local": {
  5845  					masq: ptr.To(false),
  5846  				},
  5847  				"external to ClusterIP with iTP:Local": {
  5848  					masq: ptr.To(false),
  5849  				},
  5850  
  5851  				// And there's no eTP:Local short-circuit for pod traffic,
  5852  				// so pods get only the local endpoints.
  5853  				"pod to NodePort with eTP:Local": {
  5854  					output: ptr.To("10.180.0.2:80"),
  5855  				},
  5856  				"pod to LB with eTP:Local": {
  5857  					output: ptr.To("10.180.0.2:80"),
  5858  				},
  5859  			},
  5860  		},
  5861  		{
  5862  			name:          "masqueradeAll",
  5863  			line:          getLine(),
  5864  			masqueradeAll: true,
  5865  			localDetector: true,
  5866  			overrides: map[string]packetFlowTestOverride{
  5867  				// All "to ClusterIP" traffic gets masqueraded when using
  5868  				// --masquerade-all.
  5869  				"pod to ClusterIP": {
  5870  					masq: ptr.To(true),
  5871  				},
  5872  				"pod to ClusterIP with eTP:Local": {
  5873  					masq: ptr.To(true),
  5874  				},
  5875  				"pod to ClusterIP with iTP:Local": {
  5876  					masq: ptr.To(true),
  5877  				},
  5878  			},
  5879  		},
  5880  		{
  5881  			name:          "masqueradeAll, no LocalTrafficDetector",
  5882  			line:          getLine(),
  5883  			masqueradeAll: true,
  5884  			localDetector: false,
  5885  			overrides: map[string]packetFlowTestOverride{
  5886  				// As in "masqueradeAll"
  5887  				"pod to ClusterIP": {
  5888  					masq: ptr.To(true),
  5889  				},
  5890  				"pod to ClusterIP with eTP:Local": {
  5891  					masq: ptr.To(true),
  5892  				},
  5893  				"pod to ClusterIP with iTP:Local": {
  5894  					masq: ptr.To(true),
  5895  				},
  5896  
  5897  				// As in "no LocalTrafficDetector"
  5898  				"pod to NodePort with eTP:Local": {
  5899  					output: ptr.To("10.180.0.2:80"),
  5900  				},
  5901  				"pod to LB with eTP:Local": {
  5902  					output: ptr.To("10.180.0.2:80"),
  5903  				},
  5904  			},
  5905  		},
  5906  	}
  5907  
  5908  	for _, tc := range testCases {
  5909  		t.Run(tc.name, func(t *testing.T) {
  5910  			ipt := iptablestest.NewFake()
  5911  			fp := NewFakeProxier(ipt)
  5912  			fp.masqueradeAll = tc.masqueradeAll
  5913  			if !tc.localDetector {
  5914  				fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
  5915  			}
  5916  			setupTest(fp)
  5917  
  5918  			// Merge base flowTests with per-test-case overrides
  5919  			tcFlowTests := make([]packetFlowTest, len(flowTests))
  5920  			overridesApplied := 0
  5921  			for i := range flowTests {
  5922  				tcFlowTests[i] = flowTests[i]
  5923  				if overrides, set := tc.overrides[flowTests[i].name]; set {
  5924  					overridesApplied++
  5925  					if overrides.masq != nil {
  5926  						if tcFlowTests[i].masq == *overrides.masq {
  5927  							t.Errorf("%q override value for masq is same as base value", flowTests[i].name)
  5928  						}
  5929  						tcFlowTests[i].masq = *overrides.masq
  5930  					}
  5931  					if overrides.output != nil {
  5932  						if tcFlowTests[i].output == *overrides.output {
  5933  							t.Errorf("%q override value for output is same as base value", flowTests[i].name)
  5934  						}
  5935  						tcFlowTests[i].output = *overrides.output
  5936  					}
  5937  				}
  5938  			}
  5939  			if overridesApplied != len(tc.overrides) {
  5940  				t.Errorf("%d overrides did not match any flow test name!", len(tc.overrides)-overridesApplied)
  5941  			}
  5942  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tcFlowTests)
  5943  		})
  5944  	}
  5945  }
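
        // Two hedged sketches of the behavior encoded in flowTests above; the helpers
        // are hypothetical and are not the proxier's actual rule generator.
        //
        // The eTP:Local "short-circuit": sources the proxy can recognize as
        // cluster-local get the full endpoint set, as if externalTrafficPolicy did not
        // apply; genuinely external sources get only the local endpoints.
        func etpLocalEndpointsSketch(srcIsClusterLocal bool, local, all []string) []string {
        	if srcIsClusterLocal {
        		return all // short-circuited: behaves like ordinary cluster traffic
        	}
        	return local // external client: source IP preserved, local endpoints only
        }

        // Masquerading of ClusterIP traffic: masqueradeAll forces it on; with no
        // localDetector, all ClusterIP traffic is assumed to come from pods and is
        // left alone; otherwise only traffic from non-pod sources is masqueraded.
        func masqClusterIPSketch(masqueradeAll, haveLocalDetector, srcIsPodTraffic bool) bool {
        	if masqueradeAll {
        		return true
        	}
        	if !haveLocalDetector {
        		return false
        	}
        	return !srcIsPodTraffic
        }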
  5946  
  5947  func countEndpointsAndComments(iptablesData string, matchEndpoint string) (string, int, int) {
  5948  	var numEndpoints, numComments int
  5949  	var matched string
  5950  	for _, line := range strings.Split(iptablesData, "\n") {
  5951  		if strings.HasPrefix(line, "-A KUBE-SEP-") && strings.Contains(line, "-j DNAT") {
  5952  			numEndpoints++
  5953  			if strings.Contains(line, "--comment") {
  5954  				numComments++
  5955  			}
  5956  			if strings.Contains(line, matchEndpoint) {
  5957  				matched = line
  5958  			}
  5959  		}
  5960  	}
  5961  	return matched, numEndpoints, numComments
  5962  }
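
        // For reference, this is the kind of line countEndpointsAndComments counts as
        // an endpoint (and, because it carries "--comment", as a commented one), and
        // that it would return as `matched` for matchEndpoint == "10.0.1.1":
        //
        //	-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80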
  5963  
  5964  func TestSyncProxyRulesLargeClusterMode(t *testing.T) {
  5965  	ipt := iptablestest.NewFake()
  5966  	fp := NewFakeProxier(ipt)
  5967  	fp.masqueradeAll = true
  5968  	fp.syncPeriod = 30 * time.Second
  5969  
  5970  	makeServiceMap(fp,
  5971  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5972  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5973  			svc.Spec.ClusterIP = "172.30.0.41"
  5974  			svc.Spec.Ports = []v1.ServicePort{{
  5975  				Name:     "p80",
  5976  				Port:     80,
  5977  				Protocol: v1.ProtocolTCP,
  5978  			}}
  5979  		}),
  5980  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5981  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5982  			svc.Spec.ClusterIP = "172.30.0.42"
  5983  			svc.Spec.Ports = []v1.ServicePort{{
  5984  				Name:     "p8080",
  5985  				Port:     8080,
  5986  				Protocol: v1.ProtocolTCP,
  5987  			}}
  5988  		}),
  5989  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5990  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5991  			svc.Spec.ClusterIP = "172.30.0.43"
  5992  			svc.Spec.Ports = []v1.ServicePort{{
  5993  				Name:     "p8081",
  5994  				Port:     8081,
  5995  				Protocol: v1.ProtocolTCP,
  5996  			}}
  5997  		}),
  5998  	)
  5999  
  6000  	populateEndpointSlices(fp,
  6001  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  6002  			eps.AddressType = discovery.AddressTypeIPv4
  6003  			eps.Endpoints = make([]discovery.Endpoint, largeClusterEndpointsThreshold/2-1)
  6004  			for i := range eps.Endpoints {
  6005  				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.0.%d.%d", i%256, i/256)}
  6006  			}
  6007  			eps.Ports = []discovery.EndpointPort{{
  6008  				Name:     ptr.To("p80"),
  6009  				Port:     ptr.To[int32](80),
  6010  				Protocol: ptr.To(v1.ProtocolTCP),
  6011  			}}
  6012  		}),
  6013  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  6014  			eps.AddressType = discovery.AddressTypeIPv4
  6015  			eps.Endpoints = make([]discovery.Endpoint, largeClusterEndpointsThreshold/2-1)
  6016  			for i := range eps.Endpoints {
  6017  				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.1.%d.%d", i%256, i/256)}
  6018  			}
  6019  			eps.Ports = []discovery.EndpointPort{{
  6020  				Name:     ptr.To("p8080"),
  6021  				Port:     ptr.To[int32](8080),
  6022  				Protocol: ptr.To(v1.ProtocolTCP),
  6023  			}}
  6024  		}),
  6025  	)
  6026  
  6027  	fp.syncProxyRules()
  6028  	expectedEndpoints := 2 * (largeClusterEndpointsThreshold/2 - 1)
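        	// (With an even threshold this is largeClusterEndpointsThreshold-2, i.e. two
        	// endpoints below the large-cluster cutoff, so comments are still emitted.)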
  6029  
  6030  	firstEndpoint, numEndpoints, numComments := countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  6031  	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
  6032  	if numEndpoints != expectedEndpoints {
  6033  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  6034  	}
  6035  	if numComments != numEndpoints {
  6036  		t.Errorf("numComments (%d) != numEndpoints (%d) when numEndpoints < threshold (%d)", numComments, numEndpoints, largeClusterEndpointsThreshold)
  6037  	}
  6038  
  6039  	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  6040  		eps.AddressType = discovery.AddressTypeIPv4
  6041  		eps.Endpoints = []discovery.Endpoint{{
  6042  			Addresses: []string{"203.0.113.4"},
  6043  		}, {
  6044  			Addresses: []string{"203.0.113.8"},
  6045  		}, {
  6046  			Addresses: []string{"203.0.113.12"},
  6047  		}}
  6048  		eps.Ports = []discovery.EndpointPort{{
  6049  			Name:     ptr.To("p8081"),
  6050  			Port:     ptr.To[int32](8081),
  6051  			Protocol: ptr.To(v1.ProtocolTCP),
  6052  		}}
  6053  	}))
  6054  	fp.syncProxyRules()
  6055  
  6056  	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "203.0.113.4")
  6057  	assert.Equal(t, "-A KUBE-SEP-RUVVH7YV3PHQBDOS -m tcp -p tcp -j DNAT --to-destination 203.0.113.4:8081", firstEndpoint)
  6058  	// syncProxyRules will only have output the endpoints for svc3, since the others
  6059  	// didn't change (and syncProxyRules doesn't automatically do a full resync when you
  6060  	// cross the largeClusterEndpointsThreshold).
  6061  	if numEndpoints != 3 {
  6062  		t.Errorf("Found wrong number of endpoints on partial resync: expected %d, got %d", 3, numEndpoints)
  6063  	}
  6064  	if numComments != 0 {
  6065  		t.Errorf("numComments (%d) != 0 after partial resync when numEndpoints (%d) > threshold (%d)", numComments, expectedEndpoints+3, largeClusterEndpointsThreshold)
  6066  	}
  6067  
  6068  	// Now force a full resync and confirm that it rewrites the older services with
  6069  	// no comments as well.
  6070  	fp.forceSyncProxyRules()
  6071  	expectedEndpoints += 3
  6072  
  6073  	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  6074  	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
  6075  	if numEndpoints != expectedEndpoints {
  6076  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  6077  	}
  6078  	if numComments != 0 {
  6079  		t.Errorf("numComments (%d) != 0 when numEndpoints (%d) > threshold (%d)", numComments, numEndpoints, largeClusterEndpointsThreshold)
  6080  	}
  6081  
  6082  	// Now test service deletion; we have to create another service to do this though,
  6083  	// because if we deleted any of the existing services, we'd fall back out of large
  6084  	// cluster mode.
  6085  	svc4 := makeTestService("ns4", "svc4", func(svc *v1.Service) {
  6086  		svc.Spec.Type = v1.ServiceTypeClusterIP
  6087  		svc.Spec.ClusterIP = "172.30.0.44"
  6088  		svc.Spec.Ports = []v1.ServicePort{{
  6089  			Name:     "p8082",
  6090  			Port:     8082,
  6091  			Protocol: v1.ProtocolTCP,
  6092  		}}
  6093  	})
  6094  	fp.OnServiceAdd(svc4)
  6095  	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  6096  		eps.AddressType = discovery.AddressTypeIPv4
  6097  		eps.Endpoints = []discovery.Endpoint{{
  6098  			Addresses: []string{"10.4.0.1"},
  6099  		}}
  6100  		eps.Ports = []discovery.EndpointPort{{
  6101  			Name:     ptr.To("p8082"),
  6102  			Port:     ptr.To[int32](8082),
  6103  			Protocol: ptr.To(v1.ProtocolTCP),
  6104  		}}
  6105  	}))
  6106  	fp.syncProxyRules()
  6107  
  6108  	svc4Endpoint, numEndpoints, _ := countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  6109  	assert.Equal(t, "-A KUBE-SEP-SU5STNODRYEWJAUF -m tcp -p tcp -j DNAT --to-destination 10.4.0.1:8082", svc4Endpoint, "svc4 endpoint was not created")
  6110  	// should only sync svc4
  6111  	if numEndpoints != 1 {
  6112  		t.Errorf("Found wrong number of endpoints after svc4 creation: expected %d, got %d", 1, numEndpoints)
  6113  	}
  6114  
  6115  	// In large-cluster mode, deleting a service means its stale chains are no longer
  6116  	// re-synced, but they are not deleted immediately either; cleanup happens later.
  6117  	fp.lastIPTablesCleanup = time.Now()
  6118  	fp.OnServiceDelete(svc4)
  6119  	fp.syncProxyRules()
  6120  
  6121  	svc4Endpoint, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  6122  	assert.Equal(t, "", svc4Endpoint, "svc4 endpoint was still created!")
  6123  	// should only sync svc4, and shouldn't output its endpoints
  6124  	if numEndpoints != 0 {
  6125  		t.Errorf("Found wrong number of endpoints after service deletion: expected %d, got %d", 0, numEndpoints)
  6126  	}
  6127  	assert.NotContains(t, fp.iptablesData.String(), "-X ", "iptables data unexpectedly contains chain deletions")
  6128  
  6129  	// But resyncing after a long-enough delay will delete the stale chains
  6130  	fp.lastIPTablesCleanup = time.Now().Add(-fp.syncPeriod).Add(-1)
  6131  	fp.syncProxyRules()
  6132  
  6133  	svc4Endpoint, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  6134  	assert.Equal(t, "", svc4Endpoint, "svc4 endpoint was still created!")
  6135  	if numEndpoints != 0 {
  6136  		t.Errorf("Found wrong number of endpoints after delayed resync: expected %d, got %d", 0, numEndpoints)
  6137  	}
  6138  	assert.Contains(t, fp.iptablesData.String(), "-X KUBE-SVC-EBDQOQU5SJFXRIL3", "iptables data does not contain chain deletion")
  6139  	assert.Contains(t, fp.iptablesData.String(), "-X KUBE-SEP-SU5STNODRYEWJAUF", "iptables data does not contain endpoint deletions")
  6140  
  6141  	// force a full sync and count
  6142  	fp.forceSyncProxyRules()
  6143  	_, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  6144  	if numEndpoints != expectedEndpoints {
  6145  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  6146  	}
  6147  }
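
        // A hedged sketch of the large-cluster optimization exercised above (the helper
        // is hypothetical, not the proxier's actual code): once the total endpoint
        // count exceeds largeClusterEndpointsThreshold, endpoint rules are written
        // without "--comment" args to keep the iptables-restore input small, and chains
        // left behind by deleted services are cleaned up only periodically, not on
        // every sync.
        func wantEndpointCommentsSketch(totalEndpoints int) bool {
        	return totalEndpoints <= largeClusterEndpointsThreshold
        }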
  6148  
  6149  // Test calling syncProxyRules() multiple times with various changes
  6150  func TestSyncProxyRulesRepeated(t *testing.T) {
  6151  	ipt := iptablestest.NewFake()
  6152  	fp := NewFakeProxier(ipt)
  6153  	metrics.RegisterMetrics()
  6154  	defer legacyregistry.Reset()
  6155  
  6156  	// Create initial state
  6157  	var svc2 *v1.Service
  6158  
  6159  	makeServiceMap(fp,
  6160  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  6161  			svc.Spec.Type = v1.ServiceTypeClusterIP
  6162  			svc.Spec.ClusterIP = "172.30.0.41"
  6163  			svc.Spec.Ports = []v1.ServicePort{{
  6164  				Name:     "p80",
  6165  				Port:     80,
  6166  				Protocol: v1.ProtocolTCP,
  6167  			}}
  6168  		}),
  6169  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  6170  			svc2 = svc
  6171  			svc.Spec.Type = v1.ServiceTypeClusterIP
  6172  			svc.Spec.ClusterIP = "172.30.0.42"
  6173  			svc.Spec.Ports = []v1.ServicePort{{
  6174  				Name:     "p8080",
  6175  				Port:     8080,
  6176  				Protocol: v1.ProtocolTCP,
  6177  			}}
  6178  		}),
  6179  	)
  6180  
  6181  	populateEndpointSlices(fp,
  6182  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  6183  			eps.AddressType = discovery.AddressTypeIPv4
  6184  			eps.Endpoints = []discovery.Endpoint{{
  6185  				Addresses: []string{"10.0.1.1"},
  6186  			}}
  6187  			eps.Ports = []discovery.EndpointPort{{
  6188  				Name:     ptr.To("p80"),
  6189  				Port:     ptr.To[int32](80),
  6190  				Protocol: ptr.To(v1.ProtocolTCP),
  6191  			}}
  6192  		}),
  6193  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  6194  			eps.AddressType = discovery.AddressTypeIPv4
  6195  			eps.Endpoints = []discovery.Endpoint{{
  6196  				Addresses: []string{"10.0.2.1"},
  6197  			}}
  6198  			eps.Ports = []discovery.EndpointPort{{
  6199  				Name:     ptr.To("p8080"),
  6200  				Port:     ptr.To[int32](8080),
  6201  				Protocol: ptr.To(v1.ProtocolTCP),
  6202  			}}
  6203  		}),
  6204  	)
  6205  
  6206  	fp.syncProxyRules()
  6207  
  6208  	expected := dedent.Dedent(`
  6209  		*filter
  6210  		:KUBE-NODEPORTS - [0:0]
  6211  		:KUBE-SERVICES - [0:0]
  6212  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6213  		:KUBE-FIREWALL - [0:0]
  6214  		:KUBE-FORWARD - [0:0]
  6215  		:KUBE-PROXY-FIREWALL - [0:0]
  6216  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6217  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6218  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6219  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6220  		COMMIT
  6221  		*nat
  6222  		:KUBE-NODEPORTS - [0:0]
  6223  		:KUBE-SERVICES - [0:0]
  6224  		:KUBE-MARK-MASQ - [0:0]
  6225  		:KUBE-POSTROUTING - [0:0]
  6226  		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
  6227  		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
  6228  		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
  6229  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  6230  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6231  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 -j KUBE-SVC-2VJB64SDSIJUP5T6
  6232  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6233  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6234  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6235  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6236  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6237  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
  6238  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
  6239  		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -s 10.0.2.1 -j KUBE-MARK-MASQ
  6240  		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -m tcp -p tcp -j DNAT --to-destination 10.0.2.1:8080
  6241  		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6242  		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 -> 10.0.2.1:8080" -j KUBE-SEP-UHEGFW77JX3KXTOV
  6243  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6244  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
  6245  		COMMIT
  6246  		`)
  6247  	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
  6248  
  6249  	rulesSynced := countRules(utiliptables.TableNAT, expected)
  6250  	rulesSyncedMetric := countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6251  	if rulesSyncedMetric != rulesSynced {
  6252  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6253  	}
  6254  
  6255  	rulesTotal := rulesSynced
  6256  	rulesTotalMetric := countRulesFromMetric(utiliptables.TableNAT)
  6257  	if rulesTotalMetric != rulesTotal {
  6258  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6259  	}
  6260  
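        	// (Rule accounting for the rest of this test: each simple ClusterIP service
        	// with one endpoint owns 5 NAT rules -- a KUBE-SERVICES jump, a masquerade
        	// rule plus an endpoint jump in its KUBE-SVC- chain, and a masquerade rule
        	// plus a DNAT rule in its KUBE-SEP- chain -- so adding or deleting such a
        	// service moves rulesTotal by 5.)
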
  6261  	// Add a new service and its endpoints. (This will only sync the SVC and SEP rules
  6262  	// for the new service, not the existing ones.)
  6263  	makeServiceMap(fp,
  6264  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  6265  			svc.Spec.Type = v1.ServiceTypeClusterIP
  6266  			svc.Spec.ClusterIP = "172.30.0.43"
  6267  			svc.Spec.Ports = []v1.ServicePort{{
  6268  				Name:     "p80",
  6269  				Port:     80,
  6270  				Protocol: v1.ProtocolTCP,
  6271  			}}
  6272  		}),
  6273  	)
  6274  	var eps3 *discovery.EndpointSlice
  6275  	populateEndpointSlices(fp,
  6276  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  6277  			eps3 = eps
  6278  			eps.AddressType = discovery.AddressTypeIPv4
  6279  			eps.Endpoints = []discovery.Endpoint{{
  6280  				Addresses: []string{"10.0.3.1"},
  6281  			}}
  6282  			eps.Ports = []discovery.EndpointPort{{
  6283  				Name:     ptr.To("p80"),
  6284  				Port:     ptr.To[int32](80),
  6285  				Protocol: ptr.To(v1.ProtocolTCP),
  6286  			}}
  6287  		}),
  6288  	)
  6289  	fp.syncProxyRules()
  6290  
  6291  	expected = dedent.Dedent(`
  6292  		*filter
  6293  		:KUBE-NODEPORTS - [0:0]
  6294  		:KUBE-SERVICES - [0:0]
  6295  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6296  		:KUBE-FIREWALL - [0:0]
  6297  		:KUBE-FORWARD - [0:0]
  6298  		:KUBE-PROXY-FIREWALL - [0:0]
  6299  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6300  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6301  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6302  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6303  		COMMIT
  6304  		*nat
  6305  		:KUBE-NODEPORTS - [0:0]
  6306  		:KUBE-SERVICES - [0:0]
  6307  		:KUBE-MARK-MASQ - [0:0]
  6308  		:KUBE-POSTROUTING - [0:0]
  6309  		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
  6310  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6311  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6312  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 -j KUBE-SVC-2VJB64SDSIJUP5T6
  6313  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6314  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6315  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6316  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6317  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6318  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6319  		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -s 10.0.3.1 -j KUBE-MARK-MASQ
  6320  		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.1:80
  6321  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6322  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.1:80" -j KUBE-SEP-BSWRHOQ77KEXZLNL
  6323  		COMMIT
  6324  		`)
  6325  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6326  
  6327  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6328  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6329  	if rulesSyncedMetric != rulesSynced {
  6330  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6331  	}
  6332  
  6333  	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-X27LE4BHSL4DOUIK rules, and 2
  6334  	// KUBE-SEP-BSWRHOQ77KEXZLNL rules.
  6335  	rulesTotal += 5
  6336  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6337  	if rulesTotalMetric != rulesTotal {
  6338  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6339  	}
  6340  
  6341  	// Delete a service. (Won't update the other services.)
  6342  	fp.OnServiceDelete(svc2)
  6343  	fp.syncProxyRules()
  6344  
  6345  	expected = dedent.Dedent(`
  6346  		*filter
  6347  		:KUBE-NODEPORTS - [0:0]
  6348  		:KUBE-SERVICES - [0:0]
  6349  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6350  		:KUBE-FIREWALL - [0:0]
  6351  		:KUBE-FORWARD - [0:0]
  6352  		:KUBE-PROXY-FIREWALL - [0:0]
  6353  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6354  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6355  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6356  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6357  		COMMIT
  6358  		*nat
  6359  		:KUBE-NODEPORTS - [0:0]
  6360  		:KUBE-SERVICES - [0:0]
  6361  		:KUBE-MARK-MASQ - [0:0]
  6362  		:KUBE-POSTROUTING - [0:0]
  6363  		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
  6364  		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
  6365  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6366  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6367  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6368  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6369  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6370  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6371  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6372  		-X KUBE-SEP-UHEGFW77JX3KXTOV
  6373  		-X KUBE-SVC-2VJB64SDSIJUP5T6
  6374  		COMMIT
  6375  		`)
  6376  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6377  
  6378  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6379  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6380  	if rulesSyncedMetric != rulesSynced {
  6381  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6382  	}
  6383  
  6384  	// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-2VJB64SDSIJUP5T6 rules, and 2
  6385  	// KUBE-SEP-UHEGFW77JX3KXTOV rules
  6386  	rulesTotal -= 5
  6387  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6388  	if rulesTotalMetric != rulesTotal {
  6389  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6390  	}
  6391  
  6392  	// Add a service, sync, then add its endpoints. (The first sync will be a no-op other
  6393  	// than adding the REJECT rule. The second sync will create the new service.)
  6394  	var svc4 *v1.Service
  6395  	makeServiceMap(fp,
  6396  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  6397  			svc4 = svc
  6398  			svc.Spec.Type = v1.ServiceTypeClusterIP
  6399  			svc.Spec.ClusterIP = "172.30.0.44"
  6400  			svc.Spec.Ports = []v1.ServicePort{{
  6401  				Name:     "p80",
  6402  				Port:     80,
  6403  				Protocol: v1.ProtocolTCP,
  6404  			}}
  6405  		}),
  6406  	)
  6407  	fp.syncProxyRules()
  6408  	expected = dedent.Dedent(`
  6409  		*filter
  6410  		:KUBE-NODEPORTS - [0:0]
  6411  		:KUBE-SERVICES - [0:0]
  6412  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6413  		:KUBE-FIREWALL - [0:0]
  6414  		:KUBE-FORWARD - [0:0]
  6415  		:KUBE-PROXY-FIREWALL - [0:0]
  6416  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j REJECT
  6417  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6418  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6419  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6420  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6421  		COMMIT
  6422  		*nat
  6423  		:KUBE-NODEPORTS - [0:0]
  6424  		:KUBE-SERVICES - [0:0]
  6425  		:KUBE-MARK-MASQ - [0:0]
  6426  		:KUBE-POSTROUTING - [0:0]
  6427  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6428  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6429  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6430  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6431  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6432  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6433  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6434  		COMMIT
  6435  		`)
  6436  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6437  
  6438  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6439  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6440  	if rulesSyncedMetric != rulesSynced {
  6441  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6442  	}
  6443  
  6444  	// The REJECT rule is in "filter", not NAT, so the number of NAT rules hasn't
  6445  	// changed.
  6446  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6447  	if rulesTotalMetric != rulesTotal {
  6448  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6449  	}
  6450  
  6451  	populateEndpointSlices(fp,
  6452  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  6453  			eps.AddressType = discovery.AddressTypeIPv4
  6454  			eps.Endpoints = []discovery.Endpoint{{
  6455  				Addresses: []string{"10.0.4.1"},
  6456  			}}
  6457  			eps.Ports = []discovery.EndpointPort{{
  6458  				Name:     ptr.To("p80"),
  6459  				Port:     ptr.To[int32](80),
  6460  				Protocol: ptr.To(v1.ProtocolTCP),
  6461  			}}
  6462  		}),
  6463  	)
  6464  	fp.syncProxyRules()
  6465  	expected = dedent.Dedent(`
  6466  		*filter
  6467  		:KUBE-NODEPORTS - [0:0]
  6468  		:KUBE-SERVICES - [0:0]
  6469  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6470  		:KUBE-FIREWALL - [0:0]
  6471  		:KUBE-FORWARD - [0:0]
  6472  		:KUBE-PROXY-FIREWALL - [0:0]
  6473  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6474  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6475  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6476  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6477  		COMMIT
  6478  		*nat
  6479  		:KUBE-NODEPORTS - [0:0]
  6480  		:KUBE-SERVICES - [0:0]
  6481  		:KUBE-MARK-MASQ - [0:0]
  6482  		:KUBE-POSTROUTING - [0:0]
  6483  		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
  6484  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  6485  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6486  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6487  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6488  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6489  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6490  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6491  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6492  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6493  		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -s 10.0.4.1 -j KUBE-MARK-MASQ
  6494  		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.4.1:80
  6495  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6496  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.0.4.1:80" -j KUBE-SEP-AYCN5HPXMIRJNJXU
  6497  		COMMIT
  6498  		`)
  6499  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6500  
  6501  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6502  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6503  	if rulesSyncedMetric != rulesSynced {
  6504  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6505  	}
  6506  
  6507  	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and
  6508  	// 2 KUBE-SEP-AYCN5HPXMIRJNJXU rules
  6509  	rulesTotal += 5
  6510  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6511  	if rulesTotalMetric != rulesTotal {
  6512  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6513  	}
  6514  
  6515  	// Change an endpoint of an existing service. This will cause its SVC and SEP
  6516  	// chains to be rewritten.
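        	// Because SEP chain names are hashed from the endpoint address, the
        	// old chain (KUBE-SEP-BSWRHOQ77KEXZLNL) is deleted and a new one
        	// (KUBE-SEP-DKCFIS26GWF2WLWC, for 10.0.3.2) is created in its place,
        	// leaving the total rule count unchanged.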
  6517  	eps3update := eps3.DeepCopy()
  6518  	eps3update.Endpoints[0].Addresses[0] = "10.0.3.2"
  6519  	fp.OnEndpointSliceUpdate(eps3, eps3update)
  6520  	fp.syncProxyRules()
  6521  
  6522  	expected = dedent.Dedent(`
  6523  		*filter
  6524  		:KUBE-NODEPORTS - [0:0]
  6525  		:KUBE-SERVICES - [0:0]
  6526  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6527  		:KUBE-FIREWALL - [0:0]
  6528  		:KUBE-FORWARD - [0:0]
  6529  		:KUBE-PROXY-FIREWALL - [0:0]
  6530  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6531  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6532  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6533  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6534  		COMMIT
  6535  		*nat
  6536  		:KUBE-NODEPORTS - [0:0]
  6537  		:KUBE-SERVICES - [0:0]
  6538  		:KUBE-MARK-MASQ - [0:0]
  6539  		:KUBE-POSTROUTING - [0:0]
  6540  		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
  6541  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6542  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6543  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6544  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6545  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6546  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6547  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6548  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6549  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6550  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6551  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6552  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6553  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6554  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -j KUBE-SEP-DKCFIS26GWF2WLWC
  6555  		-X KUBE-SEP-BSWRHOQ77KEXZLNL
  6556  		COMMIT
  6557  		`)
  6558  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6559  
  6560  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6561  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6562  	if rulesSyncedMetric != rulesSynced {
  6563  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6564  	}
  6565  
  6566  	// We rewrote existing rules but did not change the overall number of rules.
  6567  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6568  	if rulesTotalMetric != rulesTotal {
  6569  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6570  	}
  6571  
  6572  	// Add an endpoint to a service. This will cause its SVC and SEP chains to be rewritten.
  6573  	eps3update2 := eps3update.DeepCopy()
  6574  	eps3update2.Endpoints = append(eps3update2.Endpoints, discovery.Endpoint{Addresses: []string{"10.0.3.3"}})
  6575  	fp.OnEndpointSliceUpdate(eps3update, eps3update2)
  6576  	fp.syncProxyRules()
  6577  
  6578  	expected = dedent.Dedent(`
  6579  		*filter
  6580  		:KUBE-NODEPORTS - [0:0]
  6581  		:KUBE-SERVICES - [0:0]
  6582  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6583  		:KUBE-FIREWALL - [0:0]
  6584  		:KUBE-FORWARD - [0:0]
  6585  		:KUBE-PROXY-FIREWALL - [0:0]
  6586  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6587  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6588  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6589  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6590  		COMMIT
  6591  		*nat
  6592  		:KUBE-NODEPORTS - [0:0]
  6593  		:KUBE-SERVICES - [0:0]
  6594  		:KUBE-MARK-MASQ - [0:0]
  6595  		:KUBE-POSTROUTING - [0:0]
  6596  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6597  		:KUBE-SEP-JVVZVJ7BSEPPRNBS - [0:0]
  6598  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6599  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6600  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6601  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6602  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6603  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6604  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6605  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6606  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6607  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6608  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6609  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
  6610  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
  6611  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6612  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
  6613  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
  6614  		COMMIT
  6615  		`)
  6616  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6617  
  6618  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6619  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6620  	if rulesSyncedMetric != rulesSynced {
  6621  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6622  	}
  6623  
  6624  	// We added 2 KUBE-SEP-JVVZVJ7BSEPPRNBS rules and 1 KUBE-SVC-X27LE4BHSL4DOUIK rule
  6625  	// jumping to the new SEP chain. The other rules related to svc3 got rewritten,
  6626  	// but that does not change the count of rules.
  6627  	rulesTotal += 3
  6628  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6629  	if rulesTotalMetric != rulesTotal {
  6630  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6631  	}
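        	// Note the load balancing in the rewritten SVC chain above: with n
        	// ready endpoints, kube-proxy emits n-1 "-m statistic --mode random"
        	// rules, where rule i (0-indexed) matches with probability 1/(n-i),
        	// followed by one unconditional jump, so each endpoint is chosen with
        	// equal probability 1/n. Here n=2, hence the single
        	// "--probability 0.5000000000" rule.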
  6632  
  6633  	// Sync with no new changes. This will not rewrite any SVC or SEP chains.
  6634  	fp.syncProxyRules()
  6635  
  6636  	expected = dedent.Dedent(`
  6637  		*filter
  6638  		:KUBE-NODEPORTS - [0:0]
  6639  		:KUBE-SERVICES - [0:0]
  6640  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6641  		:KUBE-FIREWALL - [0:0]
  6642  		:KUBE-FORWARD - [0:0]
  6643  		:KUBE-PROXY-FIREWALL - [0:0]
  6644  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6645  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6646  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6647  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6648  		COMMIT
  6649  		*nat
  6650  		:KUBE-NODEPORTS - [0:0]
  6651  		:KUBE-SERVICES - [0:0]
  6652  		:KUBE-MARK-MASQ - [0:0]
  6653  		:KUBE-POSTROUTING - [0:0]
  6654  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6655  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6656  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6657  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6658  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6659  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6660  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6661  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6662  		COMMIT
  6663  		`)
  6664  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6665  
  6666  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6667  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6668  	if rulesSyncedMetric != rulesSynced {
  6669  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6670  	}
  6671  
  6672  	// (No changes)
  6673  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6674  	if rulesTotalMetric != rulesTotal {
  6675  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6676  	}
  6677  
  6678  	// Now force a partial resync error and ensure that it recovers correctly
  6679  	if fp.needFullSync {
  6680  		t.Fatalf("Proxier unexpectedly already needs a full sync?")
  6681  	}
  6682  	partialRestoreFailures, err := testutil.GetCounterMetricValue(metrics.IptablesPartialRestoreFailuresTotal)
  6683  	if err != nil {
  6684  		t.Fatalf("Could not get partial restore failures metric: %v", err)
  6685  	}
  6686  	if partialRestoreFailures != 0.0 {
  6687  		t.Errorf("Partial restore failures metric is already nonzero; something failed earlier!")
  6688  	}
  6689  
  6690  	// Add a rule jumping from svc3's service chain to svc4's endpoint, then try to
  6691  	// delete svc4. This will fail because the partial resync won't rewrite svc3's
  6692  	// rules and so the partial restore would leave a dangling jump from there to
  6693  	// svc4's endpoint. The proxier will then queue a full resync in response to the
  6694  	// partial resync failure, and the full resync will succeed (since it will rewrite
  6695  	// svc3's rules as well).
  6696  	//
  6697  	// This is an absurd scenario, but it has to be: partial resync failures are
  6698  	// supposed to be impossible. If we knew of any non-absurd scenario that could
  6699  	// cause such a failure, that would be a bug and we would fix it.
  6700  	if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
  6701  		t.Fatalf("svc4's endpoint chain unexpectedly does not exist!")
  6702  	}
  6703  	if _, err := fp.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.Chain("KUBE-SVC-X27LE4BHSL4DOUIK"), "-j", "KUBE-SEP-AYCN5HPXMIRJNJXU"); err != nil {
  6704  		t.Fatalf("Could not add bad iptables rule: %v", err)
  6705  	}
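        	// The rule added above makes svc3's SVC chain jump into svc4's SEP
        	// chain. When svc4 is deleted, the partial restore will try to delete
        	// that SEP chain without rewriting svc3's (unchanged) chain, and
        	// iptables-restore rejects the transaction because the chain is still
        	// referenced.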
  6706  
  6707  	fp.OnServiceDelete(svc4)
  6708  	fp.syncProxyRules()
  6709  
  6710  	if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
  6711  		t.Errorf("svc4's endpoint chain was successfully deleted despite dangling references!")
  6712  	}
  6713  	if !fp.needFullSync {
  6714  		t.Errorf("Proxier did not fail on previous partial resync?")
  6715  	}
  6716  	updatedPartialRestoreFailures, err := testutil.GetCounterMetricValue(metrics.IptablesPartialRestoreFailuresTotal)
  6717  	if err != nil {
  6718  		t.Errorf("Could not get partial restore failures metric: %v", err)
  6719  	}
  6720  	if updatedPartialRestoreFailures != partialRestoreFailures+1.0 {
  6721  		t.Errorf("Partial restore failures metric was not incremented after failed partial resync (expected %.02f, got %.02f)", partialRestoreFailures+1.0, updatedPartialRestoreFailures)
  6722  	}
  6723  
  6724  	// On retry we should do a full resync, which should succeed (and delete svc4)
  6725  	fp.syncProxyRules()
  6726  
  6727  	expected = dedent.Dedent(`
  6728  		*filter
  6729  		:KUBE-NODEPORTS - [0:0]
  6730  		:KUBE-SERVICES - [0:0]
  6731  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6732  		:KUBE-FIREWALL - [0:0]
  6733  		:KUBE-FORWARD - [0:0]
  6734  		:KUBE-PROXY-FIREWALL - [0:0]
  6735  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6736  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6737  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6738  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6739  		COMMIT
  6740  		*nat
  6741  		:KUBE-NODEPORTS - [0:0]
  6742  		:KUBE-SERVICES - [0:0]
  6743  		:KUBE-MARK-MASQ - [0:0]
  6744  		:KUBE-POSTROUTING - [0:0]
  6745  		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
  6746  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6747  		:KUBE-SEP-JVVZVJ7BSEPPRNBS - [0:0]
  6748  		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
  6749  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  6750  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6751  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  6752  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6753  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6754  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6755  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6756  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6757  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6758  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6759  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6760  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6761  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
  6762  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
  6763  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
  6764  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
  6765  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6766  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
  6767  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
  6768  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6769  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
  6770  		-X KUBE-SEP-AYCN5HPXMIRJNJXU
  6771  		-X KUBE-SVC-4SW47YFZTEDKD3PK
  6772  		COMMIT
  6773  		`)
  6774  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6775  
  6776  	rulesSynced = countRules(utiliptables.TableNAT, expected)
  6777  	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
  6778  	if rulesSyncedMetric != rulesSynced {
  6779  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6780  	}
  6781  
  6782  	// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and 2
  6783  	// KUBE-SEP-AYCN5HPXMIRJNJXU rules
  6784  	rulesTotal -= 5
  6785  	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
  6786  	if rulesTotalMetric != rulesTotal {
  6787  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6788  	}
  6789  }
  6790  
  6791  func TestNoEndpointsMetric(t *testing.T) {
  6792  	type endpoint struct {
  6793  		ip       string
  6794  		hostname string
  6795  	}
  6796  
  6797  	metrics.RegisterMetrics()
  6798  	testCases := []struct {
  6799  		name                                                string
  6800  		internalTrafficPolicy                               *v1.ServiceInternalTrafficPolicy
  6801  		externalTrafficPolicy                               v1.ServiceExternalTrafficPolicy
  6802  		endpoints                                           []endpoint
  6803  		expectedSyncProxyRulesNoLocalEndpointsTotalInternal int
  6804  		expectedSyncProxyRulesNoLocalEndpointsTotalExternal int
  6805  	}{
  6806  		{
  6807  			name:                  "internalTrafficPolicy is set and there are local endpoints",
  6808  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6809  			endpoints: []endpoint{
  6810  				{"10.0.1.1", testHostname},
  6811  				{"10.0.1.2", "host1"},
  6812  				{"10.0.1.3", "host2"},
  6813  			},
  6814  		},
  6815  		{
  6816  			name:                  "externalTrafficPolicy is set and there are local endpoints",
  6817  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6818  			endpoints: []endpoint{
  6819  				{"10.0.1.1", testHostname},
  6820  				{"10.0.1.2", "host1"},
  6821  				{"10.0.1.3", "host2"},
  6822  			},
  6823  		},
  6824  		{
  6825  			name:                  "both policies are set and there are local endpoints",
  6826  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6827  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6828  			endpoints: []endpoint{
  6829  				{"10.0.1.1", testHostname},
  6830  				{"10.0.1.2", "host1"},
  6831  				{"10.0.1.3", "host2"},
  6832  			},
  6833  		},
  6834  		{
  6835  			name:                  "internalTrafficPolicy is set and there are no local endpoints",
  6836  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6837  			endpoints: []endpoint{
  6838  				{"10.0.1.1", "host0"},
  6839  				{"10.0.1.2", "host1"},
  6840  				{"10.0.1.3", "host2"},
  6841  			},
  6842  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  6843  		},
  6844  		{
  6845  			name:                  "externalTrafficPolicy is set and there are no local endpoints",
  6846  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6847  			endpoints: []endpoint{
  6848  				{"10.0.1.1", "host0"},
  6849  				{"10.0.1.2", "host1"},
  6850  				{"10.0.1.3", "host2"},
  6851  			},
  6852  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  6853  		},
  6854  		{
  6855  			name:                  "both policies are set and there are no local endpoints",
  6856  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6857  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6858  			endpoints: []endpoint{
  6859  				{"10.0.1.1", "host0"},
  6860  				{"10.0.1.2", "host1"},
  6861  				{"10.0.1.3", "host2"},
  6862  			},
  6863  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  6864  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  6865  		},
  6866  		{
  6867  			name:                  "both policies are set and there are no endpoints at all",
  6868  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6869  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6870  			endpoints:             []endpoint{},
  6871  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0,
  6872  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0,
  6873  		},
  6874  	}
  6875  
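        	// Note: sync_proxy_rules_no_local_endpoints_total only counts services
        	// that have ready endpoints but none on this node while a "Local"
        	// traffic policy applies; a service with no endpoints at all is
        	// excluded (it gets a REJECT rule instead), which is why the final
        	// case expects 0 for both labels.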
  6876  	for _, tc := range testCases {
  6877  		t.Run(tc.name, func(t *testing.T) {
  6878  			ipt := iptablestest.NewFake()
  6879  			fp := NewFakeProxier(ipt)
  6880  			fp.OnServiceSynced()
  6881  			fp.OnEndpointSlicesSynced()
  6882  
  6883  			serviceName := "svc1"
  6884  			namespaceName := "ns1"
  6885  
  6886  			svc := &v1.Service{
  6887  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  6888  				Spec: v1.ServiceSpec{
  6889  					ClusterIP: "172.30.1.1",
  6890  					Selector:  map[string]string{"foo": "bar"},
  6891  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP, NodePort: 123}},
  6892  				},
  6893  			}
  6894  			if tc.internalTrafficPolicy != nil {
  6895  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  6896  			}
  6897  			if tc.externalTrafficPolicy != "" {
  6898  				svc.Spec.Type = v1.ServiceTypeNodePort
  6899  				svc.Spec.ExternalTrafficPolicy = tc.externalTrafficPolicy
  6900  			}
  6901  
  6902  			fp.OnServiceAdd(svc)
  6903  
  6904  			endpointSlice := &discovery.EndpointSlice{
  6905  				ObjectMeta: metav1.ObjectMeta{
  6906  					Name:      fmt.Sprintf("%s-1", serviceName),
  6907  					Namespace: namespaceName,
  6908  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  6909  				},
  6910  				Ports: []discovery.EndpointPort{{
  6911  					Name:     ptr.To(""),
  6912  					Port:     ptr.To[int32](80),
  6913  					Protocol: ptr.To(v1.ProtocolTCP),
  6914  				}},
  6915  				AddressType: discovery.AddressTypeIPv4,
  6916  			}
  6917  			for _, ep := range tc.endpoints {
  6918  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  6919  					Addresses:  []string{ep.ip},
  6920  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  6921  					NodeName:   ptr.To(ep.hostname),
  6922  				})
  6923  			}
  6924  
  6925  			fp.OnEndpointSliceAdd(endpointSlice)
  6926  			fp.syncProxyRules()
  6927  			syncProxyRulesNoLocalEndpointsTotalInternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal"))
  6928  			if err != nil {
  6929  				t.Errorf("failed to get %s value, err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  6930  			}
  6931  
  6932  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal != int(syncProxyRulesNoLocalEndpointsTotalInternal) {
  6933  				t.Errorf("sync_proxy_rules_no_local_endpoints_total metric mismatch (internal): got %d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalInternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal)
  6934  			}
  6935  
  6936  			syncProxyRulesNoLocalEndpointsTotalExternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external"))
  6937  			if err != nil {
  6938  				t.Errorf("failed to get %s value(external), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  6939  			}
  6940  
  6941  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal != int(syncProxyRulesNoLocalEndpointsTotalExternal) {
  6942  				t.Errorf("sync_proxy_rules_no_local_endpoints_total metric mismatch (external): got %d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalExternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal)
  6943  			}
  6944  		})
  6945  	}
  6946  }
  6947  
  6948  func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
  6949  	testCases := []struct {
  6950  		name          string
  6951  		ipModeEnabled bool
  6952  		svcIP         string
  6953  		svcLBIP       string
  6954  		ipMode        *v1.LoadBalancerIPMode
  6955  		expectedRule  bool
  6956  	}{
  6957  		/* LoadBalancerIPMode disabled */
  6958  		{
  6959  			name:          "LoadBalancerIPMode disabled, ipMode Proxy",
  6960  			ipModeEnabled: false,
  6961  			svcIP:         "10.20.30.41",
  6962  			svcLBIP:       "1.2.3.4",
  6963  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  6964  			expectedRule:  true,
  6965  		},
  6966  		{
  6967  			name:          "LoadBalancerIPMode disabled, ipMode VIP",
  6968  			ipModeEnabled: false,
  6969  			svcIP:         "10.20.30.42",
  6970  			svcLBIP:       "1.2.3.5",
  6971  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  6972  			expectedRule:  true,
  6973  		},
  6974  		{
  6975  			name:          "LoadBalancerIPMode disabled, ipMode nil",
  6976  			ipModeEnabled: false,
  6977  			svcIP:         "10.20.30.43",
  6978  			svcLBIP:       "1.2.3.6",
  6979  			ipMode:        nil,
  6980  			expectedRule:  true,
  6981  		},
  6982  		/* LoadBalancerIPMode enabled */
  6983  		{
  6984  			name:          "LoadBalancerIPMode enabled, ipMode Proxy",
  6985  			ipModeEnabled: true,
  6986  			svcIP:         "10.20.30.41",
  6987  			svcLBIP:       "1.2.3.4",
  6988  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  6989  			expectedRule:  false,
  6990  		},
  6991  		{
  6992  			name:          "LoadBalancerIPMode enabled, ipMode VIP",
  6993  			ipModeEnabled: true,
  6994  			svcIP:         "10.20.30.42",
  6995  			svcLBIP:       "1.2.3.5",
  6996  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  6997  			expectedRule:  true,
  6998  		},
  6999  		{
  7000  			name:          "LoadBalancerIPMode enabled, ipMode nil",
  7001  			ipModeEnabled: true,
  7002  			svcIP:         "10.20.30.43",
  7003  			svcLBIP:       "1.2.3.6",
  7004  			ipMode:        nil,
  7005  			expectedRule:  true,
  7006  		},
  7007  	}
  7008  
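        	// With the LoadBalancerIPMode feature gate enabled, an ingress IP
        	// whose ipMode is "Proxy" is not programmed into the NAT rules
        	// (traffic is expected to flow through the external load balancer),
        	// while "VIP" or an unset ipMode gets the usual rules. With the gate
        	// disabled, ipMode is ignored and the rules are always created.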
  7009  	svcPort := 80
  7010  	svcNodePort := 3001
  7011  	svcPortName := proxy.ServicePortName{
  7012  		NamespacedName: makeNSN("ns1", "svc1"),
  7013  		Port:           "p80",
  7014  		Protocol:       v1.ProtocolTCP,
  7015  	}
  7016  
  7017  	for _, testCase := range testCases {
  7018  		t.Run(testCase.name, func(t *testing.T) {
  7019  			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, testCase.ipModeEnabled)()
  7020  			ipt := iptablestest.NewFake()
  7021  			fp := NewFakeProxier(ipt)
  7022  			makeServiceMap(fp,
  7023  				makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  7024  					svc.Spec.Type = v1.ServiceTypeLoadBalancer
  7025  					svc.Spec.ClusterIP = testCase.svcIP
  7026  					svc.Spec.Ports = []v1.ServicePort{{
  7027  						Name:     svcPortName.Port,
  7028  						Port:     int32(svcPort),
  7029  						Protocol: v1.ProtocolTCP,
  7030  						NodePort: int32(svcNodePort),
  7031  					}}
  7032  					svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  7033  						IP:     testCase.svcLBIP,
  7034  						IPMode: testCase.ipMode,
  7035  					}}
  7036  				}),
  7037  			)
  7038  
  7039  			populateEndpointSlices(fp,
  7040  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  7041  					eps.AddressType = discovery.AddressTypeIPv4
  7042  					eps.Endpoints = []discovery.Endpoint{{
  7043  						Addresses: []string{"10.180.0.1"},
  7044  					}}
  7045  					eps.Ports = []discovery.EndpointPort{{
  7046  						Name:     ptr.To("p80"),
  7047  						Port:     ptr.To[int32](80),
  7048  						Protocol: ptr.To(v1.ProtocolTCP),
  7049  					}}
  7050  				}),
  7051  			)
  7052  
  7053  			fp.syncProxyRules()
  7054  
  7055  			c, _ := ipt.Dump.GetChain(utiliptables.TableNAT, kubeServicesChain)
  7056  			ruleExists := false
  7057  			for _, r := range c.Rules {
  7058  				if r.DestinationAddress != nil && r.DestinationAddress.Value == testCase.svcLBIP {
  7059  					ruleExists = true
  7060  				}
  7061  			}
  7062  			if ruleExists != testCase.expectedRule {
  7063  				t.Errorf("rule for %s: got %v, expected %v", testCase.svcLBIP, ruleExists, testCase.expectedRule)
  7064  			}
  7065  		})
  7066  	}
  7067  }