k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/proxy/iptables/proxier_test.go

     1  //go:build linux
     2  // +build linux
     3  
     4  /*
     5  Copyright 2015 The Kubernetes Authors.
     6  
     7  Licensed under the Apache License, Version 2.0 (the "License");
     8  you may not use this file except in compliance with the License.
     9  You may obtain a copy of the License at
    10  
    11      http://www.apache.org/licenses/LICENSE-2.0
    12  
    13  Unless required by applicable law or agreed to in writing, software
    14  distributed under the License is distributed on an "AS IS" BASIS,
    15  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    16  See the License for the specific language governing permissions and
    17  limitations under the License.
    18  */
    19  
    20  package iptables
    21  
    22  import (
    23  	"bytes"
    24  	"fmt"
    25  	"net"
    26  	"reflect"
    27  	"regexp"
    28  	stdruntime "runtime"
    29  	"sort"
    30  	"strconv"
    31  	"strings"
    32  	"testing"
    33  	"time"
    34  
    35  	"github.com/google/go-cmp/cmp"
    36  	"github.com/lithammer/dedent"
    37  	"github.com/stretchr/testify/assert"
    38  	v1 "k8s.io/api/core/v1"
    39  	discovery "k8s.io/api/discovery/v1"
    40  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    41  	"k8s.io/apimachinery/pkg/types"
    42  	"k8s.io/apimachinery/pkg/util/intstr"
    43  	"k8s.io/apimachinery/pkg/util/sets"
    44  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    45  	featuregatetesting "k8s.io/component-base/featuregate/testing"
    46  	"k8s.io/component-base/metrics/legacyregistry"
    47  	"k8s.io/component-base/metrics/testutil"
    48  	"k8s.io/klog/v2"
    49  	klogtesting "k8s.io/klog/v2/ktesting"
    50  	"k8s.io/kubernetes/pkg/features"
    51  	"k8s.io/kubernetes/pkg/proxy"
    52  	kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
    53  	"k8s.io/kubernetes/pkg/proxy/conntrack"
    54  	"k8s.io/kubernetes/pkg/proxy/healthcheck"
    55  	"k8s.io/kubernetes/pkg/proxy/metrics"
    56  	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
    57  	proxyutiltest "k8s.io/kubernetes/pkg/proxy/util/testing"
    58  	"k8s.io/kubernetes/pkg/util/async"
    59  	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
    60  	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
    61  	netutils "k8s.io/utils/net"
    62  	"k8s.io/utils/ptr"
    63  )
    64  
    65  // Conventions for tests using NewFakeProxier:
    66  //
    67  // Pod IPs:             10.0.0.0/8
    68  // Service ClusterIPs:  172.30.0.0/16
    69  // Node IPs:            192.168.0.0/24
    70  // Local Node IP:       192.168.0.2
    71  // Service ExternalIPs: 192.168.99.0/24
    72  // LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
    73  // Non-cluster IPs:     203.0.113.0/24
    74  // LB Source Range:     203.0.113.0/25
    75  
    76  const testHostname = "test-hostname"
    77  const testNodeIP = "192.168.0.2"
    78  const testNodeIPAlt = "192.168.1.2"
    79  const testExternalIP = "192.168.99.11"
    80  const testNodeIPv6 = "2001:db8::1"
    81  const testNodeIPv6Alt = "2001:db8:1::2"
    82  const testExternalClient = "203.0.113.2"
    83  const testExternalClientBlocked = "203.0.113.130"
    84  
    85  var testNodeIPs = []string{testNodeIP, testNodeIPAlt, testExternalIP, testNodeIPv6, testNodeIPv6Alt}
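
        // A hedged illustration of the conventions above, as they might appear in
        // a Service fixture (values picked from the ranges listed; hypothetical,
        // not a case taken from this file):
        //
        //	svc.Spec.ClusterIP = "172.30.0.41"                             // Service ClusterIPs
        //	svc.Spec.ExternalIPs = []string{"192.168.99.11"}               // Service ExternalIPs
        //	svc.Spec.LoadBalancerSourceRanges = []string{"203.0.113.0/25"} // LB Source Range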
    86  
    87  func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
    88  	// TODO: Call NewProxier after refactoring out the goroutine
    89  	// invocation into a Run() method.
    90  	ipfamily := v1.IPv4Protocol
    91  	podCIDR := "10.0.0.0/8"
    92  	if ipt.IsIPv6() {
    93  		ipfamily = v1.IPv6Protocol
    94  		podCIDR = "fd00:10::/64"
    95  	}
    96  	detectLocal := proxyutil.NewDetectLocalByCIDR(podCIDR)
    97  
    98  	networkInterfacer := proxyutiltest.NewFakeNetwork()
    99  	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
   100  	addrs := []net.Addr{
   101  		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
   102  		&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)},
   103  	}
   104  	networkInterfacer.AddInterfaceAddr(&itf, addrs)
   105  	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
   106  	addrs1 := []net.Addr{
   107  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)},
   108  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPAlt), Mask: net.CIDRMask(24, 32)},
   109  		&net.IPNet{IP: netutils.ParseIPSloppy(testExternalIP), Mask: net.CIDRMask(24, 32)},
   110  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6), Mask: net.CIDRMask(64, 128)},
   111  		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6Alt), Mask: net.CIDRMask(64, 128)},
   112  	}
   113  	networkInterfacer.AddInterfaceAddr(&itf1, addrs1)
   114  
   115  	p := &Proxier{
   116  		svcPortMap:               make(proxy.ServicePortMap),
   117  		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
   118  		endpointsMap:             make(proxy.EndpointsMap),
   119  		endpointsChanges:         proxy.NewEndpointsChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil),
   120  		needFullSync:             true,
   121  		iptables:                 ipt,
   122  		masqueradeMark:           "0x4000",
   123  		conntrack:                conntrack.NewFake(),
   124  		localDetector:            detectLocal,
   125  		hostname:                 testHostname,
   126  		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
   127  		precomputedProbabilities: make([]string, 0, 1001),
   128  		iptablesData:             bytes.NewBuffer(nil),
   129  		existingFilterChainsData: bytes.NewBuffer(nil),
   130  		filterChains:             proxyutil.NewLineBuffer(),
   131  		filterRules:              proxyutil.NewLineBuffer(),
   132  		natChains:                proxyutil.NewLineBuffer(),
   133  		natRules:                 proxyutil.NewLineBuffer(),
   134  		nodeIP:                   netutils.ParseIPSloppy(testNodeIP),
   135  		localhostNodePorts:       true,
   136  		nodePortAddresses:        proxyutil.NewNodePortAddresses(ipfamily, nil),
   137  		networkInterfacer:        networkInterfacer,
   138  	}
   139  	p.setInitialized(true)
   140  	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
   141  	return p
   142  }
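
        // exampleFakeProxierSync is a hedged sketch (not invoked by the suite) of
        // the lifecycle most tests in this file assume: build a Proxier against a
        // fake iptables implementation, sync once, and read back the generated
        // ruleset.
        func exampleFakeProxierSync() string {
        	ipt := iptablestest.NewFake() // fake utiliptables.Interface (IPv4)
        	fp := NewFakeProxier(ipt)
        	fp.syncProxyRules() // writes the full iptables-restore input to the fake
        	return fp.iptablesData.String()
        }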
   143  
   144  // parseIPTablesData takes iptables-save output and returns a map of table name to array of lines.
   145  func parseIPTablesData(ruleData string) (map[string][]string, error) {
   146  	// Split ruleData at the "COMMIT" lines; given valid input, this will result in
   147  	// one element for each table plus an extra empty element (since the ruleData
   148  	// should end with a "COMMIT" line).
   149  	rawTables := strings.Split(strings.TrimPrefix(ruleData, "\n"), "COMMIT\n")
   150  	nTables := len(rawTables) - 1
   151  	if nTables < 2 || rawTables[nTables] != "" {
   152  		return nil, fmt.Errorf("bad ruleData (%d tables)\n%s", nTables, ruleData)
   153  	}
   154  
   155  	tables := make(map[string][]string, nTables)
   156  	for i, table := range rawTables[:nTables] {
   157  		lines := strings.Split(strings.Trim(table, "\n"), "\n")
   158  		// The first line should be, eg, "*nat" or "*filter"
   159  		if lines[0][0] != '*' {
   160  			return nil, fmt.Errorf("bad ruleData (table %d starts with %q)", i+1, lines[0])
   161  		}
   162  		// add back the "COMMIT" line that got eaten by the strings.Split above
   163  		lines = append(lines, "COMMIT")
   164  		tables[lines[0][1:]] = lines
   165  	}
   166  
   167  	if tables["nat"] == nil {
   168  		return nil, fmt.Errorf("bad ruleData (no %q table)", "nat")
   169  	}
   170  	if tables["filter"] == nil {
   171  		return nil, fmt.Errorf("bad ruleData (no %q table)", "filter")
   172  	}
   173  	return tables, nil
   174  }
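
        // A hedged sketch of parseIPTablesData's contract, mirroring the test
        // cases below: the dump must contain at least the "filter" and "nat"
        // tables, each terminated by COMMIT, and each table name maps to that
        // table's lines (with the eaten "COMMIT" added back):
        //
        //	tables, err := parseIPTablesData("*filter\nCOMMIT\n*nat\nCOMMIT\n")
        //	// err == nil; tables["nat"] == []string{"*nat", "COMMIT"}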
   175  
   176  func TestParseIPTablesData(t *testing.T) {
   177  	for _, tc := range []struct {
   178  		name   string
   179  		input  string
   180  		output map[string][]string
   181  		error  string
   182  	}{
   183  		{
   184  			name: "basic test",
   185  			input: dedent.Dedent(`
   186  				*filter
   187  				:KUBE-SERVICES - [0:0]
   188  				:KUBE-EXTERNAL-SERVICES - [0:0]
   189  				:KUBE-FORWARD - [0:0]
   190  				:KUBE-NODEPORTS - [0:0]
   191  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   192  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   193  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   194  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   195  				COMMIT
   196  				*nat
   197  				:KUBE-SERVICES - [0:0]
   198  				:KUBE-NODEPORTS - [0:0]
   199  				:KUBE-POSTROUTING - [0:0]
   200  				:KUBE-MARK-MASQ - [0:0]
   201  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   202  				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
   203  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
   204  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
   205  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
   206  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   207  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   208  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
   209  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
   210  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
   211  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
   212  				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
   213  				COMMIT
   214  				`),
   215  			output: map[string][]string{
   216  				"filter": {
   217  					`*filter`,
   218  					`:KUBE-SERVICES - [0:0]`,
   219  					`:KUBE-EXTERNAL-SERVICES - [0:0]`,
   220  					`:KUBE-FORWARD - [0:0]`,
   221  					`:KUBE-NODEPORTS - [0:0]`,
   222  					`-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
   223  					`-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP`,
   224  					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT`,
   225  					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT`,
   226  					`COMMIT`,
   227  				},
   228  				"nat": {
   229  					`*nat`,
   230  					`:KUBE-SERVICES - [0:0]`,
   231  					`:KUBE-NODEPORTS - [0:0]`,
   232  					`:KUBE-POSTROUTING - [0:0]`,
   233  					`:KUBE-MARK-MASQ - [0:0]`,
   234  					`:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]`,
   235  					`:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]`,
   236  					`-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN`,
   237  					`-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000`,
   238  					`-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE`,
   239  					`-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000`,
   240  					`-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O`,
   241  					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`,
   242  					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
   243  					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ`,
   244  					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80`,
   245  					`-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS`,
   246  					`COMMIT`,
   247  				},
   248  			},
   249  		},
   250  		{
   251  			name: "not enough tables",
   252  			input: dedent.Dedent(`
   253  				*filter
   254  				:KUBE-SERVICES - [0:0]
   255  				:KUBE-EXTERNAL-SERVICES - [0:0]
   256  				:KUBE-FORWARD - [0:0]
   257  				:KUBE-NODEPORTS - [0:0]
   258  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   259  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   260  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   261  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   262  				COMMIT
   263  				`),
   264  			error: "bad ruleData (1 tables)",
   265  		},
   266  		{
   267  			name: "trailing junk",
   268  			input: dedent.Dedent(`
   269  				*filter
   270  				:KUBE-SERVICES - [0:0]
   271  				:KUBE-EXTERNAL-SERVICES - [0:0]
   272  				:KUBE-FORWARD - [0:0]
   273  				:KUBE-NODEPORTS - [0:0]
   274  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   275  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   276  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   277  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   278  				COMMIT
   279  				*nat
   280  				:KUBE-SERVICES - [0:0]
   281  				:KUBE-EXTERNAL-SERVICES - [0:0]
   282  				:KUBE-FORWARD - [0:0]
   283  				:KUBE-NODEPORTS - [0:0]
   284  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   285  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   286  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   287  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   288  				COMMIT
   289  				junk
   290  				`),
   291  			error: "bad ruleData (2 tables)",
   292  		},
   293  		{
   294  			name: "bad start line",
   295  			input: dedent.Dedent(`
   296  				*filter
   297  				:KUBE-SERVICES - [0:0]
   298  				:KUBE-EXTERNAL-SERVICES - [0:0]
   299  				:KUBE-FORWARD - [0:0]
   300  				:KUBE-NODEPORTS - [0:0]
   301  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   302  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   303  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   304  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   305  				COMMIT
   306  				:KUBE-SERVICES - [0:0]
   307  				:KUBE-EXTERNAL-SERVICES - [0:0]
   308  				:KUBE-FORWARD - [0:0]
   309  				:KUBE-NODEPORTS - [0:0]
   310  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   311  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   312  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   313  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   314  				COMMIT
   315  				`),
   316  			error: `bad ruleData (table 2 starts with ":KUBE-SERVICES - [0:0]")`,
   317  		},
   318  		{
   319  			name: "no nat",
   320  			input: dedent.Dedent(`
   321  				*filter
   322  				:KUBE-SERVICES - [0:0]
   323  				:KUBE-EXTERNAL-SERVICES - [0:0]
   324  				:KUBE-FORWARD - [0:0]
   325  				:KUBE-NODEPORTS - [0:0]
   326  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   327  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   328  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   329  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   330  				COMMIT
   331  				*mangle
   332  				:KUBE-SERVICES - [0:0]
   333  				:KUBE-EXTERNAL-SERVICES - [0:0]
   334  				:KUBE-FORWARD - [0:0]
   335  				:KUBE-NODEPORTS - [0:0]
   336  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   337  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   338  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   339  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   340  				COMMIT
   341  				`),
   342  			error: `bad ruleData (no "nat" table)`,
   343  		},
   344  		{
   345  			name: "no filter",
   346  			input: dedent.Dedent(`
   347  				*mangle
   348  				:KUBE-SERVICES - [0:0]
   349  				:KUBE-EXTERNAL-SERVICES - [0:0]
   350  				:KUBE-FORWARD - [0:0]
   351  				:KUBE-NODEPORTS - [0:0]
   352  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   353  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   354  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   355  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   356  				COMMIT
   357  				*nat
   358  				:KUBE-SERVICES - [0:0]
   359  				:KUBE-EXTERNAL-SERVICES - [0:0]
   360  				:KUBE-FORWARD - [0:0]
   361  				:KUBE-NODEPORTS - [0:0]
   362  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   363  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   364  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   365  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   366  				COMMIT
   367  				`),
   368  			error: `bad ruleData (no "filter" table)`,
   369  		},
   370  	} {
   371  		t.Run(tc.name, func(t *testing.T) {
   372  			out, err := parseIPTablesData(tc.input)
   373  			if err == nil {
   374  				if tc.error != "" {
   375  					t.Errorf("unexpectedly did not get error")
   376  				} else {
   377  					assert.Equal(t, tc.output, out)
   378  				}
   379  			} else {
   380  				if tc.error == "" {
   381  					t.Errorf("got unexpected error: %v", err)
   382  				} else if !strings.HasPrefix(err.Error(), tc.error) {
   383  					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
   384  				}
   385  			}
   386  		})
   387  	}
   388  }
   389  
   390  func countRules(logger klog.Logger, tableName utiliptables.Table, ruleData string) int {
   391  	dump, err := iptablestest.ParseIPTablesDump(ruleData)
   392  	if err != nil {
   393  		logger.Error(err, "error parsing iptables rules")
   394  		return -1
   395  	}
   396  
   397  	rules := 0
   398  	table, err := dump.GetTable(tableName)
   399  	if err != nil {
   400  		logger.Error(err, "can't find table", "table", tableName)
   401  		return -1
   402  	}
   403  
   404  	for _, c := range table.Chains {
   405  		rules += len(c.Rules)
   406  	}
   407  	return rules
   408  }
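
        // exampleCountRules is a hedged sketch of using countRules with a test
        // logger (klogtesting.NewTestContext returning the logger first is assumed
        // from this file's imports): chains with no "-A" lines count as zero rules.
        func exampleCountRules(t *testing.T) {
        	logger, _ := klogtesting.NewTestContext(t)
        	dump := "*filter\n:KUBE-SERVICES - [0:0]\nCOMMIT\n*nat\n:KUBE-SERVICES - [0:0]\nCOMMIT\n"
        	_ = countRules(logger, utiliptables.TableNAT, dump) // 0
        }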
   409  
   410  func countRulesFromMetric(logger klog.Logger, tableName utiliptables.Table) int {
   411  	numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IPTablesRulesTotal.WithLabelValues(string(tableName)))
   412  	if err != nil {
   413  		logger.Error(err, "metrics are not registered?")
   414  		return -1
   415  	}
   416  	return int(numRulesFloat)
   417  }
   418  
   419  func countRulesFromLastSyncMetric(logger klog.Logger, tableName utiliptables.Table) int {
   420  	numRulesFloat, err := testutil.GetGaugeMetricValue(metrics.IPTablesRulesLastSync.WithLabelValues(string(tableName)))
   421  	if err != nil {
   422  		logger.Error(err, "metrics are not registered?")
   423  		return -1
   424  	}
   425  	return int(numRulesFloat)
   426  }
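
        // exampleMetricsSetup is a hedged sketch: both helpers above read gauges
        // from the global legacy registry, so the proxy metrics must have been
        // registered first (the mode-taking RegisterMetrics call is an assumption
        // based on this file's kubeproxyconfig import).
        func exampleMetricsSetup(t *testing.T) {
        	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
        	logger, _ := klogtesting.NewTestContext(t)
        	_ = countRulesFromMetric(logger, utiliptables.TableNAT)
        }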
   427  
   428  // findAllMatches takes an array of lines and a pattern with one parenthesized group, and
   429  // returns a sorted array of all of the unique matches of the parenthesized group.
   430  func findAllMatches(lines []string, pattern string) []string {
   431  	regex := regexp.MustCompile(pattern)
   432  	allMatches := sets.New[string]()
   433  	for _, line := range lines {
   434  		match := regex.FindStringSubmatch(line)
   435  		if len(match) == 2 {
   436  			allMatches.Insert(match[1])
   437  		}
   438  	}
   439  	return sets.List(allMatches)
   440  }
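
        // exampleFindAllMatches is a small sketch of the contract: one
        // parenthesized group in the pattern, and the unique submatches come back
        // sorted.
        func exampleFindAllMatches() {
        	lines := []string{"-A FOO -j BAR", "-A FOO -j BAZ", "-A QUX -j BAR"}
        	_ = findAllMatches(lines, `-j ([^ ]*)`) // []string{"BAR", "BAZ"}
        }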
   441  
   442  // checkIPTablesRuleJumps checks that every `-j` in the given rules jumps to a chain
   443  // that we created and added rules to
   444  func checkIPTablesRuleJumps(ruleData string) error {
   445  	tables, err := parseIPTablesData(ruleData)
   446  	if err != nil {
   447  		return err
   448  	}
   449  
   450  	for tableName, lines := range tables {
   451  		// Find all of the lines like ":KUBE-SERVICES", indicating chains that
   452  		// iptables-restore would create when loading the data.
   453  		createdChains := sets.New[string](findAllMatches(lines, `^:([^ ]*)`)...)
   454  		// Find all of the lines like "-X KUBE-SERVICES ..." indicating chains
   455  		// that we are deleting because they are no longer used, and remove
   456  		// those chains from createdChains.
   457  		createdChains = createdChains.Delete(findAllMatches(lines, `-X ([^ ]*)`)...)
   458  
   459  		// Find all of the lines like "-A KUBE-SERVICES ..." indicating chains
   460  		// that we are adding at least one rule to.
   461  		filledChains := sets.New[string](findAllMatches(lines, `-A ([^ ]*)`)...)
   462  
   463  		// Find all of the chains that are jumped to by some rule so we can make
   464  		// sure we only jump to valid chains.
   465  		jumpedChains := sets.New[string](findAllMatches(lines, `-j ([^ ]*)`)...)
   466  		// Ignore jumps to chains that we expect to exist even if kube-proxy
   467  		// didn't create them itself.
   468  		jumpedChains.Delete("ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE")
   469  
   470  		// Find cases where we have "-A FOO ... -j BAR" but no ":BAR", meaning
   471  		// that we are jumping to a chain that was not created.
   472  		missingChains := jumpedChains.Difference(createdChains)
   473  		missingChains = missingChains.Union(filledChains.Difference(createdChains))
   474  		if len(missingChains) > 0 {
   475  			return fmt.Errorf("some chains in %s are used but were not created: %v", tableName, missingChains.UnsortedList())
   476  		}
   477  
   478  		// Find cases where we have "-A FOO ... -j BAR", but no "-A BAR ...",
   479  		// meaning that we are jumping to a chain that we didn't write out any
   480  		// rules for, which is normally a bug. (Except that KUBE-SERVICES always
   481  		// jumps to KUBE-NODEPORTS, even when there are no NodePort rules.)
   482  		emptyChains := jumpedChains.Difference(filledChains)
   483  		emptyChains.Delete(string(kubeNodePortsChain))
   484  		if len(emptyChains) > 0 {
   485  			return fmt.Errorf("some chains in %s are jumped to but have no rules: %v", tableName, emptyChains.UnsortedList())
   486  		}
   487  
   488  		// Find cases where we have ":BAR" but no "-A FOO ... -j BAR", meaning
   489  		// that we are creating an empty chain but not using it for anything.
   490  		extraChains := createdChains.Difference(jumpedChains)
   491  		extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(kubeMarkMasqChain), string(kubeProxyFirewallChain), string(kubeletFirewallChain))
   492  		if len(extraChains) > 0 {
   493  			return fmt.Errorf("some chains in %s are created but not used: %v", tableName, extraChains.UnsortedList())
   494  		}
   495  	}
   496  
   497  	return nil
   498  }
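
        // In practice the check runs against a freshly synced ruleset; a hedged
        // sketch (assertIPTablesRulesEqual below does this when checkConsistency
        // is true):
        //
        //	if err := checkIPTablesRuleJumps(fp.iptablesData.String()); err != nil {
        //		t.Error(err)
        //	}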
   499  
   500  func TestCheckIPTablesRuleJumps(t *testing.T) {
   501  	for _, tc := range []struct {
   502  		name  string
   503  		input string
   504  		error string
   505  	}{
   506  		{
   507  			name: "valid",
   508  			input: dedent.Dedent(`
   509  				*filter
   510  				COMMIT
   511  				*nat
   512  				:KUBE-MARK-MASQ - [0:0]
   513  				:KUBE-SERVICES - [0:0]
   514  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   515  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   516  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   517  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
   518  				COMMIT
   519  				`),
   520  			error: "",
   521  		},
   522  		{
   523  			name: "can't jump to chain that wasn't created",
   524  			input: dedent.Dedent(`
   525  				*filter
   526  				COMMIT
   527  				*nat
   528  				:KUBE-SERVICES - [0:0]
   529  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   530  				COMMIT
   531  				`),
   532  			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   533  		},
   534  		{
   535  			name: "can't jump to chain that has no rules",
   536  			input: dedent.Dedent(`
   537  				*filter
   538  				COMMIT
   539  				*nat
   540  				:KUBE-SERVICES - [0:0]
   541  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   542  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   543  				COMMIT
   544  				`),
   545  			error: "some chains in nat are jumped to but have no rules: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   546  		},
   547  		{
   548  			name: "can't add rules to a chain that wasn't created",
   549  			input: dedent.Dedent(`
   550  				*filter
   551  				COMMIT
   552  				*nat
   553  				:KUBE-MARK-MASQ - [0:0]
   554  				:KUBE-SERVICES - [0:0]
   555  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
   556  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   557  				COMMIT
   558  				`),
   559  			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   560  		},
   600  		{
   601  			name: "can't create chain and then not use it",
   602  			input: dedent.Dedent(`
   603  				*filter
   604  				COMMIT
   605  				*nat
   606  				:KUBE-MARK-MASQ - [0:0]
   607  				:KUBE-SERVICES - [0:0]
   608  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   609  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
   610  				COMMIT
   611  				`),
   612  			error: "some chains in nat are created but not used: [KUBE-SVC-XPGD46QRK7WJZT7O]",
   613  		},
   614  	} {
   615  		t.Run(tc.name, func(t *testing.T) {
   616  			err := checkIPTablesRuleJumps(tc.input)
   617  			if err == nil {
   618  				if tc.error != "" {
   619  					t.Errorf("unexpectedly did not get error")
   620  				}
   621  			} else {
   622  				if tc.error == "" {
   623  					t.Errorf("got unexpected error: %v", err)
   624  				} else if !strings.HasPrefix(err.Error(), tc.error) {
   625  					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
   626  				}
   627  			}
   628  		})
   629  	}
   630  }
   631  
   632  // orderByCommentServiceName is a helper function that orders two IPTables rules
   633  // based on the service name in their comment. (If either rule has no comment then the
   634  // return value is undefined.)
   635  func orderByCommentServiceName(rule1, rule2 *iptablestest.Rule) bool {
   636  	if rule1.Comment == nil || rule2.Comment == nil {
   637  		return false
   638  	}
   639  	name1, name2 := rule1.Comment.Value, rule2.Comment.Value
   640  
   641  	// The service name is the comment up to the first space or colon
   642  	i := strings.IndexAny(name1, " :")
   643  	if i != -1 {
   644  		name1 = name1[:i]
   645  	}
   646  	i = strings.IndexAny(name2, " :")
   647  	if i != -1 {
   648  		name2 = name2[:i]
   649  	}
   650  
   651  	return name1 < name2
   652  }
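
        // exampleOrderByCommentServiceName is a hedged sketch (the Rule and
        // IPTablesValue field layout is assumed from the iptablestest package):
        // only the comment text up to the first space or colon, i.e. the service
        // name, is compared.
        func exampleOrderByCommentServiceName() bool {
        	r1 := &iptablestest.Rule{Comment: &iptablestest.IPTablesValue{Value: "ns1/svc1:p80 cluster IP"}}
        	r2 := &iptablestest.Rule{Comment: &iptablestest.IPTablesValue{Value: "ns2/svc2"}}
        	return orderByCommentServiceName(r1, r2) // true: "ns1/svc1" < "ns2/svc2"
        }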
   653  
   654  // sortIPTablesRules sorts `iptables-restore` output so as to not depend on the order that
   655  // Services get processed in, while preserving the relative ordering of related rules.
   656  func sortIPTablesRules(ruleData string) (string, error) {
   657  	dump, err := iptablestest.ParseIPTablesDump(ruleData)
   658  	if err != nil {
   659  		return "", err
   660  	}
   661  
   662  	// Sort tables
   663  	sort.Slice(dump.Tables, func(i, j int) bool {
   664  		return dump.Tables[i].Name < dump.Tables[j].Name
   665  	})
   666  
   667  	// Sort chains
   668  	for t := range dump.Tables {
   669  		table := &dump.Tables[t]
   670  		sort.Slice(table.Chains, func(i, j int) bool {
   671  			switch {
   672  			case table.Chains[i].Name == kubeNodePortsChain:
   673  				// KUBE-NODEPORTS comes before anything
   674  				return true
   675  			case table.Chains[j].Name == kubeNodePortsChain:
   676  				// anything goes after KUBE-NODEPORTS
   677  				return false
   678  			case table.Chains[i].Name == kubeServicesChain:
   679  				// KUBE-SERVICES comes before anything (except KUBE-NODEPORTS)
   680  				return true
   681  			case table.Chains[j].Name == kubeServicesChain:
   682  				// anything (except KUBE-NODEPORTS) goes after KUBE-SERVICES
   683  				return false
   684  			case strings.HasPrefix(string(table.Chains[i].Name), "KUBE-") && !strings.HasPrefix(string(table.Chains[j].Name), "KUBE-"):
   685  				// KUBE-* comes before non-KUBE-*
   686  				return true
   687  			case !strings.HasPrefix(string(table.Chains[i].Name), "KUBE-") && strings.HasPrefix(string(table.Chains[j].Name), "KUBE-"):
   688  				// non-KUBE-* goes after KUBE-*
   689  				return false
   690  			default:
   691  				// We have two KUBE-* chains or two non-KUBE-* chains; either
   692  				// way they sort alphabetically
   693  				return table.Chains[i].Name < table.Chains[j].Name
   694  			}
   695  		})
   696  	}
   697  
   698  	// Sort KUBE-NODEPORTS chains by service name
   699  	chain, _ := dump.GetChain(utiliptables.TableFilter, kubeNodePortsChain)
   700  	if chain != nil {
   701  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   702  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   703  		})
   704  	}
   705  	chain, _ = dump.GetChain(utiliptables.TableNAT, kubeNodePortsChain)
   706  	if chain != nil {
   707  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   708  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   709  		})
   710  	}
   711  
   712  	// Sort KUBE-SERVICES chains by service name (but keeping the "must be the last
   713  	// rule" rule in the "nat" table's KUBE-SERVICES chain last).
   714  	chain, _ = dump.GetChain(utiliptables.TableFilter, kubeServicesChain)
   715  	if chain != nil {
   716  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   717  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   718  		})
   719  	}
   720  	chain, _ = dump.GetChain(utiliptables.TableNAT, kubeServicesChain)
   721  	if chain != nil {
   722  		sort.SliceStable(chain.Rules, func(i, j int) bool {
   723  			if chain.Rules[i].Comment != nil && strings.Contains(chain.Rules[i].Comment.Value, "must be the last rule") {
   724  				return false
   725  			} else if chain.Rules[j].Comment != nil && strings.Contains(chain.Rules[j].Comment.Value, "must be the last rule") {
   726  				return true
   727  			}
   728  			return orderByCommentServiceName(chain.Rules[i], chain.Rules[j])
   729  		})
   730  	}
   731  
   732  	return dump.String(), nil
   733  }
   734  
   735  func TestSortIPTablesRules(t *testing.T) {
   736  	for _, tc := range []struct {
   737  		name   string
   738  		input  string
   739  		output string
   740  		error  string
   741  	}{
   742  		{
   743  			name: "basic test using each match type",
   744  			input: dedent.Dedent(`
   745  				*filter
   746  				:KUBE-SERVICES - [0:0]
   747  				:KUBE-EXTERNAL-SERVICES - [0:0]
   748  				:KUBE-FIREWALL - [0:0]
   749  				:KUBE-FORWARD - [0:0]
   750  				:KUBE-NODEPORTS - [0:0]
   751  				:KUBE-PROXY-FIREWALL - [0:0]
   752  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   753  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
   754  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
   755  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
   756  				-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
   757  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   758  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   759  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   760  				-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
   761  				COMMIT
   762  				*nat
   763  				:KUBE-SERVICES - [0:0]
   764  				:KUBE-NODEPORTS - [0:0]
   765  				:KUBE-POSTROUTING - [0:0]
   766  				:KUBE-MARK-MASQ - [0:0]
   767  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   768  				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
   769  				:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
   770  				:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
   771  				:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
   772  				:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
   773  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
   774  				:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
   775  				:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
   776  				:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
   777  				:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
   778  				:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
   779  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
   780  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
   781  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
   782  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   783  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   784  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   785  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
   786  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
   787  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
   788  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   789  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   790  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
   791  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   792  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
   793  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
   794  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
   795  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   796  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "other traffic to ns2/svc2:p80 will be dropped by KUBE-PROXY-FIREWALL"
   797  				-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   798  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   799  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
   800  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   801  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
   802  				-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
   803  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
   804  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   805  				-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
   806  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
   807  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
   808  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
   809  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
   810  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   811  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   812  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   813  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   814  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
   815  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
   816  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
   817  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
   818  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
   819  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
   820  				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
   821  				COMMIT
   822  				`),
   823  			output: dedent.Dedent(`
   824  				*filter
   825  				:KUBE-NODEPORTS - [0:0]
   826  				:KUBE-SERVICES - [0:0]
   827  				:KUBE-EXTERNAL-SERVICES - [0:0]
   828  				:KUBE-FIREWALL - [0:0]
   829  				:KUBE-FORWARD - [0:0]
   830  				:KUBE-PROXY-FIREWALL - [0:0]
   831  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   832  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
   833  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
   834  				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
   835  				-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
   836  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   837  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   838  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   839  				-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
   840  				COMMIT
   841  				*nat
   842  				:KUBE-NODEPORTS - [0:0]
   843  				:KUBE-SERVICES - [0:0]
   844  				:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
   845  				:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
   846  				:KUBE-MARK-MASQ - [0:0]
   847  				:KUBE-POSTROUTING - [0:0]
   848  				:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
   849  				:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
   850  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
   851  				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
   852  				:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
   853  				:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
   854  				:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
   855  				:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
   856  				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
   857  				:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
   858  				-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   859  				-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
   860  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
   861  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   862  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   863  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
   864  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
   865  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   866  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
   867  				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
   868  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   869  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
   870  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
   871  				-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
   872  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
   873  				-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "other traffic to ns2/svc2:p80 will be dropped by KUBE-PROXY-FIREWALL"
   874  				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
   875  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
   876  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
   877  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
   878  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
   879  				-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
   880  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
   881  				-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
   882  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
   883  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
   884  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
   885  				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
   886  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
   887  				-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
   888  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   889  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   890  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
   891  				-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
   892  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   893  				-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
   894  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   895  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
   896  				-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
   897  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
   898  				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
   899  				-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
   900  				COMMIT
   901  				`),
   902  		},
   903  		{
   904  			name: "extra tables",
   905  			input: dedent.Dedent(`
   906  				*filter
   907  				:KUBE-SERVICES - [0:0]
   908  				:KUBE-EXTERNAL-SERVICES - [0:0]
   909  				:KUBE-FORWARD - [0:0]
   910  				:KUBE-NODEPORTS - [0:0]
   911  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   912  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   913  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   914  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   915  				COMMIT
   916  				*nat
   917  				:KUBE-SERVICES - [0:0]
   918  				:KUBE-EXTERNAL-SERVICES - [0:0]
   919  				:KUBE-FORWARD - [0:0]
   920  				:KUBE-NODEPORTS - [0:0]
   921  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   922  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   923  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   924  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   925  				COMMIT
   926  				*mangle
   927  				:KUBE-SERVICES - [0:0]
   928  				:KUBE-EXTERNAL-SERVICES - [0:0]
   929  				:KUBE-FORWARD - [0:0]
   930  				:KUBE-NODEPORTS - [0:0]
   931  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   932  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   933  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   934  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   935  				COMMIT
   936  				`),
   937  			output: dedent.Dedent(`
   938  				*filter
   939  				:KUBE-NODEPORTS - [0:0]
   940  				:KUBE-SERVICES - [0:0]
   941  				:KUBE-EXTERNAL-SERVICES - [0:0]
   942  				:KUBE-FORWARD - [0:0]
   943  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   944  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   945  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   946  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   947  				COMMIT
   948  				*mangle
   949  				:KUBE-NODEPORTS - [0:0]
   950  				:KUBE-SERVICES - [0:0]
   951  				:KUBE-EXTERNAL-SERVICES - [0:0]
   952  				:KUBE-FORWARD - [0:0]
   953  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   954  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   955  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   956  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   957  				COMMIT
   958  				*nat
   959  				:KUBE-NODEPORTS - [0:0]
   960  				:KUBE-SERVICES - [0:0]
   961  				:KUBE-EXTERNAL-SERVICES - [0:0]
   962  				:KUBE-FORWARD - [0:0]
   963  				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
   964  				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
   965  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
   966  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
   967  				COMMIT
   968  				`),
   969  		},
   970  		{
   971  			name: "correctly match same service name in different styles of comments",
   972  			input: dedent.Dedent(`
   973  				*filter
   974  				COMMIT
   975  				*nat
   976  				:KUBE-SERVICES - [0:0]
   977  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
   978  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
   979  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
   980  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
   981  				-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
   982  				-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
   983  				-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
   984  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
   985  				-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
   986  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
   987  				-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
   988  				-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
   989  				COMMIT
   990  				`),
   991  			output: dedent.Dedent(`
   992  				*filter
   993  				COMMIT
   994  				*nat
   995  				:KUBE-SERVICES - [0:0]
   996  				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
   997  				-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
   998  				-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
   999  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1000  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1001  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1002  				-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
  1003  				-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
  1004  				-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
  1005  				-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
  1006  				-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
  1007  				-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
  1008  				COMMIT
  1009  				`),
  1010  		},
  1011  		{
  1012  			name: "unexpected junk lines are preserved",
  1013  			input: dedent.Dedent(`
  1014  				*filter
  1015  				COMMIT
  1016  				*nat
  1017  				:KUBE-SERVICES - [0:0]
  1018  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1019  				:KUBE-AAAAA - [0:0]
  1020  				:KUBE-ZZZZZ - [0:0]
  1021  				:WHY-IS-THIS-CHAIN-HERE - [0:0]
  1022  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1023  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1024  				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
  1025  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1026  				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
  1027  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1028  				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
  1029  				COMMIT
  1030  				`),
  1031  			output: dedent.Dedent(`
  1032  				*filter
  1033  				COMMIT
  1034  				*nat
  1035  				:KUBE-SERVICES - [0:0]
  1036  				:KUBE-AAAAA - [0:0]
  1037  				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1038  				:KUBE-ZZZZZ - [0:0]
  1039  				:WHY-IS-THIS-CHAIN-HERE - [0:0]
  1040  				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
  1041  				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
  1042  				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
  1043  				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
  1044  				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1045  				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
  1046  				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
  1047  				COMMIT
  1048  				`),
  1049  		},
  1050  	} {
  1051  		t.Run(tc.name, func(t *testing.T) {
  1052  			out, err := sortIPTablesRules(tc.input)
  1053  			if err == nil {
  1054  				if tc.error != "" {
  1055  					t.Errorf("unexpectedly did not get an error")
  1056  				} else {
  1057  					assert.Equal(t, strings.TrimPrefix(tc.output, "\n"), out)
  1058  				}
  1059  			} else {
  1060  				if tc.error == "" {
  1061  					t.Errorf("got unexpected error: %v", err)
  1062  				} else if !strings.HasPrefix(err.Error(), tc.error) {
  1063  					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
  1064  				}
  1065  			}
  1066  		})
  1067  	}
  1068  }
  1069  
  1070  // getLine returns the line number of the caller, if possible. This is useful in
  1071  // tests with a large number of cases - when something goes wrong, it makes it
  1072  // easier to find which case failed.
  1073  func getLine() int {
  1074  	_, _, line, ok := stdruntime.Caller(1)
  1075  	if ok {
  1076  		return line
  1077  	}
  1078  	return 0
  1079  }
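
        // For example, a caller can record its own line number and pass it through a
        // shared assertion helper, so a failure reports which call site it came from:
        //
        //	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())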
  1080  
  1081  // assertIPTablesRulesEqual asserts that the generated rules in result match the rules in
  1082  // expected, ignoring irrelevant ordering differences. By default this also checks the
  1083  // rules for consistency (e.g., no jumps to chains that aren't defined), but that can be
  1084  // disabled by passing false for checkConsistency when you are passing a partial set of rules.
  1085  func assertIPTablesRulesEqual(t *testing.T, line int, checkConsistency bool, expected, result string) {
  1086  	expected = strings.TrimLeft(expected, " \t\n")
  1087  
  1088  	result, err := sortIPTablesRules(strings.TrimLeft(result, " \t\n"))
  1089  	if err != nil {
  1090  		t.Fatalf("%s", err)
  1091  	}
  1092  
  1093  	lineStr := ""
  1094  	if line != 0 {
  1095  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1096  	}
  1097  	if diff := cmp.Diff(expected, result); diff != "" {
  1098  		t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result)
  1099  	}
  1100  
  1101  	if checkConsistency {
  1102  		err = checkIPTablesRuleJumps(expected)
  1103  		if err != nil {
  1104  			t.Fatalf("%s%s", err, lineStr)
  1105  		}
  1106  	}
  1107  }
  1108  
  1109  // assertIPTablesChainEqual asserts that the indicated chain in the indicated table in
  1110  // result contains exactly the rules in expected (in that order).
  1111  func assertIPTablesChainEqual(t *testing.T, line int, table utiliptables.Table, chain utiliptables.Chain, expected, result string) {
  1112  	expected = strings.TrimLeft(expected, " \t\n")
  1113  
  1114  	dump, err := iptablestest.ParseIPTablesDump(strings.TrimLeft(result, " \t\n"))
  1115  	if err != nil {
  1116  		t.Fatalf("%s", err)
  1117  	}
  1118  
  1119  	result = ""
  1120  	if ch, _ := dump.GetChain(table, chain); ch != nil {
  1121  		for _, rule := range ch.Rules {
  1122  			result += rule.Raw + "\n"
  1123  		}
  1124  	}
  1125  
  1126  	lineStr := ""
  1127  	if line != 0 {
  1128  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1129  	}
  1130  	if diff := cmp.Diff(expected, result); diff != "" {
  1131  		t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result)
  1132  	}
  1133  }
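
        // For example, to check only the nat-table KUBE-SERVICES chain from a full
        // iptables-save dump (an illustrative sketch of typical usage):
        //
        //	assertIPTablesChainEqual(t, getLine(), utiliptables.TableNAT,
        //		kubeServicesChain, expected, fp.iptablesData.String())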
  1134  
  1135  // addressMatches helps test whether an iptables rule such as "! -s 192.168.0.0/16" matches
  1136  // ipStr. address.Value is either an IP address ("1.2.3.4") or a CIDR string
  1137  // ("1.2.3.0/24").
  1138  func addressMatches(t *testing.T, address *iptablestest.IPTablesValue, ipStr string) bool {
  1139  	ip := netutils.ParseIPSloppy(ipStr)
  1140  	if ip == nil {
  1141  		t.Fatalf("Bad IP in test case: %s", ipStr)
  1142  	}
  1143  
  1144  	var matches bool
  1145  	if strings.Contains(address.Value, "/") {
  1146  		_, cidr, err := netutils.ParseCIDRSloppy(address.Value)
  1147  		if err != nil {
  1148  			t.Errorf("Bad CIDR in kube-proxy output: %v", err)
  1149  		}
  1150  		matches = cidr.Contains(ip)
  1151  	} else {
  1152  		ip2 := netutils.ParseIPSloppy(address.Value)
  1153  		if ip2 == nil {
  1154  			t.Errorf("Bad IP/CIDR in kube-proxy output: %s", address.Value)
  1155  		}
  1156  		matches = ip.Equal(ip2)
  1157  	}
  1158  	return (!address.Negated && matches) || (address.Negated && !matches)
  1159  }
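
        // For example, given a negated source match "! -s 10.0.0.0/8" (Negated is true,
        // Value is "10.0.0.0/8"), addressMatches returns false for "10.1.2.3" and true
        // for "192.168.0.1": the final line inverts the raw match when Negated is set.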
  1160  
  1161  // iptablesTracer holds data used while virtually tracing a packet through a set of
  1162  // iptables rules.
  1163  type iptablesTracer struct {
  1164  	ipt      *iptablestest.FakeIPTables
  1165  	localIPs sets.Set[string]
  1166  	t        *testing.T
  1167  
  1168  	// matches accumulates the list of rules that were matched, for debugging purposes.
  1169  	matches []string
  1170  
  1171  	// outputs accumulates the list of matched terminal rule targets (endpoint
  1172  	// IP:ports, or a special target like "REJECT") and is eventually used to generate
  1173  	// the return value of tracePacket.
  1174  	outputs []string
  1175  
  1176  	// markMasq tracks whether the packet has been marked for masquerading
  1177  	markMasq bool
  1178  }
  1179  
  1180  // newIPTablesTracer creates an iptablesTracer. nodeIPs are the IPs to treat as local
  1181  // node IPs (for determining whether rules with "--src-type LOCAL" or "--dst-type LOCAL"
  1182  // match).
  1183  func newIPTablesTracer(t *testing.T, ipt *iptablestest.FakeIPTables, nodeIPs []string) *iptablesTracer {
  1184  	localIPs := sets.New("127.0.0.1", "::1")
  1185  	localIPs.Insert(nodeIPs...)
  1186  
  1187  	return &iptablesTracer{
  1188  		ipt:      ipt,
  1189  		localIPs: localIPs,
  1190  		t:        t,
  1191  	}
  1192  }
  1193  
  1194  // ruleMatches checks if the given iptables rule matches (at least probabilistically) a
  1195  // packet with the given sourceIP, protocol, destIP, and destPort.
  1196  func (tracer *iptablesTracer) ruleMatches(rule *iptablestest.Rule, sourceIP, protocol, destIP, destPort string) bool {
  1197  	// The sub-rules within an iptables rule are ANDed together, so the rule only
  1198  	// matches if all of them match. Go through the sub-rules, and if any of them
  1199  	// does NOT match, then fail.
  1200  
  1201  	if rule.SourceAddress != nil && !addressMatches(tracer.t, rule.SourceAddress, sourceIP) {
  1202  		return false
  1203  	}
  1204  	if rule.SourceType != nil {
  1205  		addrtype := "not-matched"
  1206  		if tracer.localIPs.Has(sourceIP) {
  1207  			addrtype = "LOCAL"
  1208  		}
  1209  		if !rule.SourceType.Matches(addrtype) {
  1210  			return false
  1211  		}
  1212  	}
  1213  
  1214  	if rule.Protocol != nil && !rule.Protocol.Matches(protocol) {
  1215  		return false
  1216  	}
  1217  
  1218  	if rule.DestinationAddress != nil && !addressMatches(tracer.t, rule.DestinationAddress, destIP) {
  1219  		return false
  1220  	}
  1221  	if rule.DestinationType != nil {
  1222  		addrtype := "not-matched"
  1223  		if tracer.localIPs.Has(destIP) {
  1224  			addrtype = "LOCAL"
  1225  		}
  1226  		if !rule.DestinationType.Matches(addrtype) {
  1227  			return false
  1228  		}
  1229  	}
  1230  	if rule.DestinationPort != nil && !rule.DestinationPort.Matches(destPort) {
  1231  		return false
  1232  	}
  1233  
  1234  	// Any rule that checks for past state/history does not match
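        	// (the tracer simulates a single fresh packet with no prior history, so any
        	// affinity, mark, or conntrack-state match is treated as not matching)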
  1235  	if rule.AffinityCheck != nil || rule.MarkCheck != nil || rule.CTStateCheck != nil {
  1236  		return false
  1237  	}
  1238  
  1239  	// Anything else is assumed to match
  1240  	return true
  1241  }
  1242  
  1243  // runChain runs the given packet through the rules in the given table and chain, updating
  1244  // tracer's internal state accordingly. It returns true if it hits a terminal action.
  1245  func (tracer *iptablesTracer) runChain(table utiliptables.Table, chain utiliptables.Chain, sourceIP, protocol, destIP, destPort string) bool {
  1246  	c, _ := tracer.ipt.Dump.GetChain(table, chain)
  1247  	if c == nil {
  1248  		return false
  1249  	}
  1250  
  1251  	for _, rule := range c.Rules {
  1252  		if rule.Jump == nil {
  1253  			continue
  1254  		}
  1255  
  1256  		if !tracer.ruleMatches(rule, sourceIP, protocol, destIP, destPort) {
  1257  			continue
  1258  		}
  1259  		// record the matched rule for debugging purposes
  1260  		tracer.matches = append(tracer.matches, rule.Raw)
  1261  
  1262  		switch rule.Jump.Value {
  1263  		case "KUBE-MARK-MASQ":
  1264  			tracer.markMasq = true
  1265  			continue
  1266  
  1267  		case "ACCEPT", "REJECT", "DROP":
  1268  			// (only valid in filter)
  1269  			tracer.outputs = append(tracer.outputs, rule.Jump.Value)
  1270  			return true
  1271  
  1272  		case "DNAT":
  1273  			// (only valid in nat)
  1274  			tracer.outputs = append(tracer.outputs, rule.DNATDestination.Value)
  1275  			return true
  1276  
  1277  		default:
  1278  			// We got a "-j KUBE-SOMETHING", so process that chain
  1279  			terminated := tracer.runChain(table, utiliptables.Chain(rule.Jump.Value), sourceIP, protocol, destIP, destPort)
  1280  
  1281  			// If the subchain hit a terminal rule AND the rule that sent us
  1282  			// to that chain was non-probabilistic, then this chain terminates
  1283  			// as well. But if we went there because of a --probability rule,
  1284  			// then we want to keep accumulating further matches against this
  1285  			// chain.
  1286  			if terminated && rule.Probability == nil {
  1287  				return true
  1288  			}
  1289  		}
  1290  	}
  1291  
  1292  	return false
  1293  }
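
        // For example, a KUBE-SVC chain with two endpoints jumps to its first KUBE-SEP
        // chain with "-m statistic --mode random --probability 0.5000000000" and to its
        // second unconditionally. Because the first jump is probabilistic, runChain
        // keeps scanning after that subchain terminates, so a trace accumulates both
        // endpoints (e.g. "10.180.0.4:80, 10.180.0.5:80" in the tests below).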
  1294  
  1295  // tracePacket determines what would happen to a packet with the given sourceIP, protocol,
  1296  // destIP, and destPort, given the indicated iptables ruleData. nodeIP is the local node
  1297  // IP (for rules matching "LOCAL"). (The protocol value should be lowercase as in iptables
  1298  // rules, not uppercase as in corev1.)
  1299  //
  1300  // The return values are: an array of matched rules (for debugging), the final packet
  1301  // destinations (a comma-separated list of IPs, or one of the special targets "ACCEPT",
  1302  // "DROP", or "REJECT"), and whether the packet would be masqueraded.
  1303  func tracePacket(t *testing.T, ipt *iptablestest.FakeIPTables, sourceIP, protocol, destIP, destPort string, nodeIPs []string) ([]string, string, bool) {
  1304  	tracer := newIPTablesTracer(t, ipt, nodeIPs)
  1305  
  1306  	// nat:PREROUTING goes first
  1307  	tracer.runChain(utiliptables.TableNAT, utiliptables.ChainPrerouting, sourceIP, protocol, destIP, destPort)
  1308  
  1309  	// After the PREROUTING rules run, pending DNATs are processed (which would affect
  1310  	// the destination IP that later rules match against).
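        	// (Taking the text before the first ":" assumes IPv4 "IP:port" outputs.)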
  1311  	if len(tracer.outputs) != 0 {
  1312  		destIP = strings.Split(tracer.outputs[0], ":")[0]
  1313  	}
  1314  
  1315  	// Now the filter rules get run; exactly which ones depend on whether this is an
  1316  	// inbound, outbound, or intra-host packet, which we don't know. So we just run
  1317  	// the interesting chains manually. (Theoretically this could cause conflicts in
  1318  	// the future, in which case we'd have to do something more complicated.)
  1319  	tracer.runChain(utiliptables.TableFilter, kubeServicesChain, sourceIP, protocol, destIP, destPort)
  1320  	tracer.runChain(utiliptables.TableFilter, kubeExternalServicesChain, sourceIP, protocol, destIP, destPort)
  1321  	tracer.runChain(utiliptables.TableFilter, kubeNodePortsChain, sourceIP, protocol, destIP, destPort)
  1322  	tracer.runChain(utiliptables.TableFilter, kubeProxyFirewallChain, sourceIP, protocol, destIP, destPort)
  1323  
  1324  	// Finally, the nat:POSTROUTING rules run, but the only interesting thing that
  1325  	// happens there is that the masquerade mark gets turned into actual masquerading.
  1326  
  1327  	return tracer.matches, strings.Join(tracer.outputs, ", "), tracer.markMasq
  1328  }
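
        // For example, tracing a pod-to-ClusterIP packet against the static rules in
        // TestTracePacket below:
        //
        //	matches, output, masq := tracePacket(t, ipt, "10.0.0.2", "tcp", "172.30.0.41", "80", testNodeIPs)
        //
        // would yield output "10.180.0.1:80" (the single endpoint) with masq false.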
  1329  
  1330  type packetFlowTest struct {
  1331  	name     string
  1332  	sourceIP string
  1333  	protocol v1.Protocol
  1334  	destIP   string
  1335  	destPort int
  1336  	output   string
  1337  	masq     bool
  1338  }
  1339  
  1340  func runPacketFlowTests(t *testing.T, line int, ipt *iptablestest.FakeIPTables, nodeIPs []string, testCases []packetFlowTest) {
  1341  	lineStr := ""
  1342  	if line != 0 {
  1343  		lineStr = fmt.Sprintf(" (from line %d)", line)
  1344  	}
  1345  	for _, tc := range testCases {
  1346  		t.Run(tc.name, func(t *testing.T) {
  1347  			protocol := strings.ToLower(string(tc.protocol))
  1348  			if protocol == "" {
  1349  				protocol = "tcp"
  1350  			}
  1351  			matches, output, masq := tracePacket(t, ipt, tc.sourceIP, protocol, tc.destIP, fmt.Sprintf("%d", tc.destPort), nodeIPs)
  1352  			var errors []string
  1353  			if output != tc.output {
  1354  				errors = append(errors, fmt.Sprintf("wrong output: expected %q got %q", tc.output, output))
  1355  			}
  1356  			if masq != tc.masq {
  1357  				errors = append(errors, fmt.Sprintf("wrong masq: expected %v got %v", tc.masq, masq))
  1358  			}
  1359  			if errors != nil {
  1360  				t.Errorf("Test %q of a %s packet from %s to %s:%d%s got result:\n%s\n\nBy matching:\n%s\n\n",
  1361  					tc.name, protocol, tc.sourceIP, tc.destIP, tc.destPort, lineStr, strings.Join(errors, "\n"), strings.Join(matches, "\n"))
  1362  			}
  1363  		})
  1364  	}
  1365  }
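
        // Each packetFlowTest describes a single packet; protocol defaults to TCP when
        // unset. A typical invocation looks like (see TestTracePacket below):
        //
        //	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
        //		{name: "single endpoint", sourceIP: "10.0.0.2", destIP: "172.30.0.41", destPort: 80, output: "10.180.0.1:80"},
        //	})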
  1366  
  1367  // This tests tracePacket against static data, just to make sure we match things in the
  1368  // way we expect to.
  1369  func TestTracePacket(t *testing.T) {
  1370  	rules := dedent.Dedent(`
  1371  		*filter
  1372  		:INPUT - [0:0]
  1373  		:FORWARD - [0:0]
  1374  		:OUTPUT - [0:0]
  1375  		:KUBE-EXTERNAL-SERVICES - [0:0]
  1376  		:KUBE-FIREWALL - [0:0]
  1377  		:KUBE-FORWARD - [0:0]
  1378  		:KUBE-NODEPORTS - [0:0]
  1379  		:KUBE-SERVICES - [0:0]
  1380  		:KUBE-PROXY-FIREWALL - [0:0]
  1381  		-A INPUT -m comment --comment kubernetes health check service ports -j KUBE-NODEPORTS
  1382  		-A INPUT -m conntrack --ctstate NEW -m comment --comment kubernetes externally-visible service portals -j KUBE-EXTERNAL-SERVICES
  1383  		-A FORWARD -m comment --comment kubernetes forwarding rules -j KUBE-FORWARD
  1384  		-A FORWARD -m conntrack --ctstate NEW -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1385  		-A FORWARD -m conntrack --ctstate NEW -m comment --comment kubernetes externally-visible service portals -j KUBE-EXTERNAL-SERVICES
  1386  		-A OUTPUT -m conntrack --ctstate NEW -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1387  		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1388  		-A KUBE-SERVICES -m comment --comment "ns6/svc6:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.46 --dport 80 -j REJECT
  1389  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
  1390  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
  1391  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
  1392  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  1393  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1394  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1395  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1396  		-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
  1397  		COMMIT
  1398  		*nat
  1399  		:PREROUTING - [0:0]
  1400  		:INPUT - [0:0]
  1401  		:OUTPUT - [0:0]
  1402  		:POSTROUTING - [0:0]
  1403  		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
  1404  		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
  1405  		:KUBE-EXT-NUKIZ6OKUXPJNT4C - [0:0]
  1406  		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
  1407  		:KUBE-FW-NUKIZ6OKUXPJNT4C - [0:0]
  1408  		:KUBE-MARK-MASQ - [0:0]
  1409  		:KUBE-NODEPORTS - [0:0]
  1410  		:KUBE-POSTROUTING - [0:0]
  1411  		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
  1412  		:KUBE-SEP-I77PXRDZVX7PMWMN - [0:0]
  1413  		:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
  1414  		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1415  		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
  1416  		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
  1417  		:KUBE-SERVICES - [0:0]
  1418  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  1419  		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
  1420  		:KUBE-SVC-NUKIZ6OKUXPJNT4C - [0:0]
  1421  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  1422  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  1423  		-A PREROUTING -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1424  		-A OUTPUT -m comment --comment kubernetes service portals -j KUBE-SERVICES
  1425  		-A POSTROUTING -m comment --comment kubernetes postrouting rules -j KUBE-POSTROUTING
  1426  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  1427  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  1428  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  1429  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  1430  		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1431  		-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-EXT-X27LE4BHSL4DOUIK
  1432  		-A KUBE-NODEPORTS -m comment --comment ns5/svc5:p80 -m tcp -p tcp --dport 3002 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1433  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  1434  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1435  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1436  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1437  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  1438  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  1439  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
  1440  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1441  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-NUKIZ6OKUXPJNT4C
  1442  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  1443  		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
  1444  		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
  1445  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1446  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
  1447  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1448  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -m comment --comment "masquerade traffic for ns5/svc5:p80 external destinations" -j KUBE-MARK-MASQ
  1449  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1450  		-A KUBE-EXT-X27LE4BHSL4DOUIK -m comment --comment "masquerade traffic for ns3/svc3:p80 external destinations" -j KUBE-MARK-MASQ
  1451  		-A KUBE-EXT-X27LE4BHSL4DOUIK -j KUBE-SVC-X27LE4BHSL4DOUIK
  1452  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1453  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "other traffic to ns5/svc5:p80 will be dropped by KUBE-PROXY-FIREWALL"
  1454  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
  1455  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
  1456  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1457  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1458  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1459  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1460  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
  1461  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1462  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
  1463  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
  1464  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
  1465  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
  1466  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1467  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
  1468  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
  1469  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1470  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
  1471  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1472  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -j KUBE-SEP-I77PXRDZVX7PMWMN
  1473  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1474  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP
  1475  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1476  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
  1477  		COMMIT
  1478  		`)
  1479  
  1480  	ipt := iptablestest.NewFake()
  1481  	err := ipt.RestoreAll([]byte(rules), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
  1482  	if err != nil {
  1483  		t.Fatalf("Restore of test data failed: %v", err)
  1484  	}
  1485  
  1486  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  1487  		{
  1488  			name:     "no match",
  1489  			sourceIP: "10.0.0.2",
  1490  			destIP:   "10.0.0.3",
  1491  			destPort: 80,
  1492  			output:   "",
  1493  		},
  1494  		{
  1495  			name:     "single endpoint",
  1496  			sourceIP: "10.0.0.2",
  1497  			destIP:   "172.30.0.41",
  1498  			destPort: 80,
  1499  			output:   "10.180.0.1:80",
  1500  		},
  1501  		{
  1502  			name:     "multiple endpoints",
  1503  			sourceIP: "10.0.0.2",
  1504  			destIP:   "172.30.0.44",
  1505  			destPort: 80,
  1506  			output:   "10.180.0.4:80, 10.180.0.5:80",
  1507  		},
  1508  		{
  1509  			name:     "LOCAL, KUBE-MARK-MASQ",
  1510  			sourceIP: testNodeIP,
  1511  			destIP:   "192.168.99.22",
  1512  			destPort: 80,
  1513  			output:   "10.180.0.2:80",
  1514  			masq:     true,
  1515  		},
  1516  		{
  1517  			name:     "DROP",
  1518  			sourceIP: testExternalClient,
  1519  			destIP:   "192.168.99.22",
  1520  			destPort: 80,
  1521  			output:   "DROP",
  1522  		},
  1523  		{
  1524  			name:     "ACCEPT (NodePortHealthCheck)",
  1525  			sourceIP: testNodeIP,
  1526  			destIP:   testNodeIP,
  1527  			destPort: 30000,
  1528  			output:   "ACCEPT",
  1529  		},
  1530  		{
  1531  			name:     "REJECT",
  1532  			sourceIP: "10.0.0.2",
  1533  			destIP:   "172.30.0.46",
  1534  			destPort: 80,
  1535  			output:   "REJECT",
  1536  		},
  1537  	})
  1538  }
  1539  
  1540  // TestOverallIPTablesRules creates a variety of services and verifies that the generated
  1541  // rules are exactly as expected.
  1542  func TestOverallIPTablesRules(t *testing.T) {
  1543  	logger, _ := klogtesting.NewTestContext(t)
  1544  	ipt := iptablestest.NewFake()
  1545  	fp := NewFakeProxier(ipt)
  1546  	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
  1547  
  1548  	makeServiceMap(fp,
  1549  		// create ClusterIP service
  1550  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  1551  			svc.Spec.ClusterIP = "172.30.0.41"
  1552  			svc.Spec.Ports = []v1.ServicePort{{
  1553  				Name:     "p80",
  1554  				Port:     80,
  1555  				Protocol: v1.ProtocolTCP,
  1556  			}}
  1557  		}),
  1558  		// create LoadBalancer service with Local traffic policy
  1559  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  1560  			svc.Spec.Type = "LoadBalancer"
  1561  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  1562  			svc.Spec.ClusterIP = "172.30.0.42"
  1563  			svc.Spec.Ports = []v1.ServicePort{{
  1564  				Name:     "p80",
  1565  				Port:     80,
  1566  				Protocol: v1.ProtocolTCP,
  1567  				NodePort: 3001,
  1568  			}}
  1569  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1570  				IP: "1.2.3.4",
  1571  			}}
  1572  			svc.Spec.ExternalIPs = []string{"192.168.99.22"}
  1573  			svc.Spec.HealthCheckNodePort = 30000
  1574  		}),
  1575  		// create NodePort service
  1576  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  1577  			svc.Spec.Type = "NodePort"
  1578  			svc.Spec.ClusterIP = "172.30.0.43"
  1579  			svc.Spec.Ports = []v1.ServicePort{{
  1580  				Name:     "p80",
  1581  				Port:     80,
  1582  				Protocol: v1.ProtocolTCP,
  1583  				NodePort: 3003,
  1584  			}}
  1585  		}),
  1586  		// create ExternalIP service
  1587  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  1588  			svc.Spec.Type = "NodePort"
  1589  			svc.Spec.ClusterIP = "172.30.0.44"
  1590  			svc.Spec.ExternalIPs = []string{"192.168.99.33"}
  1591  			svc.Spec.Ports = []v1.ServicePort{{
  1592  				Name:       "p80",
  1593  				Port:       80,
  1594  				Protocol:   v1.ProtocolTCP,
  1595  				TargetPort: intstr.FromInt32(80),
  1596  			}}
  1597  		}),
  1598  		// create LoadBalancer service with Cluster traffic policy, source ranges,
  1599  		// and session affinity
  1600  		makeTestService("ns5", "svc5", func(svc *v1.Service) {
  1601  			svc.Spec.Type = "LoadBalancer"
  1602  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
  1603  			svc.Spec.ClusterIP = "172.30.0.45"
  1604  			svc.Spec.Ports = []v1.ServicePort{{
  1605  				Name:     "p80",
  1606  				Port:     80,
  1607  				Protocol: v1.ProtocolTCP,
  1608  				NodePort: 3002,
  1609  			}}
  1610  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1611  				IP: "5.6.7.8",
  1612  			}}
  1613  			svc.Spec.HealthCheckNodePort = 30000
  1614  			// Extra whitespace to ensure that an invalid value will not
  1615  			// result in a crash, for backward compatibility.
  1616  			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}
  1617  
  1618  			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  1619  			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
  1620  				ClientIP: &v1.ClientIPConfig{
  1621  					TimeoutSeconds: ptr.To[int32](10800),
  1622  				},
  1623  			}
  1624  		}),
  1625  		// create ClusterIP service with no endpoints
  1626  		makeTestService("ns6", "svc6", func(svc *v1.Service) {
  1627  			svc.Spec.Type = "ClusterIP"
  1628  			svc.Spec.ClusterIP = "172.30.0.46"
  1629  			svc.Spec.Ports = []v1.ServicePort{{
  1630  				Name:       "p80",
  1631  				Port:       80,
  1632  				Protocol:   v1.ProtocolTCP,
  1633  				TargetPort: intstr.FromInt32(80),
  1634  			}}
  1635  		}),
  1636  	)
  1637  	populateEndpointSlices(fp,
  1638  		// create ClusterIP service endpoints
  1639  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  1640  			eps.AddressType = discovery.AddressTypeIPv4
  1641  			eps.Endpoints = []discovery.Endpoint{{
  1642  				Addresses: []string{"10.180.0.1"},
  1643  			}}
  1644  			eps.Ports = []discovery.EndpointPort{{
  1645  				Name:     ptr.To("p80"),
  1646  				Port:     ptr.To[int32](80),
  1647  				Protocol: ptr.To(v1.ProtocolTCP),
  1648  			}}
  1649  		}),
  1650  		// create Local LoadBalancer endpoints. Note that since we aren't setting
  1651  		// the endpoint's NodeName, this endpoint will be considered non-local and ignored.
  1652  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  1653  			eps.AddressType = discovery.AddressTypeIPv4
  1654  			eps.Endpoints = []discovery.Endpoint{{
  1655  				Addresses: []string{"10.180.0.2"},
  1656  			}}
  1657  			eps.Ports = []discovery.EndpointPort{{
  1658  				Name:     ptr.To("p80"),
  1659  				Port:     ptr.To[int32](80),
  1660  				Protocol: ptr.To(v1.ProtocolTCP),
  1661  			}}
  1662  		}),
  1663  		// create NodePort service endpoints
  1664  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  1665  			eps.AddressType = discovery.AddressTypeIPv4
  1666  			eps.Endpoints = []discovery.Endpoint{{
  1667  				Addresses: []string{"10.180.0.3"},
  1668  			}}
  1669  			eps.Ports = []discovery.EndpointPort{{
  1670  				Name:     ptr.To("p80"),
  1671  				Port:     ptr.To[int32](80),
  1672  				Protocol: ptr.To(v1.ProtocolTCP),
  1673  			}}
  1674  		}),
  1675  		// create ExternalIP service endpoints
  1676  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  1677  			eps.AddressType = discovery.AddressTypeIPv4
  1678  			eps.Endpoints = []discovery.Endpoint{{
  1679  				Addresses: []string{"10.180.0.4"},
  1680  			}, {
  1681  				Addresses: []string{"10.180.0.5"},
  1682  				NodeName:  ptr.To(testHostname),
  1683  			}}
  1684  			eps.Ports = []discovery.EndpointPort{{
  1685  				Name:     ptr.To("p80"),
  1686  				Port:     ptr.To[int32](80),
  1687  				Protocol: ptr.To(v1.ProtocolTCP),
  1688  			}}
  1689  		}),
  1690  		// create Cluster LoadBalancer endpoints
  1691  		makeTestEndpointSlice("ns5", "svc5", 1, func(eps *discovery.EndpointSlice) {
  1692  			eps.AddressType = discovery.AddressTypeIPv4
  1693  			eps.Endpoints = []discovery.Endpoint{{
  1694  				Addresses: []string{"10.180.0.3"},
  1695  			}}
  1696  			eps.Ports = []discovery.EndpointPort{{
  1697  				Name:     ptr.To("p80"),
  1698  				Port:     ptr.To[int32](80),
  1699  				Protocol: ptr.To(v1.ProtocolTCP),
  1700  			}}
  1701  		}),
  1702  	)
  1703  
  1704  	fp.syncProxyRules()
  1705  
  1706  	expected := dedent.Dedent(`
  1707  		*filter
  1708  		:KUBE-NODEPORTS - [0:0]
  1709  		:KUBE-SERVICES - [0:0]
  1710  		:KUBE-EXTERNAL-SERVICES - [0:0]
  1711  		:KUBE-FIREWALL - [0:0]
  1712  		:KUBE-FORWARD - [0:0]
  1713  		:KUBE-PROXY-FIREWALL - [0:0]
  1714  		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
  1715  		-A KUBE-SERVICES -m comment --comment "ns6/svc6:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.46 --dport 80 -j REJECT
  1716  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j DROP
  1717  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
  1718  		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns2/svc2:p80 has no local endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j DROP
  1719  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  1720  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  1721  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  1722  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  1723  		-A KUBE-PROXY-FIREWALL -m comment --comment "ns5/svc5:p80 traffic not accepted by KUBE-FW-NUKIZ6OKUXPJNT4C" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP
  1724  		COMMIT
  1725  		*nat
  1726  		:KUBE-NODEPORTS - [0:0]
  1727  		:KUBE-SERVICES - [0:0]
  1728  		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
  1729  		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
  1730  		:KUBE-EXT-NUKIZ6OKUXPJNT4C - [0:0]
  1731  		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
  1732  		:KUBE-FW-NUKIZ6OKUXPJNT4C - [0:0]
  1733  		:KUBE-MARK-MASQ - [0:0]
  1734  		:KUBE-POSTROUTING - [0:0]
  1735  		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
  1736  		:KUBE-SEP-I77PXRDZVX7PMWMN - [0:0]
  1737  		:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
  1738  		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
  1739  		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
  1740  		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
  1741  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  1742  		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
  1743  		:KUBE-SVC-NUKIZ6OKUXPJNT4C - [0:0]
  1744  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  1745  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  1746  		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1747  		-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-EXT-X27LE4BHSL4DOUIK
  1748  		-A KUBE-NODEPORTS -m comment --comment ns5/svc5:p80 -m tcp -p tcp --dport 3002 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1749  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  1750  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1751  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1752  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
  1753  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  1754  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  1755  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
  1756  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1757  		-A KUBE-SERVICES -m comment --comment "ns5/svc5:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-NUKIZ6OKUXPJNT4C
  1758  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  1759  		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
  1760  		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
  1761  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1762  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
  1763  		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
  1764  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -m comment --comment "masquerade traffic for ns5/svc5:p80 external destinations" -j KUBE-MARK-MASQ
  1765  		-A KUBE-EXT-NUKIZ6OKUXPJNT4C -j KUBE-SVC-NUKIZ6OKUXPJNT4C
  1766  		-A KUBE-EXT-X27LE4BHSL4DOUIK -m comment --comment "masquerade traffic for ns3/svc3:p80 external destinations" -j KUBE-MARK-MASQ
  1767  		-A KUBE-EXT-X27LE4BHSL4DOUIK -j KUBE-SVC-X27LE4BHSL4DOUIK
  1768  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-NUKIZ6OKUXPJNT4C
  1769  		-A KUBE-FW-NUKIZ6OKUXPJNT4C -m comment --comment "other traffic to ns5/svc5:p80 will be dropped by KUBE-PROXY-FIREWALL"
  1770  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  1771  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  1772  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  1773  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  1774  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
  1775  		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
  1776  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1777  		-A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --set -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1778  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
  1779  		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
  1780  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
  1781  		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
  1782  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
  1783  		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
  1784  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
  1785  		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
  1786  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1787  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
  1788  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
  1789  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1790  		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
  1791  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1792  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --rcheck --seconds 10800 --reap -j KUBE-SEP-I77PXRDZVX7PMWMN
  1793  		-A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -j KUBE-SEP-I77PXRDZVX7PMWMN
  1794  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1795  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP
  1796  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  1797  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
  1798  		COMMIT
  1799  		`)
  1800  
  1801  	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
  1802  
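        	// Sanity-check the rule-count metric against a direct count of the nat
        	// rules in the generated output.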
  1803  	nNatRules := countRulesFromMetric(logger, utiliptables.TableNAT)
  1804  	expectedNatRules := countRules(logger, utiliptables.TableNAT, fp.iptablesData.String())
  1805  
  1806  	if nNatRules != expectedNatRules {
  1807  		t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules)
  1808  	}
  1809  }
  1810  
  1811  // TestNoEndpointsReject tests that a service with no endpoints rejects connections to
  1812  // its ClusterIP, ExternalIPs, NodePort, and LoadBalancer IP.
  1813  func TestNoEndpointsReject(t *testing.T) {
  1814  	ipt := iptablestest.NewFake()
  1815  	fp := NewFakeProxier(ipt)
  1816  	svcIP := "172.30.0.41"
  1817  	svcPort := 80
  1818  	svcNodePort := 3001
  1819  	svcExternalIPs := "192.168.99.11"
  1820  	svcLBIP := "1.2.3.4"
  1821  	svcPortName := proxy.ServicePortName{
  1822  		NamespacedName: makeNSN("ns1", "svc1"),
  1823  		Port:           "p80",
  1824  	}
  1825  
  1826  	makeServiceMap(fp,
  1827  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  1828  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1829  			svc.Spec.ClusterIP = svcIP
  1830  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  1831  			svc.Spec.Ports = []v1.ServicePort{{
  1832  				Name:     svcPortName.Port,
  1833  				Protocol: v1.ProtocolTCP,
  1834  				Port:     int32(svcPort),
  1835  				NodePort: int32(svcNodePort),
  1836  			}}
  1837  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  1838  				IP: svcLBIP,
  1839  			}}
  1840  		}),
  1841  	)
  1842  	fp.syncProxyRules()
  1843  
  1844  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  1845  		{
  1846  			name:     "pod to cluster IP with no endpoints",
  1847  			sourceIP: "10.0.0.2",
  1848  			destIP:   svcIP,
  1849  			destPort: svcPort,
  1850  			output:   "REJECT",
  1851  		},
  1852  		{
  1853  			name:     "external to external IP with no endpoints",
  1854  			sourceIP: testExternalClient,
  1855  			destIP:   svcExternalIPs,
  1856  			destPort: svcPort,
  1857  			output:   "REJECT",
  1858  		},
  1859  		{
  1860  			name:     "pod to NodePort with no endpoints",
  1861  			sourceIP: "10.0.0.2",
  1862  			destIP:   testNodeIP,
  1863  			destPort: svcNodePort,
  1864  			output:   "REJECT",
  1865  		},
  1866  		{
  1867  			name:     "external to NodePort with no endpoints",
  1868  			sourceIP: testExternalClient,
  1869  			destIP:   testNodeIP,
  1870  			destPort: svcNodePort,
  1871  			output:   "REJECT",
  1872  		},
  1873  		{
  1874  			name:     "pod to LoadBalancer IP with no endpoints",
  1875  			sourceIP: "10.0.0.2",
  1876  			destIP:   svcLBIP,
  1877  			destPort: svcPort,
  1878  			output:   "REJECT",
  1879  		},
  1880  		{
  1881  			name:     "external to LoadBalancer IP with no endpoints",
  1882  			sourceIP: testExternalClient,
  1883  			destIP:   svcLBIP,
  1884  			destPort: svcPort,
  1885  			output:   "REJECT",
  1886  		},
  1887  	})
  1888  }
  1889  
  1890  // TestClusterIPGeneral tests various basic features of a ClusterIP service.
  1891  func TestClusterIPGeneral(t *testing.T) {
  1892  	ipt := iptablestest.NewFake()
  1893  	fp := NewFakeProxier(ipt)
  1894  
  1895  	makeServiceMap(fp,
  1896  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  1897  			svc.Spec.ClusterIP = "172.30.0.41"
  1898  			svc.Spec.Ports = []v1.ServicePort{{
  1899  				Name:     "http",
  1900  				Port:     80,
  1901  				Protocol: v1.ProtocolTCP,
  1902  			}}
  1903  		}),
  1904  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  1905  			svc.Spec.ClusterIP = "172.30.0.42"
  1906  			svc.Spec.Ports = []v1.ServicePort{
  1907  				{
  1908  					Name:     "http",
  1909  					Port:     80,
  1910  					Protocol: v1.ProtocolTCP,
  1911  				},
  1912  				{
  1913  					Name:       "https",
  1914  					Port:       443,
  1915  					Protocol:   v1.ProtocolTCP,
  1916  					TargetPort: intstr.FromInt32(8443),
  1917  				},
  1918  				{
  1919  					Name:     "dns-udp",
  1920  					Port:     53,
  1921  					Protocol: v1.ProtocolUDP,
  1922  				},
  1923  				{
  1924  					Name:     "dns-tcp",
  1925  					Port:     53,
  1926  					Protocol: v1.ProtocolTCP,
  1927  					// We use TargetPort on TCP but not UDP/SCTP to
  1928  					// help disambiguate the output.
  1929  					TargetPort: intstr.FromInt32(5353),
  1930  				},
  1931  				{
  1932  					Name:     "dns-sctp",
  1933  					Port:     53,
  1934  					Protocol: v1.ProtocolSCTP,
  1935  				},
  1936  			}
  1937  		}),
  1938  	)
  1939  
  1940  	populateEndpointSlices(fp,
  1941  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  1942  			eps.AddressType = discovery.AddressTypeIPv4
  1943  			eps.Endpoints = []discovery.Endpoint{{
  1944  				Addresses: []string{"10.180.0.1"},
  1945  				NodeName:  ptr.To(testHostname),
  1946  			}}
  1947  			eps.Ports = []discovery.EndpointPort{{
  1948  				Name:     ptr.To("http"),
  1949  				Port:     ptr.To[int32](80),
  1950  				Protocol: ptr.To(v1.ProtocolTCP),
  1951  			}}
  1952  		}),
  1953  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  1954  			eps.AddressType = discovery.AddressTypeIPv4
  1955  			eps.Endpoints = []discovery.Endpoint{
  1956  				{
  1957  					Addresses: []string{"10.180.0.1"},
  1958  					NodeName:  ptr.To(testHostname),
  1959  				},
  1960  				{
  1961  					Addresses: []string{"10.180.2.1"},
  1962  					NodeName:  ptr.To("host2"),
  1963  				},
  1964  			}
  1965  			eps.Ports = []discovery.EndpointPort{
  1966  				{
  1967  					Name:     ptr.To("http"),
  1968  					Port:     ptr.To[int32](80),
  1969  					Protocol: ptr.To(v1.ProtocolTCP),
  1970  				},
  1971  				{
  1972  					Name:     ptr.To("https"),
  1973  					Port:     ptr.To[int32](8443),
  1974  					Protocol: ptr.To(v1.ProtocolTCP),
  1975  				},
  1976  				{
  1977  					Name:     ptr.To("dns-udp"),
  1978  					Port:     ptr.To[int32](53),
  1979  					Protocol: ptr.To(v1.ProtocolUDP),
  1980  				},
  1981  				{
  1982  					Name:     ptr.To("dns-tcp"),
  1983  					Port:     ptr.To[int32](5353),
  1984  					Protocol: ptr.To(v1.ProtocolTCP),
  1985  				},
  1986  				{
  1987  					Name:     ptr.To("dns-sctp"),
  1988  					Port:     ptr.To[int32](53),
  1989  					Protocol: ptr.To(v1.ProtocolSCTP),
  1990  				},
  1991  			}
  1992  		}),
  1993  	)
  1994  
  1995  	fp.syncProxyRules()
  1996  
  1997  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  1998  		{
  1999  			name:     "simple clusterIP",
  2000  			sourceIP: "10.180.0.2",
  2001  			destIP:   "172.30.0.41",
  2002  			destPort: 80,
  2003  			output:   "10.180.0.1:80",
  2004  			masq:     false,
  2005  		},
  2006  		{
  2007  			name:     "hairpin to cluster IP",
  2008  			sourceIP: "10.180.0.1",
  2009  			destIP:   "172.30.0.41",
  2010  			destPort: 80,
  2011  			output:   "10.180.0.1:80",
  2012  			masq:     true,
  2013  		},
  2014  		{
  2015  			name:     "clusterIP with multiple endpoints",
  2016  			sourceIP: "10.180.0.2",
  2017  			destIP:   "172.30.0.42",
  2018  			destPort: 80,
  2019  			output:   "10.180.0.1:80, 10.180.2.1:80",
  2020  			masq:     false,
  2021  		},
  2022  		{
  2023  			name:     "clusterIP with TargetPort",
  2024  			sourceIP: "10.180.0.2",
  2025  			destIP:   "172.30.0.42",
  2026  			destPort: 443,
  2027  			output:   "10.180.0.1:8443, 10.180.2.1:8443",
  2028  			masq:     false,
  2029  		},
  2030  		{
  2031  			name:     "clusterIP with TCP, UDP, and SCTP on same port (TCP)",
  2032  			sourceIP: "10.180.0.2",
  2033  			protocol: v1.ProtocolTCP,
  2034  			destIP:   "172.30.0.42",
  2035  			destPort: 53,
  2036  			output:   "10.180.0.1:5353, 10.180.2.1:5353",
  2037  			masq:     false,
  2038  		},
  2039  		{
  2040  			name:     "clusterIP with TCP, UDP, and SCTP on same port (UDP)",
  2041  			sourceIP: "10.180.0.2",
  2042  			protocol: v1.ProtocolUDP,
  2043  			destIP:   "172.30.0.42",
  2044  			destPort: 53,
  2045  			output:   "10.180.0.1:53, 10.180.2.1:53",
  2046  			masq:     false,
  2047  		},
  2048  		{
  2049  			name:     "clusterIP with TCP, UDP, and SCTP on same port (SCTP)",
  2050  			sourceIP: "10.180.0.2",
  2051  			protocol: v1.ProtocolSCTP,
  2052  			destIP:   "172.30.0.42",
  2053  			destPort: 53,
  2054  			output:   "10.180.0.1:53, 10.180.2.1:53",
  2055  			masq:     false,
  2056  		},
  2057  		{
  2058  			name:     "TCP-only port does not match UDP traffic",
  2059  			sourceIP: "10.180.0.2",
  2060  			protocol: v1.ProtocolUDP,
  2061  			destIP:   "172.30.0.42",
  2062  			destPort: 80,
  2063  			output:   "",
  2064  		},
  2065  		{
  2066  			name:     "svc1 does not accept svc2's ports",
  2067  			sourceIP: "10.180.0.2",
  2068  			destIP:   "172.30.0.41",
  2069  			destPort: 443,
  2070  			output:   "",
  2071  		},
  2072  	})
  2073  }
  2074  
  2075  func TestLoadBalancer(t *testing.T) {
  2076  	ipt := iptablestest.NewFake()
  2077  	fp := NewFakeProxier(ipt)
  2078  	svcIP := "172.30.0.41"
  2079  	svcPort := 80
  2080  	svcNodePort := 3001
  2081  	svcLBIP1 := "1.2.3.4"
  2082  	svcLBIP2 := "5.6.7.8"
  2083  	svcPortName := proxy.ServicePortName{
  2084  		NamespacedName: makeNSN("ns1", "svc1"),
  2085  		Port:           "p80",
  2086  		Protocol:       v1.ProtocolTCP,
  2087  	}
  2088  
  2089  	makeServiceMap(fp,
  2090  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2091  			svc.Spec.Type = "LoadBalancer"
  2092  			svc.Spec.ClusterIP = svcIP
  2093  			svc.Spec.Ports = []v1.ServicePort{{
  2094  				Name:     svcPortName.Port,
  2095  				Port:     int32(svcPort),
  2096  				Protocol: v1.ProtocolTCP,
  2097  				NodePort: int32(svcNodePort),
  2098  			}}
  2099  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
  2100  				{IP: svcLBIP1},
  2101  				{IP: svcLBIP2},
  2102  			}
  2103  			svc.Spec.LoadBalancerSourceRanges = []string{
  2104  				"192.168.0.0/24",
  2105  
  2106  				// Regression test that excess whitespace gets ignored
  2107  				" 203.0.113.0/25",
  2108  			}
  2109  		}),
  2110  	)
  2111  
  2112  	epIP := "10.180.0.1"
  2113  	populateEndpointSlices(fp,
  2114  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2115  			eps.AddressType = discovery.AddressTypeIPv4
  2116  			eps.Endpoints = []discovery.Endpoint{{
  2117  				Addresses: []string{epIP},
  2118  			}}
  2119  			eps.Ports = []discovery.EndpointPort{{
  2120  				Name:     ptr.To(svcPortName.Port),
  2121  				Port:     ptr.To(int32(svcPort)),
  2122  				Protocol: ptr.To(v1.ProtocolTCP),
  2123  			}}
  2124  		}),
  2125  	)
  2126  
  2127  	fp.syncProxyRules()
  2128  
  2129  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2130  		{
  2131  			name:     "pod to cluster IP",
  2132  			sourceIP: "10.0.0.2",
  2133  			destIP:   svcIP,
  2134  			destPort: svcPort,
  2135  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2136  			masq:     false,
  2137  		},
  2138  		{
  2139  			name:     "external to nodePort",
  2140  			sourceIP: testExternalClient,
  2141  			destIP:   testNodeIP,
  2142  			destPort: svcNodePort,
  2143  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2144  			masq:     true,
  2145  		},
  2146  		{
  2147  			name:     "nodePort bypasses LoadBalancerSourceRanges",
  2148  			sourceIP: testExternalClientBlocked,
  2149  			destIP:   testNodeIP,
  2150  			destPort: svcNodePort,
  2151  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2152  			masq:     true,
  2153  		},
  2154  		{
  2155  			name:     "accepted external to LB1",
  2156  			sourceIP: testExternalClient,
  2157  			destIP:   svcLBIP1,
  2158  			destPort: svcPort,
  2159  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2160  			masq:     true,
  2161  		},
  2162  		{
  2163  			name:     "accepted external to LB2",
  2164  			sourceIP: testExternalClient,
  2165  			destIP:   svcLBIP2,
  2166  			destPort: svcPort,
  2167  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2168  			masq:     true,
  2169  		},
  2170  		{
  2171  			name:     "blocked external to LB1",
  2172  			sourceIP: testExternalClientBlocked,
  2173  			destIP:   svcLBIP1,
  2174  			destPort: svcPort,
  2175  			output:   "DROP",
  2176  		},
  2177  		{
  2178  			name:     "blocked external to LB2",
  2179  			sourceIP: testExternalClientBlocked,
  2180  			destIP:   svcLBIP2,
  2181  			destPort: svcPort,
  2182  			output:   "DROP",
  2183  		},
  2184  		{
  2185  			name:     "pod to LB1 (blocked by LoadBalancerSourceRanges)",
  2186  			sourceIP: "10.0.0.2",
  2187  			destIP:   svcLBIP1,
  2188  			destPort: svcPort,
  2189  			output:   "DROP",
  2190  		},
  2191  		{
  2192  			name:     "pod to LB2 (blocked by LoadBalancerSourceRanges)",
  2193  			sourceIP: "10.0.0.2",
  2194  			destIP:   svcLBIP2,
  2195  			destPort: svcPort,
  2196  			output:   "DROP",
  2197  		},
  2198  		{
  2199  			name:     "node to LB1 (allowed by LoadBalancerSourceRanges)",
  2200  			sourceIP: testNodeIP,
  2201  			destIP:   svcLBIP1,
  2202  			destPort: svcPort,
  2203  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2204  			masq:     true,
  2205  		},
  2206  		{
  2207  			name:     "node to LB2 (allowed by LoadBalancerSourceRanges)",
  2208  			sourceIP: testNodeIP,
  2209  			destIP:   svcLBIP2,
  2210  			destPort: svcPort,
  2211  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2212  			masq:     true,
  2213  		},
  2214  
  2215  		// The LB rules assume that when you connect from a node to a LB IP,
  2216  		// something external to kube-proxy will SNAT the connection to the LB
  2217  		// IP, so if the LoadBalancerSourceRanges include the node IP, we add a
  2218  		// rule allowing traffic from the LB IP as well...
  2219  		{
  2220  			name:     "same node to LB1, SNATted to LB1 (implicitly allowed)",
  2221  			sourceIP: svcLBIP1,
  2222  			destIP:   svcLBIP1,
  2223  			destPort: svcPort,
  2224  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2225  			masq:     true,
  2226  		},
  2227  		{
  2228  			name:     "same node to LB2, SNATted to LB2 (implicitly allowed)",
  2229  			sourceIP: svcLBIP2,
  2230  			destIP:   svcLBIP2,
  2231  			destPort: svcPort,
  2232  			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
  2233  			masq:     true,
  2234  		},
  2235  	})
  2236  }
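
        // A minimal sketch (the helper name is hypothetical, not proxier code)
        // of the normalization that the "excess whitespace" regression case
        // above relies on: each LoadBalancerSourceRanges entry is trimmed
        // before being parsed as a CIDR, so " 203.0.113.0/25" behaves exactly
        // like "203.0.113.0/25".
        func parseSourceRangesSketch(ranges []string) ([]*net.IPNet, error) {
        	cidrs := make([]*net.IPNet, 0, len(ranges))
        	for _, r := range ranges {
        		// Trim surrounding whitespace before parsing the CIDR.
        		_, cidr, err := netutils.ParseCIDRSloppy(strings.TrimSpace(r))
        		if err != nil {
        			return nil, err
        		}
        		cidrs = append(cidrs, cidr)
        	}
        	return cidrs, nil
        }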
  2237  
  2238  // TestNodePorts tests NodePort services under various combinations of the
  2239  // --nodeport-addresses and --localhost-nodeports flags.
  2240  func TestNodePorts(t *testing.T) {
  2241  	testCases := []struct {
  2242  		name string
  2243  
  2244  		family             v1.IPFamily
  2245  		localhostNodePorts bool
  2246  		nodePortAddresses  []string
  2247  
  2248  		// allowAltNodeIP is true if we expect NodePort traffic on the alternate
  2249  		// node IP to be accepted
  2250  		allowAltNodeIP bool
  2251  
  2252  		// expectFirewall is true if we expect KUBE-FIREWALL to be filled in with
  2253  		// an anti-martian-packet rule
  2254  		expectFirewall bool
  2255  	}{
  2256  		{
  2257  			name: "ipv4, localhost-nodeports enabled",
  2258  
  2259  			family:             v1.IPv4Protocol,
  2260  			localhostNodePorts: true,
  2261  			nodePortAddresses:  nil,
  2262  
  2263  			allowAltNodeIP: true,
  2264  			expectFirewall: true,
  2265  		},
  2266  		{
  2267  			name: "ipv4, localhost-nodeports disabled",
  2268  
  2269  			family:             v1.IPv4Protocol,
  2270  			localhostNodePorts: false,
  2271  			nodePortAddresses:  nil,
  2272  
  2273  			allowAltNodeIP: true,
  2274  			expectFirewall: false,
  2275  		},
  2276  		{
  2277  			name: "ipv4, localhost-nodeports disabled, localhost in nodeport-addresses",
  2278  
  2279  			family:             v1.IPv4Protocol,
  2280  			localhostNodePorts: false,
  2281  			nodePortAddresses:  []string{"192.168.0.0/24", "127.0.0.1/32"},
  2282  
  2283  			allowAltNodeIP: false,
  2284  			expectFirewall: false,
  2285  		},
  2286  		{
  2287  			name: "ipv4, localhost-nodeports disabled, multiple nodeport-addresses",
  2288  
  2289  			family:             v1.IPv4Protocol,
  2290  			localhostNodePorts: false,
  2291  			nodePortAddresses:  []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
  2292  
  2293  			allowAltNodeIP: true,
  2294  			expectFirewall: false,
  2295  		},
  2296  		{
  2297  			name: "ipv6, localhost-nodeports enabled",
  2298  
  2299  			family:             v1.IPv6Protocol,
  2300  			localhostNodePorts: true,
  2301  			nodePortAddresses:  nil,
  2302  
  2303  			allowAltNodeIP: true,
  2304  			expectFirewall: false,
  2305  		},
  2306  		{
  2307  			name: "ipv6, localhost-nodeports disabled",
  2308  
  2309  			family:             v1.IPv6Protocol,
  2310  			localhostNodePorts: false,
  2311  			nodePortAddresses:  nil,
  2312  
  2313  			allowAltNodeIP: true,
  2314  			expectFirewall: false,
  2315  		},
  2316  		{
  2317  			name: "ipv6, localhost-nodeports disabled, multiple nodeport-addresses",
  2318  
  2319  			family:             v1.IPv6Protocol,
  2320  			localhostNodePorts: false,
  2321  			nodePortAddresses:  []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
  2322  
  2323  			allowAltNodeIP: false,
  2324  			expectFirewall: false,
  2325  		},
  2326  	}
  2327  
  2328  	for _, tc := range testCases {
  2329  		t.Run(tc.name, func(t *testing.T) {
  2330  			var ipt *iptablestest.FakeIPTables
  2331  			var svcIP, epIP1, epIP2 string
  2332  			if tc.family == v1.IPv4Protocol {
  2333  				ipt = iptablestest.NewFake()
  2334  				svcIP = "172.30.0.41"
  2335  				epIP1 = "10.180.0.1"
  2336  				epIP2 = "10.180.2.1"
  2337  			} else {
  2338  				ipt = iptablestest.NewIPv6Fake()
  2339  				svcIP = "fd00:172:30::41"
  2340  				epIP1 = "fd00:10:180::1"
  2341  				epIP2 = "fd00:10:180::2:1"
  2342  			}
  2343  			fp := NewFakeProxier(ipt)
  2344  			fp.localhostNodePorts = tc.localhostNodePorts
  2345  			if tc.nodePortAddresses != nil {
  2346  				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
  2347  			}
  2348  
  2349  			makeServiceMap(fp,
  2350  				makeTestService("ns1", "svc1", func(svc *v1.Service) {
  2351  					svc.Spec.Type = v1.ServiceTypeNodePort
  2352  					svc.Spec.ClusterIP = svcIP
  2353  					svc.Spec.Ports = []v1.ServicePort{{
  2354  						Name:     "p80",
  2355  						Port:     80,
  2356  						Protocol: v1.ProtocolTCP,
  2357  						NodePort: 3001,
  2358  					}}
  2359  				}),
  2360  			)
  2361  
  2362  			populateEndpointSlices(fp,
  2363  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  2364  					if tc.family == v1.IPv4Protocol {
  2365  						eps.AddressType = discovery.AddressTypeIPv4
  2366  					} else {
  2367  						eps.AddressType = discovery.AddressTypeIPv6
  2368  					}
  2369  					eps.Endpoints = []discovery.Endpoint{{
  2370  						Addresses: []string{epIP1},
  2371  						NodeName:  nil,
  2372  					}, {
  2373  						Addresses: []string{epIP2},
  2374  						NodeName:  ptr.To(testHostname),
  2375  					}}
  2376  					eps.Ports = []discovery.EndpointPort{{
  2377  						Name:     ptr.To("p80"),
  2378  						Port:     ptr.To[int32](80),
  2379  						Protocol: ptr.To(v1.ProtocolTCP),
  2380  					}}
  2381  				}),
  2382  			)
  2383  
  2384  			fp.syncProxyRules()
  2385  
  2386  			var podIP, externalClientIP, nodeIP, altNodeIP, localhostIP string
  2387  			if tc.family == v1.IPv4Protocol {
  2388  				podIP = "10.0.0.2"
  2389  				externalClientIP = testExternalClient
  2390  				nodeIP = testNodeIP
  2391  				altNodeIP = testNodeIPAlt
  2392  				localhostIP = "127.0.0.1"
  2393  			} else {
  2394  				podIP = "fd00:10::2"
  2395  				externalClientIP = "2600:5200::1"
  2396  				nodeIP = testNodeIPv6
  2397  				altNodeIP = testNodeIPv6Alt
  2398  				localhostIP = "::1"
  2399  			}
  2400  			output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80")
  2401  
  2402  			// Basic tests are the same for all cases
  2403  			runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2404  				{
  2405  					name:     "pod to cluster IP",
  2406  					sourceIP: podIP,
  2407  					destIP:   svcIP,
  2408  					destPort: 80,
  2409  					output:   output,
  2410  					masq:     false,
  2411  				},
  2412  				{
  2413  					name:     "external to nodePort",
  2414  					sourceIP: externalClientIP,
  2415  					destIP:   nodeIP,
  2416  					destPort: 3001,
  2417  					output:   output,
  2418  					masq:     true,
  2419  				},
  2420  				{
  2421  					name:     "node to nodePort",
  2422  					sourceIP: nodeIP,
  2423  					destIP:   nodeIP,
  2424  					destPort: 3001,
  2425  					output:   output,
  2426  					masq:     true,
  2427  				},
  2428  			})
  2429  
  2430  			// localhost to NodePort is only allowed in IPv4, and only if not disabled
  2431  			if tc.family == v1.IPv4Protocol && tc.localhostNodePorts {
  2432  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2433  					{
  2434  						name:     "localhost to nodePort gets masqueraded",
  2435  						sourceIP: localhostIP,
  2436  						destIP:   localhostIP,
  2437  						destPort: 3001,
  2438  						output:   output,
  2439  						masq:     true,
  2440  					},
  2441  				})
  2442  			} else {
  2443  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2444  					{
  2445  						name:     "localhost to nodePort is ignored",
  2446  						sourceIP: localhostIP,
  2447  						destIP:   localhostIP,
  2448  						destPort: 3001,
  2449  						output:   "",
  2450  					},
  2451  				})
  2452  			}
  2453  
  2454  			// NodePort on altNodeIP should be allowed, unless
  2455  			// nodePortAddresses excludes altNodeIP
  2456  			if tc.allowAltNodeIP {
  2457  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2458  					{
  2459  						name:     "external to nodePort on secondary IP",
  2460  						sourceIP: externalClientIP,
  2461  						destIP:   altNodeIP,
  2462  						destPort: 3001,
  2463  						output:   output,
  2464  						masq:     true,
  2465  					},
  2466  				})
  2467  			} else {
  2468  				runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2469  					{
  2470  						name:     "secondary nodeIP ignores NodePorts",
  2471  						sourceIP: externalClientIP,
  2472  						destIP:   altNodeIP,
  2473  						destPort: 3001,
  2474  						output:   "",
  2475  					},
  2476  				})
  2477  			}
  2478  
  2479  			// We have to check the firewall rule manually rather than via
  2480  			// runPacketFlowTests(), because the packet tracer doesn't
  2481  			// implement conntrack states.
  2482  			var expected string
  2483  			if tc.expectFirewall {
  2484  				expected = "-A KUBE-FIREWALL -m comment --comment \"block incoming localnet connections\" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP\n"
  2485  			}
  2486  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeletFirewallChain, expected, fp.iptablesData.String())
  2487  		})
  2488  	}
  2489  }
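
        // A self-contained sketch (not the proxier's actual code) of the
        // acceptance rule that the allowAltNodeIP cases above assert: a node IP
        // only serves NodePorts if it falls within one of the configured
        // --nodeport-addresses CIDRs, and an empty list means "all node IPs of
        // the proxier's IP family" (a CIDR of the other family never matches).
        func nodeIPServesNodePortsSketch(nodeIP string, nodePortAddresses []string) bool {
        	if len(nodePortAddresses) == 0 {
        		return true
        	}
        	ip := netutils.ParseIPSloppy(nodeIP)
        	for _, cidrStr := range nodePortAddresses {
        		_, cidr, err := netutils.ParseCIDRSloppy(cidrStr)
        		if err != nil {
        			continue // the sketch simply skips malformed CIDRs
        		}
        		if cidr.Contains(ip) {
        			return true
        		}
        	}
        	return false
        }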
  2490  
  2491  func TestHealthCheckNodePort(t *testing.T) {
  2492  	ipt := iptablestest.NewFake()
  2493  	fp := NewFakeProxier(ipt)
  2494  	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
  2495  
  2496  	svcIP := "172.30.0.42"
  2497  	svcPort := 80
  2498  	svcNodePort := 3001
  2499  	svcHealthCheckNodePort := 30000
  2500  	svcPortName := proxy.ServicePortName{
  2501  		NamespacedName: makeNSN("ns1", "svc1"),
  2502  		Port:           "p80",
  2503  		Protocol:       v1.ProtocolTCP,
  2504  	}
  2505  
  2506  	svc := makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2507  		svc.Spec.Type = "LoadBalancer"
  2508  		svc.Spec.ClusterIP = svcIP
  2509  		svc.Spec.Ports = []v1.ServicePort{{
  2510  			Name:     svcPortName.Port,
  2511  			Port:     int32(svcPort),
  2512  			Protocol: v1.ProtocolTCP,
  2513  			NodePort: int32(svcNodePort),
  2514  		}}
  2515  		svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  2516  		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2517  	})
  2518  	makeServiceMap(fp, svc)
  2519  	fp.syncProxyRules()
  2520  
  2521  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2522  		{
  2523  			name:     "firewall accepts HealthCheckNodePort",
  2524  			sourceIP: "1.2.3.4",
  2525  			destIP:   testNodeIP,
  2526  			destPort: svcHealthCheckNodePort,
  2527  			output:   "ACCEPT",
  2528  			masq:     false,
  2529  		},
  2530  	})
  2531  
  2532  	fp.OnServiceDelete(svc)
  2533  	fp.syncProxyRules()
  2534  
  2535  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2536  		{
  2537  			name:     "HealthCheckNodePort no longer has any rule",
  2538  			sourceIP: "1.2.3.4",
  2539  			destIP:   testNodeIP,
  2540  			destPort: svcHealthCheckNodePort,
  2541  			output:   "",
  2542  		},
  2543  	})
  2544  }
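
        // The ACCEPT above comes from the filter table: for a LoadBalancer
        // service with ExternalTrafficPolicy: Local, kube-proxy opens the
        // health check node port so the cloud load balancer can probe
        // kube-proxy's own healthcheck server (see the healthcheck package).
        // A hedged sketch of the rule's rough shape, not necessarily the
        // proxier's literal output:
        //
        //	-A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT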
  2545  
  2546  func TestDropInvalidRule(t *testing.T) {
  2547  	kubeForwardChainRules := dedent.Dedent(`
  2548  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  2549  				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  2550  	`)
  2551  
  2552  	testCases := []struct {
  2553  		nfacctEnsured  bool
  2554  		tcpLiberal     bool
  2555  		dropRule       string
  2556  		nfAcctCounters map[string]bool
  2557  	}{
  2558  		{
  2559  			nfacctEnsured:  false,
  2560  			tcpLiberal:     false,
  2561  			nfAcctCounters: map[string]bool{},
  2562  			dropRule:       "-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP",
  2563  		},
  2564  		{
  2565  			nfacctEnsured: true,
  2566  			tcpLiberal:    false,
  2567  			nfAcctCounters: map[string]bool{
  2568  				metrics.IPTablesCTStateInvalidDroppedNFAcctCounter: true,
  2569  			},
  2570  			dropRule: fmt.Sprintf("-A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name %s -j DROP", metrics.IPTablesCTStateInvalidDroppedNFAcctCounter),
  2571  		},
  2572  		{
  2573  			nfacctEnsured:  false,
  2574  			tcpLiberal:     true,
  2575  			nfAcctCounters: map[string]bool{},
  2576  			dropRule:       "",
  2577  		},
  2578  	}
  2579  	for _, tc := range testCases {
  2580  		t.Run(fmt.Sprintf("tcpLiberal is %t and nfacctEnsured is %t", tc.tcpLiberal, tc.nfacctEnsured), func(t *testing.T) {
  2581  			ipt := iptablestest.NewFake()
  2582  			fp := NewFakeProxier(ipt)
  2583  			fp.conntrackTCPLiberal = tc.tcpLiberal
  2584  			fp.nfAcctCounters = tc.nfAcctCounters
  2585  			fp.syncProxyRules()
  2586  
  2587  			expected := tc.dropRule + kubeForwardChainRules
  2588  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeForwardChain, expected, fp.iptablesData.String())
  2589  		})
  2590  	}
  2591  }
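
        // A hedged reconstruction of the decision encoded by the three cases
        // above (derived from the test table, not copied from proxier.go): the
        // INVALID-conntrack drop is omitted entirely when TCP conntrack is
        // liberal, and the nfacct match is only included when the counter was
        // successfully ensured on the host.
        func expectedInvalidDropRuleSketch(tcpLiberal, nfacctEnsured bool) string {
        	if tcpLiberal {
        		// Liberal TCP conntrack: INVALID packets are not dropped at all.
        		return ""
        	}
        	rule := "-A KUBE-FORWARD -m conntrack --ctstate INVALID"
        	if nfacctEnsured {
        		// Count drops via the nfacct counter when it is available.
        		rule += " -m nfacct --nfacct-name " + metrics.IPTablesCTStateInvalidDroppedNFAcctCounter
        	}
        	return rule + " -j DROP"
        }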
  2592  
  2593  func TestMasqueradeRule(t *testing.T) {
  2594  	for _, randomFully := range []bool{false, true} {
  2595  		t.Run(fmt.Sprintf("randomFully %t", randomFully), func(t *testing.T) {
  2596  			ipt := iptablestest.NewFake().SetHasRandomFully(randomFully)
  2597  			fp := NewFakeProxier(ipt)
  2598  			fp.syncProxyRules()
  2599  
  2600  			expectedFmt := dedent.Dedent(`
  2601  				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  2602  				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  2603  				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE%s
  2604  				`)
  2605  			var expected string
  2606  			if randomFully {
  2607  				expected = fmt.Sprintf(expectedFmt, " --random-fully")
  2608  			} else {
  2609  				expected = fmt.Sprintf(expectedFmt, "")
  2610  			}
  2611  			assertIPTablesChainEqual(t, getLine(), utiliptables.TableNAT, kubePostroutingChain, expected, fp.iptablesData.String())
  2612  		})
  2613  	}
  2614  }
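
        // A minimal sketch of the branch the two subtests exercise: iptables
        // implementations that support it get --random-fully appended to the
        // MASQUERADE rule, which randomizes SNAT source-port allocation and
        // avoids port-collision races between concurrent connections.
        func masqueradeArgsSketch(ipt utiliptables.Interface) []string {
        	args := []string{"-j", "MASQUERADE"}
        	if ipt.HasRandomFully() {
        		args = append(args, "--random-fully")
        	}
        	return args
        }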
  2615  
  2616  // TestExternalTrafficPolicyLocal tests that traffic to externally-facing IPs
  2617  // does not get masqueraded when using the Local traffic policy. Traffic from
  2618  // external sources can therefore only be routed to local endpoints, while
  2619  // traffic from internal sources is routed to all endpoints.
  2620  func TestExternalTrafficPolicyLocal(t *testing.T) {
  2621  	ipt := iptablestest.NewFake()
  2622  	fp := NewFakeProxier(ipt)
  2623  
  2624  	svcIP := "172.30.0.41"
  2625  	svcPort := 80
  2626  	svcNodePort := 3001
  2627  	svcHealthCheckNodePort := 30000
  2628  	svcExternalIPs := "192.168.99.11"
  2629  	svcLBIP := "1.2.3.4"
  2630  	svcPortName := proxy.ServicePortName{
  2631  		NamespacedName: makeNSN("ns1", "svc1"),
  2632  		Port:           "p80",
  2633  	}
  2634  
  2635  	makeServiceMap(fp,
  2636  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2637  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2638  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2639  			svc.Spec.ClusterIP = svcIP
  2640  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  2641  			svc.Spec.Ports = []v1.ServicePort{{
  2642  				Name:       svcPortName.Port,
  2643  				Port:       int32(svcPort),
  2644  				Protocol:   v1.ProtocolTCP,
  2645  				NodePort:   int32(svcNodePort),
  2646  				TargetPort: intstr.FromInt32(int32(svcPort)),
  2647  			}}
  2648  			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
  2649  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  2650  				IP: svcLBIP,
  2651  			}}
  2652  		}),
  2653  	)
  2654  
  2655  	epIP1 := "10.180.0.1"
  2656  	epIP2 := "10.180.2.1"
  2657  	populateEndpointSlices(fp,
  2658  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2659  			eps.AddressType = discovery.AddressTypeIPv4
  2660  			eps.Endpoints = []discovery.Endpoint{{
  2661  				Addresses: []string{epIP1},
  2662  			}, {
  2663  				Addresses: []string{epIP2},
  2664  				NodeName:  ptr.To(testHostname),
  2665  			}}
  2666  			eps.Ports = []discovery.EndpointPort{{
  2667  				Name:     ptr.To(svcPortName.Port),
  2668  				Port:     ptr.To(int32(svcPort)),
  2669  				Protocol: ptr.To(v1.ProtocolTCP),
  2670  			}}
  2671  		}),
  2672  	)
  2673  
  2674  	fp.syncProxyRules()
  2675  
  2676  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2677  		{
  2678  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  2679  			sourceIP: "10.0.0.2",
  2680  			destIP:   svcIP,
  2681  			destPort: svcPort,
  2682  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2683  			masq:     false,
  2684  		},
  2685  		{
  2686  			name:     "pod to external IP hits both endpoints, unmasqueraded",
  2687  			sourceIP: "10.0.0.2",
  2688  			destIP:   svcExternalIPs,
  2689  			destPort: svcPort,
  2690  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2691  			masq:     false,
  2692  		},
  2693  		{
  2694  			name:     "external to external IP hits only local endpoint, unmasqueraded",
  2695  			sourceIP: testExternalClient,
  2696  			destIP:   svcExternalIPs,
  2697  			destPort: svcPort,
  2698  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2699  			masq:     false,
  2700  		},
  2701  		{
  2702  			name:     "pod to LB IP hits both endpoints, unmasqueraded",
  2703  			sourceIP: "10.0.0.2",
  2704  			destIP:   svcLBIP,
  2705  			destPort: svcPort,
  2706  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2707  			masq:     false,
  2708  		},
  2709  		{
  2710  			name:     "external to LB IP hits only local endpoint, unmasqueraded",
  2711  			sourceIP: testExternalClient,
  2712  			destIP:   svcLBIP,
  2713  			destPort: svcPort,
  2714  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2715  			masq:     false,
  2716  		},
  2717  		{
  2718  			name:     "pod to NodePort hits both endpoints, unmasqueraded",
  2719  			sourceIP: "10.0.0.2",
  2720  			destIP:   testNodeIP,
  2721  			destPort: svcNodePort,
  2722  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2723  			masq:     false,
  2724  		},
  2725  		{
  2726  			name:     "external to NodePort hits only local endpoint, unmasqueraded",
  2727  			sourceIP: testExternalClient,
  2728  			destIP:   testNodeIP,
  2729  			destPort: svcNodePort,
  2730  			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
  2731  			masq:     false,
  2732  		},
  2733  	})
  2734  }
  2735  
  2736  // TestExternalTrafficPolicyCluster tests that traffic to an externally-facing IP gets
  2737  // masqueraded when using Cluster traffic policy.
  2738  func TestExternalTrafficPolicyCluster(t *testing.T) {
  2739  	ipt := iptablestest.NewFake()
  2740  	fp := NewFakeProxier(ipt)
  2741  
  2742  	svcIP := "172.30.0.41"
  2743  	svcPort := 80
  2744  	svcNodePort := 3001
  2745  	svcExternalIPs := "192.168.99.11"
  2746  	svcLBIP := "1.2.3.4"
  2747  	svcPortName := proxy.ServicePortName{
  2748  		NamespacedName: makeNSN("ns1", "svc1"),
  2749  		Port:           "p80",
  2750  	}
  2751  
  2752  	makeServiceMap(fp,
  2753  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  2754  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2755  			svc.Spec.ClusterIP = svcIP
  2756  			svc.Spec.ExternalIPs = []string{svcExternalIPs}
  2757  			svc.Spec.Ports = []v1.ServicePort{{
  2758  				Name:       svcPortName.Port,
  2759  				Port:       int32(svcPort),
  2760  				Protocol:   v1.ProtocolTCP,
  2761  				NodePort:   int32(svcNodePort),
  2762  				TargetPort: intstr.FromInt32(int32(svcPort)),
  2763  			}}
  2764  			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  2765  				IP: svcLBIP,
  2766  			}}
  2767  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
  2768  		}),
  2769  	)
  2770  
  2771  	epIP1 := "10.180.0.1"
  2772  	epIP2 := "10.180.2.1"
  2773  	populateEndpointSlices(fp,
  2774  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  2775  			eps.AddressType = discovery.AddressTypeIPv4
  2776  			eps.Endpoints = []discovery.Endpoint{{
  2777  				Addresses: []string{epIP1},
  2778  				NodeName:  nil,
  2779  			}, {
  2780  				Addresses: []string{epIP2},
  2781  				NodeName:  ptr.To(testHostname),
  2782  			}}
  2783  			eps.Ports = []discovery.EndpointPort{{
  2784  				Name:     ptr.To(svcPortName.Port),
  2785  				Port:     ptr.To(int32(svcPort)),
  2786  				Protocol: ptr.To(v1.ProtocolTCP),
  2787  			}}
  2788  		}),
  2789  	)
  2790  
  2791  	fp.syncProxyRules()
  2792  
  2793  	runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{
  2794  		{
  2795  			name:     "pod to cluster IP hits both endpoints, unmasqueraded",
  2796  			sourceIP: "10.0.0.2",
  2797  			destIP:   svcIP,
  2798  			destPort: svcPort,
  2799  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2800  			masq:     false,
  2801  		},
  2802  		{
  2803  			name:     "pod to external IP hits both endpoints, masqueraded",
  2804  			sourceIP: "10.0.0.2",
  2805  			destIP:   svcExternalIPs,
  2806  			destPort: svcPort,
  2807  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2808  			masq:     true,
  2809  		},
  2810  		{
  2811  			name:     "external to external IP hits both endpoints, masqueraded",
  2812  			sourceIP: testExternalClient,
  2813  			destIP:   svcExternalIPs,
  2814  			destPort: svcPort,
  2815  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2816  			masq:     true,
  2817  		},
  2818  		{
  2819  			name:     "pod to LB IP hits both endpoints, masqueraded",
  2820  			sourceIP: "10.0.0.2",
  2821  			destIP:   svcLBIP,
  2822  			destPort: svcPort,
  2823  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2824  			masq:     true,
  2825  		},
  2826  		{
  2827  			name:     "external to LB IP hits both endpoints, masqueraded",
  2828  			sourceIP: testExternalClient,
  2829  			destIP:   svcLBIP,
  2830  			destPort: svcPort,
  2831  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2832  			masq:     true,
  2833  		},
  2834  		{
  2835  			name:     "pod to NodePort hits both endpoints, masqueraded",
  2836  			sourceIP: "10.0.0.2",
  2837  			destIP:   testNodeIP,
  2838  			destPort: svcNodePort,
  2839  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2840  			masq:     true,
  2841  		},
  2842  		{
  2843  			name:     "external to NodePort hits both endpoints, masqueraded",
  2844  			sourceIP: testExternalClient,
  2845  			destIP:   testNodeIP,
  2846  			destPort: svcNodePort,
  2847  			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
  2848  			masq:     true,
  2849  		},
  2850  	})
  2851  }
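
        // A hedged summary of the masquerade expectations asserted here and in
        // TestExternalTrafficPolicyLocal (a simplification of the real rule
        // set): traffic reaching the service through an externally-visible IP
        // is SNATted under the Cluster policy so replies route back through
        // this node, and is deliberately left unmasqueraded under the Local
        // policy so the backend pod sees the real client IP.
        func expectMasqueradeSketch(destIsExternal bool, policy v1.ServiceExternalTrafficPolicy) bool {
        	return destIsExternal && policy == v1.ServiceExternalTrafficPolicyCluster
        }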
  2852  
  2853  func TestComputeProbability(t *testing.T) {
  2854  	expectedProbabilities := map[int]string{
  2855  		1:      "1.0000000000",
  2856  		2:      "0.5000000000",
  2857  		10:     "0.1000000000",
  2858  		100:    "0.0100000000",
  2859  		1000:   "0.0010000000",
  2860  		10000:  "0.0001000000",
  2861  		100000: "0.0000100000",
  2862  		100001: "0.0000099999",
  2863  	}
  2864  
  2865  	for num, expected := range expectedProbabilities {
  2866  		actual := computeProbability(num)
  2867  		if actual != expected {
  2868  			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", num, expected, actual)
  2869  		}
  2870  	}
  2871  
  2872  	prevProbability := float64(0)
  2873  	for i := 100000; i > 1; i-- {
  2874  		currProbability, err := strconv.ParseFloat(computeProbability(i), 64)
  2875  		if err != nil {
  2876  			t.Fatalf("Error parsing float probability for %d: %v", i, err)
  2877  		}
  2878  		if currProbability <= prevProbability {
  2879  			t.Fatalf("Probability unexpectedly <= previous probability for %d: (%0.10f <= %0.10f)", i, currProbability, prevProbability)
  2880  		}
  2881  		prevProbability = currProbability
  2882  	}
  2883  }
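
        // For reference: the table above is exactly what you get by formatting
        // 1/n to ten decimal places, which (as far as this test is concerned)
        // is computeProbability's contract. kube-proxy uses these values in
        // "-m statistic --mode random --probability" rules, where endpoint i of
        // n is matched with probability 1/(n-i), so each endpoint ends up being
        // selected uniformly.
        func probabilitySketch(n int) string {
        	return fmt.Sprintf("%0.10f", 1.0/float64(n))
        }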
  2884  
  2885  func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
  2886  	svc := &v1.Service{
  2887  		ObjectMeta: metav1.ObjectMeta{
  2888  			Name:        name,
  2889  			Namespace:   namespace,
  2890  			Annotations: map[string]string{},
  2891  		},
  2892  		Spec:   v1.ServiceSpec{},
  2893  		Status: v1.ServiceStatus{},
  2894  	}
  2895  	svcFunc(svc)
  2896  	return svc
  2897  }
  2898  
  2899  func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
  2900  	svcPort := v1.ServicePort{
  2901  		Name:       name,
  2902  		Protocol:   protocol,
  2903  		Port:       port,
  2904  		NodePort:   nodeport,
  2905  		TargetPort: intstr.FromInt32(int32(targetPort)),
  2906  	}
  2907  	return append(array, svcPort)
  2908  }
  2909  
  2910  func TestBuildServiceMapAddRemove(t *testing.T) {
  2911  	ipt := iptablestest.NewFake()
  2912  	fp := NewFakeProxier(ipt)
  2913  
  2914  	services := []*v1.Service{
  2915  		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  2916  			svc.Spec.Type = v1.ServiceTypeClusterIP
  2917  			svc.Spec.ClusterIP = "172.30.55.4"
  2918  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  2919  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  2920  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
  2921  		}),
  2922  		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
  2923  			svc.Spec.Type = v1.ServiceTypeNodePort
  2924  			svc.Spec.ClusterIP = "172.30.55.10"
  2925  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
  2926  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
  2927  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
  2928  		}),
  2929  		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
  2930  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2931  			svc.Spec.ClusterIP = "172.30.55.11"
  2932  			svc.Spec.LoadBalancerIP = "1.2.3.4"
  2933  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
  2934  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
  2935  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  2936  				Ingress: []v1.LoadBalancerIngress{
  2937  					{IP: "1.2.3.4"},
  2938  				},
  2939  			}
  2940  		}),
  2941  		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
  2942  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2943  			svc.Spec.ClusterIP = "172.30.55.12"
  2944  			svc.Spec.LoadBalancerIP = "5.6.7.8"
  2945  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
  2946  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
  2947  			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  2948  				Ingress: []v1.LoadBalancerIngress{
  2949  					{IP: "5.6.7.8"},
  2950  				},
  2951  			}
  2952  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2953  			svc.Spec.HealthCheckNodePort = 345
  2954  		}),
  2955  	}
  2956  
  2957  	for i := range services {
  2958  		fp.OnServiceAdd(services[i])
  2959  	}
  2960  	result := fp.svcPortMap.Update(fp.serviceChanges)
  2961  	if len(fp.svcPortMap) != 10 {
  2962  		t.Errorf("expected service map length 10, got %v", fp.svcPortMap)
  2963  	}
  2964  
  2965  	if len(result.DeletedUDPClusterIPs) != 0 {
  2966  		// Services only added, so nothing stale yet
  2967  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  2968  	}
  2969  
  2970  	// Only the "only-local-load-balancer" service contributes a healthcheck port
  2971  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  2972  	if len(healthCheckNodePorts) != 1 {
  2973  		t.Errorf("expected 1 healthcheck port, got %v", healthCheckNodePorts)
  2974  	} else {
  2975  		nsn := makeNSN("somewhere", "only-local-load-balancer")
  2976  		if port, found := healthCheckNodePorts[nsn]; !found || port != 345 {
  2977  			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, healthCheckNodePorts)
  2978  		}
  2979  	}
  2980  
  2981  	// Remove some stuff
  2982  	// oneService is a modification of services[0] with its first port removed.
  2983  	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
  2984  		svc.Spec.Type = v1.ServiceTypeClusterIP
  2985  		svc.Spec.ClusterIP = "172.30.55.4"
  2986  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
  2987  	})
  2988  
  2989  	fp.OnServiceUpdate(services[0], oneService)
  2990  	fp.OnServiceDelete(services[1])
  2991  	fp.OnServiceDelete(services[2])
  2992  	fp.OnServiceDelete(services[3])
  2993  
  2994  	result = fp.svcPortMap.Update(fp.serviceChanges)
  2995  	if len(fp.svcPortMap) != 1 {
  2996  		t.Errorf("expected service map length 1, got %v", fp.svcPortMap)
  2997  	}
  2998  
  2999  	// All services but one were deleted. You might expect only the ClusterIPs
  3000  	// of the three deleted services here, but the ClusterIP of the remaining
  3001  	// service also appears, because one of its ServicePorts was deleted.
  3002  	expectedStaleUDPServices := []string{"172.30.55.10", "172.30.55.4", "172.30.55.11", "172.30.55.12"}
  3003  	if len(result.DeletedUDPClusterIPs) != len(expectedStaleUDPServices) {
  3004  		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.DeletedUDPClusterIPs.UnsortedList())
  3005  	}
  3006  	for _, ip := range expectedStaleUDPServices {
  3007  		if !result.DeletedUDPClusterIPs.Has(ip) {
  3008  			t.Errorf("expected stale UDP service %s", ip)
  3009  		}
  3010  	}
  3011  
  3012  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3013  	if len(healthCheckNodePorts) != 0 {
  3014  		t.Errorf("expected 0 healthcheck ports, got %v", healthCheckNodePorts)
  3015  	}
  3016  }
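
        // Why DeletedUDPClusterIPs matters (a sketch; the helper named below is
        // hypothetical): UDP has no connection teardown, so conntrack entries
        // for a deleted ClusterIP would keep steering packets to a dead
        // service. After updating the map, the proxier clears those entries,
        // conceptually:
        //
        //	for _, ip := range result.DeletedUDPClusterIPs.UnsortedList() {
        //		clearUDPConntrackEntriesFor(ip) // hypothetical helper
        //	}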
  3017  
  3018  func TestBuildServiceMapServiceHeadless(t *testing.T) {
  3019  	ipt := iptablestest.NewFake()
  3020  	fp := NewFakeProxier(ipt)
  3021  
  3022  	makeServiceMap(fp,
  3023  		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
  3024  			svc.Spec.Type = v1.ServiceTypeClusterIP
  3025  			svc.Spec.ClusterIP = v1.ClusterIPNone
  3026  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
  3027  		}),
  3028  		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
  3029  			svc.Spec.Type = v1.ServiceTypeClusterIP
  3030  			svc.Spec.ClusterIP = v1.ClusterIPNone
  3031  		}),
  3032  	)
  3033  
  3034  	// Headless service should be ignored
  3035  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3036  	if len(fp.svcPortMap) != 0 {
  3037  		t.Errorf("expected service map length 0, got %d", len(fp.svcPortMap))
  3038  	}
  3039  
  3040  	if len(result.DeletedUDPClusterIPs) != 0 {
  3041  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3042  	}
  3043  
  3044  	// No proxied services, so no healthchecks
  3045  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3046  	if len(healthCheckNodePorts) != 0 {
  3047  		t.Errorf("expected healthcheck ports length 0, got %d", len(healthCheckNodePorts))
  3048  	}
  3049  }
  3050  
  3051  func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
  3052  	ipt := iptablestest.NewFake()
  3053  	fp := NewFakeProxier(ipt)
  3054  
  3055  	makeServiceMap(fp,
  3056  		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
  3057  			svc.Spec.Type = v1.ServiceTypeExternalName
  3058  			svc.Spec.ClusterIP = "172.30.55.4" // Should be ignored
  3059  			svc.Spec.ExternalName = "foo2.bar.com"
  3060  			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
  3061  		}),
  3062  	)
  3063  
  3064  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3065  	if len(fp.svcPortMap) != 0 {
  3066  		t.Errorf("expected service map length 0, got %v", fp.svcPortMap)
  3067  	}
  3068  	if len(result.DeletedUDPClusterIPs) != 0 {
  3069  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs)
  3070  	}
  3071  	// No proxied services, so no healthchecks
  3072  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3073  	if len(healthCheckNodePorts) != 0 {
  3074  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3075  	}
  3076  }
  3077  
  3078  func TestBuildServiceMapServiceUpdate(t *testing.T) {
  3079  	ipt := iptablestest.NewFake()
  3080  	fp := NewFakeProxier(ipt)
  3081  
  3082  	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  3083  		svc.Spec.Type = v1.ServiceTypeClusterIP
  3084  		svc.Spec.ClusterIP = "172.30.55.4"
  3085  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
  3086  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
  3087  	})
  3088  	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
  3089  		svc.Spec.Type = v1.ServiceTypeLoadBalancer
  3090  		svc.Spec.ClusterIP = "172.30.55.4"
  3091  		svc.Spec.LoadBalancerIP = "1.2.3.4"
  3092  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
  3093  		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
  3094  		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
  3095  			Ingress: []v1.LoadBalancerIngress{
  3096  				{IP: "1.2.3.4"},
  3097  			},
  3098  		}
  3099  		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  3100  		svc.Spec.HealthCheckNodePort = 345
  3101  	})
  3102  
  3103  	fp.OnServiceAdd(servicev1)
  3104  
  3105  	result := fp.svcPortMap.Update(fp.serviceChanges)
  3106  	if len(fp.svcPortMap) != 2 {
  3107  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3108  	}
  3109  	if len(result.DeletedUDPClusterIPs) != 0 {
  3110  		// Services only added, so nothing stale yet
  3111  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3112  	}
  3113  	healthCheckNodePorts := fp.svcPortMap.HealthCheckNodePorts()
  3114  	if len(healthCheckNodePorts) != 0 {
  3115  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3116  	}
  3117  
  3118  	// Change service to load-balancer
  3119  	fp.OnServiceUpdate(servicev1, servicev2)
  3120  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3121  	if len(fp.svcPortMap) != 2 {
  3122  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3123  	}
  3124  	if len(result.DeletedUDPClusterIPs) != 0 {
  3125  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  3126  	}
  3127  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3128  	if len(healthCheckNodePorts) != 1 {
  3129  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  3130  	}
  3131  
  3132  	// No change; make sure the service map stays the same and there are
  3133  	// no health-check changes
  3134  	fp.OnServiceUpdate(servicev2, servicev2)
  3135  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3136  	if len(fp.svcPortMap) != 2 {
  3137  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3138  	}
  3139  	if len(result.DeletedUDPClusterIPs) != 0 {
  3140  		t.Errorf("expected stale UDP services length 0, got %v", result.DeletedUDPClusterIPs.UnsortedList())
  3141  	}
  3142  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3143  	if len(healthCheckNodePorts) != 1 {
  3144  		t.Errorf("expected healthcheck ports length 1, got %v", healthCheckNodePorts)
  3145  	}
  3146  
  3147  	// And back to ClusterIP
  3148  	fp.OnServiceUpdate(servicev2, servicev1)
  3149  	result = fp.svcPortMap.Update(fp.serviceChanges)
  3150  	if len(fp.svcPortMap) != 2 {
  3151  		t.Errorf("expected service map length 2, got %v", fp.svcPortMap)
  3152  	}
  3153  	if len(result.DeletedUDPClusterIPs) != 0 {
  3154  		// The service was updated, not deleted, so nothing is stale
  3155  		t.Errorf("expected stale UDP services length 0, got %d", len(result.DeletedUDPClusterIPs))
  3156  	}
  3157  	healthCheckNodePorts = fp.svcPortMap.HealthCheckNodePorts()
  3158  	if len(healthCheckNodePorts) != 0 {
  3159  		t.Errorf("expected healthcheck ports length 0, got %v", healthCheckNodePorts)
  3160  	}
  3161  }
  3162  
  3163  func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
  3164  	for i := range allEndpointSlices {
  3165  		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
  3166  	}
  3167  }
  3168  
  3169  func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
  3170  	eps := &discovery.EndpointSlice{
  3171  		ObjectMeta: metav1.ObjectMeta{
  3172  			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
  3173  			Namespace: namespace,
  3174  			Labels:    map[string]string{discovery.LabelServiceName: name},
  3175  		},
  3176  	}
  3177  	epsFunc(eps)
  3178  	return eps
  3179  }
  3180  
  3181  func makeNSN(namespace, name string) types.NamespacedName {
  3182  	return types.NamespacedName{Namespace: namespace, Name: name}
  3183  }
  3184  
  3185  func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
  3186  	return proxy.ServicePortName{
  3187  		NamespacedName: makeNSN(ns, name),
  3188  		Port:           port,
  3189  		Protocol:       protocol,
  3190  	}
  3191  }
  3192  
  3193  func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
  3194  	for i := range allServices {
  3195  		proxier.OnServiceAdd(allServices[i])
  3196  	}
  3197  
  3198  	proxier.mu.Lock()
  3199  	defer proxier.mu.Unlock()
  3200  	proxier.servicesSynced = true
  3201  }
  3202  
  3203  type endpointExpectation struct {
  3204  	endpoint string
  3205  	isLocal  bool
  3206  }
  3207  
  3208  func checkEndpointExpectations(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]endpointExpectation) {
  3209  	if len(newMap) != len(expected) {
  3210  		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
  3211  	}
  3212  	for x := range expected {
  3213  		if len(newMap[x]) != len(expected[x]) {
  3214  			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
  3215  		} else {
  3216  			for i := range expected[x] {
  3217  				newEp := newMap[x][i]
  3218  				if newEp.String() != expected[x][i].endpoint ||
  3219  					newEp.IsLocal() != expected[x][i].isLocal {
  3220  					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
  3221  				}
  3222  			}
  3223  		}
  3224  	}
  3225  }
  3226  
  3227  func TestUpdateEndpointsMap(t *testing.T) {
  3228  	emptyEndpointSlices := []*discovery.EndpointSlice{
  3229  		makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
  3230  	}
  3231  	subset1 := func(eps *discovery.EndpointSlice) {
  3232  		eps.AddressType = discovery.AddressTypeIPv4
  3233  		eps.Endpoints = []discovery.Endpoint{{
  3234  			Addresses: []string{"10.1.1.1"},
  3235  		}}
  3236  		eps.Ports = []discovery.EndpointPort{{
  3237  			Name:     ptr.To("p11"),
  3238  			Port:     ptr.To[int32](11),
  3239  			Protocol: ptr.To(v1.ProtocolUDP),
  3240  		}}
  3241  	}
  3242  	subset2 := func(eps *discovery.EndpointSlice) {
  3243  		eps.AddressType = discovery.AddressTypeIPv4
  3244  		eps.Endpoints = []discovery.Endpoint{{
  3245  			Addresses: []string{"10.1.1.2"},
  3246  		}}
  3247  		eps.Ports = []discovery.EndpointPort{{
  3248  			Name:     ptr.To("p12"),
  3249  			Port:     ptr.To[int32](12),
  3250  			Protocol: ptr.To(v1.ProtocolUDP),
  3251  		}}
  3252  	}
  3253  	namedPortLocal := []*discovery.EndpointSlice{
  3254  		makeTestEndpointSlice("ns1", "ep1", 1,
  3255  			func(eps *discovery.EndpointSlice) {
  3256  				eps.AddressType = discovery.AddressTypeIPv4
  3257  				eps.Endpoints = []discovery.Endpoint{{
  3258  					Addresses: []string{"10.1.1.1"},
  3259  					NodeName:  ptr.To(testHostname),
  3260  				}}
  3261  				eps.Ports = []discovery.EndpointPort{{
  3262  					Name:     ptr.To("p11"),
  3263  					Port:     ptr.To[int32](11),
  3264  					Protocol: ptr.To(v1.ProtocolUDP),
  3265  				}}
  3266  			}),
  3267  	}
  3268  	namedPort := []*discovery.EndpointSlice{
  3269  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3270  	}
  3271  	namedPortRenamed := []*discovery.EndpointSlice{
  3272  		makeTestEndpointSlice("ns1", "ep1", 1,
  3273  			func(eps *discovery.EndpointSlice) {
  3274  				eps.AddressType = discovery.AddressTypeIPv4
  3275  				eps.Endpoints = []discovery.Endpoint{{
  3276  					Addresses: []string{"10.1.1.1"},
  3277  				}}
  3278  				eps.Ports = []discovery.EndpointPort{{
  3279  					Name:     ptr.To("p11-2"),
  3280  					Port:     ptr.To[int32](11),
  3281  					Protocol: ptr.To(v1.ProtocolUDP),
  3282  				}}
  3283  			}),
  3284  	}
  3285  	namedPortRenumbered := []*discovery.EndpointSlice{
  3286  		makeTestEndpointSlice("ns1", "ep1", 1,
  3287  			func(eps *discovery.EndpointSlice) {
  3288  				eps.AddressType = discovery.AddressTypeIPv4
  3289  				eps.Endpoints = []discovery.Endpoint{{
  3290  					Addresses: []string{"10.1.1.1"},
  3291  				}}
  3292  				eps.Ports = []discovery.EndpointPort{{
  3293  					Name:     ptr.To("p11"),
  3294  					Port:     ptr.To[int32](22),
  3295  					Protocol: ptr.To(v1.ProtocolUDP),
  3296  				}}
  3297  			}),
  3298  	}
  3299  	namedPortsLocalNoLocal := []*discovery.EndpointSlice{
  3300  		makeTestEndpointSlice("ns1", "ep1", 1,
  3301  			func(eps *discovery.EndpointSlice) {
  3302  				eps.AddressType = discovery.AddressTypeIPv4
  3303  				eps.Endpoints = []discovery.Endpoint{{
  3304  					Addresses: []string{"10.1.1.1"},
  3305  				}, {
  3306  					Addresses: []string{"10.1.1.2"},
  3307  					NodeName:  ptr.To(testHostname),
  3308  				}}
  3309  				eps.Ports = []discovery.EndpointPort{{
  3310  					Name:     ptr.To("p11"),
  3311  					Port:     ptr.To[int32](11),
  3312  					Protocol: ptr.To(v1.ProtocolUDP),
  3313  				}, {
  3314  					Name:     ptr.To("p12"),
  3315  					Port:     ptr.To[int32](12),
  3316  					Protocol: ptr.To(v1.ProtocolUDP),
  3317  				}}
  3318  			}),
  3319  	}
  3320  	multipleSubsets := []*discovery.EndpointSlice{
  3321  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3322  		makeTestEndpointSlice("ns1", "ep1", 2, subset2),
  3323  	}
  3324  	subsetLocal := func(eps *discovery.EndpointSlice) {
  3325  		eps.AddressType = discovery.AddressTypeIPv4
  3326  		eps.Endpoints = []discovery.Endpoint{{
  3327  			Addresses: []string{"10.1.1.2"},
  3328  			NodeName:  ptr.To(testHostname),
  3329  		}}
  3330  		eps.Ports = []discovery.EndpointPort{{
  3331  			Name:     ptr.To("p12"),
  3332  			Port:     ptr.To[int32](12),
  3333  			Protocol: ptr.To(v1.ProtocolUDP),
  3334  		}}
  3335  	}
  3336  	multipleSubsetsWithLocal := []*discovery.EndpointSlice{
  3337  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3338  		makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
  3339  	}
  3340  	subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
  3341  		eps.AddressType = discovery.AddressTypeIPv4
  3342  		eps.Endpoints = []discovery.Endpoint{{
  3343  			Addresses: []string{"10.1.1.1"},
  3344  			NodeName:  ptr.To(testHostname),
  3345  		}}
  3346  		eps.Ports = []discovery.EndpointPort{{
  3347  			Name:     ptr.To("p11"),
  3348  			Port:     ptr.To[int32](11),
  3349  			Protocol: ptr.To(v1.ProtocolUDP),
  3350  		}, {
  3351  			Name:     ptr.To("p12"),
  3352  			Port:     ptr.To[int32](12),
  3353  			Protocol: ptr.To(v1.ProtocolUDP),
  3354  		}}
  3355  	}
  3356  	subset3 := func(eps *discovery.EndpointSlice) {
  3357  		eps.AddressType = discovery.AddressTypeIPv4
  3358  		eps.Endpoints = []discovery.Endpoint{{
  3359  			Addresses: []string{"10.1.1.3"},
  3360  		}}
  3361  		eps.Ports = []discovery.EndpointPort{{
  3362  			Name:     ptr.To("p13"),
  3363  			Port:     ptr.To[int32](13),
  3364  			Protocol: ptr.To(v1.ProtocolUDP),
  3365  		}}
  3366  	}
  3367  	multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
  3368  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
  3369  		makeTestEndpointSlice("ns1", "ep1", 2, subset3),
  3370  	}
  3371  	subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
  3372  		eps.AddressType = discovery.AddressTypeIPv4
  3373  		eps.Endpoints = []discovery.Endpoint{{
  3374  			Addresses: []string{"10.1.1.1"},
  3375  		}, {
  3376  			Addresses: []string{"10.1.1.2"},
  3377  			NodeName:  ptr.To(testHostname),
  3378  		}}
  3379  		eps.Ports = []discovery.EndpointPort{{
  3380  			Name:     ptr.To("p11"),
  3381  			Port:     ptr.To[int32](11),
  3382  			Protocol: ptr.To(v1.ProtocolUDP),
  3383  		}, {
  3384  			Name:     ptr.To("p12"),
  3385  			Port:     ptr.To[int32](12),
  3386  			Protocol: ptr.To(v1.ProtocolUDP),
  3387  		}}
  3388  	}
  3389  	subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
  3390  		eps.AddressType = discovery.AddressTypeIPv4
  3391  		eps.Endpoints = []discovery.Endpoint{{
  3392  			Addresses: []string{"10.1.1.3"},
  3393  		}, {
  3394  			Addresses: []string{"10.1.1.4"},
  3395  			NodeName:  ptr.To(testHostname),
  3396  		}}
  3397  		eps.Ports = []discovery.EndpointPort{{
  3398  			Name:     ptr.To("p13"),
  3399  			Port:     ptr.To[int32](13),
  3400  			Protocol: ptr.To(v1.ProtocolUDP),
  3401  		}, {
  3402  			Name:     ptr.To("p14"),
  3403  			Port:     ptr.To[int32](14),
  3404  			Protocol: ptr.To(v1.ProtocolUDP),
  3405  		}}
  3406  	}
  3407  	subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
  3408  		eps.AddressType = discovery.AddressTypeIPv4
  3409  		eps.Endpoints = []discovery.Endpoint{{
  3410  			Addresses: []string{"10.2.2.1"},
  3411  		}, {
  3412  			Addresses: []string{"10.2.2.2"},
  3413  			NodeName:  ptr.To(testHostname),
  3414  		}}
  3415  		eps.Ports = []discovery.EndpointPort{{
  3416  			Name:     ptr.To("p21"),
  3417  			Port:     ptr.To[int32](21),
  3418  			Protocol: ptr.To(v1.ProtocolUDP),
  3419  		}, {
  3420  			Name:     ptr.To("p22"),
  3421  			Port:     ptr.To[int32](22),
  3422  			Protocol: ptr.To(v1.ProtocolUDP),
  3423  		}}
  3424  	}
  3425  	multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
  3426  		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
  3427  		makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
  3428  		makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
  3429  	}
  3430  	complexSubset1 := func(eps *discovery.EndpointSlice) {
  3431  		eps.AddressType = discovery.AddressTypeIPv4
  3432  		eps.Endpoints = []discovery.Endpoint{{
  3433  			Addresses: []string{"10.2.2.2"},
  3434  			NodeName:  ptr.To(testHostname),
  3435  		}, {
  3436  			Addresses: []string{"10.2.2.22"},
  3437  			NodeName:  ptr.To(testHostname),
  3438  		}}
  3439  		eps.Ports = []discovery.EndpointPort{{
  3440  			Name:     ptr.To("p22"),
  3441  			Port:     ptr.To[int32](22),
  3442  			Protocol: ptr.To(v1.ProtocolUDP),
  3443  		}}
  3444  	}
  3445  	complexSubset2 := func(eps *discovery.EndpointSlice) {
  3446  		eps.AddressType = discovery.AddressTypeIPv4
  3447  		eps.Endpoints = []discovery.Endpoint{{
  3448  			Addresses: []string{"10.2.2.3"},
  3449  			NodeName:  ptr.To(testHostname),
  3450  		}}
  3451  		eps.Ports = []discovery.EndpointPort{{
  3452  			Name:     ptr.To("p23"),
  3453  			Port:     ptr.To[int32](23),
  3454  			Protocol: ptr.To(v1.ProtocolUDP),
  3455  		}}
  3456  	}
  3457  	complexSubset3 := func(eps *discovery.EndpointSlice) {
  3458  		eps.AddressType = discovery.AddressTypeIPv4
  3459  		eps.Endpoints = []discovery.Endpoint{{
  3460  			Addresses: []string{"10.4.4.4"},
  3461  			NodeName:  ptr.To(testHostname),
  3462  		}, {
  3463  			Addresses: []string{"10.4.4.5"},
  3464  			NodeName:  ptr.To(testHostname),
  3465  		}}
  3466  		eps.Ports = []discovery.EndpointPort{{
  3467  			Name:     ptr.To("p44"),
  3468  			Port:     ptr.To[int32](44),
  3469  			Protocol: ptr.To(v1.ProtocolUDP),
  3470  		}}
  3471  	}
  3472  	complexSubset4 := func(eps *discovery.EndpointSlice) {
  3473  		eps.AddressType = discovery.AddressTypeIPv4
  3474  		eps.Endpoints = []discovery.Endpoint{{
  3475  			Addresses: []string{"10.4.4.6"},
  3476  			NodeName:  ptr.To(testHostname),
  3477  		}}
  3478  		eps.Ports = []discovery.EndpointPort{{
  3479  			Name:     ptr.To("p45"),
  3480  			Port:     ptr.To[int32](45),
  3481  			Protocol: ptr.To(v1.ProtocolUDP),
  3482  		}}
  3483  	}
  3484  	complexSubset5 := func(eps *discovery.EndpointSlice) {
  3485  		eps.AddressType = discovery.AddressTypeIPv4
  3486  		eps.Endpoints = []discovery.Endpoint{{
  3487  			Addresses: []string{"10.1.1.1"},
  3488  		}, {
  3489  			Addresses: []string{"10.1.1.11"},
  3490  		}}
  3491  		eps.Ports = []discovery.EndpointPort{{
  3492  			Name:     ptr.To("p11"),
  3493  			Port:     ptr.To[int32](11),
  3494  			Protocol: ptr.To(v1.ProtocolUDP),
  3495  		}}
  3496  	}
  3497  	complexSubset6 := func(eps *discovery.EndpointSlice) {
  3498  		eps.AddressType = discovery.AddressTypeIPv4
  3499  		eps.Endpoints = []discovery.Endpoint{{
  3500  			Addresses: []string{"10.1.1.2"},
  3501  		}}
  3502  		eps.Ports = []discovery.EndpointPort{{
  3503  			Name:     ptr.To("p12"),
  3504  			Port:     ptr.To[int32](12),
  3505  			Protocol: ptr.To(v1.ProtocolUDP),
  3506  		}, {
  3507  			Name:     ptr.To("p122"),
  3508  			Port:     ptr.To[int32](122),
  3509  			Protocol: ptr.To(v1.ProtocolUDP),
  3510  		}}
  3511  	}
  3512  	complexSubset7 := func(eps *discovery.EndpointSlice) {
  3513  		eps.AddressType = discovery.AddressTypeIPv4
  3514  		eps.Endpoints = []discovery.Endpoint{{
  3515  			Addresses: []string{"10.3.3.3"},
  3516  		}}
  3517  		eps.Ports = []discovery.EndpointPort{{
  3518  			Name:     ptr.To("p33"),
  3519  			Port:     ptr.To[int32](33),
  3520  			Protocol: ptr.To(v1.ProtocolUDP),
  3521  		}}
  3522  	}
  3523  	complexSubset8 := func(eps *discovery.EndpointSlice) {
  3524  		eps.AddressType = discovery.AddressTypeIPv4
  3525  		eps.Endpoints = []discovery.Endpoint{{
  3526  			Addresses: []string{"10.4.4.4"},
  3527  			NodeName:  ptr.To(testHostname),
  3528  		}}
  3529  		eps.Ports = []discovery.EndpointPort{{
  3530  			Name:     ptr.To("p44"),
  3531  			Port:     ptr.To[int32](44),
  3532  			Protocol: ptr.To(v1.ProtocolUDP),
  3533  		}}
  3534  	}
  3535  	complexBefore := []*discovery.EndpointSlice{
  3536  		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
  3537  		nil,
  3538  		makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
  3539  		makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
  3540  		nil,
  3541  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
  3542  		makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
  3543  	}
  3544  	complexAfter := []*discovery.EndpointSlice{
  3545  		makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
  3546  		makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
  3547  		nil,
  3548  		nil,
  3549  		makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
  3550  		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
  3551  		nil,
  3552  	}
  3553  
  3554  	testCases := []struct {
  3555  		// previousEndpoints and currentEndpoints are used to call appropriate
  3556  		// OnEndpointSlice* handlers (based on whether corresponding values are nil
  3557  		// or non-nil) and must be of equal length.
  3558  		name                           string
  3559  		previousEndpoints              []*discovery.EndpointSlice
  3560  		currentEndpoints               []*discovery.EndpointSlice
  3561  		oldEndpoints                   map[proxy.ServicePortName][]endpointExpectation
  3562  		expectedResult                 map[proxy.ServicePortName][]endpointExpectation
  3563  		expectedDeletedUDPEndpoints    []proxy.ServiceEndpoint
  3564  		expectedNewlyActiveUDPServices map[proxy.ServicePortName]bool
  3565  		expectedLocalEndpoints         map[types.NamespacedName]int
  3566  	}{{
  3567  		// Case[0]: nothing
  3568  		name:                           "nothing",
  3569  		oldEndpoints:                   map[proxy.ServicePortName][]endpointExpectation{},
  3570  		expectedResult:                 map[proxy.ServicePortName][]endpointExpectation{},
  3571  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3572  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3573  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3574  	}, {
  3575  		// Case[1]: no change, named port, local
  3576  		name:              "no change, named port, local",
  3577  		previousEndpoints: namedPortLocal,
  3578  		currentEndpoints:  namedPortLocal,
  3579  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3580  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3581  				{endpoint: "10.1.1.1:11", isLocal: true},
  3582  			},
  3583  		},
  3584  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3585  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3586  				{endpoint: "10.1.1.1:11", isLocal: true},
  3587  			},
  3588  		},
  3589  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3590  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3591  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3592  			makeNSN("ns1", "ep1"): 1,
  3593  		},
  3594  	}, {
  3595  		// Case[2]: no change, multiple subsets
  3596  		name:              "no change, multiple subsets",
  3597  		previousEndpoints: multipleSubsets,
  3598  		currentEndpoints:  multipleSubsets,
  3599  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3600  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3601  				{endpoint: "10.1.1.1:11", isLocal: false},
  3602  			},
  3603  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3604  				{endpoint: "10.1.1.2:12", isLocal: false},
  3605  			},
  3606  		},
  3607  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3608  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3609  				{endpoint: "10.1.1.1:11", isLocal: false},
  3610  			},
  3611  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3612  				{endpoint: "10.1.1.2:12", isLocal: false},
  3613  			},
  3614  		},
  3615  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3616  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3617  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3618  	}, {
  3619  		// Case[3]: no change, multiple subsets, multiple ports, local
  3620  		name:              "no change, multiple subsets, multiple ports, local",
  3621  		previousEndpoints: multipleSubsetsMultiplePortsLocal,
  3622  		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
  3623  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3624  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3625  				{endpoint: "10.1.1.1:11", isLocal: true},
  3626  			},
  3627  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3628  				{endpoint: "10.1.1.1:12", isLocal: true},
  3629  			},
  3630  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3631  				{endpoint: "10.1.1.3:13", isLocal: false},
  3632  			},
  3633  		},
  3634  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3635  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3636  				{endpoint: "10.1.1.1:11", isLocal: true},
  3637  			},
  3638  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3639  				{endpoint: "10.1.1.1:12", isLocal: true},
  3640  			},
  3641  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3642  				{endpoint: "10.1.1.3:13", isLocal: false},
  3643  			},
  3644  		},
  3645  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3646  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3647  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3648  			makeNSN("ns1", "ep1"): 1,
  3649  		},
  3650  	}, {
  3651  		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
  3652  		name:              "no change, multiple endpoints, subsets, IPs, and ports",
  3653  		previousEndpoints: multipleSubsetsIPsPorts,
  3654  		currentEndpoints:  multipleSubsetsIPsPorts,
  3655  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3656  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3657  				{endpoint: "10.1.1.1:11", isLocal: false},
  3658  				{endpoint: "10.1.1.2:11", isLocal: true},
  3659  			},
  3660  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3661  				{endpoint: "10.1.1.1:12", isLocal: false},
  3662  				{endpoint: "10.1.1.2:12", isLocal: true},
  3663  			},
  3664  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3665  				{endpoint: "10.1.1.3:13", isLocal: false},
  3666  				{endpoint: "10.1.1.4:13", isLocal: true},
  3667  			},
  3668  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  3669  				{endpoint: "10.1.1.3:14", isLocal: false},
  3670  				{endpoint: "10.1.1.4:14", isLocal: true},
  3671  			},
  3672  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  3673  				{endpoint: "10.2.2.1:21", isLocal: false},
  3674  				{endpoint: "10.2.2.2:21", isLocal: true},
  3675  			},
  3676  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3677  				{endpoint: "10.2.2.1:22", isLocal: false},
  3678  				{endpoint: "10.2.2.2:22", isLocal: true},
  3679  			},
  3680  		},
  3681  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3682  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3683  				{endpoint: "10.1.1.1:11", isLocal: false},
  3684  				{endpoint: "10.1.1.2:11", isLocal: true},
  3685  			},
  3686  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3687  				{endpoint: "10.1.1.1:12", isLocal: false},
  3688  				{endpoint: "10.1.1.2:12", isLocal: true},
  3689  			},
  3690  			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
  3691  				{endpoint: "10.1.1.3:13", isLocal: false},
  3692  				{endpoint: "10.1.1.4:13", isLocal: true},
  3693  			},
  3694  			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
  3695  				{endpoint: "10.1.1.3:14", isLocal: false},
  3696  				{endpoint: "10.1.1.4:14", isLocal: true},
  3697  			},
  3698  			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
  3699  				{endpoint: "10.2.2.1:21", isLocal: false},
  3700  				{endpoint: "10.2.2.2:21", isLocal: true},
  3701  			},
  3702  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3703  				{endpoint: "10.2.2.1:22", isLocal: false},
  3704  				{endpoint: "10.2.2.2:22", isLocal: true},
  3705  			},
  3706  		},
  3707  		expectedDeletedUDPEndpoints:    []proxy.ServiceEndpoint{},
  3708  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3709  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3710  			makeNSN("ns1", "ep1"): 2,
  3711  			makeNSN("ns2", "ep2"): 1,
  3712  		},
  3713  	}, {
  3714  		// Case[5]: add an EndpointSlice
  3715  		name:              "add an EndpointSlice",
  3716  		previousEndpoints: []*discovery.EndpointSlice{nil},
  3717  		currentEndpoints:  namedPortLocal,
  3718  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  3719  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3720  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3721  				{endpoint: "10.1.1.1:11", isLocal: true},
  3722  			},
  3723  		},
  3724  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3725  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3726  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  3727  		},
  3728  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3729  			makeNSN("ns1", "ep1"): 1,
  3730  		},
  3731  	}, {
  3732  		// Case[6]: remove an EndpointSlice
  3733  		name:              "remove an EndpointSlice",
  3734  		previousEndpoints: namedPortLocal,
  3735  		currentEndpoints:  []*discovery.EndpointSlice{nil},
  3736  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3737  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3738  				{endpoint: "10.1.1.1:11", isLocal: true},
  3739  			},
  3740  		},
  3741  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{},
  3742  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3743  			Endpoint:        "10.1.1.1:11",
  3744  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3745  		}},
  3746  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3747  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3748  	}, {
  3749  		// Case[7]: add an IP and port
  3750  		name:              "add an IP and port",
  3751  		previousEndpoints: namedPort,
  3752  		currentEndpoints:  namedPortsLocalNoLocal,
  3753  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3754  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3755  				{endpoint: "10.1.1.1:11", isLocal: false},
  3756  			},
  3757  		},
  3758  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3759  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3760  				{endpoint: "10.1.1.1:11", isLocal: false},
  3761  				{endpoint: "10.1.1.2:11", isLocal: true},
  3762  			},
  3763  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3764  				{endpoint: "10.1.1.1:12", isLocal: false},
  3765  				{endpoint: "10.1.1.2:12", isLocal: true},
  3766  			},
  3767  		},
  3768  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3769  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3770  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  3771  		},
  3772  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3773  			makeNSN("ns1", "ep1"): 1,
  3774  		},
  3775  	}, {
  3776  		// Case[8]: remove an IP and port
  3777  		name:              "remove an IP and port",
  3778  		previousEndpoints: namedPortsLocalNoLocal,
  3779  		currentEndpoints:  namedPort,
  3780  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3781  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3782  				{endpoint: "10.1.1.1:11", isLocal: false},
  3783  				{endpoint: "10.1.1.2:11", isLocal: true},
  3784  			},
  3785  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3786  				{endpoint: "10.1.1.1:12", isLocal: false},
  3787  				{endpoint: "10.1.1.2:12", isLocal: true},
  3788  			},
  3789  		},
  3790  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3791  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3792  				{endpoint: "10.1.1.1:11", isLocal: false},
  3793  			},
  3794  		},
  3795  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3796  			Endpoint:        "10.1.1.2:11",
  3797  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3798  		}, {
  3799  			Endpoint:        "10.1.1.1:12",
  3800  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3801  		}, {
  3802  			Endpoint:        "10.1.1.2:12",
  3803  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3804  		}},
  3805  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3806  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3807  	}, {
  3808  		// Case[9]: add a subset
  3809  		name:              "add a subset",
  3810  		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
  3811  		currentEndpoints:  multipleSubsetsWithLocal,
  3812  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3813  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3814  				{endpoint: "10.1.1.1:11", isLocal: false},
  3815  			},
  3816  		},
  3817  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3818  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3819  				{endpoint: "10.1.1.1:11", isLocal: false},
  3820  			},
  3821  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3822  				{endpoint: "10.1.1.2:12", isLocal: true},
  3823  			},
  3824  		},
  3825  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3826  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3827  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
  3828  		},
  3829  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3830  			makeNSN("ns1", "ep1"): 1,
  3831  		},
  3832  	}, {
  3833  		// Case[10]: remove a subset
  3834  		name:              "remove a subset",
  3835  		previousEndpoints: multipleSubsets,
  3836  		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
  3837  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3838  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3839  				{endpoint: "10.1.1.1:11", isLocal: false},
  3840  			},
  3841  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3842  				{endpoint: "10.1.1.2:12", isLocal: false},
  3843  			},
  3844  		},
  3845  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3846  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3847  				{endpoint: "10.1.1.1:11", isLocal: false},
  3848  			},
  3849  		},
  3850  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3851  			Endpoint:        "10.1.1.2:12",
  3852  			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
  3853  		}},
  3854  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3855  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3856  	}, {
  3857  		// Case[11]: rename a port
  3858  		name:              "rename a port",
  3859  		previousEndpoints: namedPort,
  3860  		currentEndpoints:  namedPortRenamed,
  3861  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3862  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3863  				{endpoint: "10.1.1.1:11", isLocal: false},
  3864  			},
  3865  		},
  3866  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3867  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
  3868  				{endpoint: "10.1.1.1:11", isLocal: false},
  3869  			},
  3870  		},
  3871  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3872  			Endpoint:        "10.1.1.1:11",
  3873  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3874  		}},
  3875  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3876  			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
  3877  		},
  3878  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  3879  	}, {
  3880  		// Case[12]: renumber a port
  3881  		name:              "renumber a port",
  3882  		previousEndpoints: namedPort,
  3883  		currentEndpoints:  namedPortRenumbered,
  3884  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3885  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3886  				{endpoint: "10.1.1.1:11", isLocal: false},
  3887  			},
  3888  		},
  3889  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3890  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3891  				{endpoint: "10.1.1.1:22", isLocal: false},
  3892  			},
  3893  		},
  3894  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3895  			Endpoint:        "10.1.1.1:11",
  3896  			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
  3897  		}},
  3898  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{},
  3899  		expectedLocalEndpoints:         map[types.NamespacedName]int{},
  3900  	}, {
  3901  		// Case[13]: complex add and remove
  3902  		name:              "complex add and remove",
  3903  		previousEndpoints: complexBefore,
  3904  		currentEndpoints:  complexAfter,
  3905  		oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{
  3906  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3907  				{endpoint: "10.1.1.1:11", isLocal: false},
  3908  			},
  3909  			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
  3910  				{endpoint: "10.2.2.22:22", isLocal: true},
  3911  				{endpoint: "10.2.2.2:22", isLocal: true},
  3912  			},
  3913  			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
  3914  				{endpoint: "10.2.2.3:23", isLocal: true},
  3915  			},
  3916  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  3917  				{endpoint: "10.4.4.4:44", isLocal: true},
  3918  				{endpoint: "10.4.4.5:44", isLocal: true},
  3919  			},
  3920  			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
  3921  				{endpoint: "10.4.4.6:45", isLocal: true},
  3922  			},
  3923  		},
  3924  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3925  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3926  				{endpoint: "10.1.1.11:11", isLocal: false},
  3927  				{endpoint: "10.1.1.1:11", isLocal: false},
  3928  			},
  3929  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
  3930  				{endpoint: "10.1.1.2:12", isLocal: false},
  3931  			},
  3932  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
  3933  				{endpoint: "10.1.1.2:122", isLocal: false},
  3934  			},
  3935  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
  3936  				{endpoint: "10.3.3.3:33", isLocal: false},
  3937  			},
  3938  			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
  3939  				{endpoint: "10.4.4.4:44", isLocal: true},
  3940  			},
  3941  		},
  3942  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{
  3943  			Endpoint:        "10.2.2.2:22",
  3944  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  3945  		}, {
  3946  			Endpoint:        "10.2.2.22:22",
  3947  			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
  3948  		}, {
  3949  			Endpoint:        "10.2.2.3:23",
  3950  			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
  3951  		}, {
  3952  			Endpoint:        "10.4.4.5:44",
  3953  			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
  3954  		}, {
  3955  			Endpoint:        "10.4.4.6:45",
  3956  			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
  3957  		}},
  3958  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3959  			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
  3960  			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
  3961  			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
  3962  		},
  3963  		expectedLocalEndpoints: map[types.NamespacedName]int{
  3964  			makeNSN("ns4", "ep4"): 1,
  3965  		},
  3966  	}, {
  3967  		// Case[14]: change from 0 endpoint addresses to 1 named port
  3968  		name:              "change from 0 endpoint addresses to 1 named port",
  3969  		previousEndpoints: emptyEndpointSlices,
  3970  		currentEndpoints:  namedPort,
  3971  		oldEndpoints:      map[proxy.ServicePortName][]endpointExpectation{},
  3972  		expectedResult: map[proxy.ServicePortName][]endpointExpectation{
  3973  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
  3974  				{endpoint: "10.1.1.1:11", isLocal: false},
  3975  			},
  3976  		},
  3977  		expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{},
  3978  		expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{
  3979  			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
  3980  		},
  3981  		expectedLocalEndpoints: map[types.NamespacedName]int{},
  3982  	},
  3983  	}
  3984  
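        	// Each test case is driven through the change tracker twice: first the
        	// "previous" slices are added and the resulting map is checked against
        	// tc.oldEndpoints; then the Add/Update/Delete handlers replay the
        	// transition to the "current" slices and the Update result is verified.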
  3985  	for tci, tc := range testCases {
  3986  		t.Run(tc.name, func(t *testing.T) {
  3987  			ipt := iptablestest.NewFake()
  3988  			fp := NewFakeProxier(ipt)
  3989  			fp.hostname = testHostname
  3990  
  3991  			// First check that, after adding all previous versions of the
  3992  			// endpoints, fp.endpointsMap matches tc.oldEndpoints.
  3993  			for i := range tc.previousEndpoints {
  3994  				if tc.previousEndpoints[i] != nil {
  3995  					fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
  3996  				}
  3997  			}
  3998  			fp.endpointsMap.Update(fp.endpointsChanges)
  3999  			checkEndpointExpectations(t, tci, fp.endpointsMap, tc.oldEndpoints)
  4000  
  4001  			// Now let's call the appropriate handlers to get to the state we want to be in.
  4002  			if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
  4003  				t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
  4004  			}
  4005  
  4006  			for i := range tc.previousEndpoints {
  4007  				prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
  4008  				switch {
  4009  				case prev == nil:
  4010  					fp.OnEndpointSliceAdd(curr)
  4011  				case curr == nil:
  4012  					fp.OnEndpointSliceDelete(prev)
  4013  				default:
  4014  					fp.OnEndpointSliceUpdate(prev, curr)
  4015  				}
  4016  			}
  4017  			result := fp.endpointsMap.Update(fp.endpointsChanges)
  4018  			newMap := fp.endpointsMap
  4019  			checkEndpointExpectations(t, tci, newMap, tc.expectedResult)
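        			// DeletedUDPEndpoints lists the UDP endpoints removed by this update;
        			// the proxier uses it to flush their stale conntrack entries.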
  4020  			if len(result.DeletedUDPEndpoints) != len(tc.expectedDeletedUDPEndpoints) {
  4021  				t.Errorf("[%d] expected %d deleted UDP endpoints, got %d: %v", tci, len(tc.expectedDeletedUDPEndpoints), len(result.DeletedUDPEndpoints), result.DeletedUDPEndpoints)
  4022  			}
  4023  			for _, x := range tc.expectedDeletedUDPEndpoints {
  4024  				found := false
  4025  				for _, deleted := range result.DeletedUDPEndpoints {
  4026  					if deleted == x {
  4027  						found = true
  4028  						break
  4029  					}
  4030  				}
  4031  				if !found {
  4032  					t.Errorf("[%d] expected deleted UDP endpoint %v, but didn't find it: %v", tci, x, result.DeletedUDPEndpoints)
  4033  				}
  4034  			}
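        			// NewlyActiveUDPServices lists UDP service ports that went from zero
        			// endpoints to at least one, so that stale conntrack entries for
        			// those service IPs can be cleared as well.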
  4035  			if len(result.NewlyActiveUDPServices) != len(tc.expectedNewlyActiveUDPServices) {
  4036  				t.Errorf("[%d] expected %d newly-active UDP services, got %d: %v", tci, len(tc.expectedNewlyActiveUDPServices), len(result.NewlyActiveUDPServices), result.NewlyActiveUDPServices)
  4037  			}
  4038  			for svcName := range tc.expectedNewlyActiveUDPServices {
  4039  				found := false
  4040  				for _, active := range result.NewlyActiveUDPServices {
  4041  					if active == svcName {
  4042  						found = true
        						break
  4043  					}
  4044  				}
  4045  				if !found {
  4046  					t.Errorf("[%d] expected newly-active UDP service %v, but didn't find it: %v", tci, svcName, result.NewlyActiveUDPServices)
  4047  				}
  4048  			}
  4049  			localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  4050  			if !reflect.DeepEqual(localReadyEndpoints, tc.expectedLocalEndpoints) {
  4051  				t.Errorf("[%d] expected local endpoints %v, got %v", tci, tc.expectedLocalEndpoints, localReadyEndpoints)
  4052  			}
  4053  		})
  4054  	}
  4055  }
  4056  
  4057  // TestHealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating.
  4058  func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
  4059  	ipt := iptablestest.NewFake()
  4060  	fp := NewFakeProxier(ipt)
  4061  	fp.OnServiceSynced()
  4062  	fp.OnEndpointSlicesSynced()
  4063  
  4064  	serviceName := "svc1"
  4065  	namespaceName := "ns1"
  4066  
  4067  	fp.OnServiceAdd(&v1.Service{
  4068  		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4069  		Spec: v1.ServiceSpec{
  4070  			ClusterIP: "172.30.1.1",
  4071  			Selector:  map[string]string{"foo": "bar"},
  4072  			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP}},
  4073  		},
  4074  	})
  4075  
  4076  	endpointSlice := &discovery.EndpointSlice{
  4077  		ObjectMeta: metav1.ObjectMeta{
  4078  			Name:      fmt.Sprintf("%s-1", serviceName),
  4079  			Namespace: namespaceName,
  4080  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4081  		},
  4082  		Ports: []discovery.EndpointPort{{
  4083  			Name:     ptr.To(""),
  4084  			Port:     ptr.To[int32](80),
  4085  			Protocol: ptr.To(v1.ProtocolTCP),
  4086  		}},
  4087  		AddressType: discovery.AddressTypeIPv4,
  4088  		Endpoints: []discovery.Endpoint{{
  4089  			Addresses:  []string{"10.0.1.1"},
  4090  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4091  			NodeName:   ptr.To(testHostname),
  4092  		}, {
  4093  			Addresses:  []string{"10.0.1.2"},
  4094  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4095  			NodeName:   ptr.To(testHostname),
  4096  		}, {
  4097  			Addresses:  []string{"10.0.1.3"},
  4098  			Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4099  			NodeName:   ptr.To(testHostname),
  4100  		}, { // not ready endpoints should be ignored
  4101  			Addresses:  []string{"10.0.1.4"},
  4102  			Conditions: discovery.EndpointConditions{Ready: ptr.To(false)},
  4103  			NodeName:   ptr.To(testHostname),
  4104  		}},
  4105  	}
  4106  
  4107  	fp.OnEndpointSliceAdd(endpointSlice)
  4108  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
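        	// LocalReadyEndpoints is keyed by service; the single entry here counts
        	// the three ready endpoints on testHostname (10.0.1.4 is not ready).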
  4109  	localReadyEndpoints := fp.endpointsMap.LocalReadyEndpoints()
  4110  	if len(localReadyEndpoints) != 1 {
  4111  		t.Errorf("unexpected number of local ready endpoints, expected 1 but got: %d", len(localReadyEndpoints))
  4112  	}
  4113  
  4114  	// set all endpoints to terminating
  4115  	endpointSliceTerminating := &discovery.EndpointSlice{
  4116  		ObjectMeta: metav1.ObjectMeta{
  4117  			Name:      fmt.Sprintf("%s-1", serviceName),
  4118  			Namespace: namespaceName,
  4119  			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4120  		},
  4121  		Ports: []discovery.EndpointPort{{
  4122  			Name:     ptr.To(""),
  4123  			Port:     ptr.To[int32](80),
  4124  			Protocol: ptr.To(v1.ProtocolTCP),
  4125  		}},
  4126  		AddressType: discovery.AddressTypeIPv4,
  4127  		Endpoints: []discovery.Endpoint{{
  4128  			Addresses: []string{"10.0.1.1"},
  4129  			Conditions: discovery.EndpointConditions{
  4130  				Ready:       ptr.To(false),
  4131  				Serving:     ptr.To(true),
  4132  				Terminating: ptr.To(true),
  4133  			},
  4134  			NodeName: ptr.To(testHostname),
  4135  		}, {
  4136  			Addresses: []string{"10.0.1.2"},
  4137  			Conditions: discovery.EndpointConditions{
  4138  				Ready:       ptr.To(false),
  4139  				Serving:     ptr.To(true),
  4140  				Terminating: ptr.To(true),
  4141  			},
  4142  			NodeName: ptr.To(testHostname),
  4143  		}, {
  4144  			Addresses: []string{"10.0.1.3"},
  4145  			Conditions: discovery.EndpointConditions{
  4146  				Ready:       ptr.To(false),
  4147  				Serving:     ptr.To(true),
  4148  				Terminating: ptr.To(true),
  4149  			},
  4150  			NodeName: ptr.To(testHostname),
  4151  		}, { // endpoints that are not serving should be ignored
  4152  			Addresses: []string{"10.0.1.4"},
  4153  			Conditions: discovery.EndpointConditions{
  4154  				Ready:       ptr.To(false),
  4155  				Serving:     ptr.To(false),
  4156  				Terminating: ptr.To(true),
  4157  			},
  4158  			NodeName: ptr.To(testHostname),
  4159  		}},
  4160  	}
  4161  
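        	// After the update no local endpoint is ready, so the health checker
        	// should report zero local endpoints for the service.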
  4162  	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
  4163  	_ = fp.endpointsMap.Update(fp.endpointsChanges)
  4164  	localReadyEndpoints = fp.endpointsMap.LocalReadyEndpoints()
  4165  	if len(localReadyEndpoints) != 0 {
  4166  		t.Errorf("unexpected number of local ready endpoints, expected 0 but got: %d", len(localReadyEndpoints))
  4167  	}
  4168  }
  4169  
  4170  func TestProxierMetricsIPTablesTotalRules(t *testing.T) {
  4171  	logger, _ := klogtesting.NewTestContext(t)
  4172  	ipt := iptablestest.NewFake()
  4173  	fp := NewFakeProxier(ipt)
  4174  
  4175  	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
  4176  
  4177  	svcIP := "172.30.0.41"
  4178  	svcPort := 80
  4179  	nodePort := 31201
  4180  	svcPortName := proxy.ServicePortName{
  4181  		NamespacedName: makeNSN("ns1", "svc1"),
  4182  		Port:           "p80",
  4183  		Protocol:       v1.ProtocolTCP,
  4184  	}
  4185  
  4186  	makeServiceMap(fp,
  4187  		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  4188  			svc.Spec.ClusterIP = svcIP
  4189  			svc.Spec.Ports = []v1.ServicePort{{
  4190  				Name:     svcPortName.Port,
  4191  				Port:     int32(svcPort),
  4192  				Protocol: v1.ProtocolTCP,
  4193  				NodePort: int32(nodePort),
  4194  			}}
  4195  		}),
  4196  	)
  4197  	fp.syncProxyRules()
  4198  	iptablesData := fp.iptablesData.String()
  4199  
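        	// The per-table rule-count metrics must agree with a direct count of
        	// the rules in the iptables-save data the proxier generated.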
  4200  	nFilterRules := countRulesFromMetric(logger, utiliptables.TableFilter)
  4201  	expectedFilterRules := countRules(logger, utiliptables.TableFilter, iptablesData)
  4202  
  4203  	if nFilterRules != expectedFilterRules {
  4204  		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
  4205  	}
  4206  
  4207  	nNatRules := countRulesFromMetric(logger, utiliptables.TableNAT)
  4208  	expectedNatRules := countRules(logger, utiliptables.TableNAT, iptablesData)
  4209  
  4210  	if nNatRules != expectedNatRules {
  4211  		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
  4212  	}
  4213  
  4214  	populateEndpointSlices(fp,
  4215  		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
  4216  			eps.AddressType = discovery.AddressTypeIPv4
  4217  			eps.Endpoints = []discovery.Endpoint{{
  4218  				Addresses: []string{"10.0.0.2"},
  4219  			}, {
  4220  				Addresses: []string{"10.0.0.5"},
  4221  			}}
  4222  			eps.Ports = []discovery.EndpointPort{{
  4223  				Name:     ptr.To(svcPortName.Port),
  4224  				Port:     ptr.To(int32(svcPort)),
  4225  				Protocol: ptr.To(v1.ProtocolTCP),
  4226  			}}
  4227  		}),
  4228  	)
  4229  
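        	// Adding endpoints changes the number of generated rules; re-sync and
        	// verify that the metrics still track the actual output.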
  4230  	fp.syncProxyRules()
  4231  	iptablesData = fp.iptablesData.String()
  4232  
  4233  	nFilterRules = countRulesFromMetric(logger, utiliptables.TableFilter)
  4234  	expectedFilterRules = countRules(logger, utiliptables.TableFilter, iptablesData)
  4235  
  4236  	if nFilterRules != expectedFilterRules {
  4237  		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
  4238  	}
  4239  
  4240  	nNatRules = countRulesFromMetric(logger, utiliptables.TableNAT)
  4241  	expectedNatRules = countRules(logger, utiliptables.TableNAT, iptablesData)
  4242  
  4243  	if nNatRules != expectedNatRules {
  4244  		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
  4245  	}
  4246  }
  4247  
  4248  // TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.
  4249  
  4250  // This test ensures that the iptables proxier supports translating Endpoints to
  4251  // iptables output when internalTrafficPolicy is specified
  4252  func TestInternalTrafficPolicy(t *testing.T) {
  4253  	type endpoint struct {
  4254  		ip       string
  4255  		hostname string
  4256  	}
  4257  
  4258  	testCases := []struct {
  4259  		name                  string
  4260  		line                  int
  4261  		internalTrafficPolicy *v1.ServiceInternalTrafficPolicy
  4262  		endpoints             []endpoint
  4263  		flowTests             []packetFlowTest
  4264  	}{
  4265  		{
  4266  			name:                  "internalTrafficPolicy is cluster",
  4267  			line:                  getLine(),
  4268  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster),
  4269  			endpoints: []endpoint{
  4270  				{"10.0.1.1", testHostname},
  4271  				{"10.0.1.2", "host1"},
  4272  				{"10.0.1.3", "host2"},
  4273  			},
  4274  			flowTests: []packetFlowTest{
  4275  				{
  4276  					name:     "pod to ClusterIP hits all endpoints",
  4277  					sourceIP: "10.0.0.2",
  4278  					destIP:   "172.30.1.1",
  4279  					destPort: 80,
  4280  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.3:80",
  4281  					masq:     false,
  4282  				},
  4283  			},
  4284  		},
  4285  		{
  4286  			name:                  "internalTrafficPolicy is local and there is one local endpoint",
  4287  			line:                  getLine(),
  4288  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4289  			endpoints: []endpoint{
  4290  				{"10.0.1.1", testHostname},
  4291  				{"10.0.1.2", "host1"},
  4292  				{"10.0.1.3", "host2"},
  4293  			},
  4294  			flowTests: []packetFlowTest{
  4295  				{
  4296  					name:     "pod to ClusterIP hits only local endpoint",
  4297  					sourceIP: "10.0.0.2",
  4298  					destIP:   "172.30.1.1",
  4299  					destPort: 80,
  4300  					output:   "10.0.1.1:80",
  4301  					masq:     false,
  4302  				},
  4303  			},
  4304  		},
  4305  		{
  4306  			name:                  "internalTrafficPolicy is local and there are multiple local endpoints",
  4307  			line:                  getLine(),
  4308  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4309  			endpoints: []endpoint{
  4310  				{"10.0.1.1", testHostname},
  4311  				{"10.0.1.2", testHostname},
  4312  				{"10.0.1.3", "host2"},
  4313  			},
  4314  			flowTests: []packetFlowTest{
  4315  				{
  4316  					name:     "pod to ClusterIP hits all local endpoints",
  4317  					sourceIP: "10.0.0.2",
  4318  					destIP:   "172.30.1.1",
  4319  					destPort: 80,
  4320  					output:   "10.0.1.1:80, 10.0.1.2:80",
  4321  					masq:     false,
  4322  				},
  4323  			},
  4324  		},
  4325  		{
  4326  			name:                  "internalTrafficPolicy is local and there are no local endpoints",
  4327  			line:                  getLine(),
  4328  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  4329  			endpoints: []endpoint{
  4330  				{"10.0.1.1", "host0"},
  4331  				{"10.0.1.2", "host1"},
  4332  				{"10.0.1.3", "host2"},
  4333  			},
  4334  			flowTests: []packetFlowTest{
  4335  				{
  4336  					name:     "no endpoints",
  4337  					sourceIP: "10.0.0.2",
  4338  					destIP:   "172.30.1.1",
  4339  					destPort: 80,
  4340  					output:   "DROP",
  4341  				},
  4342  			},
  4343  		},
  4344  	}
  4345  
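        	// Each case programs the service and endpoints into a fresh proxier and
        	// then traces simulated packets through the generated rules.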
  4346  	for _, tc := range testCases {
  4347  		t.Run(tc.name, func(t *testing.T) {
  4348  			ipt := iptablestest.NewFake()
  4349  			fp := NewFakeProxier(ipt)
  4350  			fp.OnServiceSynced()
  4351  			fp.OnEndpointSlicesSynced()
  4352  
  4353  			serviceName := "svc1"
  4354  			namespaceName := "ns1"
  4355  
  4356  			svc := &v1.Service{
  4357  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  4358  				Spec: v1.ServiceSpec{
  4359  					ClusterIP: "172.30.1.1",
  4360  					Selector:  map[string]string{"foo": "bar"},
  4361  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
  4362  				},
  4363  			}
  4364  			if tc.internalTrafficPolicy != nil {
  4365  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  4366  			}
  4367  
  4368  			fp.OnServiceAdd(svc)
  4369  
  4370  			endpointSlice := &discovery.EndpointSlice{
  4371  				ObjectMeta: metav1.ObjectMeta{
  4372  					Name:      fmt.Sprintf("%s-1", serviceName),
  4373  					Namespace: namespaceName,
  4374  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  4375  				},
  4376  				Ports: []discovery.EndpointPort{{
  4377  					Name:     ptr.To(""),
  4378  					Port:     ptr.To[int32](80),
  4379  					Protocol: ptr.To(v1.ProtocolTCP),
  4380  				}},
  4381  				AddressType: discovery.AddressTypeIPv4,
  4382  			}
  4383  			for _, ep := range tc.endpoints {
  4384  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  4385  					Addresses:  []string{ep.ip},
  4386  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  4387  					NodeName:   ptr.To(ep.hostname),
  4388  				})
  4389  			}
  4390  
  4391  			fp.OnEndpointSliceAdd(endpointSlice)
  4392  			fp.syncProxyRules()
  4393  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tc.flowTests)
  4394  
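        			// With the slice deleted the service has no endpoints at all, so
        			// traffic to the ClusterIP should be rejected rather than dropped.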
  4395  			fp.OnEndpointSliceDelete(endpointSlice)
  4396  			fp.syncProxyRules()
  4397  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, []packetFlowTest{
  4398  				{
  4399  					name:     "endpoints deleted",
  4400  					sourceIP: "10.0.0.2",
  4401  					destIP:   "172.30.1.1",
  4402  					destPort: 80,
  4403  					output:   "REJECT",
  4404  				},
  4405  			})
  4406  		})
  4407  	}
  4408  }
  4409  
  4410  // TestTerminatingEndpointsTrafficPolicyLocal tests that when there are local ready and
  4411  // ready + terminating endpoints, only the ready endpoints are used.
  4412  func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
  4413  	service := &v1.Service{
  4414  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  4415  		Spec: v1.ServiceSpec{
  4416  			ClusterIP:             "172.30.1.1",
  4417  			Type:                  v1.ServiceTypeLoadBalancer,
  4418  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  4419  			Ports: []v1.ServicePort{
  4420  				{
  4421  					Name:       "",
  4422  					TargetPort: intstr.FromInt32(80),
  4423  					Port:       80,
  4424  					Protocol:   v1.ProtocolTCP,
  4425  				},
  4426  			},
  4427  			HealthCheckNodePort: 30000,
  4428  		},
  4429  		Status: v1.ServiceStatus{
  4430  			LoadBalancer: v1.LoadBalancerStatus{
  4431  				Ingress: []v1.LoadBalancerIngress{
  4432  					{IP: "1.2.3.4"},
  4433  				},
  4434  			},
  4435  		},
  4436  	}
  4437  
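        	// With ExternalTrafficPolicy: Local, external traffic may only be
        	// delivered to local endpoints, falling back to local endpoints that
        	// are serving-and-terminating when no local ready endpoints exist.
        	// Internal traffic uses all ready (or, failing that, all
        	// serving-and-terminating) endpoints cluster-wide.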
  4438  	testcases := []struct {
  4439  		name          string
  4440  		line          int
  4441  		endpointslice *discovery.EndpointSlice
  4442  		flowTests     []packetFlowTest
  4443  	}{
  4444  		{
  4445  			name: "ready endpoints exist",
  4446  			line: getLine(),
  4447  			endpointslice: &discovery.EndpointSlice{
  4448  				ObjectMeta: metav1.ObjectMeta{
  4449  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4450  					Namespace: "ns1",
  4451  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4452  				},
  4453  				Ports: []discovery.EndpointPort{{
  4454  					Name:     ptr.To(""),
  4455  					Port:     ptr.To[int32](80),
  4456  					Protocol: ptr.To(v1.ProtocolTCP),
  4457  				}},
  4458  				AddressType: discovery.AddressTypeIPv4,
  4459  				Endpoints: []discovery.Endpoint{
  4460  					{
  4461  						Addresses: []string{"10.0.1.1"},
  4462  						Conditions: discovery.EndpointConditions{
  4463  							Ready:       ptr.To(true),
  4464  							Serving:     ptr.To(true),
  4465  							Terminating: ptr.To(false),
  4466  						},
  4467  						NodeName: ptr.To(testHostname),
  4468  					},
  4469  					{
  4470  						Addresses: []string{"10.0.1.2"},
  4471  						Conditions: discovery.EndpointConditions{
  4472  							Ready:       ptr.To(true),
  4473  							Serving:     ptr.To(true),
  4474  							Terminating: ptr.To(false),
  4475  						},
  4476  						NodeName: ptr.To(testHostname),
  4477  					},
  4478  					{
  4479  						// this endpoint should be ignored for external since there are ready non-terminating endpoints
  4480  						Addresses: []string{"10.0.1.3"},
  4481  						Conditions: discovery.EndpointConditions{
  4482  							Ready:       ptr.To(false),
  4483  							Serving:     ptr.To(true),
  4484  							Terminating: ptr.To(true),
  4485  						},
  4486  						NodeName: ptr.To(testHostname),
  4487  					},
  4488  					{
  4489  						// this endpoint should be ignored since it is neither ready nor serving
  4490  						Addresses: []string{"10.0.1.4"},
  4491  						Conditions: discovery.EndpointConditions{
  4492  							Ready:       ptr.To(false),
  4493  							Serving:     ptr.To(false),
  4494  							Terminating: ptr.To(true),
  4495  						},
  4496  						NodeName: ptr.To(testHostname),
  4497  					},
  4498  					{
  4499  						// this endpoint should be ignored for external since it's not local
  4500  						Addresses: []string{"10.0.1.5"},
  4501  						Conditions: discovery.EndpointConditions{
  4502  							Ready:       ptr.To(true),
  4503  							Serving:     ptr.To(true),
  4504  							Terminating: ptr.To(false),
  4505  						},
  4506  						NodeName: ptr.To("host-1"),
  4507  					},
  4508  				},
  4509  			},
  4510  			flowTests: []packetFlowTest{
  4511  				{
  4512  					name:     "pod to clusterIP",
  4513  					sourceIP: "10.0.0.2",
  4514  					destIP:   "172.30.1.1",
  4515  					destPort: 80,
  4516  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  4517  					masq:     false,
  4518  				},
  4519  				{
  4520  					name:     "external to LB",
  4521  					sourceIP: testExternalClient,
  4522  					destIP:   "1.2.3.4",
  4523  					destPort: 80,
  4524  					output:   "10.0.1.1:80, 10.0.1.2:80",
  4525  					masq:     false,
  4526  				},
  4527  			},
  4528  		},
  4529  		{
  4530  			name: "only terminating endpoints exist",
  4531  			line: getLine(),
  4532  			endpointslice: &discovery.EndpointSlice{
  4533  				ObjectMeta: metav1.ObjectMeta{
  4534  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4535  					Namespace: "ns1",
  4536  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4537  				},
  4538  				Ports: []discovery.EndpointPort{{
  4539  					Name:     ptr.To(""),
  4540  					Port:     ptr.To[int32](80),
  4541  					Protocol: ptr.To(v1.ProtocolTCP),
  4542  				}},
  4543  				AddressType: discovery.AddressTypeIPv4,
  4544  				Endpoints: []discovery.Endpoint{
  4545  					{
  4546  						// this endpoint should be used since there are only ready terminating endpoints
  4547  						Addresses: []string{"10.0.1.2"},
  4548  						Conditions: discovery.EndpointConditions{
  4549  							Ready:       ptr.To(false),
  4550  							Serving:     ptr.To(true),
  4551  							Terminating: ptr.To(true),
  4552  						},
  4553  						NodeName: ptr.To(testHostname),
  4554  					},
  4555  					{
  4556  						// this endpoint should be used since there are only ready terminating endpoints
  4557  						Addresses: []string{"10.0.1.3"},
  4558  						Conditions: discovery.EndpointConditions{
  4559  							Ready:       ptr.To(false),
  4560  							Serving:     ptr.To(true),
  4561  							Terminating: ptr.To(true),
  4562  						},
  4563  						NodeName: ptr.To(testHostname),
  4564  					},
  4565  					{
  4566  						// this endpoint should not be used since it is terminating and not serving.
  4567  						Addresses: []string{"10.0.1.4"},
  4568  						Conditions: discovery.EndpointConditions{
  4569  							Ready:       ptr.To(false),
  4570  							Serving:     ptr.To(false),
  4571  							Terminating: ptr.To(true),
  4572  						},
  4573  						NodeName: ptr.To(testHostname),
  4574  					},
  4575  					{
  4576  						// this endpoint should be ignored for external since it's not local
  4577  						Addresses: []string{"10.0.1.5"},
  4578  						Conditions: discovery.EndpointConditions{
  4579  							Ready:       ptr.To(true),
  4580  							Serving:     ptr.To(true),
  4581  							Terminating: ptr.To(false),
  4582  						},
  4583  						NodeName: ptr.To("host-1"),
  4584  					},
  4585  				},
  4586  			},
  4587  			flowTests: []packetFlowTest{
  4588  				{
  4589  					name:     "pod to clusterIP",
  4590  					sourceIP: "10.0.0.2",
  4591  					destIP:   "172.30.1.1",
  4592  					destPort: 80,
  4593  					output:   "10.0.1.5:80",
  4594  					masq:     false,
  4595  				},
  4596  				{
  4597  					name:     "external to LB",
  4598  					sourceIP: testExternalClient,
  4599  					destIP:   "1.2.3.4",
  4600  					destPort: 80,
  4601  					output:   "10.0.1.2:80, 10.0.1.3:80",
  4602  					masq:     false,
  4603  				},
  4604  			},
  4605  		},
  4606  		{
  4607  			name: "terminating endpoints on remote node",
  4608  			line: getLine(),
  4609  			endpointslice: &discovery.EndpointSlice{
  4610  				ObjectMeta: metav1.ObjectMeta{
  4611  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4612  					Namespace: "ns1",
  4613  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4614  				},
  4615  				Ports: []discovery.EndpointPort{{
  4616  					Name:     ptr.To(""),
  4617  					Port:     ptr.To[int32](80),
  4618  					Protocol: ptr.To(v1.ProtocolTCP),
  4619  				}},
  4620  				AddressType: discovery.AddressTypeIPv4,
  4621  				Endpoints: []discovery.Endpoint{
  4622  					{
  4623  						// this endpoint won't be used because it's not local,
  4624  						// but it will prevent a REJECT rule from being created
  4625  						Addresses: []string{"10.0.1.5"},
  4626  						Conditions: discovery.EndpointConditions{
  4627  							Ready:       ptr.To(false),
  4628  							Serving:     ptr.To(true),
  4629  							Terminating: ptr.To(true),
  4630  						},
  4631  						NodeName: ptr.To("host-1"),
  4632  					},
  4633  				},
  4634  			},
  4635  			flowTests: []packetFlowTest{
  4636  				{
  4637  					name:     "pod to clusterIP",
  4638  					sourceIP: "10.0.0.2",
  4639  					destIP:   "172.30.1.1",
  4640  					destPort: 80,
  4641  					output:   "10.0.1.5:80",
  4642  				},
  4643  				{
  4644  					name:     "external to LB, no locally-usable endpoints",
  4645  					sourceIP: testExternalClient,
  4646  					destIP:   "1.2.3.4",
  4647  					destPort: 80,
  4648  					output:   "DROP",
  4649  				},
  4650  			},
  4651  		},
  4652  		{
  4653  			name: "no usable endpoints on any node",
  4654  			line: getLine(),
  4655  			endpointslice: &discovery.EndpointSlice{
  4656  				ObjectMeta: metav1.ObjectMeta{
  4657  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4658  					Namespace: "ns1",
  4659  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4660  				},
  4661  				Ports: []discovery.EndpointPort{{
  4662  					Name:     ptr.To(""),
  4663  					Port:     ptr.To[int32](80),
  4664  					Protocol: ptr.To(v1.ProtocolTCP),
  4665  				}},
  4666  				AddressType: discovery.AddressTypeIPv4,
  4667  				Endpoints: []discovery.Endpoint{
  4668  					{
  4669  						// Local but not ready or serving
  4670  						Addresses: []string{"10.0.1.5"},
  4671  						Conditions: discovery.EndpointConditions{
  4672  							Ready:       ptr.To(false),
  4673  							Serving:     ptr.To(false),
  4674  							Terminating: ptr.To(true),
  4675  						},
  4676  						NodeName: ptr.To(testHostname),
  4677  					},
  4678  					{
  4679  						// Remote and not ready or serving
  4680  						Addresses: []string{"10.0.1.5"},
  4681  						Conditions: discovery.EndpointConditions{
  4682  							Ready:       ptr.To(false),
  4683  							Serving:     ptr.To(false),
  4684  							Terminating: ptr.To(true),
  4685  						},
  4686  						NodeName: ptr.To("host-1"),
  4687  					},
  4688  				},
  4689  			},
  4690  			flowTests: []packetFlowTest{
  4691  				{
  4692  					name:     "pod to clusterIP, no usable endpoints",
  4693  					sourceIP: "10.0.0.2",
  4694  					destIP:   "172.30.1.1",
  4695  					destPort: 80,
  4696  					output:   "REJECT",
  4697  				},
  4698  				{
  4699  					name:     "external to LB, no usable endpoints",
  4700  					sourceIP: testExternalClient,
  4701  					destIP:   "1.2.3.4",
  4702  					destPort: 80,
  4703  					output:   "REJECT",
  4704  				},
  4705  			},
  4706  		},
  4707  	}
  4708  
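        	// Run each case against a fresh proxier, then delete the slice and
        	// confirm that both internal and external traffic are rejected.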
  4709  	for _, testcase := range testcases {
  4710  		t.Run(testcase.name, func(t *testing.T) {
  4711  			ipt := iptablestest.NewFake()
  4712  			fp := NewFakeProxier(ipt)
  4713  			fp.OnServiceSynced()
  4714  			fp.OnEndpointSlicesSynced()
  4715  
  4716  			fp.OnServiceAdd(service)
  4717  
  4718  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  4719  			fp.syncProxyRules()
  4720  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests)
  4721  
  4722  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  4723  			fp.syncProxyRules()
  4724  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{
  4725  				{
  4726  					name:     "pod to clusterIP after endpoints deleted",
  4727  					sourceIP: "10.0.0.2",
  4728  					destIP:   "172.30.1.1",
  4729  					destPort: 80,
  4730  					output:   "REJECT",
  4731  				},
  4732  				{
  4733  					name:     "external to LB after endpoints deleted",
  4734  					sourceIP: testExternalClient,
  4735  					destIP:   "1.2.3.4",
  4736  					destPort: 80,
  4737  					output:   "REJECT",
  4738  				},
  4739  			})
  4740  		})
  4741  	}
  4742  }
  4743  
  4744  // TestTerminatingEndpointsTrafficPolicyCluster tests that when there are cluster-wide
  4745  // ready and ready + terminating endpoints, only the ready endpoints are used.
  4746  func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
  4747  	service := &v1.Service{
  4748  		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
  4749  		Spec: v1.ServiceSpec{
  4750  			ClusterIP:             "172.30.1.1",
  4751  			Type:                  v1.ServiceTypeLoadBalancer,
  4752  			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster,
  4753  			Ports: []v1.ServicePort{
  4754  				{
  4755  					Name:       "",
  4756  					TargetPort: intstr.FromInt32(80),
  4757  					Port:       80,
  4758  					Protocol:   v1.ProtocolTCP,
  4759  				},
  4760  			},
  4761  			HealthCheckNodePort: 30000,
  4762  		},
  4763  		Status: v1.ServiceStatus{
  4764  			LoadBalancer: v1.LoadBalancerStatus{
  4765  				Ingress: []v1.LoadBalancerIngress{
  4766  					{IP: "1.2.3.4"},
  4767  				},
  4768  			},
  4769  		},
  4770  	}
  4771  
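        	// With ExternalTrafficPolicy: Cluster, external traffic is masqueraded
        	// and may be delivered to any ready endpoint in the cluster, falling
        	// back to all serving-and-terminating endpoints when none are ready.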
  4772  	testcases := []struct {
  4773  		name          string
  4774  		line          int
  4775  		endpointslice *discovery.EndpointSlice
  4776  		flowTests     []packetFlowTest
  4777  	}{
  4778  		{
  4779  			name: "ready endpoints exist",
  4780  			line: getLine(),
  4781  			endpointslice: &discovery.EndpointSlice{
  4782  				ObjectMeta: metav1.ObjectMeta{
  4783  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4784  					Namespace: "ns1",
  4785  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4786  				},
  4787  				Ports: []discovery.EndpointPort{{
  4788  					Name:     ptr.To(""),
  4789  					Port:     ptr.To[int32](80),
  4790  					Protocol: ptr.To(v1.ProtocolTCP),
  4791  				}},
  4792  				AddressType: discovery.AddressTypeIPv4,
  4793  				Endpoints: []discovery.Endpoint{
  4794  					{
  4795  						Addresses: []string{"10.0.1.1"},
  4796  						Conditions: discovery.EndpointConditions{
  4797  							Ready:       ptr.To(true),
  4798  							Serving:     ptr.To(true),
  4799  							Terminating: ptr.To(false),
  4800  						},
  4801  						NodeName: ptr.To(testHostname),
  4802  					},
  4803  					{
  4804  						Addresses: []string{"10.0.1.2"},
  4805  						Conditions: discovery.EndpointConditions{
  4806  							Ready:       ptr.To(true),
  4807  							Serving:     ptr.To(true),
  4808  							Terminating: ptr.To(false),
  4809  						},
  4810  						NodeName: ptr.To(testHostname),
  4811  					},
  4812  					{
  4813  						// this endpoint should be ignored since there are ready non-terminating endpoints
  4814  						Addresses: []string{"10.0.1.3"},
  4815  						Conditions: discovery.EndpointConditions{
  4816  							Ready:       ptr.To(false),
  4817  							Serving:     ptr.To(true),
  4818  							Terminating: ptr.To(true),
  4819  						},
  4820  						NodeName: ptr.To("another-host"),
  4821  					},
  4822  					{
  4823  						// this endpoint should be ignored since it is not "serving"
  4824  						Addresses: []string{"10.0.1.4"},
  4825  						Conditions: discovery.EndpointConditions{
  4826  							Ready:       ptr.To(false),
  4827  							Serving:     ptr.To(false),
  4828  							Terminating: ptr.To(true),
  4829  						},
  4830  						NodeName: ptr.To("another-host"),
  4831  					},
  4832  					{
  4833  						Addresses: []string{"10.0.1.5"},
  4834  						Conditions: discovery.EndpointConditions{
  4835  							Ready:       ptr.To(true),
  4836  							Serving:     ptr.To(true),
  4837  							Terminating: ptr.To(false),
  4838  						},
  4839  						NodeName: ptr.To("another-host"),
  4840  					},
  4841  				},
  4842  			},
  4843  			flowTests: []packetFlowTest{
  4844  				{
  4845  					name:     "pod to clusterIP",
  4846  					sourceIP: "10.0.0.2",
  4847  					destIP:   "172.30.1.1",
  4848  					destPort: 80,
  4849  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  4850  					masq:     false,
  4851  				},
  4852  				{
  4853  					name:     "external to LB",
  4854  					sourceIP: testExternalClient,
  4855  					destIP:   "1.2.3.4",
  4856  					destPort: 80,
  4857  					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
  4858  					masq:     true,
  4859  				},
  4860  			},
  4861  		},
  4862  		{
  4863  			name: "only terminating endpoints exist",
  4864  			line: getLine(),
  4865  			endpointslice: &discovery.EndpointSlice{
  4866  				ObjectMeta: metav1.ObjectMeta{
  4867  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4868  					Namespace: "ns1",
  4869  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4870  				},
  4871  				Ports: []discovery.EndpointPort{{
  4872  					Name:     ptr.To(""),
  4873  					Port:     ptr.To[int32](80),
  4874  					Protocol: ptr.To(v1.ProtocolTCP),
  4875  				}},
  4876  				AddressType: discovery.AddressTypeIPv4,
  4877  				Endpoints: []discovery.Endpoint{
  4878  					{
  4879  						// this endpoint should be used since there are only ready terminating endpoints
  4880  						Addresses: []string{"10.0.1.2"},
  4881  						Conditions: discovery.EndpointConditions{
  4882  							Ready:       ptr.To(false),
  4883  							Serving:     ptr.To(true),
  4884  							Terminating: ptr.To(true),
  4885  						},
  4886  						NodeName: ptr.To(testHostname),
  4887  					},
  4888  					{
  4889  						// this endpoint should be used since there are only ready terminating endpoints
  4890  						Addresses: []string{"10.0.1.3"},
  4891  						Conditions: discovery.EndpointConditions{
  4892  							Ready:       ptr.To(false),
  4893  							Serving:     ptr.To(true),
  4894  							Terminating: ptr.To(true),
  4895  						},
  4896  						NodeName: ptr.To(testHostname),
  4897  					},
  4898  					{
  4899  						// this endpoint should not be used since it is terminating and not serving.
  4900  						Addresses: []string{"10.0.1.4"},
  4901  						Conditions: discovery.EndpointConditions{
  4902  							Ready:       ptr.To(false),
  4903  							Serving:     ptr.To(false),
  4904  							Terminating: ptr.To(true),
  4905  						},
  4906  						NodeName: ptr.To("another-host"),
  4907  					},
  4908  					{
  4909  						// this endpoint should be used since there are only ready terminating endpoints
  4910  						Addresses: []string{"10.0.1.5"},
  4911  						Conditions: discovery.EndpointConditions{
  4912  							Ready:       ptr.To(false),
  4913  							Serving:     ptr.To(true),
  4914  							Terminating: ptr.To(true),
  4915  						},
  4916  						NodeName: ptr.To("another-host"),
  4917  					},
  4918  				},
  4919  			},
  4920  			flowTests: []packetFlowTest{
  4921  				{
  4922  					name:     "pod to clusterIP",
  4923  					sourceIP: "10.0.0.2",
  4924  					destIP:   "172.30.1.1",
  4925  					destPort: 80,
  4926  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  4927  					masq:     false,
  4928  				},
  4929  				{
  4930  					name:     "external to LB",
  4931  					sourceIP: testExternalClient,
  4932  					destIP:   "1.2.3.4",
  4933  					destPort: 80,
  4934  					output:   "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
  4935  					masq:     true,
  4936  				},
  4937  			},
  4938  		},
  4939  		{
  4940  			name: "terminating endpoints on remote node",
  4941  			line: getLine(),
  4942  			endpointslice: &discovery.EndpointSlice{
  4943  				ObjectMeta: metav1.ObjectMeta{
  4944  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4945  					Namespace: "ns1",
  4946  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4947  				},
  4948  				Ports: []discovery.EndpointPort{{
  4949  					Name:     ptr.To(""),
  4950  					Port:     ptr.To[int32](80),
  4951  					Protocol: ptr.To(v1.ProtocolTCP),
  4952  				}},
  4953  				AddressType: discovery.AddressTypeIPv4,
  4954  				Endpoints: []discovery.Endpoint{
  4955  					{
  4956  						Addresses: []string{"10.0.1.5"},
  4957  						Conditions: discovery.EndpointConditions{
  4958  							Ready:       ptr.To(false),
  4959  							Serving:     ptr.To(true),
  4960  							Terminating: ptr.To(true),
  4961  						},
  4962  						NodeName: ptr.To("host-1"),
  4963  					},
  4964  				},
  4965  			},
  4966  			flowTests: []packetFlowTest{
  4967  				{
  4968  					name:     "pod to clusterIP",
  4969  					sourceIP: "10.0.0.2",
  4970  					destIP:   "172.30.1.1",
  4971  					destPort: 80,
  4972  					output:   "10.0.1.5:80",
  4973  					masq:     false,
  4974  				},
  4975  				{
  4976  					name:     "external to LB",
  4977  					sourceIP: testExternalClient,
  4978  					destIP:   "1.2.3.4",
  4979  					destPort: 80,
  4980  					output:   "10.0.1.5:80",
  4981  					masq:     true,
  4982  				},
  4983  			},
  4984  		},
  4985  		{
  4986  			name: "no usable endpoints on any node",
  4987  			line: getLine(),
  4988  			endpointslice: &discovery.EndpointSlice{
  4989  				ObjectMeta: metav1.ObjectMeta{
  4990  					Name:      fmt.Sprintf("%s-1", "svc1"),
  4991  					Namespace: "ns1",
  4992  					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
  4993  				},
  4994  				Ports: []discovery.EndpointPort{{
  4995  					Name:     ptr.To(""),
  4996  					Port:     ptr.To[int32](80),
  4997  					Protocol: ptr.To(v1.ProtocolTCP),
  4998  				}},
  4999  				AddressType: discovery.AddressTypeIPv4,
  5000  				Endpoints: []discovery.Endpoint{
  5001  					{
  5002  						// Local, not ready or serving
  5003  						Addresses: []string{"10.0.1.5"},
  5004  						Conditions: discovery.EndpointConditions{
  5005  							Ready:       ptr.To(false),
  5006  							Serving:     ptr.To(false),
  5007  							Terminating: ptr.To(true),
  5008  						},
  5009  						NodeName: ptr.To(testHostname),
  5010  					},
  5011  					{
  5012  						// Remote, not ready or serving
  5013  						Addresses: []string{"10.0.1.5"},
  5014  						Conditions: discovery.EndpointConditions{
  5015  							Ready:       ptr.To(false),
  5016  							Serving:     ptr.To(false),
  5017  							Terminating: ptr.To(true),
  5018  						},
  5019  						NodeName: ptr.To("host-1"),
  5020  					},
  5021  				},
  5022  			},
  5023  			flowTests: []packetFlowTest{
  5024  				{
  5025  					name:     "pod to clusterIP",
  5026  					sourceIP: "10.0.0.2",
  5027  					destIP:   "172.30.1.1",
  5028  					destPort: 80,
  5029  					output:   "REJECT",
  5030  				},
  5031  				{
  5032  					name:     "external to LB",
  5033  					sourceIP: testExternalClient,
  5034  					destIP:   "1.2.3.4",
  5035  					destPort: 80,
  5036  					output:   "REJECT",
  5037  				},
  5038  			},
  5039  		},
  5040  	}
  5041  
  5042  	for _, testcase := range testcases {
  5043  		t.Run(testcase.name, func(t *testing.T) {
  5044  
  5045  			ipt := iptablestest.NewFake()
  5046  			fp := NewFakeProxier(ipt)
  5047  			fp.OnServiceSynced()
  5048  			fp.OnEndpointSlicesSynced()
  5049  
  5050  			fp.OnServiceAdd(service)
  5051  
  5052  			fp.OnEndpointSliceAdd(testcase.endpointslice)
  5053  			fp.syncProxyRules()
  5054  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests)
  5055  
  5056  			fp.OnEndpointSliceDelete(testcase.endpointslice)
  5057  			fp.syncProxyRules()
  5058  			runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{
  5059  				{
  5060  					name:     "pod to clusterIP after endpoints deleted",
  5061  					sourceIP: "10.0.0.2",
  5062  					destIP:   "172.30.1.1",
  5063  					destPort: 80,
  5064  					output:   "REJECT",
  5065  				},
  5066  				{
  5067  					name:     "external to LB after endpoints deleted",
  5068  					sourceIP: testExternalClient,
  5069  					destIP:   "1.2.3.4",
  5070  					destPort: 80,
  5071  					output:   "REJECT",
  5072  				},
  5073  			})
  5074  		})
  5075  	}
  5076  }
  5077  
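        // TestInternalExternalMasquerade verifies which endpoints are selected, and
        // whether traffic is masqueraded, for every combination of traffic source
        // (pod, node, localhost, external) and destination (ClusterIP, NodePort,
        // LoadBalancer IP), for services with and without Local traffic policies,
        // under several proxier configurations.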
  5078  func TestInternalExternalMasquerade(t *testing.T) {
  5079  	// (Put the test setup code in an internal function so we can have it here at the
  5080  	// top, before the test cases that will be run against it.)
  5081  	setupTest := func(fp *Proxier) {
  5082  		makeServiceMap(fp,
  5083  			makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5084  				svc.Spec.Type = "LoadBalancer"
  5085  				svc.Spec.ClusterIP = "172.30.0.41"
  5086  				svc.Spec.Ports = []v1.ServicePort{{
  5087  					Name:     "p80",
  5088  					Port:     80,
  5089  					Protocol: v1.ProtocolTCP,
  5090  					NodePort: int32(3001),
  5091  				}}
  5092  				svc.Spec.HealthCheckNodePort = 30001
  5093  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5094  					IP: "1.2.3.4",
  5095  				}}
  5096  			}),
  5097  			makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5098  				svc.Spec.Type = "LoadBalancer"
  5099  				svc.Spec.ClusterIP = "172.30.0.42"
  5100  				svc.Spec.Ports = []v1.ServicePort{{
  5101  					Name:     "p80",
  5102  					Port:     80,
  5103  					Protocol: v1.ProtocolTCP,
  5104  					NodePort: int32(3002),
  5105  				}}
  5106  				svc.Spec.HealthCheckNodePort = 30002
  5107  				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  5108  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5109  					IP: "5.6.7.8",
  5110  				}}
  5111  			}),
  5112  			makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5113  				svc.Spec.Type = "LoadBalancer"
  5114  				svc.Spec.ClusterIP = "172.30.0.43"
  5115  				svc.Spec.Ports = []v1.ServicePort{{
  5116  					Name:     "p80",
  5117  					Port:     80,
  5118  					Protocol: v1.ProtocolTCP,
  5119  					NodePort: int32(3003),
  5120  				}}
  5121  				svc.Spec.HealthCheckNodePort = 30003
  5122  				svc.Spec.InternalTrafficPolicy = ptr.To(v1.ServiceInternalTrafficPolicyLocal)
  5123  				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  5124  					IP: "9.10.11.12",
  5125  				}}
  5126  			}),
  5127  		)
  5128  
  5129  		populateEndpointSlices(fp,
  5130  			makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  5131  				eps.AddressType = discovery.AddressTypeIPv4
  5132  				eps.Endpoints = []discovery.Endpoint{
  5133  					{
  5134  						Addresses: []string{"10.180.0.1"},
  5135  						NodeName:  ptr.To(testHostname),
  5136  					},
  5137  					{
  5138  						Addresses: []string{"10.180.1.1"},
  5139  						NodeName:  ptr.To("remote"),
  5140  					},
  5141  				}
  5142  				eps.Ports = []discovery.EndpointPort{{
  5143  					Name:     ptr.To("p80"),
  5144  					Port:     ptr.To[int32](80),
  5145  					Protocol: ptr.To(v1.ProtocolTCP),
  5146  				}}
  5147  			}),
  5148  			makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  5149  				eps.AddressType = discovery.AddressTypeIPv4
  5150  				eps.Endpoints = []discovery.Endpoint{
  5151  					{
  5152  						Addresses: []string{"10.180.0.2"},
  5153  						NodeName:  ptr.To(testHostname),
  5154  					},
  5155  					{
  5156  						Addresses: []string{"10.180.1.2"},
  5157  						NodeName:  ptr.To("remote"),
  5158  					},
  5159  				}
  5160  				eps.Ports = []discovery.EndpointPort{{
  5161  					Name:     ptr.To("p80"),
  5162  					Port:     ptr.To[int32](80),
  5163  					Protocol: ptr.To(v1.ProtocolTCP),
  5164  				}}
  5165  			}),
  5166  			makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  5167  				eps.AddressType = discovery.AddressTypeIPv4
  5168  				eps.Endpoints = []discovery.Endpoint{
  5169  					{
  5170  						Addresses: []string{"10.180.0.3"},
  5171  						NodeName:  ptr.To(testHostname),
  5172  					},
  5173  					{
  5174  						Addresses: []string{"10.180.1.3"},
  5175  						NodeName:  ptr.To("remote"),
  5176  					},
  5177  				}
  5178  				eps.Ports = []discovery.EndpointPort{{
  5179  					Name:     ptr.To("p80"),
  5180  					Port:     ptr.To[int32](80),
  5181  					Protocol: ptr.To(v1.ProtocolTCP),
  5182  				}}
  5183  			}),
  5184  		)
  5185  
  5186  		fp.syncProxyRules()
  5187  	}
  5188  
  5189  	// We use the same flowTests for all of the testCases. The "output" and "masq"
  5190  	// values here represent the normal case (working localDetector, no masqueradeAll)
  5191  	flowTests := []packetFlowTest{
  5192  		{
  5193  			name:     "pod to ClusterIP",
  5194  			sourceIP: "10.0.0.2",
  5195  			destIP:   "172.30.0.41",
  5196  			destPort: 80,
  5197  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5198  			masq:     false,
  5199  		},
  5200  		{
  5201  			name:     "pod to NodePort",
  5202  			sourceIP: "10.0.0.2",
  5203  			destIP:   testNodeIP,
  5204  			destPort: 3001,
  5205  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5206  			masq:     true,
  5207  		},
  5208  		{
  5209  			name:     "pod to LB",
  5210  			sourceIP: "10.0.0.2",
  5211  			destIP:   "1.2.3.4",
  5212  			destPort: 80,
  5213  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5214  			masq:     true,
  5215  		},
  5216  		{
  5217  			name:     "node to ClusterIP",
  5218  			sourceIP: testNodeIP,
  5219  			destIP:   "172.30.0.41",
  5220  			destPort: 80,
  5221  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5222  			masq:     true,
  5223  		},
  5224  		{
  5225  			name:     "node to NodePort",
  5226  			sourceIP: testNodeIP,
  5227  			destIP:   testNodeIP,
  5228  			destPort: 3001,
  5229  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5230  			masq:     true,
  5231  		},
  5232  		{
  5233  			name:     "localhost to NodePort",
  5234  			sourceIP: "127.0.0.1",
  5235  			destIP:   "127.0.0.1",
  5236  			destPort: 3001,
  5237  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5238  			masq:     true,
  5239  		},
  5240  		{
  5241  			name:     "node to LB",
  5242  			sourceIP: testNodeIP,
  5243  			destIP:   "1.2.3.4",
  5244  			destPort: 80,
  5245  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5246  			masq:     true,
  5247  		},
  5248  		{
  5249  			name:     "external to ClusterIP",
  5250  			sourceIP: testExternalClient,
  5251  			destIP:   "172.30.0.41",
  5252  			destPort: 80,
  5253  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5254  			masq:     true,
  5255  		},
  5256  		{
  5257  			name:     "external to NodePort",
  5258  			sourceIP: testExternalClient,
  5259  			destIP:   testNodeIP,
  5260  			destPort: 3001,
  5261  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5262  			masq:     true,
  5263  		},
  5264  		{
  5265  			name:     "external to LB",
  5266  			sourceIP: testExternalClient,
  5267  			destIP:   "1.2.3.4",
  5268  			destPort: 80,
  5269  			output:   "10.180.0.1:80, 10.180.1.1:80",
  5270  			masq:     true,
  5271  		},
  5272  		{
  5273  			name:     "pod to ClusterIP with eTP:Local",
  5274  			sourceIP: "10.0.0.2",
  5275  			destIP:   "172.30.0.42",
  5276  			destPort: 80,
  5277  
  5278  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5279  			// as "Pod to ClusterIP"
  5280  			output: "10.180.0.2:80, 10.180.1.2:80",
  5281  			masq:   false,
  5282  		},
  5283  		{
  5284  			name:     "pod to NodePort with eTP:Local",
  5285  			sourceIP: "10.0.0.2",
  5286  			destIP:   testNodeIP,
  5287  			destPort: 3002,
  5288  
  5289  			// See the comment below in the "pod to LB with eTP:Local" case.
  5290  			// It doesn't actually make sense to short-circuit here, since if
  5291  			// you connect directly to a NodePort from outside the cluster,
  5292  			// you only get the local endpoints. But it's simpler for us and
  5293  			// slightly more convenient for users to have this case get
  5294  			// short-circuited too.
  5295  			output: "10.180.0.2:80, 10.180.1.2:80",
  5296  			masq:   false,
  5297  		},
  5298  		{
  5299  			name:     "pod to LB with eTP:Local",
  5300  			sourceIP: "10.0.0.2",
  5301  			destIP:   "5.6.7.8",
  5302  			destPort: 80,
  5303  
  5304  			// The short-circuit rule is supposed to make this behave the same
  5305  			// way it would if the packet actually went out to the LB and then
  5306  			// came back into the cluster. So it gets routed to all endpoints,
  5307  			// not just local ones. In reality, if the packet actually left
  5308  			// the cluster, it would have to get masqueraded, but since we can
  5309  			// avoid doing that in the short-circuit case, and not masquerading
  5310  			// is more useful, we avoid masquerading.
  5311  			output: "10.180.0.2:80, 10.180.1.2:80",
  5312  			masq:   false,
  5313  		},
  5314  		{
  5315  			name:     "node to ClusterIP with eTP:Local",
  5316  			sourceIP: testNodeIP,
  5317  			destIP:   "172.30.0.42",
  5318  			destPort: 80,
  5319  
  5320  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5321  			// as "node to ClusterIP"
  5322  			output: "10.180.0.2:80, 10.180.1.2:80",
  5323  			masq:   true,
  5324  		},
  5325  		{
  5326  			name:     "node to NodePort with eTP:Local",
  5327  			sourceIP: testNodeIP,
  5328  			destIP:   testNodeIP,
  5329  			destPort: 3002,
  5330  
  5331  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5332  			// all of svc2's endpoints are used, as with "node to NodePort" above.
  5333  			output: "10.180.0.2:80, 10.180.1.2:80",
  5334  			masq:   true,
  5335  		},
  5336  		{
  5337  			name:     "localhost to NodePort with eTP:Local",
  5338  			sourceIP: "127.0.0.1",
  5339  			destIP:   "127.0.0.1",
  5340  			destPort: 3002,
  5341  
  5342  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5343  			// same as "localhost to NodePort" above.
  5344  			output: "10.180.0.2:80, 10.180.1.2:80",
  5345  			masq:   true,
  5346  		},
  5347  		{
  5348  			name:     "node to LB with eTP:Local",
  5349  			sourceIP: testNodeIP,
  5350  			destIP:   "5.6.7.8",
  5351  			destPort: 80,
  5352  
  5353  			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
  5354  			// same as "node to LB" above.
  5355  			output: "10.180.0.2:80, 10.180.1.2:80",
  5356  			masq:   true,
  5357  		},
  5358  		{
  5359  			name:     "external to ClusterIP with eTP:Local",
  5360  			sourceIP: testExternalClient,
  5361  			destIP:   "172.30.0.42",
  5362  			destPort: 80,
  5363  
  5364  			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
  5365  			// as "external to ClusterIP" above.
  5366  			output: "10.180.0.2:80, 10.180.1.2:80",
  5367  			masq:   true,
  5368  		},
  5369  		{
  5370  			name:     "external to NodePort with eTP:Local",
  5371  			sourceIP: testExternalClient,
  5372  			destIP:   testNodeIP,
  5373  			destPort: 3002,
  5374  
  5375  			// externalTrafficPolicy applies; only the local endpoint is
  5376  			// selected, and we don't masquerade.
  5377  			output: "10.180.0.2:80",
  5378  			masq:   false,
  5379  		},
  5380  		{
  5381  			name:     "external to LB with eTP:Local",
  5382  			sourceIP: testExternalClient,
  5383  			destIP:   "5.6.7.8",
  5384  			destPort: 80,
  5385  
  5386  			// externalTrafficPolicy applies; only the local endpoint is
  5387  			// selected, and we don't masquerade.
  5388  			output: "10.180.0.2:80",
  5389  			masq:   false,
  5390  		},
  5391  		{
  5392  			name:     "pod to ClusterIP with iTP:Local",
  5393  			sourceIP: "10.0.0.2",
  5394  			destIP:   "172.30.0.43",
  5395  			destPort: 80,
  5396  
  5397  			// internalTrafficPolicy applies; only the local endpoint is
  5398  			// selected.
  5399  			output: "10.180.0.3:80",
  5400  			masq:   false,
  5401  		},
  5402  		{
  5403  			name:     "pod to NodePort with iTP:Local",
  5404  			sourceIP: "10.0.0.2",
  5405  			destIP:   testNodeIP,
  5406  			destPort: 3003,
  5407  
  5408  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5409  			// "pod to NodePort" above.
  5410  			output: "10.180.0.3:80, 10.180.1.3:80",
  5411  			masq:   true,
  5412  		},
  5413  		{
  5414  			name:     "pod to LB with iTP:Local",
  5415  			sourceIP: "10.0.0.2",
  5416  			destIP:   "9.10.11.12",
  5417  			destPort: 80,
  5418  
  5419  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5420  			// same as "pod to LB" above.
  5421  			output: "10.180.0.3:80, 10.180.1.3:80",
  5422  			masq:   true,
  5423  		},
  5424  		{
  5425  			name:     "node to ClusterIP with iTP:Local",
  5426  			sourceIP: testNodeIP,
  5427  			destIP:   "172.30.0.43",
  5428  			destPort: 80,
  5429  
  5430  			// internalTrafficPolicy applies; only the local endpoint is selected.
  5431  			// Traffic is masqueraded as in the "node to ClusterIP" case because
  5432  			// internalTrafficPolicy does not affect masquerading.
  5433  			output: "10.180.0.3:80",
  5434  			masq:   true,
  5435  		},
  5436  		{
  5437  			name:     "node to NodePort with iTP:Local",
  5438  			sourceIP: testNodeIP,
  5439  			destIP:   testNodeIP,
  5440  			destPort: 3003,
  5441  
  5442  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5443  			// "node to NodePort" above.
  5444  			output: "10.180.0.3:80, 10.180.1.3:80",
  5445  			masq:   true,
  5446  		},
  5447  		{
  5448  			name:     "localhost to NodePort with iTP:Local",
  5449  			sourceIP: "127.0.0.1",
  5450  			destIP:   "127.0.0.1",
  5451  			destPort: 3003,
  5452  
  5453  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5454  			// "localhost to NodePort" above.
  5455  			output: "10.180.0.3:80, 10.180.1.3:80",
  5456  			masq:   true,
  5457  		},
  5458  		{
  5459  			name:     "node to LB with iTP:Local",
  5460  			sourceIP: testNodeIP,
  5461  			destIP:   "9.10.11.12",
  5462  			destPort: 80,
  5463  
  5464  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5465  			// same as "node to LB" above.
  5466  			output: "10.180.0.3:80, 10.180.1.3:80",
  5467  			masq:   true,
  5468  		},
  5469  		{
  5470  			name:     "external to ClusterIP with iTP:Local",
  5471  			sourceIP: testExternalClient,
  5472  			destIP:   "172.30.0.43",
  5473  			destPort: 80,
  5474  
  5475  			// internalTrafficPolicy applies; only the local endpoint is selected.
  5476  			// Traffic is masqueraded as in the "external to ClusterIP" case
  5477  			// because internalTrafficPolicy does not affect masquerading.
  5478  			output: "10.180.0.3:80",
  5479  			masq:   true,
  5480  		},
  5481  		{
  5482  			name:     "external to NodePort with iTP:Local",
  5483  			sourceIP: testExternalClient,
  5484  			destIP:   testNodeIP,
  5485  			destPort: 3003,
  5486  
  5487  			// internalTrafficPolicy does not apply to NodePort traffic, so same as
  5488  			// "external to NodePort" above.
  5489  			output: "10.180.0.3:80, 10.180.1.3:80",
  5490  			masq:   true,
  5491  		},
  5492  		{
  5493  			name:     "external to LB with iTP:Local",
  5494  			sourceIP: testExternalClient,
  5495  			destIP:   "9.10.11.12",
  5496  			destPort: 80,
  5497  
  5498  			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
  5499  			// same as "external to LB" above.
  5500  			output: "10.180.0.3:80, 10.180.1.3:80",
  5501  			masq:   true,
  5502  		},
  5503  	}
  5504  
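        // packetFlowTestOverride overrides the expected output and/or masq value of
        // the base flowTest with the same name; a nil field keeps the base value.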
  5505  	type packetFlowTestOverride struct {
  5506  		output *string
  5507  		masq   *bool
  5508  	}
  5509  
  5510  	testCases := []struct {
  5511  		name          string
  5512  		line          int
  5513  		masqueradeAll bool
  5514  		localDetector bool
  5515  		overrides     map[string]packetFlowTestOverride
  5516  	}{
  5517  		{
  5518  			name:          "base",
  5519  			line:          getLine(),
  5520  			masqueradeAll: false,
  5521  			localDetector: true,
  5522  			overrides:     nil,
  5523  		},
  5524  		{
  5525  			name:          "no LocalTrafficDetector",
  5526  			line:          getLine(),
  5527  			masqueradeAll: false,
  5528  			localDetector: false,
  5529  			overrides: map[string]packetFlowTestOverride{
  5530  				// With no LocalTrafficDetector, all traffic to a
  5531  				// ClusterIP is assumed to be from a pod, and thus to not
  5532  				// require masquerading.
  5533  				"node to ClusterIP": {
  5534  					masq: ptr.To(false),
  5535  				},
  5536  				"node to ClusterIP with eTP:Local": {
  5537  					masq: ptr.To(false),
  5538  				},
  5539  				"node to ClusterIP with iTP:Local": {
  5540  					masq: ptr.To(false),
  5541  				},
  5542  				"external to ClusterIP": {
  5543  					masq: ptr.To(false),
  5544  				},
  5545  				"external to ClusterIP with eTP:Local": {
  5546  					masq: ptr.To(false),
  5547  				},
  5548  				"external to ClusterIP with iTP:Local": {
  5549  					masq: ptr.To(false),
  5550  				},
  5551  
  5552  				// And there's no eTP:Local short-circuit for pod traffic,
  5553  				// so pods get only the local endpoints.
  5554  				"pod to NodePort with eTP:Local": {
  5555  					output: ptr.To("10.180.0.2:80"),
  5556  				},
  5557  				"pod to LB with eTP:Local": {
  5558  					output: ptr.To("10.180.0.2:80"),
  5559  				},
  5560  			},
  5561  		},
  5562  		{
  5563  			name:          "masqueradeAll",
  5564  			line:          getLine(),
  5565  			masqueradeAll: true,
  5566  			localDetector: true,
  5567  			overrides: map[string]packetFlowTestOverride{
  5568  				// All "to ClusterIP" traffic gets masqueraded when using
  5569  				// --masquerade-all.
  5570  				"pod to ClusterIP": {
  5571  					masq: ptr.To(true),
  5572  				},
  5573  				"pod to ClusterIP with eTP:Local": {
  5574  					masq: ptr.To(true),
  5575  				},
  5576  				"pod to ClusterIP with iTP:Local": {
  5577  					masq: ptr.To(true),
  5578  				},
  5579  			},
  5580  		},
  5581  		{
  5582  			name:          "masqueradeAll, no LocalTrafficDetector",
  5583  			line:          getLine(),
  5584  			masqueradeAll: true,
  5585  			localDetector: false,
  5586  			overrides: map[string]packetFlowTestOverride{
  5587  				// As in "masqueradeAll"
  5588  				"pod to ClusterIP": {
  5589  					masq: ptr.To(true),
  5590  				},
  5591  				"pod to ClusterIP with eTP:Local": {
  5592  					masq: ptr.To(true),
  5593  				},
  5594  				"pod to ClusterIP with iTP:Local": {
  5595  					masq: ptr.To(true),
  5596  				},
  5597  
  5598  				// As in "no LocalTrafficDetector"
  5599  				"pod to NodePort with eTP:Local": {
  5600  					output: ptr.To("10.180.0.2:80"),
  5601  				},
  5602  				"pod to LB with eTP:Local": {
  5603  					output: ptr.To("10.180.0.2:80"),
  5604  				},
  5605  			},
  5606  		},
  5607  	}
  5608  
  5609  	for _, tc := range testCases {
  5610  		t.Run(tc.name, func(t *testing.T) {
  5611  			ipt := iptablestest.NewFake()
  5612  			fp := NewFakeProxier(ipt)
  5613  			fp.masqueradeAll = tc.masqueradeAll
  5614  			if !tc.localDetector {
  5615  				fp.localDetector = proxyutil.NewNoOpLocalDetector()
  5616  			}
  5617  			setupTest(fp)
  5618  
  5619  			// Merge base flowTests with per-test-case overrides
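        			// (An override that matches no flow test name, or that "overrides" a
        			// value to the value it already had, indicates a stale entry, so both
        			// are reported as errors below.)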
  5620  			tcFlowTests := make([]packetFlowTest, len(flowTests))
  5621  			overridesApplied := 0
  5622  			for i := range flowTests {
  5623  				tcFlowTests[i] = flowTests[i]
  5624  				if overrides, set := tc.overrides[flowTests[i].name]; set {
  5625  					overridesApplied++
  5626  					if overrides.masq != nil {
  5627  						if tcFlowTests[i].masq == *overrides.masq {
  5628  							t.Errorf("%q override value for masq is same as base value", flowTests[i].name)
  5629  						}
  5630  						tcFlowTests[i].masq = *overrides.masq
  5631  					}
  5632  					if overrides.output != nil {
  5633  						if tcFlowTests[i].output == *overrides.output {
  5634  							t.Errorf("%q override value for output is same as base value", flowTests[i].name)
  5635  						}
  5636  						tcFlowTests[i].output = *overrides.output
  5637  					}
  5638  				}
  5639  			}
  5640  			if overridesApplied != len(tc.overrides) {
  5641  				t.Errorf("%d overrides did not match any flow test name!", len(tc.overrides)-overridesApplied)
  5642  			}
  5643  			runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tcFlowTests)
  5644  		})
  5645  	}
  5646  }
  5647  
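        // countEndpointsAndComments scans a dump of iptables rules for KUBE-SEP DNAT
        // rules (one per endpoint), returning the rule that contains matchEndpoint
        // (if any), the total number of endpoint rules, and how many of those rules
        // have a "--comment".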
  5648  func countEndpointsAndComments(iptablesData string, matchEndpoint string) (string, int, int) {
  5649  	var numEndpoints, numComments int
  5650  	var matched string
  5651  	for _, line := range strings.Split(iptablesData, "\n") {
  5652  		if strings.HasPrefix(line, "-A KUBE-SEP-") && strings.Contains(line, "-j DNAT") {
  5653  			numEndpoints++
  5654  			if strings.Contains(line, "--comment") {
  5655  				numComments++
  5656  			}
  5657  			if strings.Contains(line, matchEndpoint) {
  5658  				matched = line
  5659  			}
  5660  		}
  5661  	}
  5662  	return matched, numEndpoints, numComments
  5663  }
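
        // TestCountEndpointsAndComments is a small illustrative check of the helper
        // above (the rule strings here are made-up examples, not generated output):
        // given a dump containing two endpoint rules of which only one carries a
        // comment, it should report the matching rule, 2 endpoints, and 1 comment.
        func TestCountEndpointsAndComments(t *testing.T) {
        	iptablesData := dedent.Dedent(`
        		-A KUBE-SEP-AAAAAAAAAAAAAAAA -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
        		-A KUBE-SEP-BBBBBBBBBBBBBBBB -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
        		`)
        	matched, numEndpoints, numComments := countEndpointsAndComments(iptablesData, "10.0.1.2")
        	if !strings.Contains(matched, "KUBE-SEP-BBBBBBBBBBBBBBBB") {
        		t.Errorf("did not match the expected endpoint rule, got %q", matched)
        	}
        	if numEndpoints != 2 || numComments != 1 {
        		t.Errorf("expected 2 endpoints and 1 comment, got %d and %d", numEndpoints, numComments)
        	}
        }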
  5664  
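        // TestSyncProxyRulesLargeClusterMode verifies that once the total number of
        // endpoints crosses largeClusterEndpointsThreshold, the proxier stops adding
        // "--comment"s to endpoint rules, and that the chains of deleted services are
        // cleaned up lazily rather than immediately.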
  5665  func TestSyncProxyRulesLargeClusterMode(t *testing.T) {
  5666  	ipt := iptablestest.NewFake()
  5667  	fp := NewFakeProxier(ipt)
  5668  	fp.masqueradeAll = true
  5669  	fp.syncPeriod = 30 * time.Second
  5670  
  5671  	makeServiceMap(fp,
  5672  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5673  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5674  			svc.Spec.ClusterIP = "172.30.0.41"
  5675  			svc.Spec.Ports = []v1.ServicePort{{
  5676  				Name:     "p80",
  5677  				Port:     80,
  5678  				Protocol: v1.ProtocolTCP,
  5679  			}}
  5680  		}),
  5681  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5682  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5683  			svc.Spec.ClusterIP = "172.30.0.42"
  5684  			svc.Spec.Ports = []v1.ServicePort{{
  5685  				Name:     "p8080",
  5686  				Port:     8080,
  5687  				Protocol: v1.ProtocolTCP,
  5688  			}}
  5689  		}),
  5690  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5691  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5692  			svc.Spec.ClusterIP = "172.30.0.43"
  5693  			svc.Spec.Ports = []v1.ServicePort{{
  5694  				Name:     "p8081",
  5695  				Port:     8081,
  5696  				Protocol: v1.ProtocolTCP,
  5697  			}}
  5698  		}),
  5699  	)
  5700  
  5701  	populateEndpointSlices(fp,
  5702  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  5703  			eps.AddressType = discovery.AddressTypeIPv4
  5704  			eps.Endpoints = make([]discovery.Endpoint, largeClusterEndpointsThreshold/2-1)
  5705  			for i := range eps.Endpoints {
  5706  				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.0.%d.%d", i%256, i/256)}
  5707  			}
  5708  			eps.Ports = []discovery.EndpointPort{{
  5709  				Name:     ptr.To("p80"),
  5710  				Port:     ptr.To[int32](80),
  5711  				Protocol: ptr.To(v1.ProtocolTCP),
  5712  			}}
  5713  		}),
  5714  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  5715  			eps.AddressType = discovery.AddressTypeIPv4
  5716  			eps.Endpoints = make([]discovery.Endpoint, largeClusterEndpointsThreshold/2-1)
  5717  			for i := range eps.Endpoints {
  5718  				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.1.%d.%d", i%256, i/256)}
  5719  			}
  5720  			eps.Ports = []discovery.EndpointPort{{
  5721  				Name:     ptr.To("p8080"),
  5722  				Port:     ptr.To[int32](8080),
  5723  				Protocol: ptr.To(v1.ProtocolTCP),
  5724  			}}
  5725  		}),
  5726  	)
  5727  
  5728  	fp.syncProxyRules()
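        	// Each service was created with largeClusterEndpointsThreshold/2-1
        	// endpoints, so the total is threshold-2: still below the large-cluster
        	// cutoff, so every endpoint rule should have a comment.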
  5729  	expectedEndpoints := 2 * (largeClusterEndpointsThreshold/2 - 1)
  5730  
  5731  	firstEndpoint, numEndpoints, numComments := countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  5732  	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
  5733  	if numEndpoints != expectedEndpoints {
  5734  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  5735  	}
  5736  	if numComments != numEndpoints {
  5737  		t.Errorf("numComments (%d) != numEndpoints (%d) when numEndpoints < threshold (%d)", numComments, numEndpoints, largeClusterEndpointsThreshold)
  5738  	}
  5739  
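        	// Add 3 more endpoints, bringing the total to threshold+1 and pushing the
        	// proxier into large-cluster mode, in which comments are no longer emitted.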
  5740  	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  5741  		eps.AddressType = discovery.AddressTypeIPv4
  5742  		eps.Endpoints = []discovery.Endpoint{{
  5743  			Addresses: []string{"203.0.113.4"},
  5744  		}, {
  5745  			Addresses: []string{"203.0.113.8"},
  5746  		}, {
  5747  			Addresses: []string{"203.0.113.12"},
  5748  		}}
  5749  		eps.Ports = []discovery.EndpointPort{{
  5750  			Name:     ptr.To("p8081"),
  5751  			Port:     ptr.To[int32](8081),
  5752  			Protocol: ptr.To(v1.ProtocolTCP),
  5753  		}}
  5754  	}))
  5755  	fp.syncProxyRules()
  5756  
  5757  	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "203.0.113.4")
  5758  	assert.Equal(t, "-A KUBE-SEP-RUVVH7YV3PHQBDOS -m tcp -p tcp -j DNAT --to-destination 203.0.113.4:8081", firstEndpoint)
  5759  	// syncProxyRules will only have output the endpoints for svc3, since the others
  5760  	// didn't change (and syncProxyRules doesn't automatically do a full resync when you
  5761  	// cross the largeClusterEndpointsThreshold).
  5762  	if numEndpoints != 3 {
  5763  		t.Errorf("Found wrong number of endpoints on partial resync: expected %d, got %d", 3, numEndpoints)
  5764  	}
  5765  	if numComments != 0 {
  5766  		t.Errorf("numComments (%d) != 0 after partial resync when total endpoints (%d) > threshold (%d)", numComments, expectedEndpoints+3, largeClusterEndpointsThreshold)
  5767  	}
  5768  
  5769  	// Now force a full resync and confirm that it rewrites the older services with
  5770  	// no comments as well.
  5771  	fp.forceSyncProxyRules()
  5772  	expectedEndpoints += 3
  5773  
  5774  	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  5775  	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
  5776  	if numEndpoints != expectedEndpoints {
  5777  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  5778  	}
  5779  	if numComments != 0 {
  5780  		t.Errorf("numComments (%d) != 0 when numEndpoints (%d) > threshold (%d)", numComments, numEndpoints, largeClusterEndpointsThreshold)
  5781  	}
  5782  
  5783  	// Now test service deletion; we have to create another service to do this though,
  5784  	// because if we deleted any of the existing services, we'd fall back out of large
  5785  	// cluster mode.
  5786  	svc4 := makeTestService("ns4", "svc4", func(svc *v1.Service) {
  5787  		svc.Spec.Type = v1.ServiceTypeClusterIP
  5788  		svc.Spec.ClusterIP = "172.30.0.44"
  5789  		svc.Spec.Ports = []v1.ServicePort{{
  5790  			Name:     "p8082",
  5791  			Port:     8082,
  5792  			Protocol: v1.ProtocolTCP,
  5793  		}}
  5794  	})
  5795  	fp.OnServiceAdd(svc4)
  5796  	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  5797  		eps.AddressType = discovery.AddressTypeIPv4
  5798  		eps.Endpoints = []discovery.Endpoint{{
  5799  			Addresses: []string{"10.4.0.1"},
  5800  		}}
  5801  		eps.Ports = []discovery.EndpointPort{{
  5802  			Name:     ptr.To("p8082"),
  5803  			Port:     ptr.To[int32](8082),
  5804  			Protocol: ptr.To(v1.ProtocolTCP),
  5805  		}}
  5806  	}))
  5807  	fp.syncProxyRules()
  5808  
  5809  	svc4Endpoint, numEndpoints, _ := countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  5810  	assert.Equal(t, "-A KUBE-SEP-SU5STNODRYEWJAUF -m tcp -p tcp -j DNAT --to-destination 10.4.0.1:8082", svc4Endpoint, "svc4 endpoint was not created")
  5811  	// should only sync svc4
  5812  	if numEndpoints != 1 {
  5813  		t.Errorf("Found wrong number of endpoints after svc4 creation: expected %d, got %d", 1, numEndpoints)
  5814  	}
  5815  
  5816  	// In large-cluster mode, deleting a service does not cause its chains to be
  5817  	// re-synced, but it does not delete them immediately either; cleanup happens later.
  5818  	fp.lastIPTablesCleanup = time.Now()
  5819  	fp.OnServiceDelete(svc4)
  5820  	fp.syncProxyRules()
  5821  
  5822  	svc4Endpoint, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  5823  	assert.Equal(t, "", svc4Endpoint, "svc4 endpoint rule is still present!")
  5824  	// should only sync svc4, and shouldn't output its endpoints
  5825  	if numEndpoints != 0 {
  5826  		t.Errorf("Found wrong number of endpoints after service deletion: expected %d, got %d", 0, numEndpoints)
  5827  	}
  5828  	assert.NotContains(t, fp.iptablesData.String(), "-X ", "iptables data unexpectedly contains chain deletions")
  5829  
  5830  	// But resyncing after a long-enough delay will delete the stale chains
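        	// (Backdate lastIPTablesCleanup to just over syncPeriod ago so that the
        	// cleanup is overdue; Add(-1) subtracts one more nanosecond.)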
  5831  	fp.lastIPTablesCleanup = time.Now().Add(-fp.syncPeriod).Add(-1)
  5832  	fp.syncProxyRules()
  5833  
  5834  	svc4Endpoint, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.4.0.1")
  5835  	assert.Equal(t, "", svc4Endpoint, "svc4 endpoint rule is still present!")
  5836  	if numEndpoints != 0 {
  5837  		t.Errorf("Found wrong number of endpoints after delayed resync: expected %d, got %d", 0, numEndpoints)
  5838  	}
  5839  	assert.Contains(t, fp.iptablesData.String(), "-X KUBE-SVC-EBDQOQU5SJFXRIL3", "iptables data does not contain chain deletion")
  5840  	assert.Contains(t, fp.iptablesData.String(), "-X KUBE-SEP-SU5STNODRYEWJAUF", "iptables data does not contain endpoint deletions")
  5841  
  5842  	// force a full sync and count
  5843  	fp.forceSyncProxyRules()
  5844  	_, numEndpoints, _ = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
  5845  	if numEndpoints != expectedEndpoints {
  5846  		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
  5847  	}
  5848  }
  5849  
  5850  // TestSyncProxyRulesRepeated tests calling syncProxyRules() multiple times with various changes.
  5851  func TestSyncProxyRulesRepeated(t *testing.T) {
  5852  	logger, _ := klogtesting.NewTestContext(t)
  5853  	ipt := iptablestest.NewFake()
  5854  	fp := NewFakeProxier(ipt)
  5855  	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
  5856  	defer legacyregistry.Reset()
  5857  
  5858  	// Create initial state
  5859  	var svc2 *v1.Service
  5860  
  5861  	makeServiceMap(fp,
  5862  		makeTestService("ns1", "svc1", func(svc *v1.Service) {
  5863  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5864  			svc.Spec.ClusterIP = "172.30.0.41"
  5865  			svc.Spec.Ports = []v1.ServicePort{{
  5866  				Name:     "p80",
  5867  				Port:     80,
  5868  				Protocol: v1.ProtocolTCP,
  5869  			}}
  5870  		}),
  5871  		makeTestService("ns2", "svc2", func(svc *v1.Service) {
  5872  			svc2 = svc
  5873  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5874  			svc.Spec.ClusterIP = "172.30.0.42"
  5875  			svc.Spec.Ports = []v1.ServicePort{{
  5876  				Name:     "p8080",
  5877  				Port:     8080,
  5878  				Protocol: v1.ProtocolTCP,
  5879  			}}
  5880  		}),
  5881  	)
  5882  
  5883  	populateEndpointSlices(fp,
  5884  		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  5885  			eps.AddressType = discovery.AddressTypeIPv4
  5886  			eps.Endpoints = []discovery.Endpoint{{
  5887  				Addresses: []string{"10.0.1.1"},
  5888  			}}
  5889  			eps.Ports = []discovery.EndpointPort{{
  5890  				Name:     ptr.To("p80"),
  5891  				Port:     ptr.To[int32](80),
  5892  				Protocol: ptr.To(v1.ProtocolTCP),
  5893  			}}
  5894  		}),
  5895  		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
  5896  			eps.AddressType = discovery.AddressTypeIPv4
  5897  			eps.Endpoints = []discovery.Endpoint{{
  5898  				Addresses: []string{"10.0.2.1"},
  5899  			}}
  5900  			eps.Ports = []discovery.EndpointPort{{
  5901  				Name:     ptr.To("p8080"),
  5902  				Port:     ptr.To[int32](8080),
  5903  				Protocol: ptr.To(v1.ProtocolTCP),
  5904  			}}
  5905  		}),
  5906  	)
  5907  
  5908  	fp.syncProxyRules()
  5909  
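        	// The initial sync writes the full ruleset for both services.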
  5910  	expected := dedent.Dedent(`
  5911  		*filter
  5912  		:KUBE-NODEPORTS - [0:0]
  5913  		:KUBE-SERVICES - [0:0]
  5914  		:KUBE-EXTERNAL-SERVICES - [0:0]
  5915  		:KUBE-FIREWALL - [0:0]
  5916  		:KUBE-FORWARD - [0:0]
  5917  		:KUBE-PROXY-FIREWALL - [0:0]
  5918  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  5919  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  5920  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  5921  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  5922  		COMMIT
  5923  		*nat
  5924  		:KUBE-NODEPORTS - [0:0]
  5925  		:KUBE-SERVICES - [0:0]
  5926  		:KUBE-MARK-MASQ - [0:0]
  5927  		:KUBE-POSTROUTING - [0:0]
  5928  		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
  5929  		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
  5930  		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
  5931  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  5932  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  5933  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 -j KUBE-SVC-2VJB64SDSIJUP5T6
  5934  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  5935  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  5936  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  5937  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  5938  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  5939  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
  5940  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
  5941  		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -s 10.0.2.1 -j KUBE-MARK-MASQ
  5942  		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -m tcp -p tcp -j DNAT --to-destination 10.0.2.1:8080
  5943  		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  5944  		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 -> 10.0.2.1:8080" -j KUBE-SEP-UHEGFW77JX3KXTOV
  5945  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  5946  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
  5947  		COMMIT
  5948  		`)
  5949  	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
  5950  
  5951  	rulesSynced := countRules(logger, utiliptables.TableNAT, expected)
  5952  	rulesSyncedMetric := countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  5953  	if rulesSyncedMetric != rulesSynced {
  5954  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  5955  	}
  5956  
  5957  	rulesTotal := rulesSynced
  5958  	rulesTotalMetric := countRulesFromMetric(logger, utiliptables.TableNAT)
  5959  	if rulesTotalMetric != rulesTotal {
  5960  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  5961  	}
  5962  
  5963  	// Add a new service and its endpoints. (This will only sync the SVC and SEP rules
  5964  	// for the new service, not the existing ones.)
  5965  	makeServiceMap(fp,
  5966  		makeTestService("ns3", "svc3", func(svc *v1.Service) {
  5967  			svc.Spec.Type = v1.ServiceTypeClusterIP
  5968  			svc.Spec.ClusterIP = "172.30.0.43"
  5969  			svc.Spec.Ports = []v1.ServicePort{{
  5970  				Name:     "p80",
  5971  				Port:     80,
  5972  				Protocol: v1.ProtocolTCP,
  5973  			}}
  5974  		}),
  5975  	)
  5976  	var eps3 *discovery.EndpointSlice
  5977  	populateEndpointSlices(fp,
  5978  		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
  5979  			eps3 = eps
  5980  			eps.AddressType = discovery.AddressTypeIPv4
  5981  			eps.Endpoints = []discovery.Endpoint{{
  5982  				Addresses: []string{"10.0.3.1"},
  5983  			}}
  5984  			eps.Ports = []discovery.EndpointPort{{
  5985  				Name:     ptr.To("p80"),
  5986  				Port:     ptr.To[int32](80),
  5987  				Protocol: ptr.To(v1.ProtocolTCP),
  5988  			}}
  5989  		}),
  5990  	)
  5991  	fp.syncProxyRules()
  5992  
  5993  	expected = dedent.Dedent(`
  5994  		*filter
  5995  		:KUBE-NODEPORTS - [0:0]
  5996  		:KUBE-SERVICES - [0:0]
  5997  		:KUBE-EXTERNAL-SERVICES - [0:0]
  5998  		:KUBE-FIREWALL - [0:0]
  5999  		:KUBE-FORWARD - [0:0]
  6000  		:KUBE-PROXY-FIREWALL - [0:0]
  6001  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6002  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6003  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6004  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6005  		COMMIT
  6006  		*nat
  6007  		:KUBE-NODEPORTS - [0:0]
  6008  		:KUBE-SERVICES - [0:0]
  6009  		:KUBE-MARK-MASQ - [0:0]
  6010  		:KUBE-POSTROUTING - [0:0]
  6011  		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
  6012  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6013  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6014  		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 -j KUBE-SVC-2VJB64SDSIJUP5T6
  6015  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6016  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6017  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6018  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6019  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6020  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6021  		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -s 10.0.3.1 -j KUBE-MARK-MASQ
  6022  		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.1:80
  6023  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6024  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.1:80" -j KUBE-SEP-BSWRHOQ77KEXZLNL
  6025  		COMMIT
  6026  		`)
  6027  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6028  
  6029  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6030  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6031  	if rulesSyncedMetric != rulesSynced {
  6032  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6033  	}
  6034  
  6035  	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-X27LE4BHSL4DOUIK rules, and 2
  6036  	// KUBE-SEP-BSWRHOQ77KEXZLNL rules.
  6037  	rulesTotal += 5
  6038  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6039  	if rulesTotalMetric != rulesTotal {
  6040  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6041  	}
  6042  
  6043  	// Delete a service. (Won't update the other services.)
  6044  	fp.OnServiceDelete(svc2)
  6045  	fp.syncProxyRules()
  6046  
  6047  	expected = dedent.Dedent(`
  6048  		*filter
  6049  		:KUBE-NODEPORTS - [0:0]
  6050  		:KUBE-SERVICES - [0:0]
  6051  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6052  		:KUBE-FIREWALL - [0:0]
  6053  		:KUBE-FORWARD - [0:0]
  6054  		:KUBE-PROXY-FIREWALL - [0:0]
  6055  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6056  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6057  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6058  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6059  		COMMIT
  6060  		*nat
  6061  		:KUBE-NODEPORTS - [0:0]
  6062  		:KUBE-SERVICES - [0:0]
  6063  		:KUBE-MARK-MASQ - [0:0]
  6064  		:KUBE-POSTROUTING - [0:0]
  6065  		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
  6066  		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
  6067  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6068  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6069  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6070  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6071  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6072  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6073  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6074  		-X KUBE-SEP-UHEGFW77JX3KXTOV
  6075  		-X KUBE-SVC-2VJB64SDSIJUP5T6
  6076  		COMMIT
  6077  		`)
  6078  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6079  
  6080  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6081  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6082  	if rulesSyncedMetric != rulesSynced {
  6083  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6084  	}
  6085  
  6086  	// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-2VJB64SDSIJUP5T6 rules, and 2
  6087  	// KUBE-SEP-UHEGFW77JX3KXTOV rules
  6088  	rulesTotal -= 5
  6089  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6090  	if rulesTotalMetric != rulesTotal {
  6091  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6092  	}
  6093  
  6094  	// Add a service, sync, then add its endpoints. (The first sync is a no-op apart
  6095  	// from adding the REJECT rule; the second sync writes the service's chains.)
  6096  	var svc4 *v1.Service
  6097  	makeServiceMap(fp,
  6098  		makeTestService("ns4", "svc4", func(svc *v1.Service) {
  6099  			svc4 = svc
  6100  			svc.Spec.Type = v1.ServiceTypeClusterIP
  6101  			svc.Spec.ClusterIP = "172.30.0.44"
  6102  			svc.Spec.Ports = []v1.ServicePort{{
  6103  				Name:     "p80",
  6104  				Port:     80,
  6105  				Protocol: v1.ProtocolTCP,
  6106  			}}
  6107  		}),
  6108  	)
  6109  	fp.syncProxyRules()
  6110  	expected = dedent.Dedent(`
  6111  		*filter
  6112  		:KUBE-NODEPORTS - [0:0]
  6113  		:KUBE-SERVICES - [0:0]
  6114  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6115  		:KUBE-FIREWALL - [0:0]
  6116  		:KUBE-FORWARD - [0:0]
  6117  		:KUBE-PROXY-FIREWALL - [0:0]
  6118  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j REJECT
  6119  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6120  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6121  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6122  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6123  		COMMIT
  6124  		*nat
  6125  		:KUBE-NODEPORTS - [0:0]
  6126  		:KUBE-SERVICES - [0:0]
  6127  		:KUBE-MARK-MASQ - [0:0]
  6128  		:KUBE-POSTROUTING - [0:0]
  6129  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6130  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6131  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6132  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6133  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6134  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6135  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6136  		COMMIT
  6137  		`)
  6138  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6139  
  6140  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6141  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6142  	if rulesSyncedMetric != rulesSynced {
  6143  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6144  	}
  6145  
  6146  	// The REJECT rule is in "filter", not NAT, so the number of NAT rules hasn't
  6147  	// changed.
  6148  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6149  	if rulesTotalMetric != rulesTotal {
  6150  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6151  	}
  6152  
  6153  	populateEndpointSlices(fp,
  6154  		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
  6155  			eps.AddressType = discovery.AddressTypeIPv4
  6156  			eps.Endpoints = []discovery.Endpoint{{
  6157  				Addresses: []string{"10.0.4.1"},
  6158  			}}
  6159  			eps.Ports = []discovery.EndpointPort{{
  6160  				Name:     ptr.To("p80"),
  6161  				Port:     ptr.To[int32](80),
  6162  				Protocol: ptr.To(v1.ProtocolTCP),
  6163  			}}
  6164  		}),
  6165  	)
  6166  	fp.syncProxyRules()
  6167  	expected = dedent.Dedent(`
  6168  		*filter
  6169  		:KUBE-NODEPORTS - [0:0]
  6170  		:KUBE-SERVICES - [0:0]
  6171  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6172  		:KUBE-FIREWALL - [0:0]
  6173  		:KUBE-FORWARD - [0:0]
  6174  		:KUBE-PROXY-FIREWALL - [0:0]
  6175  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6176  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6177  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6178  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6179  		COMMIT
  6180  		*nat
  6181  		:KUBE-NODEPORTS - [0:0]
  6182  		:KUBE-SERVICES - [0:0]
  6183  		:KUBE-MARK-MASQ - [0:0]
  6184  		:KUBE-POSTROUTING - [0:0]
  6185  		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
  6186  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  6187  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6188  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6189  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6190  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6191  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6192  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6193  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6194  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6195  		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -s 10.0.4.1 -j KUBE-MARK-MASQ
  6196  		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.4.1:80
  6197  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6198  		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.0.4.1:80" -j KUBE-SEP-AYCN5HPXMIRJNJXU
  6199  		COMMIT
  6200  		`)
  6201  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6202  
  6203  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6204  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6205  	if rulesSyncedMetric != rulesSynced {
  6206  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6207  	}
  6208  
  6209  	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and
  6210  	// 2 KUBE-SEP-AYCN5HPXMIRJNJXU rules
  6211  	rulesTotal += 5
  6212  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6213  	if rulesTotalMetric != rulesTotal {
  6214  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6215  	}
  6216  
  6217  	// Change an endpoint of an existing service. This will cause its SVC and SEP
  6218  	// chains to be rewritten.
  6219  	eps3update := eps3.DeepCopy()
  6220  	eps3update.Endpoints[0].Addresses[0] = "10.0.3.2"
  6221  	fp.OnEndpointSliceUpdate(eps3, eps3update)
  6222  	fp.syncProxyRules()
  6223  
  6224  	expected = dedent.Dedent(`
  6225  		*filter
  6226  		:KUBE-NODEPORTS - [0:0]
  6227  		:KUBE-SERVICES - [0:0]
  6228  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6229  		:KUBE-FIREWALL - [0:0]
  6230  		:KUBE-FORWARD - [0:0]
  6231  		:KUBE-PROXY-FIREWALL - [0:0]
  6232  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6233  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6234  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6235  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6236  		COMMIT
  6237  		*nat
  6238  		:KUBE-NODEPORTS - [0:0]
  6239  		:KUBE-SERVICES - [0:0]
  6240  		:KUBE-MARK-MASQ - [0:0]
  6241  		:KUBE-POSTROUTING - [0:0]
  6242  		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
  6243  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6244  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6245  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6246  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6247  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6248  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6249  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6250  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6251  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6252  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6253  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6254  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6255  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6256  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -j KUBE-SEP-DKCFIS26GWF2WLWC
  6257  		-X KUBE-SEP-BSWRHOQ77KEXZLNL
  6258  		COMMIT
  6259  		`)
  6260  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6261  
  6262  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6263  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6264  	if rulesSyncedMetric != rulesSynced {
  6265  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6266  	}
  6267  
  6268  	// We rewrote existing rules but did not change the overall number of rules.
  6269  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6270  	if rulesTotalMetric != rulesTotal {
  6271  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6272  	}
  6273  
  6274  	// Add an endpoint to a service. This will cause its SVC and SEP chains to be rewritten.
  6275  	eps3update2 := eps3update.DeepCopy()
  6276  	eps3update2.Endpoints = append(eps3update2.Endpoints, discovery.Endpoint{Addresses: []string{"10.0.3.3"}})
  6277  	fp.OnEndpointSliceUpdate(eps3update, eps3update2)
  6278  	fp.syncProxyRules()
  6279  
  6280  	expected = dedent.Dedent(`
  6281  		*filter
  6282  		:KUBE-NODEPORTS - [0:0]
  6283  		:KUBE-SERVICES - [0:0]
  6284  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6285  		:KUBE-FIREWALL - [0:0]
  6286  		:KUBE-FORWARD - [0:0]
  6287  		:KUBE-PROXY-FIREWALL - [0:0]
  6288  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6289  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6290  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6291  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6292  		COMMIT
  6293  		*nat
  6294  		:KUBE-NODEPORTS - [0:0]
  6295  		:KUBE-SERVICES - [0:0]
  6296  		:KUBE-MARK-MASQ - [0:0]
  6297  		:KUBE-POSTROUTING - [0:0]
  6298  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6299  		:KUBE-SEP-JVVZVJ7BSEPPRNBS - [0:0]
  6300  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6301  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6302  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6303  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6304  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6305  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6306  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6307  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6308  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6309  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6310  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6311  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
  6312  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
  6313  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6314  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
  6315  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
  6316  		COMMIT
  6317  		`)
  6318  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6319  
  6320  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6321  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6322  	if rulesSyncedMetric != rulesSynced {
  6323  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6324  	}
  6325  
  6326  	// We added 2 KUBE-SEP-JVVZVJ7BSEPPRNBS rules and 1 KUBE-SVC-X27LE4BHSL4DOUIK rule
  6327  	// jumping to the new SEP chain. The other rules related to svc3 got rewritten,
  6328  	// but that does not change the count of rules.
  6329  	rulesTotal += 3
  6330  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6331  	if rulesTotalMetric != rulesTotal {
  6332  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6333  	}
  6334  
  6335  	// Sync again with no new changes. This will not rewrite any SVC or SEP chains.
  6336  	fp.syncProxyRules()
  6337  
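        	// On a no-op partial sync only the always-written top-level chains
        	// (KUBE-NODEPORTS, KUBE-SERVICES, KUBE-MARK-MASQ, KUBE-POSTROUTING)
        	// appear in the restore input; no SVC or SEP chains are rewritten.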
  6338  	expected = dedent.Dedent(`
  6339  		*filter
  6340  		:KUBE-NODEPORTS - [0:0]
  6341  		:KUBE-SERVICES - [0:0]
  6342  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6343  		:KUBE-FIREWALL - [0:0]
  6344  		:KUBE-FORWARD - [0:0]
  6345  		:KUBE-PROXY-FIREWALL - [0:0]
  6346  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6347  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6348  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6349  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6350  		COMMIT
  6351  		*nat
  6352  		:KUBE-NODEPORTS - [0:0]
  6353  		:KUBE-SERVICES - [0:0]
  6354  		:KUBE-MARK-MASQ - [0:0]
  6355  		:KUBE-POSTROUTING - [0:0]
  6356  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6357  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6358  		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
  6359  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6360  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6361  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6362  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6363  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6364  		COMMIT
  6365  		`)
  6366  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6367  
  6368  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6369  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6370  	if rulesSyncedMetric != rulesSynced {
  6371  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6372  	}
  6373  
  6374  	// (No changes)
  6375  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6376  	if rulesTotalMetric != rulesTotal {
  6377  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6378  	}
  6379  
  6380  	// Now force a partial resync error and ensure that it recovers correctly
  6381  	if fp.needFullSync {
  6382  		t.Fatalf("Proxier unexpectedly already needs a full sync?")
  6383  	}
  6384  	partialRestoreFailures, err := testutil.GetCounterMetricValue(metrics.IPTablesPartialRestoreFailuresTotal)
  6385  	if err != nil {
  6386  		t.Fatalf("Could not get partial restore failures metric: %v", err)
  6387  	}
  6388  	if partialRestoreFailures != 0.0 {
  6389  		t.Errorf("Partial restore failures metric is already non-zero? Something failed earlier!")
  6390  	}
  6391  
  6392  	// Add a rule jumping from svc3's service chain to svc4's endpoint, then try to
  6393  	// delete svc4. This will fail because the partial resync won't rewrite svc3's
  6394  	// rules and so the partial restore would leave a dangling jump from there to
  6395  	// svc4's endpoint. The proxier will then queue a full resync in response to the
  6396  	// partial resync failure, and the full resync will succeed (since it will rewrite
  6397  	// svc3's rules as well).
  6398  	//
  6399  	// This is an absurd scenario, but it has to be: partial resync failures are
  6400  	// supposed to be impossible, so if we knew of any non-absurd scenario that
  6401  	// could cause such a failure, it would be a bug and we would fix it.
  6402  	if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
  6403  		t.Fatalf("svc4's endpoint chain unexpectedly does not exist!")
  6404  	}
  6405  	if _, err := fp.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.Chain("KUBE-SVC-X27LE4BHSL4DOUIK"), "-j", "KUBE-SEP-AYCN5HPXMIRJNJXU"); err != nil {
  6406  		t.Fatalf("Could not add bad iptables rule: %v", err)
  6407  	}
  6408  
  6409  	fp.OnServiceDelete(svc4)
  6410  	fp.syncProxyRules()
  6411  
  6412  	if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
  6413  		t.Errorf("svc4's endpoint chain was successfully deleted despite dangling references!")
  6414  	}
  6415  	if !fp.needFullSync {
  6416  		t.Errorf("Proxier did not queue a full sync after the failed partial resync!")
  6417  	}
  6418  	updatedPartialRestoreFailures, err := testutil.GetCounterMetricValue(metrics.IPTablesPartialRestoreFailuresTotal)
  6419  	if err != nil {
  6420  		t.Errorf("Could not get partial restore failures metric: %v", err)
  6421  	}
  6422  	if updatedPartialRestoreFailures != partialRestoreFailures+1.0 {
  6423  		t.Errorf("Partial restore failures metric was not incremented after failed partial resync (expected %.02f, got %.02f)", partialRestoreFailures+1.0, updatedPartialRestoreFailures)
  6424  	}
  6425  
  6426  	// On retry we should do a full resync, which should succeed (and delete svc4)
  6427  	fp.syncProxyRules()
  6428  
  6429  	expected = dedent.Dedent(`
  6430  		*filter
  6431  		:KUBE-NODEPORTS - [0:0]
  6432  		:KUBE-SERVICES - [0:0]
  6433  		:KUBE-EXTERNAL-SERVICES - [0:0]
  6434  		:KUBE-FIREWALL - [0:0]
  6435  		:KUBE-FORWARD - [0:0]
  6436  		:KUBE-PROXY-FIREWALL - [0:0]
  6437  		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
  6438  		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
  6439  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
  6440  		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
  6441  		COMMIT
  6442  		*nat
  6443  		:KUBE-NODEPORTS - [0:0]
  6444  		:KUBE-SERVICES - [0:0]
  6445  		:KUBE-MARK-MASQ - [0:0]
  6446  		:KUBE-POSTROUTING - [0:0]
  6447  		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
  6448  		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
  6449  		:KUBE-SEP-JVVZVJ7BSEPPRNBS - [0:0]
  6450  		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
  6451  		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
  6452  		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
  6453  		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
  6454  		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
  6455  		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
  6456  		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
  6457  		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
  6458  		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
  6459  		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
  6460  		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
  6461  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
  6462  		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
  6463  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
  6464  		-A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
  6465  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
  6466  		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
  6467  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6468  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
  6469  		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
  6470  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
  6471  		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
  6472  		-X KUBE-SEP-AYCN5HPXMIRJNJXU
  6473  		-X KUBE-SVC-4SW47YFZTEDKD3PK
  6474  		COMMIT
  6475  		`)
  6476  	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
  6477  
  6478  	rulesSynced = countRules(logger, utiliptables.TableNAT, expected)
  6479  	rulesSyncedMetric = countRulesFromLastSyncMetric(logger, utiliptables.TableNAT)
  6480  	if rulesSyncedMetric != rulesSynced {
  6481  		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
  6482  	}
  6483  
  6484  	// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and 2
  6485  	// KUBE-SEP-AYCN5HPXMIRJNJXU rules
  6486  	rulesTotal -= 5
  6487  	rulesTotalMetric = countRulesFromMetric(logger, utiliptables.TableNAT)
  6488  	if rulesTotalMetric != rulesTotal {
  6489  		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
  6490  	}
  6491  }
  6492  
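        // TestNoEndpointsMetric verifies the sync_proxy_rules_no_local_endpoints_total
        // gauge, which reports, per traffic policy ("internal"/"external"), the number
        // of services with a Local traffic policy that have serving endpoints, none of
        // which are local to this node. (A service with no endpoints at all does not
        // count against the metric.)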
  6493  func TestNoEndpointsMetric(t *testing.T) {
  6494  	type endpoint struct {
  6495  		ip       string
  6496  		hostname string
  6497  	}
  6498  
  6499  	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
  6500  	testCases := []struct {
  6501  		name                                                string
  6502  		internalTrafficPolicy                               *v1.ServiceInternalTrafficPolicy
  6503  		externalTrafficPolicy                               v1.ServiceExternalTrafficPolicy
  6504  		endpoints                                           []endpoint
  6505  		expectedSyncProxyRulesNoLocalEndpointsTotalInternal int
  6506  		expectedSyncProxyRulesNoLocalEndpointsTotalExternal int
  6507  	}{
  6508  		{
  6509  			name:                  "internalTrafficPolicy is set and there are local endpoints",
  6510  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6511  			endpoints: []endpoint{
  6512  				{"10.0.1.1", testHostname},
  6513  				{"10.0.1.2", "host1"},
  6514  				{"10.0.1.3", "host2"},
  6515  			},
  6516  		},
  6517  		{
  6518  			name:                  "externalTrafficPolicy is set and there are local endpoints",
  6519  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6520  			endpoints: []endpoint{
  6521  				{"10.0.1.1", testHostname},
  6522  				{"10.0.1.2", "host1"},
  6523  				{"10.0.1.3", "host2"},
  6524  			},
  6525  		},
  6526  		{
  6527  			name:                  "both policies are set and there are local endpoints",
  6528  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6529  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6530  			endpoints: []endpoint{
  6531  				{"10.0.1.1", testHostname},
  6532  				{"10.0.1.2", "host1"},
  6533  				{"10.0.1.3", "host2"},
  6534  			},
  6535  		},
  6536  		{
  6537  			name:                  "internalTrafficPolicy is set and there are no local endpoints",
  6538  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6539  			endpoints: []endpoint{
  6540  				{"10.0.1.1", "host0"},
  6541  				{"10.0.1.2", "host1"},
  6542  				{"10.0.1.3", "host2"},
  6543  			},
  6544  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  6545  		},
  6546  		{
  6547  			name:                  "externalTrafficPolicy is set and there are no local endpoints",
  6548  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6549  			endpoints: []endpoint{
  6550  				{"10.0.1.1", "host0"},
  6551  				{"10.0.1.2", "host1"},
  6552  				{"10.0.1.3", "host2"},
  6553  			},
  6554  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  6555  		},
  6556  		{
  6557  			name:                  "both policies are set and there are no local endpoints",
  6558  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6559  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6560  			endpoints: []endpoint{
  6561  				{"10.0.1.1", "host0"},
  6562  				{"10.0.1.2", "host1"},
  6563  				{"10.0.1.3", "host2"},
  6564  			},
  6565  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
  6566  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
  6567  		},
  6568  		{
  6569  			name:                  "both policies are set and there are no endpoints at all",
  6570  			internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal),
  6571  			externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
  6572  			endpoints:             []endpoint{},
  6573  			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0,
  6574  			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0,
  6575  		},
  6576  	}
  6577  
  6578  	for _, tc := range testCases {
  6579  		t.Run(tc.name, func(t *testing.T) {
  6580  			ipt := iptablestest.NewFake()
  6581  			fp := NewFakeProxier(ipt)
  6582  			fp.OnServiceSynced()
  6583  			fp.OnEndpointSlicesSynced()
  6584  
  6585  			serviceName := "svc1"
  6586  			namespaceName := "ns1"
  6587  
  6588  			svc := &v1.Service{
  6589  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
  6590  				Spec: v1.ServiceSpec{
  6591  					ClusterIP: "172.30.1.1",
  6592  					Selector:  map[string]string{"foo": "bar"},
  6593  					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP, NodePort: 123}},
  6594  				},
  6595  			}
  6596  			if tc.internalTrafficPolicy != nil {
  6597  				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
  6598  			}
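        			// externalTrafficPolicy is only valid for NodePort/LoadBalancer
        			// services, so switch the type when the test case sets it.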
  6599  			if tc.externalTrafficPolicy != "" {
  6600  				svc.Spec.Type = v1.ServiceTypeNodePort
  6601  				svc.Spec.ExternalTrafficPolicy = tc.externalTrafficPolicy
  6602  			}
  6603  
  6604  			fp.OnServiceAdd(svc)
  6605  
  6606  			endpointSlice := &discovery.EndpointSlice{
  6607  				ObjectMeta: metav1.ObjectMeta{
  6608  					Name:      fmt.Sprintf("%s-1", serviceName),
  6609  					Namespace: namespaceName,
  6610  					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
  6611  				},
  6612  				Ports: []discovery.EndpointPort{{
  6613  					Name:     ptr.To(""),
  6614  					Port:     ptr.To[int32](80),
  6615  					Protocol: ptr.To(v1.ProtocolTCP),
  6616  				}},
  6617  				AddressType: discovery.AddressTypeIPv4,
  6618  			}
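        			// An endpoint is "local" if its NodeName matches the proxier's
        			// hostname (testHostname in these tests).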
  6619  			for _, ep := range tc.endpoints {
  6620  				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
  6621  					Addresses:  []string{ep.ip},
  6622  					Conditions: discovery.EndpointConditions{Ready: ptr.To(true)},
  6623  					NodeName:   ptr.To(ep.hostname),
  6624  				})
  6625  			}
  6626  
  6627  			fp.OnEndpointSliceAdd(endpointSlice)
  6628  			fp.syncProxyRules()
  6629  			syncProxyRulesNoLocalEndpointsTotalInternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal"))
  6630  			if err != nil {
  6631  				t.Errorf("failed to get %s value (internal), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  6632  			}
  6633  
  6634  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal != int(syncProxyRulesNoLocalEndpointsTotalInternal) {
  6635  				t.Errorf("sync_proxy_rules_no_local_endpoints_total metric mismatch (internal): got %d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalInternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal)
  6636  			}
  6637  
  6638  			syncProxyRulesNoLocalEndpointsTotalExternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external"))
  6639  			if err != nil {
  6640  				t.Errorf("failed to get %s value (external), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
  6641  			}
  6642  
  6643  			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal != int(syncProxyRulesNoLocalEndpointsTotalExternal) {
  6644  				t.Errorf("sync_proxy_rules_no_local_endpoints_total metric mismatch (external): got %d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalExternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal)
  6645  			}
  6646  		})
  6647  	}
  6648  }
  6649  
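        // TestLoadBalancerIngressRouteTypeProxy verifies that with the
        // LoadBalancerIPMode feature gate enabled, kube-proxy installs no
        // KUBE-SERVICES rule for a load balancer ingress IP whose ipMode is
        // "Proxy" (the external load balancer forwards such traffic itself),
        // while "VIP" or unset ipMode still gets a rule; with the gate
        // disabled, ipMode is ignored and a rule is always installed.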
  6650  func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
  6651  	testCases := []struct {
  6652  		name          string
  6653  		ipModeEnabled bool
  6654  		svcIP         string
  6655  		svcLBIP       string
  6656  		ipMode        *v1.LoadBalancerIPMode
  6657  		expectedRule  bool
  6658  	}{
  6659  		/* LoadBalancerIPMode disabled */
  6660  		{
  6661  			name:          "LoadBalancerIPMode disabled, ipMode Proxy",
  6662  			ipModeEnabled: false,
  6663  			svcIP:         "10.20.30.41",
  6664  			svcLBIP:       "1.2.3.4",
  6665  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  6666  			expectedRule:  true,
  6667  		},
  6668  		{
  6669  			name:          "LoadBalancerIPMode disabled, ipMode VIP",
  6670  			ipModeEnabled: false,
  6671  			svcIP:         "10.20.30.42",
  6672  			svcLBIP:       "1.2.3.5",
  6673  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  6674  			expectedRule:  true,
  6675  		},
  6676  		{
  6677  			name:          "LoadBalancerIPMode disabled, ipMode nil",
  6678  			ipModeEnabled: false,
  6679  			svcIP:         "10.20.30.43",
  6680  			svcLBIP:       "1.2.3.6",
  6681  			ipMode:        nil,
  6682  			expectedRule:  true,
  6683  		},
  6684  		/* LoadBalancerIPMode enabled */
  6685  		{
  6686  			name:          "LoadBalancerIPMode enabled, ipMode Proxy",
  6687  			ipModeEnabled: true,
  6688  			svcIP:         "10.20.30.41",
  6689  			svcLBIP:       "1.2.3.4",
  6690  			ipMode:        ptr.To(v1.LoadBalancerIPModeProxy),
  6691  			expectedRule:  false,
  6692  		},
  6693  		{
  6694  			name:          "LoadBalancerIPMode enabled, ipMode VIP",
  6695  			ipModeEnabled: true,
  6696  			svcIP:         "10.20.30.42",
  6697  			svcLBIP:       "1.2.3.5",
  6698  			ipMode:        ptr.To(v1.LoadBalancerIPModeVIP),
  6699  			expectedRule:  true,
  6700  		},
  6701  		{
  6702  			name:          "LoadBalancerIPMode enabled, ipMode nil",
  6703  			ipModeEnabled: true,
  6704  			svcIP:         "10.20.30.43",
  6705  			svcLBIP:       "1.2.3.6",
  6706  			ipMode:        nil,
  6707  			expectedRule:  true,
  6708  		},
  6709  	}
  6710  
  6711  	svcPort := 80
  6712  	svcNodePort := 3001
  6713  	svcPortName := proxy.ServicePortName{
  6714  		NamespacedName: makeNSN("ns1", "svc1"),
  6715  		Port:           "p80",
  6716  		Protocol:       v1.ProtocolTCP,
  6717  	}
  6718  
  6719  	for _, testCase := range testCases {
  6720  		t.Run(testCase.name, func(t *testing.T) {
  6721  			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LoadBalancerIPMode, testCase.ipModeEnabled)
  6722  			ipt := iptablestest.NewFake()
  6723  			fp := NewFakeProxier(ipt)
  6724  			makeServiceMap(fp,
  6725  				makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
  6726  					svc.Spec.Type = v1.ServiceTypeLoadBalancer
  6727  					svc.Spec.ClusterIP = testCase.svcIP
  6728  					svc.Spec.Ports = []v1.ServicePort{{
  6729  						Name:     svcPortName.Port,
  6730  						Port:     int32(svcPort),
  6731  						Protocol: v1.ProtocolTCP,
  6732  						NodePort: int32(svcNodePort),
  6733  					}}
  6734  					svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
  6735  						IP:     testCase.svcLBIP,
  6736  						IPMode: testCase.ipMode,
  6737  					}}
  6738  				}),
  6739  			)
  6740  
  6741  			populateEndpointSlices(fp,
  6742  				makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
  6743  					eps.AddressType = discovery.AddressTypeIPv4
  6744  					eps.Endpoints = []discovery.Endpoint{{
  6745  						Addresses: []string{"10.180.0.1"},
  6746  					}}
  6747  					eps.Ports = []discovery.EndpointPort{{
  6748  						Name:     ptr.To("p80"),
  6749  						Port:     ptr.To[int32](80),
  6750  						Protocol: ptr.To(v1.ProtocolTCP),
  6751  					}}
  6752  				}),
  6753  			)
  6754  
  6755  			fp.syncProxyRules()
  6756  
  6757  			c, err := ipt.Dump.GetChain(utiliptables.TableNAT, kubeServicesChain)
        			if err != nil {
        				t.Fatalf("%s chain not found in iptables dump: %v", kubeServicesChain, err)
        			}
  6758  			ruleExists := false
  6759  			for _, r := range c.Rules {
  6760  				if r.DestinationAddress != nil && r.DestinationAddress.Value == testCase.svcLBIP {
  6761  					ruleExists = true
  6762  				}
  6763  			}
  6764  			if ruleExists != testCase.expectedRule {
  6765  				t.Errorf("expected rule for %s: %v, got: %v", testCase.svcLBIP, testCase.expectedRule, ruleExists)
  6766  			}
  6767  		})
  6768  	}
  6769  }