github.com/cilium/cilium@v1.16.2/test/k8s/services.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package k8sTest

import (
	"context"
	"fmt"
	"net"
	"strings"
	"sync"
	"time"

	. "github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"

	. "github.com/cilium/cilium/test/ginkgo-ext"
	"github.com/cilium/cilium/test/helpers"
)

const (
	appServiceName  = "app1-service"
	echoServiceName = "echo"
	echoPodLabel    = "name=echo"
	// echoServiceNameIPv6 = "echo-ipv6"

	testDSClient = "zgroup=testDSClient"
	testDS       = "zgroup=testDS"
	testDSK8s2   = "zgroup=test-k8s2"
)

// The 5.4 CI job is intended to catch BPF complexity regressions and as such
// doesn't need to execute this test suite.
var _ = SkipDescribeIf(helpers.RunsOn54Kernel, "K8sDatapathServicesTest", func() {
	var (
		kubectl        *helpers.Kubectl
		ciliumFilename string
		ni             *helpers.NodesInfo
		err            error
	)

	BeforeAll(func() {
		kubectl = helpers.CreateKubectl(helpers.K8s1VMName(), logger)
		deploymentManager.SetKubectl(kubectl)

		ni, err = helpers.GetNodesInfo(kubectl)
		Expect(err).Should(BeNil(), "Cannot get nodes info")

		ciliumFilename = helpers.TimestampFilename("cilium.yaml")
	})

	AfterFailed(func() {
		kubectl.CiliumReport("cilium-dbg service list", "cilium-dbg endpoint list")
	})

	JustAfterEach(func() {
		kubectl.ValidateNoErrorsInLogs(CurrentGinkgoTestDescription().Duration)
	})

	AfterAll(func() {
		ExpectAllPodsTerminated(kubectl)
		UninstallCiliumFromManifest(kubectl, ciliumFilename)
		kubectl.CloseSSHClient()
	})

	Context("Checks E/W loadbalancing (ClusterIP, NodePort from inside cluster, etc)", func() {
		var yamls []string
		var demoPolicyL7 string

		BeforeAll(func() {
			DeployCiliumAndDNS(kubectl, ciliumFilename)

			toApply := []string{"demo.yaml", "demo_ds.yaml", "echo-svc.yaml", "echo-policy.yaml"}
			if helpers.DualStackSupported() {
				toApply = append(toApply, "demo_v6.yaml", "demo_ds_v6.yaml", "echo-svc_v6.yaml")
				if helpers.DualStackSupportBeta() {
					toApply = append(toApply, "echo_svc_dualstack.yaml")
				}
			}
			for _, fn := range toApply {
				path := helpers.ManifestGet(kubectl.BasePath(), fn)
				kubectl.ApplyDefault(path).ExpectSuccess("Unable to apply %s", path)
				yamls = append(yamls, path)
			}

			// Wait for all pods to be in ready state.
			err := kubectl.WaitforPods(helpers.DefaultNamespace, "", helpers.HelperTimeout)
			Expect(err).Should(BeNil())

			demoPolicyL7 = helpers.ManifestGet(kubectl.BasePath(), "l7-policy-demo.yaml")
		})

		AfterAll(func() {
			wg := sync.WaitGroup{}
			for _, yaml := range yamls {
				wg.Add(1)
				go func(yaml string) {
					defer wg.Done()
					// Ensure that all deployments are fully cleaned up before
					// proceeding to the next test.
					res := kubectl.DeleteAndWait(yaml, true)

					Expect(res.WasSuccessful()).Should(BeTrue(), "Unable to cleanup yaml: %s", yaml)
				}(yaml)
			}
			wg.Wait()
			ExpectAllPodsTerminated(kubectl)
		})

		SkipContextIf(helpers.DoesNotRunWithKubeProxyReplacement, "Checks in-cluster KPR", func() {
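			// HealthCheckNodePort is the extra port that Kubernetes allocates
			// for services with externalTrafficPolicy=Local; an HTTP GET on it
			// reports whether the node has any local backends. The test below
			// verifies that Cilium's kube-proxy replacement serves it.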
			It("Tests HealthCheckNodePort", func() {
				testHealthCheckNodePort(kubectl, ni)
			})

			It("Tests that binding to NodePort port fails", func() {
				testFailBind(kubectl, ni)
			})

			SkipContextIf(helpers.RunsOnAKS, "with L7 policy", func() {
				AfterAll(func() {
					kubectl.Delete(demoPolicyL7)
					// Remove CT entries to avoid packet drops which could happen
					// due to matching stale entries with proxy_redirect = 1
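					// once the L7 policy is gone. A stale entry would keep
					// redirecting packets to the now-removed proxy and drop
					// them.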
					kubectl.CiliumExecMustSucceedOnAll(context.TODO(),
						"cilium-dbg bpf ct flush global", "Unable to flush CT maps")
				})

				It("Tests NodePort with L7 Policy", func() {
					applyPolicy(kubectl, demoPolicyL7)
					testNodePort(kubectl, ni, false, false, 0)
				})
			})
		})

		// The test is relevant only for bpf_lxc LB, while bpf_sock (KPR enabled)
		// doesn't require any special handling for hairpin service flows.
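		// A hairpin flow arises when a pod reaches a service that selects the
		// pod itself: the request is load-balanced back to its sender, so the
		// datapath has to NAT both directions for the replies to be accepted.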
		SkipItIf(helpers.RunsWithKubeProxyReplacement, "Checks service accessing itself (hairpin flow)", func() {
			serviceNames := []string{echoServiceName}
			// Hairpin flow mode is currently not supported for IPv6.
			// TODO: Uncomment after https://github.com/cilium/cilium/pull/14138 is merged
			// if helpers.DualStackSupported() {
			// 	serviceNames = append(serviceNames, echoServiceNameIPv6)
			// }

			for _, svcName := range serviceNames {
				clusterIP, _, err := kubectl.GetServiceHostPort(helpers.DefaultNamespace, svcName)
				Expect(err).Should(BeNil(), "Cannot get service %q ClusterIP", svcName)
				Expect(net.ParseIP(clusterIP) != nil).Should(BeTrue(), "ClusterIP is not an IP")

				url := fmt.Sprintf("http://%s/", net.JoinHostPort(clusterIP, "80"))
				testCurlFromPods(kubectl, echoPodLabel, url, 10, 0)
				url = fmt.Sprintf("tftp://%s/hello", net.JoinHostPort(clusterIP, "69"))
				testCurlFromPods(kubectl, echoPodLabel, url, 10, 0)
			}

		}, 600)

		SkipContextIf(func() bool {
			return helpers.RunsWithKubeProxyReplacement()
		}, "Tests NodePort inside cluster (kube-proxy)", func() {
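			// With externalTrafficPolicy=Local, a node only forwards to
			// backends running on itself (no SNAT, so the client IP is
			// preserved); the variants below re-deploy Cilium with IPSec
			// encryption and with the host firewall to check that the same
			// semantics hold.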
			It("with IPSec and externalTrafficPolicy=Local", func() {
				deploymentManager.SetKubectl(kubectl)
				deploymentManager.Deploy(helpers.CiliumNamespace, IPSecSecret)
				DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
					"encryption.enabled": "true",
				})
				testExternalTrafficPolicyLocal(kubectl, ni)
				deploymentManager.DeleteAll()
				deploymentManager.DeleteCilium()
			})

			It("with the host firewall and externalTrafficPolicy=Local", func() {
				DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
					"hostFirewall.enabled": "true",
				})
				testExternalTrafficPolicyLocal(kubectl, ni)
			})

			It("with externalTrafficPolicy=Local", func() {
				DeployCiliumAndDNS(kubectl, ciliumFilename)
				testExternalTrafficPolicyLocal(kubectl, ni)
			})

			It("vanilla", func() {
				testNodePort(kubectl, ni, false, false, 0)
			})
		})

		SkipContextIf(func() bool { return helpers.RunsWithKubeProxyReplacement() || helpers.RunsOnAKS() }, "TFTP with DNS Proxy port collision", func() {
			var (
				demoPolicy    string
				ciliumPodK8s1 string
				ciliumPodK8s2 string
				DNSProxyPort1 int
				DNSProxyPort2 int
			)

			BeforeAll(func() {
				var err error
				ciliumPodK8s1, err = kubectl.GetCiliumPodOnNode(helpers.K8s1)
				Expect(err).Should(BeNil(), "Cannot get cilium pod on %s", helpers.K8s1)
				ciliumPodK8s2, err = kubectl.GetCiliumPodOnNode(helpers.K8s2)
				Expect(err).Should(BeNil(), "Cannot get cilium pod on %s", helpers.K8s2)

				// Find out the DNS proxy ports in use
				DNSProxyPort1 = kubectl.GetDNSProxyPort(ciliumPodK8s1)
				By("DNS Proxy port in k8s1 (%s): %d", ciliumPodK8s1, DNSProxyPort1)
				DNSProxyPort2 = kubectl.GetDNSProxyPort(ciliumPodK8s2)
				By("DNS Proxy port in k8s2 (%s): %d", ciliumPodK8s2, DNSProxyPort2)

				demoPolicy = helpers.ManifestGet(kubectl.BasePath(), "l4-policy-demo.yaml")
			})

			AfterAll(func() {
				kubectl.Delete(demoPolicy)
			})

			It("Tests TFTP from DNS Proxy Port", func() {
				if DNSProxyPort2 == DNSProxyPort1 {
					Skip(fmt.Sprintf("TFTP source port test cannot be done when both nodes have the same proxy port (%d == %d)", DNSProxyPort1, DNSProxyPort2))
				}

				applyPolicy(kubectl, demoPolicy)

				var data v1.Service
				err := kubectl.Get(helpers.DefaultNamespace, "service test-nodeport").Unmarshal(&data)
				Expect(err).Should(BeNil(), "Cannot retrieve service")

				// Since we address NodePort in k8s2 using the DNS proxy port of k8s2 as
				// the source port from k8s1, one round is enough regardless of the backend
				// selection, as in both cases the replies are reverse NATted at k8s2.
				count := 1
				fails := 0
				// Client from k8s1
				clientPod, _ := kubectl.GetPodOnNodeLabeledWithOffset(helpers.K8s1, testDSClient, 0)
				// Destination is a NodePort in k8s2, curl (in k8s1) binding to the same local port as the DNS proxy port
				// in k8s2
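				// If the reply were matched against k8s2's DNS proxy rules
				// (same node, same port), the proxy would swallow it and the
				// transfer would fail; a successful fetch shows the datapath
				// keeps service replies and proxy traffic apart. Roughly:
				//   curl tftp://<k8s2>:<NodePort>/hello --local-port <DNSProxyPort2>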
				url := getTFTPLink(ni.K8s2IP, data.Spec.Ports[1].NodePort) + fmt.Sprintf(" --local-port %d", DNSProxyPort2)
				cmd := testCommand(helpers.CurlFailNoStats(url), count, fails)
				By("Making %d curl requests from %s pod to service %s using source port %d", count, clientPod, url, DNSProxyPort2)
				res := kubectl.ExecPodCmd(helpers.DefaultNamespace, clientPod, cmd)
				Expect(res).Should(helpers.CMDSuccess(), "Request from %s pod to service %s failed", clientPod, url)

				if helpers.DualStackSupported() {
					err := kubectl.Get(helpers.DefaultNamespace, "service test-nodeport-ipv6").Unmarshal(&data)
					Expect(err).Should(BeNil(), "Cannot retrieve service")

					// Client from k8s1
					clientPod, _ := kubectl.GetPodOnNodeLabeledWithOffset(helpers.K8s1, testDSClient, 0)
					// Destination is a NodePort in k8s2, curl (in k8s1) binding to the same local port as the DNS proxy port
					// in k8s2
					url := getTFTPLink(ni.PrimaryK8s2IPv6, data.Spec.Ports[1].NodePort) + fmt.Sprintf(" --local-port %d", DNSProxyPort2)
					cmd := testCommand(helpers.CurlFailNoStats(url), count, fails)
					By("Making %d curl requests from %s pod to service %s using source port %d", count, clientPod, url, DNSProxyPort2)
					res := kubectl.ExecPodCmd(helpers.DefaultNamespace, clientPod, cmd)
					Expect(res).Should(helpers.CMDSuccess(), "Request from %s pod to service %s failed", clientPod, url)
				}
			})
		})

		SkipContextIf(func() bool {
			return helpers.RunsWithKubeProxyReplacement()
		}, "with L4 policy", func() {
			var (
				demoPolicy string
			)

			BeforeAll(func() {
				demoPolicy = helpers.ManifestGet(kubectl.BasePath(), "l4-policy-demo.yaml")
			})

			AfterAll(func() {
				kubectl.Delete(demoPolicy)
			})

			It("Tests NodePort with L4 Policy", func() {
				applyPolicy(kubectl, demoPolicy)
				testNodePort(kubectl, ni, false, false, 0)
			})
		})

		SkipContextIf(func() bool {
			return helpers.RunsWithKubeProxyReplacement() || helpers.RunsOnAKS()
		}, "with L7 policy", func() {
			var demoPolicyL7 string

			BeforeAll(func() {
				demoPolicyL7 = helpers.ManifestGet(kubectl.BasePath(), "l7-policy-demo.yaml")
			})

			AfterAll(func() {
				kubectl.Delete(demoPolicyL7)
				// Same reason as in other L7 test above
				kubectl.CiliumExecMustSucceedOnAll(context.TODO(),
					"cilium-dbg bpf ct flush global", "Unable to flush CT maps")
			})

			It("Tests NodePort with L7 Policy", func() {
				applyPolicy(kubectl, demoPolicyL7)
				testNodePort(kubectl, ni, false, false, 0)
			})
		})
	})

	SkipContextIf(func() bool {
		return helpers.DoesNotRunWithKubeProxyReplacement() ||
			helpers.DoesNotExistNodeWithoutCilium()
	}, "Checks N/S loadbalancing", func() {
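		// North/south load balancing handles traffic entering the cluster
		// from outside, as opposed to the east/west (in-cluster) paths above.
		// These tests need kube-proxy replacement plus a node without Cilium
		// to play the external client.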
		var yamls []string

		BeforeAll(func() {
			DeployCiliumAndDNS(kubectl, ciliumFilename)

			toApply := []string{"demo.yaml", "demo_ds.yaml", "echo-svc.yaml"}
			if helpers.DualStackSupported() {
				toApply = append(toApply, "demo_ds_v6.yaml")
			}
			for _, fn := range toApply {
				path := helpers.ManifestGet(kubectl.BasePath(), fn)
				kubectl.ApplyDefault(path).ExpectSuccess("Unable to apply %s", path)
				yamls = append(yamls, path)
			}

			By(`Connectivity config:: helpers.DualStackSupported(): %v
Primary Interface %s   :: IPv4: (%s, %s), IPv6: (%s, %s)
Secondary Interface %s :: IPv4: (%s, %s), IPv6: (%s, %s)`,
				helpers.DualStackSupported(), ni.PrivateIface,
				ni.K8s1IP, ni.K8s2IP, ni.PrimaryK8s1IPv6, ni.PrimaryK8s2IPv6,
				helpers.SecondaryIface, ni.SecondaryK8s1IPv4, ni.SecondaryK8s2IPv4,
				ni.SecondaryK8s1IPv6, ni.SecondaryK8s2IPv6)

			// Wait for all pods to be in ready state.
			err := kubectl.WaitforPods(helpers.DefaultNamespace, "", helpers.HelperTimeout)
			Expect(err).Should(BeNil())
		})

		AfterAll(func() {
			for _, yaml := range yamls {
				kubectl.Delete(yaml)
			}
			ExpectAllPodsTerminated(kubectl)
		})

		It("Tests NodePort with sessionAffinity from outside", func() {
			testSessionAffinity(kubectl, ni, true, true)
		})

		It("Tests externalIPs", func() {
			testExternalIPs(kubectl, ni)
		})

		It("Tests GH#10983", func() {
			var data v1.Service

			// We need two NodePort services with the same single endpoint,
			// thus we choose the "test-nodeport{-local,}-k8s2" svcs.
			// Both svcs will be accessed via the k8s2 node, because
			// "test-nodeport-local-k8s2" has the local external traffic
			// policy.
			err := kubectl.Get(helpers.DefaultNamespace, "svc test-nodeport-local-k8s2").Unmarshal(&data)
			Expect(err).Should(BeNil(), "Cannot retrieve service")
			svc1URL := getHTTPLink(ni.K8s2IP, data.Spec.Ports[0].NodePort)
			err = kubectl.Get(helpers.DefaultNamespace, "svc test-nodeport-k8s2").Unmarshal(&data)
			Expect(err).Should(BeNil(), "Cannot retrieve service")
			svc2URL := getHTTPLink(ni.K8s2IP, data.Spec.Ports[0].NodePort)

			// Send two requests from the same src IP and port to the endpoint
			// via two different NodePort svcs to trigger the stale conntrack
			// entry issue. Once it's fixed, the second request should not
			// fail.
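			// On the backend side both flows rev-NAT to the same endpoint,
			// so they collide on the same tuple. Schematically:
			//   client:64002 -> svc1 NodePort -> endpoint  (creates CT entry)
			//   client:64002 -> svc2 NodePort -> endpoint  (must not match a
			//   stale entry left by the first request)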
			testCurlFromOutsideWithLocalPort(kubectl, ni, svc1URL, 1, false, 64002)
			time.Sleep(120 * time.Second) // to reuse the source port
			testCurlFromOutsideWithLocalPort(kubectl, ni, svc2URL, 1, false, 64002)
		})

		It("Tests security id propagation in N/S LB requests fwd-ed over tunnel", func() {
			// This test case checks whether the "world" identity is passed in
			// the encapsulated N/S LB requests which are forwarded to the node
			// running the service endpoint. The check is performed by installing
			// a network policy which disallows traffic to the service endpoints
			// from outside.
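			// Over the tunnel, the source security identity is carried in the
			// encapsulation header, so the node hosting the endpoint can still
			// attribute the request to the "world" identity and apply the
			// policy, even though the packet arrives from a peer node.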

			var netpol string

			// "test-nodeport-k8s2" is the svc with the single endpoint running
			// on the "k8s2". We will send requests via the "k8s1", so that we
			// can test the forwarding. In addition, we will send requests via
			// the "k8s2" to test whether the policy enforcement works as
			// expected in the "backend local" case.
			var data v1.Service
			err := kubectl.Get(helpers.DefaultNamespace, "service test-nodeport-k8s2").Unmarshal(&data)
			Expect(err).Should(BeNil(), "Cannot retrieve service")
			svcAddrs := []string{
				getHTTPLink(ni.K8s1IP, data.Spec.Ports[0].NodePort),
				getHTTPLink(ni.K8s2IP, data.Spec.Ports[0].NodePort),
			}
			if helpers.DualStackSupported() {
				err := kubectl.Get(helpers.DefaultNamespace, "service test-nodeport-k8s2-ipv6").Unmarshal(&data)
				Expect(err).Should(BeNil(), "Cannot retrieve service")
				svcAddrs = append(svcAddrs,
					getHTTPLink(ni.PrimaryK8s1IPv6, data.Spec.Ports[0].NodePort),
					getHTTPLink(ni.PrimaryK8s2IPv6, data.Spec.Ports[0].NodePort))
			}

			// No policy is applied yet, so no request should be dropped.
			for _, addr := range svcAddrs {
				testCurlFromOutside(kubectl, ni, addr, 1, false)
			}

			netpol = helpers.ManifestGet(kubectl.BasePath(), "netpol-deny-ns-lb-test-k8s2.yaml")
			_, err = kubectl.CiliumClusterwidePolicyAction(netpol,
				helpers.KubectlApply, helpers.HelperTimeout)
			Expect(err).Should(BeNil(), "Policy %s cannot be applied", netpol)

			defer func() {
				_, err := kubectl.CiliumClusterwidePolicyAction(netpol,
					helpers.KubectlDelete, helpers.HelperTimeout)
				Expect(err).Should(BeNil(), "Policy %s cannot be deleted", netpol)
			}()

			// With the policy applied, all requests should fail.
			for _, addr := range svcAddrs {
				testCurlFailFromOutside(kubectl, ni, addr, 1)
			}
		})

		It("Tests with direct routing and DSR", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.mode":    "dsr",
				"routingMode":          "native",
				"autoDirectNodeRoutes": "true",
			})

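			// With DSR (direct server return) the backend node answers the
			// external client directly instead of detouring the reply through
			// the node that first received the request, preserving the
			// client's source address end to end.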
			testDSR(kubectl, ni, ni.K8s1IP, "service test-nodeport-k8s2", 64000)
			if helpers.DualStackSupported() {
				testDSR(kubectl, ni, ni.PrimaryK8s1IPv6, "service test-nodeport-k8s2-ipv6", 64001)
			}
			testNodePortExternal(kubectl, ni, false, true, true)
		})

		It("Tests with XDP, direct routing, SNAT and Random", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "snat",
				"loadBalancer.algorithm":    "random",
				"routingMode":               "native",
				"autoDirectNodeRoutes":      "true",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
			})
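			// "testing-only" enables XDP in generic (skb) mode, which runs on
			// the CI VMs' virtual NICs without native XDP driver support; the
			// load-balancing logic exercised is the same as with native XDP
			// acceleration.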
			testNodePortExternal(kubectl, ni, false, false, false)
		})

		It("Tests with XDP, vxlan tunnel, SNAT and Random", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "snat",
				"loadBalancer.algorithm":    "random",
				"tunnelProtocol":            "vxlan",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
			})
			testNodePortExternal(kubectl, ni, false, false, false)
		})

		It("Tests with XDP, direct routing, SNAT and Maglev", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "snat",
				"loadBalancer.algorithm":    "maglev",
				"maglev.tableSize":          "251",
				"routingMode":               "native",
				"autoDirectNodeRoutes":      "true",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
				// Support for host firewall + Maglev is currently broken,
				// see #14047 for details.
				"hostFirewall.enabled": "false",
			})

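			// Maglev consistent hashing picks backends through a fixed-size
			// lookup table; the table size must be a prime (251 here), and
			// the scheme keeps backend selection stable when backends are
			// added or removed.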
			testMaglev(kubectl, ni)
			testNodePortExternal(kubectl, ni, false, false, false)
		})

		It("Tests with XDP, direct routing, Hybrid and Random", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "hybrid",
				"loadBalancer.algorithm":    "random",
				"routingMode":               "native",
				"autoDirectNodeRoutes":      "true",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
			})
			testNodePortExternal(kubectl, ni, false, true, false)
		})

		It("Tests with XDP, direct routing, Hybrid and Maglev", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "hybrid",
				"loadBalancer.algorithm":    "maglev",
				"maglev.tableSize":          "251",
				"routingMode":               "native",
				"autoDirectNodeRoutes":      "true",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
				// Support for host firewall + Maglev is currently broken,
				// see #14047 for details.
				"hostFirewall.enabled": "false",
			})
			testNodePortExternal(kubectl, ni, false, true, false)
		})

		It("Tests with XDP, direct routing, DSR and Random", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "dsr",
				"loadBalancer.algorithm":    "random",
				"routingMode":               "native",
				"autoDirectNodeRoutes":      "true",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
			})
			testNodePortExternal(kubectl, ni, false, true, true)
		})

		It("Tests with XDP, direct routing, DSR and Maglev", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "dsr",
				"loadBalancer.algorithm":    "maglev",
				"maglev.tableSize":          "251",
				"routingMode":               "native",
				"autoDirectNodeRoutes":      "true",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
				// Support for host firewall + Maglev is currently broken,
				// see #14047 for details.
				"hostFirewall.enabled": "false",
			})
			testNodePortExternal(kubectl, ni, false, true, true)
		})

		It("Tests with XDP, direct routing, DSR with Geneve and Maglev", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "testing-only",
				"loadBalancer.mode":         "dsr",
				"loadBalancer.algorithm":    "maglev",
				"maglev.tableSize":          "251",
				"routingMode":               "native",
				"tunnelProtocol":            "geneve",
				"autoDirectNodeRoutes":      "true",
				"loadBalancer.dsrDispatch":  "geneve",
				"devices":                   fmt.Sprintf(`'{%s}'`, ni.PrivateIface),
				// Support for host firewall + Maglev is currently broken,
				// see #14047 for details.
				"hostFirewall.enabled": "false",
			})
			testNodePortExternal(kubectl, ni, false, true, true)
		})

		It("Tests with TC, direct routing and Hybrid", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "disabled",
				"loadBalancer.mode":         "hybrid",
				"loadBalancer.algorithm":    "random",
				"routingMode":               "native",
				"autoDirectNodeRoutes":      "true",
				"devices":                   "'{}'", // Revert back to auto-detection after XDP.
			})
			testNodePortExternal(kubectl, ni, false, true, false)
		})

		It("Tests with TC, direct routing and DSR with Geneve", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "disabled",
				"loadBalancer.mode":         "dsr",
				"loadBalancer.algorithm":    "maglev",
				"maglev.tableSize":          "251",
				"routingMode":               "native",
				"tunnelProtocol":            "geneve",
				"autoDirectNodeRoutes":      "true",
				"loadBalancer.dsrDispatch":  "geneve",
				"devices":                   "'{}'", // Revert back to auto-detection after XDP.
			})
			testNodePortExternal(kubectl, ni, false, true, true)
		})

		It("Tests with TC, direct routing and Hybrid-DSR with Geneve", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "disabled",
				"loadBalancer.mode":         "hybrid",
				"loadBalancer.algorithm":    "random",
				"routingMode":               "native",
				"tunnelProtocol":            "geneve",
				"autoDirectNodeRoutes":      "true",
				"loadBalancer.dsrDispatch":  "geneve",
				"devices":                   "'{}'",
			})
			testNodePortExternal(kubectl, ni, false, true, false)
		})

		It("Tests with TC, geneve tunnel, DSR and Maglev", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "disabled",
				"loadBalancer.mode":         "dsr",
				"loadBalancer.algorithm":    "maglev",
				"maglev.tableSize":          "251",
				"tunnelProtocol":            "geneve",
				"loadBalancer.dsrDispatch":  "geneve",
				"devices":                   "'{}'", // Revert back to auto-detection after XDP.
			})
			testNodePortExternal(kubectl, ni, false, true, true)
		})

		It("Tests with TC, geneve tunnel, and Hybrid-DSR with Geneve", func() {
			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
				"loadBalancer.acceleration": "disabled",
				"loadBalancer.mode":         "hybrid",
				"loadBalancer.algorithm":    "random",
				"tunnelProtocol":            "geneve",
				"loadBalancer.dsrDispatch":  "geneve",
				"devices":                   "'{}'",
			})
			testNodePortExternal(kubectl, ni, false, true, false)
		})

		It("Supports IPv4 fragments", func() {
			options := map[string]string{}
			// On GKE we need to disable endpoint routes as fragment tracking
			// isn't compatible with that option. See #15958.
			if helpers.RunsOnGKE() {
				options["gke.enabled"] = "false"
				options["routingMode"] = "native"
			}

			DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, options)

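			// Only the first fragment of an IPv4 datagram carries the L4
			// header, so the datapath must track fragments to map follow-up
			// fragments onto the right service flow. Conntrack accounting is
			// enabled below so per-flow packet counters can be checked.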
			cmd := fmt.Sprintf("cilium-dbg config %s=%s", helpers.OptionConntrackAccounting, helpers.OptionEnabled)
			kubectl.CiliumExecMustSucceedOnAll(context.TODO(), cmd, "Unable to enable ConntrackAccounting option")
			kubectl.CiliumPreFlightCheck()
			testIPv4FragmentSupport(kubectl, ni)
		})

		SkipContextIf(helpers.RunsOnGKE, "With host policy", func() {
			hostPolicyFilename := "ccnp-host-policy-nodeport-tests.yaml"
			var ccnpHostPolicy string

			BeforeAll(func() {
				DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
					"hostFirewall.enabled": "true",
				})

				originalCCNPHostPolicy := helpers.ManifestGet(kubectl.BasePath(), hostPolicyFilename)
				res := kubectl.ExecMiddle("mktemp")
				res.ExpectSuccess()
				ccnpHostPolicy = strings.Trim(res.Stdout(), "\n")
				nodeIP, err := kubectl.GetNodeIPByLabel(kubectl.GetFirstNodeWithoutCiliumLabel(), false)
				Expect(err).Should(BeNil())
				kubectl.ExecMiddle(fmt.Sprintf("sed 's/NODE_WITHOUT_CILIUM_IP/%s/' %s > %s",
					nodeIP, originalCCNPHostPolicy, ccnpHostPolicy)).ExpectSuccess()

				prepareHostPolicyEnforcement(kubectl, ccnpHostPolicy)

				_, err = kubectl.CiliumClusterwidePolicyAction(ccnpHostPolicy,
					helpers.KubectlApply, helpers.HelperTimeout)
				Expect(err).Should(BeNil(),
					"Policy %s cannot be applied", ccnpHostPolicy)
			})

			AfterAll(func() {
				_, err := kubectl.CiliumClusterwidePolicyAction(ccnpHostPolicy,
					helpers.KubectlDelete, helpers.HelperTimeout)
				Expect(err).Should(BeNil(),
					"Policy %s cannot be deleted", ccnpHostPolicy)

				DeployCiliumAndDNS(kubectl, ciliumFilename)
			})

			It("Tests NodePort", func() {
				testNodePort(kubectl, ni, true, true, 0)
			})
		})

		It("ClusterIP cannot be accessed externally when access is disabled",
			func() {
				Expect(curlClusterIPFromExternalHost(kubectl, ni)).
					ShouldNot(helpers.CMDSuccess(),
						"External host %s unexpectedly connected to ClusterIP when lbExternalClusterIP was unset", ni.OutsideNodeName)
			})

		Context("With ClusterIP external access", func() {
			var (
				svcIP string
			)
			BeforeAll(func() {
				DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
					"bpf.lbExternalClusterIP": "true",
					// Enable Maglev to check if the Maglev LUT for ClusterIP is properly populated,
					// and external clients can access ClusterIP with it.
					"loadBalancer.algorithm": "maglev",
				})
				clusterIP, _, err := kubectl.GetServiceHostPort(helpers.DefaultNamespace, appServiceName)
				svcIP = clusterIP
				Expect(err).Should(BeNil(), "Cannot get service %s", appServiceName)
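				// A ClusterIP is not routable from outside the cluster, so
				// add a static route for it via k8s1 on the external node;
				// with bpf.lbExternalClusterIP=true, k8s1's datapath will
				// then load-balance the externally originated requests.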
				res := kubectl.AddIPRoute(ni.OutsideNodeName, svcIP, ni.K8s1IP, false)
				Expect(res).Should(helpers.CMDSuccess(), "Error adding IP route for %s via %s", svcIP, ni.K8s1IP)
			})

			AfterAll(func() {
				res := kubectl.DelIPRoute(ni.OutsideNodeName, svcIP, ni.K8s1IP)
				Expect(res).Should(helpers.CMDSuccess(), "Error removing IP route for %s via %s", svcIP, ni.K8s1IP)
			})

			It("ClusterIP can be accessed when external access is enabled", func() {
				Expect(curlClusterIPFromExternalHost(kubectl, ni)).
					Should(helpers.CMDSuccess(), "Could not curl ClusterIP %s from external host", svcIP)
			})
		})
	})

	SkipContextIf(
		func() bool {
			return helpers.RunsWithKubeProxy() || helpers.DoesNotExistNodeWithoutCilium()
		},
		"Checks device reconfiguration",
		func() {
			var (
				demoYAML string
			)
			const (
				ipv4VXLANK8s1    = "192.168.254.1"
				ipv4VXLANOutside = "192.168.254.2"
			)

			BeforeAll(func() {
				demoYAML = helpers.ManifestGet(kubectl.BasePath(), "demo_ds.yaml")

				DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
					"devices": "",
				})

				res := kubectl.ApplyDefault(demoYAML)
				Expect(res).Should(helpers.CMDSuccess(), "Unable to apply %s", demoYAML)
				waitPodsDs(kubectl, []string{testDS})

				// Setup a pair of vxlan devices between k8s1 and the outside node.
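				// Both ends share VNI 1 and a dedicated 192.168.254.0/24
				// subnet, yielding a brand-new device on k8s1 after the agent
				// has already started. Assuming the AddVXLAN helper wraps
				// something along the lines of:
				//   ip link add vxlan1 type vxlan id 1 remote <peer> dev <iface>
				//   ip addr add <addr>/24 dev vxlan1 && ip link set vxlan1 up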
				devOutside, err := kubectl.GetPrivateIface(ni.OutsideNodeName)
				Expect(err).Should(BeNil(), "Cannot get private interface for %s", ni.OutsideNodeName)

				devK8s1, err := kubectl.GetPrivateIface(helpers.K8s1)
				Expect(err).Should(BeNil(), "Cannot get private interface for %s", helpers.K8s1)

				res = kubectl.AddVXLAN(ni.OutsideNodeName, ni.K8s1IP, devOutside, ipv4VXLANOutside+"/24", 1)
				Expect(res).Should(helpers.CMDSuccess(), "Error adding VXLAN device for outside node")

				res = kubectl.AddVXLAN(ni.K8s1NodeName, ni.OutsideIP, devK8s1, ipv4VXLANK8s1+"/24", 1)
				Expect(res).Should(helpers.CMDSuccess(), "Error adding VXLAN device for k8s1")
			})

			AfterAll(func() {
				_ = kubectl.Delete(demoYAML)
				ExpectAllPodsTerminated(kubectl)

				res := kubectl.DelVXLAN(ni.K8s1NodeName, 1)
				Expect(res).Should(helpers.CMDSuccess(), "Error removing vxlan1 from k8s1")
				res = kubectl.DelVXLAN(ni.OutsideNodeName, 1)
				Expect(res).Should(helpers.CMDSuccess(), "Error removing vxlan1 from outside node")
			})

			It("Detects newly added device and reloads datapath", func() {
				var data v1.Service
				err := kubectl.Get(helpers.DefaultNamespace, "svc test-nodeport").Unmarshal(&data)
				Expect(err).Should(BeNil(), "Cannot retrieve service test-nodeport")
				url := getHTTPLink(ipv4VXLANK8s1, data.Spec.Ports[0].NodePort)

				// Try accessing the NodePort service from the external node over the VXLAN tunnel.
				// We're expecting Cilium to detect the vxlan1 interface and reload the datapath,
				// allowing us to access NodePort services.
				// Note that this can be quite slow due to datapath recompilation!
				Eventually(
					func() bool {
						res := kubectl.ExecInHostNetNS(
							context.TODO(), ni.OutsideNodeName,
							helpers.CurlFail(url))
						return res.WasSuccessful()
					},
					60*time.Second, 1*time.Second,
				).Should(BeTrue(), "Could not curl NodePort service over newly added device")
			})
		})
})