k8s.io/kubernetes@v1.29.3/test/e2e/network/conntrack.go

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package network
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"strings"
    23  	"time"
    24  
    25  	v1 "k8s.io/api/core/v1"
    26  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    27  	"k8s.io/apimachinery/pkg/util/intstr"
    28  	"k8s.io/apimachinery/pkg/util/wait"
    29  	clientset "k8s.io/client-go/kubernetes"
    30  	"k8s.io/kubernetes/test/e2e/framework"
    31  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    32  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    33  	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
    34  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    35  	"k8s.io/kubernetes/test/e2e/network/common"
    36  	imageutils "k8s.io/kubernetes/test/utils/image"
    37  	admissionapi "k8s.io/pod-security-admission/api"
    38  
    39  	"github.com/onsi/ginkgo/v2"
    40  	"github.com/onsi/gomega"
    41  )
    42  
    43  const (
    44  	serviceName = "svc-udp"
    45  	podClient   = "pod-client"
    46  	podBackend1 = "pod-server-1"
    47  	podBackend2 = "pod-server-2"
    48  	srcPort     = 12345
    49  )
    50  
    51  // Linux NAT uses conntrack to perform NAT: every time a new
    52  // flow is seen, an entry is created in the conntrack table, and it
    53  // is then used by the NAT module.
    54  // Each entry in the conntrack table has an associated timeout that removes
    55  // the entry once it expires.
    56  // UDP is a connectionless protocol, so the conntrack module's tracking functions
    57  // for it are not very advanced.
    58  // It uses a short timeout (30 sec by default) that is renewed if new flows
    59  // match the entry; otherwise the entry expires.
    60  // This behaviour can cause issues in Kubernetes when a conntrack entry
    61  // never expires because the sender keeps sending traffic while the pods or
    62  // endpoints it points to have been deleted, blackholing the traffic.
    63  // To mitigate this problem, Kubernetes deletes the stale entries:
    64  // - when an endpoint is removed
    65  // - when a service goes from no endpoints to at least one endpoint
    66  
    67  // Ref: https://api.semanticscholar.org/CorpusID:198903401
    68  // Boye, Magnus. "Netfilter Connection Tracking and NAT Implementation." (2012).
    69  
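// Illustrative sketch (not part of this test): the stale-entry cleanup described
// above is conceptually equivalent to deleting the matching UDP conntrack entries
// for the service IP. The helper name and the use of os/exec to shell out to the
// conntrack CLI are assumptions for illustration only, not the actual kube-proxy
// implementation.
//
//	// flushUDPEntries deletes conntrack entries whose original (pre-NAT)
//	// destination is ip, which is what unblocks a blackholed UDP flow.
//	func flushUDPEntries(ip string) error {
//		// -D deletes matching entries, -p udp restricts the match to UDP,
//		// --orig-dst matches the original destination address.
//		out, err := exec.Command("conntrack", "-D", "-p", "udp", "--orig-dst", ip).CombinedOutput()
//		if err != nil {
//			return fmt.Errorf("conntrack -D %s failed: %v, output: %s", ip, err, out)
//		}
//		return nil
//	}
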
    70  var _ = common.SIGDescribe("Conntrack", func() {
    71  
    72  	fr := framework.NewDefaultFramework("conntrack")
    73  	fr.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
    74  
    75  	type nodeInfo struct {
    76  		name   string
    77  		nodeIP string
    78  	}
    79  
    80  	var (
    81  		cs                             clientset.Interface
    82  		ns                             string
    83  		clientNodeInfo, serverNodeInfo nodeInfo
    84  	)
    85  
    86  	logContainsFn := func(text, podName string) wait.ConditionWithContextFunc {
    87  		return func(ctx context.Context) (bool, error) {
    88  			logs, err := e2epod.GetPodLogs(ctx, cs, ns, podName, podName)
    89  			if err != nil {
    90  				// Retry the error next time.
    91  				return false, nil
    92  			}
    93  			if !strings.Contains(string(logs), text) {
    94  				return false, nil
    95  			}
    96  			return true, nil
    97  		}
    98  	}
    99  
   100  	ginkgo.BeforeEach(func(ctx context.Context) {
   101  		cs = fr.ClientSet
   102  		ns = fr.Namespace.Name
   103  
   104  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
   105  		framework.ExpectNoError(err)
   106  		if len(nodes.Items) < 2 {
   107  			e2eskipper.Skipf(
   108  				"Test requires >= 2 Ready nodes, but there are only %v nodes",
   109  				len(nodes.Items))
   110  		}
   111  
   112  		family := v1.IPv4Protocol
   113  		if framework.TestContext.ClusterIsIPv6() {
   114  			family = v1.IPv6Protocol
   115  		}
   116  
   117  		ips := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family)
   118  		gomega.Expect(ips).ToNot(gomega.BeEmpty())
   119  
   120  		clientNodeInfo = nodeInfo{
   121  			name:   nodes.Items[0].Name,
   122  			nodeIP: ips[0],
   123  		}
   124  
   125  		ips = e2enode.GetAddressesByTypeAndFamily(&nodes.Items[1], v1.NodeInternalIP, family)
   126  		gomega.Expect(ips).ToNot(gomega.BeEmpty())
   127  
   128  		serverNodeInfo = nodeInfo{
   129  			name:   nodes.Items[1].Name,
   130  			nodeIP: ips[0],
   131  		}
   132  	})
   133  
   134  	ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a NodePort service", func(ctx context.Context) {
   135  
   136  		// Create a NodePort service
   137  		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
   138  		ginkgo.By("creating a UDP service " + serviceName + " with type=NodePort in " + ns)
   139  		udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
   140  			svc.Spec.Type = v1.ServiceTypeNodePort
   141  			svc.Spec.Ports = []v1.ServicePort{
   142  				{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
   143  			}
   144  		})
   145  		framework.ExpectNoError(err)
   146  
   147  		// Create a pod on one node to generate UDP traffic against the NodePort service every 5 seconds
   148  		ginkgo.By("creating a client pod for probing the service " + serviceName)
   149  		clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
   150  		nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
   151  		e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
   152  		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
   153  		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
   154  		clientPod.Spec.Containers[0].Name = podClient
   155  		e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)
   156  
   157  		// Read the client pod logs
   158  		logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   159  		framework.ExpectNoError(err)
   160  		framework.Logf("Pod client logs: %s", logs)
   161  
   162  		// Add a backend pod to the service on the other node
   163  		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
   164  		serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
   165  		serverPod1.Labels = udpJig.Labels
   166  		nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
   167  		e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
   168  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)
   169  
   170  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})
   171  
   172  		// Note that the fact that the Endpoints object already exists does NOT mean
   173  		// that iptables (or whatever else is used) was already programmed.
   174  		// Additionally, take into account that the UDP conntrack entry timeout is
   175  		// 30 seconds by default.
   176  		// Based on the above, check that the pod receives the traffic.
   177  		ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
   178  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
   179  			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   180  			framework.ExpectNoError(err)
   181  			framework.Logf("Pod client logs: %s", logs)
   182  			framework.Failf("Failed to connect to backend 1")
   183  		}
   184  
   185  		// Create a second pod
   186  		ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
   187  		serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
   188  		serverPod2.Labels = udpJig.Labels
   189  		nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
   190  		e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
   191  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2)
   192  
   193  		// and delete the first pod
   194  		framework.Logf("Cleaning up %s pod", podBackend1)
   195  		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
   196  
   197  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
   198  
   199  		// Check that the second pod keeps receiving traffic
   200  		// UDP conntrack entries timeout is 30 sec by default
   201  		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
   202  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
   203  			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   204  			framework.ExpectNoError(err)
   205  			framework.Logf("Pod client logs: %s", logs)
   206  			framework.Failf("Failed to connect to backend 2")
   207  		}
   208  	})
   209  
   210  	ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a ClusterIP service", func(ctx context.Context) {
   211  
   212  		// Create a ClusterIP service
   213  		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
   214  		ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns)
   215  		udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
   216  			svc.Spec.Type = v1.ServiceTypeClusterIP
   217  			svc.Spec.Ports = []v1.ServicePort{
   218  				{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
   219  			}
   220  		})
   221  		framework.ExpectNoError(err)
   222  
   223  		// Create a pod on one node to generate UDP traffic against the ClusterIP service every 5 seconds
   224  		ginkgo.By("creating a client pod for probing the service " + serviceName)
   225  		clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
   226  		nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
   227  		e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
   228  		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
   229  		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
   230  		clientPod.Spec.Containers[0].Name = podClient
   231  		e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)
   232  
   233  		// Read the client pod logs
   234  		logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   235  		framework.ExpectNoError(err)
   236  		framework.Logf("Pod client logs: %s", logs)
   237  
   238  		// Add a backend pod to the service on the other node
   239  		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
   240  		serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
   241  		serverPod1.Labels = udpJig.Labels
   242  		nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
   243  		e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
   244  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)
   245  
   246  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})
   247  
   248  		// Note that the fact that the Endpoints object already exists does NOT mean
   249  		// that iptables (or whatever else is used) was already programmed.
   250  		// Additionally, take into account that the UDP conntrack entry timeout is
   251  		// 30 seconds by default.
   252  		// Based on the above, check that the pod receives the traffic.
   253  		ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
   254  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
   255  			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   256  			framework.ExpectNoError(err)
   257  			framework.Logf("Pod client logs: %s", logs)
   258  			framework.Failf("Failed to connect to backend 1")
   259  		}
   260  
   261  		// Create a second pod
   262  		ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
   263  		serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
   264  		serverPod2.Labels = udpJig.Labels
   265  		nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
   266  		e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
   267  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2)
   268  
   269  		// and delete the first pod
   270  		framework.Logf("Cleaning up %s pod", podBackend1)
   271  		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
   272  
   273  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
   274  
   275  		// Check that the second pod keeps receiving traffic
   276  		// UDP conntrack entries timeout is 30 sec by default
   277  		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
   278  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
   279  			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   280  			framework.ExpectNoError(err)
   281  			framework.Logf("Pod client logs: %s", logs)
   282  			framework.Failf("Failed to connect to backend 2")
   283  		}
   284  	})
   285  
   286  	ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a ClusterIP service and client is hostNetwork", func(ctx context.Context) {
   287  
   288  		// Create a ClusterIP service
   289  		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
   290  		ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns)
   291  		udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
   292  			svc.Spec.Type = v1.ServiceTypeClusterIP
   293  			svc.Spec.Ports = []v1.ServicePort{
   294  				{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
   295  			}
   296  		})
   297  		framework.ExpectNoError(err)
   298  
   299  		// Create a pod on one node to generate UDP traffic against the ClusterIP service every 5 seconds
   300  		ginkgo.By("creating a client pod for probing the service " + serviceName)
   301  		clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
   302  		nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
   303  		e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
   304  		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
   305  		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
   306  		clientPod.Spec.Containers[0].Name = podClient
   307  		clientPod.Spec.HostNetwork = true
   308  		e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)
   309  
   310  		// Read the client pod logs
   311  		logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   312  		framework.ExpectNoError(err)
   313  		framework.Logf("Pod client logs: %s", logs)
   314  
   315  		// Add a backend pod to the service on the other node
   316  		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
   317  		serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
   318  		serverPod1.Labels = udpJig.Labels
   319  		nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
   320  		e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
   321  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)
   322  
   323  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})
   324  
   325  		// Note that the fact that the Endpoints object already exists does NOT mean
   326  		// that iptables (or whatever else is used) was already programmed.
   327  		// Additionally, take into account that the UDP conntrack entry timeout is
   328  		// 30 seconds by default.
   329  		// Based on the above, check that the pod receives the traffic.
   330  		ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
   331  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
   332  			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   333  			framework.ExpectNoError(err)
   334  			framework.Logf("Pod client logs: %s", logs)
   335  			framework.Failf("Failed to connect to backend 1")
   336  		}
   337  
   338  		// Create a second pod
   339  		ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
   340  		serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
   341  		serverPod2.Labels = udpJig.Labels
   342  		nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
   343  		e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
   344  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2)
   345  
   346  		// and delete the first pod
   347  		framework.Logf("Cleaning up %s pod", podBackend1)
   348  		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
   349  
   350  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
   351  
   352  		// Check that the second pod keeps receiving traffic
   353  		// UDP conntrack entries timeout is 30 sec by default
   354  		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
   355  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
   356  			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   357  			framework.ExpectNoError(err)
   358  			framework.Logf("Pod client logs: %s", logs)
   359  			framework.Failf("Failed to connect to backend 2")
   360  		}
   361  	})
   362  
   363  	// Regression test for #105657
   364  	// 1. Create a UDP Service
   365  	// 2. Create a client Pod sending traffic to the UDP service
   366  	// 3. Create a UDP server associated with the Service created in 1, with an init container that sleeps for some time
   367  	// The init container keeps the server pod unready; the endpoint slice is still created, it is just
   368  	// that the endpoint's Ready condition is false.
   369  	// If the kube-proxy conntrack logic does not check readiness, it will delete the conntrack entries for the UDP server
   370  	// as soon as the endpoint slice is created, even though the iptables rules are not installed until at least one
   371  	// endpoint is ready. If traffic arrives between the moment kube-proxy clears the entries (on seeing the endpoint
   372  	// slice) and the moment it installs the corresponding iptables rules (once the endpoint is ready), a stale conntrack
   373  	// entry will be generated, blackholing subsequent traffic.
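	// Illustrative sketch (not part of this test), assuming the discovery/v1
	// API: while the init container used below is still running, the
	// EndpointSlice for the service already contains an endpoint, but its
	// Ready condition is false. kube-proxy must not flush conntrack entries
	// for such an endpoint before it has also programmed the forwarding rules.
	//
	//	notReady := false
	//	endpoint := discoveryv1.Endpoint{
	//		Addresses:  []string{"10.244.1.5"}, // hypothetical pod IP
	//		Conditions: discoveryv1.EndpointConditions{Ready: &notReady},
	//	}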
   374  	ginkgo.It("should be able to preserve UDP traffic when initial unready endpoints get ready", func(ctx context.Context) {
   375  
   376  		// Create a ClusterIP service
   377  		udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
   378  		ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns)
   379  		udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
   380  			svc.Spec.Type = v1.ServiceTypeClusterIP
   381  			svc.Spec.Ports = []v1.ServicePort{
   382  				{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
   383  			}
   384  		})
   385  		framework.ExpectNoError(err)
   386  
   387  		// Create a pod on one node to generate UDP traffic against the ClusterIP service every 5 seconds
   388  		ginkgo.By("creating a client pod for probing the service " + serviceName)
   389  		clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
   390  		nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
   391  		e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
   392  		cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
   393  		clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
   394  		clientPod.Spec.Containers[0].Name = podClient
   395  		e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)
   396  
   397  		// Read the client pod logs
   398  		logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   399  		framework.ExpectNoError(err)
   400  		framework.Logf("Pod client logs: %s", logs)
   401  
   402  		// Add a backend pod to the service on the other node
   403  		ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
   404  		serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
   405  		serverPod1.Labels = udpJig.Labels
   406  		nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
   407  		// Add an init container to delay the pod becoming ready for 15 seconds
   408  		serverPod1.Spec.InitContainers = []v1.Container{
   409  			{
   410  				Name:    "init",
   411  				Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   412  				Command: []string{"/bin/sh", "-c", "echo Pausing start. && sleep 15"},
   413  			},
   414  		}
   415  		e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
   416  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)
   417  
   418  		// wait until the endpoints are ready
   419  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})
   420  
   421  		// Note that the fact that the Endpoints object already exists does NOT mean
   422  		// that iptables (or whatever else is used) was already programmed.
   423  		// Additionally, take into account that the UDP conntrack entry timeout is
   424  		// 30 seconds by default.
   425  		// Based on the above, check that the pod receives the traffic.
   426  		ginkgo.By("checking client pod connected to the backend on Node IP " + serverNodeInfo.nodeIP)
   427  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
   428  			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
   429  			framework.ExpectNoError(err)
   430  			framework.Logf("Pod client logs: %s", logs)
   431  			framework.Failf("Failed to connect to backend pod")
   432  		}
   433  
   434  	})
   435  
   436  	// Regression test for #74839, where:
   437  	// Packets considered INVALID by conntrack are now dropped. In particular, this fixes
   438  	// a problem where spurious retransmits in a long-running TCP connection to a service
   439  	// IP could result in the connection being closed with the error "Connection reset by
   440  	// peer"
   441  	// xref: https://kubernetes.io/blog/2019/03/29/kube-proxy-subtleties-debugging-an-intermittent-connection-reset/
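	// A minimal sketch, assuming the mitigation is expressed as an iptables
	// filter rule: kube-proxy drops packets that conntrack classifies as
	// INVALID so they are never forwarded, un-NATed, back to the client.
	// The rule is roughly of the form:
	//
	//	-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP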
   442  	ginkgo.It("should drop INVALID conntrack entries [Privileged]", func(ctx context.Context) {
   443  		serverLabel := map[string]string{
   444  			"app": "boom-server",
   445  		}
   446  
   447  		serverPod := &v1.Pod{
   448  			ObjectMeta: metav1.ObjectMeta{
   449  				Name:   "boom-server",
   450  				Labels: serverLabel,
   451  			},
   452  			Spec: v1.PodSpec{
   453  				Containers: []v1.Container{
   454  					{
   455  						Name:  "boom-server",
   456  						Image: imageutils.GetE2EImage(imageutils.RegressionIssue74839),
   457  						Ports: []v1.ContainerPort{
   458  							{
   459  								ContainerPort: 9000, // Default port exposed by boom-server
   460  							},
   461  						},
   462  						Env: []v1.EnvVar{
   463  							{
   464  								Name: "POD_IP",
   465  								ValueFrom: &v1.EnvVarSource{
   466  									FieldRef: &v1.ObjectFieldSelector{
   467  										APIVersion: "v1",
   468  										FieldPath:  "status.podIP",
   469  									},
   470  								},
   471  							},
   472  							{
   473  								Name: "POD_IPS",
   474  								ValueFrom: &v1.EnvVarSource{
   475  									FieldRef: &v1.ObjectFieldSelector{
   476  										APIVersion: "v1",
   477  										FieldPath:  "status.podIPs",
   478  									},
   479  								},
   480  							},
   481  						},
   482  						SecurityContext: &v1.SecurityContext{
   483  							Capabilities: &v1.Capabilities{
   484  								Add: []v1.Capability{"NET_RAW"},
   485  							},
   486  						},
   487  					},
   488  				},
   489  			},
   490  		}
   491  		nodeSelection := e2epod.NodeSelection{Name: serverNodeInfo.name}
   492  		e2epod.SetNodeSelection(&serverPod.Spec, nodeSelection)
   493  		e2epod.NewPodClient(fr).CreateSync(ctx, serverPod)
   494  		ginkgo.By("Server pod created on node " + serverNodeInfo.name)
   495  
   496  		svc := &v1.Service{
   497  			ObjectMeta: metav1.ObjectMeta{
   498  				Name: "boom-server",
   499  			},
   500  			Spec: v1.ServiceSpec{
   501  				Selector: serverLabel,
   502  				Ports: []v1.ServicePort{
   503  					{
   504  						Protocol: v1.ProtocolTCP,
   505  						Port:     9000,
   506  					},
   507  				},
   508  			},
   509  		}
   510  		_, err := fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{})
   511  		framework.ExpectNoError(err)
   512  
   513  		ginkgo.By("Server service created")
   514  
   515  		pod := &v1.Pod{
   516  			ObjectMeta: metav1.ObjectMeta{
   517  				Name: "startup-script",
   518  			},
   519  			Spec: v1.PodSpec{
   520  				Containers: []v1.Container{
   521  					{
   522  						Name:  "startup-script",
   523  						Image: imageutils.GetE2EImage(imageutils.BusyBox),
   524  						Command: []string{
   525  							"sh", "-c", "while true; do sleep 2; nc boom-server 9000& done",
   526  						},
   527  					},
   528  				},
   529  				RestartPolicy: v1.RestartPolicyNever,
   530  			},
   531  		}
   532  		nodeSelection = e2epod.NodeSelection{Name: clientNodeInfo.name}
   533  		e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
   534  
   535  		e2epod.NewPodClient(fr).CreateSync(ctx, pod)
   536  		ginkgo.By("Client pod created")
   537  
   538  		// The client will open connections against the server.
   539  		// The server will inject invalid packets.
   540  		// If conntrack does not drop the invalid packets, they will go through without (reverse) NAT,
   541  		// so the client will receive an unexpected TCP packet and reply with a RST;
   542  		// the server will log an ERROR if that happens.
   543  		ginkgo.By("checking client pod does not RST the TCP connection because it receives an INVALID packet")
   544  		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn("ERROR", "boom-server")); err == nil {
   545  			logs, err := e2epod.GetPodLogs(ctx, cs, ns, "boom-server", "boom-server")
   546  			framework.ExpectNoError(err)
   547  			framework.Logf("boom-server pod logs: %s", logs)
   548  			framework.Failf("boom-server pod received a RST from the client")
   549  		}
   550  
   551  		logs, err := e2epod.GetPodLogs(ctx, cs, ns, "boom-server", "boom-server")
   552  		framework.ExpectNoError(err)
   553  		if !strings.Contains(string(logs), "connection established") {
   554  			framework.Logf("boom-server pod logs: %s", logs)
   555  			framework.Failf("boom-server pod did not send any bad packet to the client")
   556  		}
   557  		framework.Logf("boom-server pod logs: %s", logs)
   558  		framework.Logf("boom-server OK: did not receive any RST packet")
   559  	})
   560  })