k8s.io/kubernetes@v1.29.3/test/e2e/network/networking.go

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package network

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	utilwait "k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/cluster/ports"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	"k8s.io/kubernetes/test/e2e/network/common"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
)

// checkConnectivityToHost launches a pod to test connectivity to the specified
// host. An error will be returned if the host is not reachable from the pod.
//
// An empty nodeName lets the scheduler choose where the pod runs.
func checkConnectivityToHost(ctx context.Context, f *framework.Framework, nodeName, podName, host string, port, timeout int) error {
	command := []string{
		"nc",
		"-vz",
		"-w", strconv.Itoa(timeout),
		host,
		strconv.Itoa(port),
	}

	pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil)
	pod.Spec.Containers[0].Command = command
	pod.Spec.Containers[0].Args = nil // otherwise `pause` magically becomes an argument to nc, which causes all hell to break loose
	nodeSelection := e2epod.NodeSelection{Name: nodeName}
	e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
	pod.Spec.RestartPolicy = v1.RestartPolicyNever

	podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	_, err := podClient.Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name)

	if err != nil {
		logs, logErr := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
		if logErr != nil {
			framework.Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
		} else {
			framework.Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs)
		}
	}

	return err
}
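
// For illustration: with host "8.8.8.8", port 53, and timeout 30 (the values
// used by the first spec below), the agnhost container effectively runs
//
//	nc -vz -w 30 8.8.8.8 53
//
// where -v is verbose, -z closes the connection as soon as it opens, and -w
// bounds the connect timeout in seconds. A zero exit code drives the pod to
// the Succeeded phase that WaitForPodSuccessInNamespace polls for.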

var _ = common.SIGDescribe("Networking", func() {
	var svcname = "nettest"
	f := framework.NewDefaultFramework(svcname)
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	f.It("should provide Internet connection for containers", feature.NetworkingIPv4, func(ctx context.Context) {
		ginkgo.By("Running container which tries to connect to 8.8.8.8")
		framework.ExpectNoError(
			checkConnectivityToHost(ctx, f, "", "connectivity-test", "8.8.8.8", 53, 30))
	})

	f.It("should provide Internet connection for containers", feature.NetworkingIPv6, "[Experimental][LinuxOnly]", func(ctx context.Context) {
		// IPv6 is not supported on Windows.
		e2eskipper.SkipIfNodeOSDistroIs("windows")
		ginkgo.By("Running container which tries to connect to 2001:4860:4860::8888")
		framework.ExpectNoError(
			checkConnectivityToHost(ctx, f, "", "connectivity-test", "2001:4860:4860::8888", 53, 30))
	})

	f.It("should provide Internet connection for containers using DNS", feature.NetworkingDNS, func(ctx context.Context) {
		ginkgo.By("Running container which tries to connect to google.com")
		framework.ExpectNoError(
			checkConnectivityToHost(ctx, f, "", "connectivity-test", "google.com", 80, 30))
	})

	// First test because it has no dependencies on variables created later on.
	ginkgo.It("should provide unchanging, static URL paths for kubernetes api services", func(ctx context.Context) {
		tests := []struct {
			path string
		}{
			{path: "/healthz"},
			{path: "/api"},
			{path: "/apis"},
			{path: "/metrics"},
			{path: "/openapi/v2"},
			{path: "/version"},
			// TODO: test proxy links here
		}
		if !framework.ProviderIs("gke", "skeleton") {
			tests = append(tests, struct{ path string }{path: "/logs"})
		}
		for _, test := range tests {
			ginkgo.By(fmt.Sprintf("testing: %s", test.path))
			data, err := f.ClientSet.CoreV1().RESTClient().Get().
				AbsPath(test.path).
				DoRaw(ctx)
			if err != nil {
				framework.Failf("Failed: %v\nBody: %s", err, string(data))
			}
		}
	})

	ginkgo.It("should check kube-proxy urls", func(ctx context.Context) {
		// TODO: this is overkill; we just need the host-networking pod
		// to hit kube-proxy urls.
		config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork)

		ginkgo.By("checking kube-proxy URLs")
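		// Verify /healthz returns status 200 OK.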
		config.GetSelfURL(ctx, ports.ProxyHealthzPort, "/healthz", "200 OK")
		// Verify /healthz returns the proper content.
		config.GetSelfURL(ctx, ports.ProxyHealthzPort, "/healthz", "lastUpdated")
		// Verify /proxyMode returns http status code 200.
		config.GetSelfURLStatusCode(ctx, ports.ProxyStatusPort, "/proxyMode", "200")
	})

	ginkgo.Describe("Granular Checks: Services", func() {

		ginkgo.It("should function for pod-Service: http", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			err := config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort))

			err = config.DialFromTestContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		ginkgo.It("should function for pod-Service: udp", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
			err := config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort))
			err = config.DialFromTestContainer(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		f.It("should function for pod-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
			ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterSCTPPort))
			err := config.DialFromTestContainer(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeSCTPPort))
			err = config.DialFromTestContainer(ctx, "sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		ginkgo.It("should function for node-Service: http", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork)
			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			err := config.DialFromNode(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
			err = config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		ginkgo.It("should function for node-Service: udp", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort))
			err := config.DialFromNode(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
			err = config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		f.It("should function for node-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) {
			ginkgo.Skip("Skipping SCTP node to service test until DialFromNode supports SCTP #96482")
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
			ginkgo.By(fmt.Sprintf("dialing(sctp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterSCTPPort))
			err := config.DialFromNode(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(sctp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeSCTPPort))
			err = config.DialFromNode(ctx, "sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		ginkgo.It("should function for endpoint-Service: http", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			err := config.DialFromEndpointContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort))
			err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		ginkgo.It("should function for endpoint-Service: udp", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
			err := config.DialFromEndpointContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort))
			err = config.DialFromEndpointContainer(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		f.It("should function for endpoint-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP)
			ginkgo.By(fmt.Sprintf("dialing(sctp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterSCTPPort))
			err := config.DialFromEndpointContainer(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By(fmt.Sprintf("dialing(sctp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeSCTPPort))
			err = config.DialFromEndpointContainer(ctx, "sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		// This test ensures that in a situation where multiple services exist with the same selector,
		// deleting one of the services does not affect the connectivity of the remaining service.
		ginkgo.It("should function for multiple endpoint-Services with same selector", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By("creating a second service with same selector")
			svc2, httpPort := createSecondNodePortService(ctx, f, config)

			// original service should work
			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			err := config.DialFromEndpointContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort))
			err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			// Dial second service
			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (svc2.clusterIP)", config.EndpointPods[0].Name, svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort))
			err = config.DialFromEndpointContainer(ctx, "http", svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, httpPort))
			err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, httpPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By("deleting the original node port service")
			config.DeleteNodePortService(ctx)

			// Second service should continue to function unaffected
			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (svc2.clusterIP)", config.EndpointPods[0].Name, svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort))
			err = config.DialFromEndpointContainer(ctx, "http", svc2.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, httpPort))
			err = config.DialFromEndpointContainer(ctx, "http", config.NodeIP, httpPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		ginkgo.It("should update endpoints: http", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			err := config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint (initial), %v", err)
			}
			ginkgo.By("Deleting a pod, which will be replaced with a new endpoint")
			config.DeleteNetProxyPod(ctx)

			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP) (endpoint recovery)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
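			// Note: on this recovery dial minTries equals config.MaxTries, so
			// the full number of attempts is made while the endpoint set
			// converges, rather than returning as soon as the expected
			// endpoints first respond.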
			err = config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint (recovery), %v", err)
			}
		})

		ginkgo.It("should update endpoints: udp", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
			err := config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint (initial), %v", err)
			}
			ginkgo.By("Deleting a pod, which will be replaced with a new endpoint")
			config.DeleteNetProxyPod(ctx)

			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP) (endpoint recovery)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
			err = config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint (recovery), %v", err)
			}
		})

		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
		f.It("should update nodePort: http", f.WithSlow(), func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork)
			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
			err := config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("Error dialing http from node: %v", err)
			}
			ginkgo.By("Deleting the node port access point")
			config.DeleteNodePortService(ctx)

			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP) and getting ZERO host endpoints", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
			// #106770: MaxTries can be very large on large clusters, with the risk that another test creates a new NodePort that starts answering traffic.
			// Since we only want to assert that traffic is no longer being forwarded, and the retry timeout is 2 seconds, consider the test correct
			// if the service doesn't answer after 10 tries.
			err = config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, 10, 10, sets.NewString())
			if err != nil {
				framework.Failf("Failure validating that the node port service STOPPED serving traffic after removal: %v", err)
			}
		})

		// Quick validation of UDP; the next test confirms that the service updates as well after endpoints are removed, but it is slower.
		ginkgo.It("should support basic nodePort: udp functionality", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort))
			err := config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("Failure validating that nodePort service WAS forwarding properly: %v", err)
			}
		})

		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
		f.It("should update nodePort: udp", f.WithSlow(), func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort))
			err := config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("Failure validating that nodePort service WAS forwarding properly: %v", err)
			}

			ginkgo.By("Deleting the node port access point")
			config.DeleteNodePortService(ctx)

			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ZERO host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort))
			// #106770: MaxTries can be very large on large clusters, with the risk that another test creates a new NodePort that starts answering traffic.
			// Since we only want to assert that traffic is no longer being forwarded, and the retry timeout is 2 seconds, consider the test correct
			// if the service doesn't answer after 10 tries.
			err = config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, 10, 10, sets.NewString())
			if err != nil {
				framework.Failf("Failure validating that the node port service STOPPED serving traffic after removal: %v", err)
			}
		})

		// [LinuxOnly]: Windows does not support session affinity.
		ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort))

			// Check that the number of endpoints returned is exactly one.
			eps, err := config.GetEndpointsFromTestContainer(ctx, "http", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks)
			if err != nil {
				framework.Failf("Failed to get endpoints from test container: %v", err)
			}
			if len(eps) == 0 {
				framework.Failf("Unexpectedly got no endpoints")
			}
			if len(eps) > 1 {
				framework.Failf("Unexpected endpoints returned: %v, expected exactly 1 endpoint", eps)
			}
		})

		// [LinuxOnly]: Windows does not support session affinity.
		ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort))

			// Check that the number of endpoints returned is exactly one.
			eps, err := config.GetEndpointsFromTestContainer(ctx, "udp", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks)
			if err != nil {
				framework.Failf("Failed to get endpoints from test container: %v", err)
			}
			if len(eps) == 0 {
				framework.Failf("Unexpectedly got no endpoints")
			}
			if len(eps) > 1 {
				framework.Failf("Unexpected endpoints returned: %v, expected exactly 1 endpoint", eps)
			}
		})

		ginkgo.It("should be able to handle large requests: http", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
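			// A 2000-character payload: comfortably larger than a typical
			// 1500-byte MTU, so the echo spans multiple packets.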
			message := strings.Repeat("42", 1000)
			err := config.DialEchoFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message)
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		ginkgo.It("should be able to handle large requests: udp", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f)
			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
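			// Again a 2000-character payload; on a standard 1500-byte MTU a
			// UDP datagram this large relies on IP fragmentation.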
			message := "n" + strings.Repeat("o", 1999)
			err := config.DialEchoFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message)
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
		})

		// If the endpoint pods use hostNetwork, several tests can't run in parallel
		// because the pods would try to acquire the same port on the host.
		// We run all the checks in a single serial test to avoid port conflicts.
		ginkgo.It("should function for service endpoints using hostNetwork", func(ctx context.Context) {
			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork)

			ginkgo.By("pod-Service(hostNetwork): http")

			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			err := config.DialFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort))
			err = config.DialFromTestContainer(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By("pod-Service(hostNetwork): udp")

			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
			err = config.DialFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort))
			err = config.DialFromTestContainer(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By("node-Service(hostNetwork): http")

			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			err = config.DialFromNode(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
			err = config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By("node-Service(hostNetwork): udp")

			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort))
			err = config.DialFromNode(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}
			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
			err = config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By("handle large requests: http(hostNetwork)")

			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
			message := strings.Repeat("42", 1000)
			err = config.DialEchoFromTestContainer(ctx, "http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message)
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

			ginkgo.By("handle large requests: udp(hostNetwork)")

			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
			message = "n" + strings.Repeat("o", 1999)
			err = config.DialEchoFromTestContainer(ctx, "udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message)
			if err != nil {
				framework.Failf("failed dialing endpoint, %v", err)
			}

		})

	})

	f.It("should recreate its iptables rules if they are deleted", f.WithDisruptive(), func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
		e2eskipper.SkipUnlessSSHKeyPresent()

		hosts, err := e2essh.NodeSSHHosts(ctx, f.ClientSet)
		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
		if len(hosts) == 0 {
			framework.Failf("No ssh-able nodes")
		}
		host := hosts[0]

		ns := f.Namespace.Name
		numPods, servicePort := 3, defaultServeHostnameServicePort
		svc := "iptables-flush-test"

		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc)
		podNames, svcIP, err := StartServeHostnameService(ctx, f.ClientSet, getServeHostnameService(svc), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc, ns)

		// Ideally we want to reload the system firewall, but we don't necessarily
		// know how to do that on this system ("firewall-cmd --reload"? "systemctl
		// restart iptables"?). So instead we just manually delete all "KUBE-"
		// chains.

		ginkgo.By("dumping iptables rules on node " + host)
		result, err := e2essh.SSH(ctx, "sudo iptables-save", host, framework.TestContext.Provider)
		e2essh.LogResult(result)
		if err != nil || result.Code != 0 {
			framework.Failf("couldn't dump iptables rules: %v", err)
		}

		// All the commands that delete rules have to come before all the commands
		// that delete chains, since the chains can't be deleted while there are
		// still rules referencing them.
		var deleteRuleCmds, deleteChainCmds []string
		table := ""
		for _, line := range strings.Split(result.Stdout, "\n") {
			if strings.HasPrefix(line, "*") {
				table = line[1:]
			} else if table == "" {
				continue
			}

			// Delete jumps from non-KUBE chains to KUBE chains
			if !strings.HasPrefix(line, "-A KUBE-") && strings.Contains(line, "-j KUBE-") {
				deleteRuleCmds = append(deleteRuleCmds, fmt.Sprintf("sudo iptables -t %s -D %s || true", table, line[3:]))
			}
			// Flush and delete all KUBE chains
			if strings.HasPrefix(line, ":KUBE-") {
				chain := strings.Split(line, " ")[0][1:]
				deleteRuleCmds = append(deleteRuleCmds, fmt.Sprintf("sudo iptables -t %s -F %s || true", table, chain))
				deleteChainCmds = append(deleteChainCmds, fmt.Sprintf("sudo iptables -t %s -X %s || true", table, chain))
			}
		}
		cmd := strings.Join(append(deleteRuleCmds, deleteChainCmds...), "\n")
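		// For illustration (hypothetical iptables-save excerpt): in the nat
		// table, the pair of lines
		//
		//	-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
		//	:KUBE-SERVICES - [0:0]
		//
		// produces a rule delete and a chain flush in deleteRuleCmds, then a
		// chain delete in deleteChainCmds:
		//
		//	sudo iptables -t nat -D PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES || true
		//	sudo iptables -t nat -F KUBE-SERVICES || true
		//	sudo iptables -t nat -X KUBE-SERVICES || true
		//
		// All -D/-F commands are joined ahead of the -X commands, since a
		// chain can only be deleted once it is empty and unreferenced.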

		ginkgo.By("deleting all KUBE-* iptables chains")
		result, err = e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider)
		if err != nil || result.Code != 0 {
			e2essh.LogResult(result)
			framework.Failf("couldn't delete iptables rules: %v", err)
		}

		ginkgo.By("verifying that kube-proxy rules are eventually recreated")
		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, f.ClientSet, ns, podNames, svcIP, servicePort))

		ginkgo.By("verifying that kubelet rules are eventually recreated")
		err = utilwait.PollImmediate(framework.Poll, framework.RestartNodeReadyAgainTimeout, func() (bool, error) {
			result, err = e2essh.SSH(ctx, "sudo iptables-save -t mangle", host, framework.TestContext.Provider)
			if err != nil || result.Code != 0 {
				e2essh.LogResult(result)
				return false, err
			}

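			// KUBE-IPTABLES-HINT is a chain kubelet creates in the mangle
			// table (as a hint about which iptables API the host uses); its
			// reappearance shows that kubelet has rewritten its rules.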
			if strings.Contains(result.Stdout, "\n:KUBE-IPTABLES-HINT") {
				return true, nil
			}
			return false, nil
		})
		if err != nil {
			e2essh.LogResult(result)
		}
		framework.ExpectNoError(err, "kubelet did not recreate its iptables rules")
	})

	// This is [Serial] because it can't run at the same time as the
	// [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded.
	f.It("should allow creating a Pod with an SCTP HostPort [LinuxOnly]", f.WithSerial(), func(ctx context.Context) {
		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
		framework.ExpectNoError(err)
		hostExec := utils.NewHostExec(f)
		ginkgo.DeferCleanup(hostExec.Cleanup)

		ginkgo.By("getting the state of the sctp module on the selected node")
		nodes := &v1.NodeList{}
		nodes.Items = append(nodes.Items, *node)
		sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes)

		ginkgo.By("creating a pod with hostport on the selected node")
		podName := "hostport"
		ports := []v1.ContainerPort{{Protocol: v1.ProtocolSCTP, ContainerPort: 5060, HostPort: 5060}}
		podSpec := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, ports)
		nodeSelection := e2epod.NodeSelection{Name: node.Name}
		e2epod.SetNodeSelection(&podSpec.Spec, nodeSelection)

		ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
		e2epod.NewPodClient(f).CreateSync(ctx, podSpec)
		ginkgo.DeferCleanup(func(ctx context.Context) {
			err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, metav1.DeleteOptions{})
			framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name)
		})
		ginkgo.By("validating sctp module is still not loaded")
		sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes)
		if !sctpLoadedAtStart && sctpLoadedAtEnd {
			framework.Failf("The state of the sctp module has changed due to the test case")
		}
	})
})
   668  })