k8s.io/kubernetes@v1.29.3/test/e2e/network/dual_stack.go (about)

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package network
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"net"
    23  	"strings"
    24  	"time"
    25  
    26  	"github.com/onsi/ginkgo/v2"
    27  	"github.com/onsi/gomega"
    28  
    29  	appsv1 "k8s.io/api/apps/v1"
    30  	v1 "k8s.io/api/core/v1"
    31  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    32  	"k8s.io/apimachinery/pkg/util/wait"
    33  	clientset "k8s.io/client-go/kubernetes"
    34  	"k8s.io/kubernetes/test/e2e/feature"
    35  	"k8s.io/kubernetes/test/e2e/framework"
    36  	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
    37  	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
    38  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    39  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    40  	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
    41  	"k8s.io/kubernetes/test/e2e/network/common"
    42  	imageutils "k8s.io/kubernetes/test/utils/image"
    43  	admissionapi "k8s.io/pod-security-admission/api"
    44  	netutils "k8s.io/utils/net"
    45  )
    46  
    47  // Tests for ipv4-ipv6 dual-stack feature
    48  var _ = common.SIGDescribe(feature.IPv6DualStack, func() {
    49  	f := framework.NewDefaultFramework("dualstack")
    50  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
    51  
    52  	var cs clientset.Interface
    53  	var podClient *e2epod.PodClient
    54  
	// Re-capture the per-spec clientset and pod helper before every spec so
	// each test operates against the framework's freshly created namespace.
	ginkgo.BeforeEach(func() {
		cs = f.ClientSet
		podClient = e2epod.NewPodClient(f)
	})
    59  
    60  	ginkgo.It("should have ipv4 and ipv6 internal node ip", func(ctx context.Context) {
    61  		// TODO (aramase) can switch to new function to get all nodes
    62  		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, cs)
    63  		framework.ExpectNoError(err)
    64  
    65  		for _, node := range nodeList.Items {
    66  			// get all internal ips for node
    67  			internalIPs := e2enode.GetAddresses(&node, v1.NodeInternalIP)
    68  
    69  			gomega.Expect(internalIPs).To(gomega.HaveLen(2))
    70  			// assert 2 ips belong to different families
    71  			if netutils.IsIPv4String(internalIPs[0]) == netutils.IsIPv4String(internalIPs[1]) {
    72  				framework.Failf("both internalIPs %s and %s belong to the same families", internalIPs[0], internalIPs[1])
    73  			}
    74  		}
    75  	})
    76  
    77  	ginkgo.It("should create pod, add ipv6 and ipv4 ip to pod ips", func(ctx context.Context) {
    78  		podName := "pod-dualstack-ips"
    79  
    80  		pod := &v1.Pod{
    81  			ObjectMeta: metav1.ObjectMeta{
    82  				Name:   podName,
    83  				Labels: map[string]string{"test": "dualstack-pod-ips"},
    84  			},
    85  			Spec: v1.PodSpec{
    86  				Containers: []v1.Container{
    87  					{
    88  						Name:  "dualstack-pod-ips",
    89  						Image: imageutils.GetE2EImage(imageutils.Agnhost),
    90  					},
    91  				},
    92  			},
    93  		}
    94  
    95  		ginkgo.By("submitting the pod to kubernetes")
    96  		p := podClient.CreateSync(ctx, pod)
    97  
    98  		gomega.Expect(p.Status.PodIP).ShouldNot(gomega.BeEquivalentTo(""))
    99  		gomega.Expect(p.Status.PodIPs).ShouldNot(gomega.BeNil())
   100  
   101  		// validate there are 2 ips in podIPs
   102  		gomega.Expect(p.Status.PodIPs).To(gomega.HaveLen(2))
   103  		// validate first ip in PodIPs is same as PodIP
   104  		gomega.Expect(p.Status.PodIP).To(gomega.Equal(p.Status.PodIPs[0].IP))
   105  		// assert 2 pod ips belong to different families
   106  		if netutils.IsIPv4String(p.Status.PodIPs[0].IP) == netutils.IsIPv4String(p.Status.PodIPs[1].IP) {
   107  			framework.Failf("both internalIPs %s and %s belong to the same families", p.Status.PodIPs[0].IP, p.Status.PodIPs[1].IP)
   108  		}
   109  
   110  		ginkgo.By("deleting the pod")
   111  		err := podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(30))
   112  		framework.ExpectNoError(err, "failed to delete pod")
   113  	})
   114  
   115  	f.It("should create pod, add ipv6 and ipv4 ip to host ips", feature.PodHostIPs, func(ctx context.Context) {
   116  		podName := "pod-dualstack-ips"
   117  
   118  		pod := &v1.Pod{
   119  			ObjectMeta: metav1.ObjectMeta{
   120  				Name:   podName,
   121  				Labels: map[string]string{"test": "dualstack-host-ips"},
   122  			},
   123  			Spec: v1.PodSpec{
   124  				Containers: []v1.Container{
   125  					{
   126  						Name:  "dualstack-host-ips",
   127  						Image: imageutils.GetE2EImage(imageutils.Agnhost),
   128  					},
   129  				},
   130  			},
   131  		}
   132  
   133  		ginkgo.By("submitting the pod to kubernetes")
   134  		p := podClient.CreateSync(ctx, pod)
   135  
   136  		gomega.Expect(p.Status.HostIP).ShouldNot(gomega.BeEquivalentTo(""))
   137  		gomega.Expect(p.Status.HostIPs).ShouldNot(gomega.BeNil())
   138  
   139  		// validate there are 2 ips in hostIPs
   140  		gomega.Expect(p.Status.HostIPs).To(gomega.HaveLen(2))
   141  		// validate first ip in hostIPs is same as HostIP
   142  		gomega.Expect(p.Status.HostIP).To(gomega.Equal(p.Status.HostIPs[0].IP))
   143  		// assert 2 host ips belong to different families
   144  		if netutils.IsIPv4String(p.Status.HostIPs[0].IP) == netutils.IsIPv4String(p.Status.HostIPs[1].IP) {
   145  			framework.Failf("both internalIPs %s and %s belong to the same families", p.Status.HostIPs[0], p.Status.HostIPs[1])
   146  		}
   147  
   148  		ginkgo.By("deleting the pod")
   149  		err := podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(30))
   150  		framework.ExpectNoError(err, "failed to delete pod")
   151  	})
   152  
   153  	// takes close to 140s to complete, so doesn't need to be marked [SLOW]
   154  	ginkgo.It("should be able to reach pod on ipv4 and ipv6 ip", func(ctx context.Context) {
   155  		serverDeploymentName := "dualstack-server"
   156  		clientDeploymentName := "dualstack-client"
   157  
   158  		// get all schedulable nodes to determine the number of replicas for pods
   159  		// this is to ensure connectivity from all nodes on cluster
   160  		nodeList, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 3)
   161  		framework.ExpectNoError(err)
   162  
   163  		replicas := int32(len(nodeList.Items))
   164  
   165  		serverDeploymentSpec := e2edeployment.NewDeployment(serverDeploymentName,
   166  			replicas,
   167  			map[string]string{"test": "dual-stack-server"},
   168  			"dualstack-test-server",
   169  			imageutils.GetE2EImage(imageutils.Agnhost),
   170  			appsv1.RollingUpdateDeploymentStrategyType)
   171  		serverDeploymentSpec.Spec.Template.Spec.Containers[0].Args = []string{"test-webserver"}
   172  
   173  		// to ensure all the pods land on different nodes and we can thereby
   174  		// validate connectivity across all nodes.
   175  		serverDeploymentSpec.Spec.Template.Spec.Affinity = &v1.Affinity{
   176  			PodAntiAffinity: &v1.PodAntiAffinity{
   177  				RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
   178  					{
   179  						LabelSelector: &metav1.LabelSelector{
   180  							MatchExpressions: []metav1.LabelSelectorRequirement{
   181  								{
   182  									Key:      "test",
   183  									Operator: metav1.LabelSelectorOpIn,
   184  									Values:   []string{"dualstack-test-server"},
   185  								},
   186  							},
   187  						},
   188  						TopologyKey: "kubernetes.io/hostname",
   189  					},
   190  				},
   191  			},
   192  		}
   193  
   194  		clientDeploymentSpec := e2edeployment.NewDeployment(clientDeploymentName,
   195  			replicas,
   196  			map[string]string{"test": "dual-stack-client"},
   197  			"dualstack-test-client",
   198  			imageutils.GetE2EImage(imageutils.Agnhost),
   199  			appsv1.RollingUpdateDeploymentStrategyType)
   200  
   201  		clientDeploymentSpec.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "3600"}
   202  		clientDeploymentSpec.Spec.Template.Spec.Affinity = &v1.Affinity{
   203  			PodAntiAffinity: &v1.PodAntiAffinity{
   204  				RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
   205  					{
   206  						LabelSelector: &metav1.LabelSelector{
   207  							MatchExpressions: []metav1.LabelSelectorRequirement{
   208  								{
   209  									Key:      "test",
   210  									Operator: metav1.LabelSelectorOpIn,
   211  									Values:   []string{"dualstack-test-client"},
   212  								},
   213  							},
   214  						},
   215  						TopologyKey: "kubernetes.io/hostname",
   216  					},
   217  				},
   218  			},
   219  		}
   220  
   221  		serverDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(ctx, serverDeploymentSpec, metav1.CreateOptions{})
   222  		framework.ExpectNoError(err)
   223  
   224  		clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(ctx, clientDeploymentSpec, metav1.CreateOptions{})
   225  		framework.ExpectNoError(err)
   226  
   227  		err = e2edeployment.WaitForDeploymentComplete(cs, serverDeployment)
   228  		framework.ExpectNoError(err)
   229  		err = e2edeployment.WaitForDeploymentComplete(cs, clientDeployment)
   230  		framework.ExpectNoError(err)
   231  
   232  		serverPods, err := e2edeployment.GetPodsForDeployment(ctx, cs, serverDeployment)
   233  		framework.ExpectNoError(err)
   234  
   235  		clientPods, err := e2edeployment.GetPodsForDeployment(ctx, cs, clientDeployment)
   236  		framework.ExpectNoError(err)
   237  
   238  		assertNetworkConnectivity(ctx, f, *serverPods, *clientPods, "dualstack-test-client", "80")
   239  	})
   240  
   241  	ginkgo.It("should create a single stack service with cluster ip from primary service range", func(ctx context.Context) {
   242  		serviceName := "defaultclusterip"
   243  		ns := f.Namespace.Name
   244  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   245  
   246  		t := NewServerTest(cs, ns, serviceName)
   247  		defer func() {
   248  			defer ginkgo.GinkgoRecover()
   249  			if errs := t.Cleanup(); len(errs) != 0 {
   250  				framework.Failf("errors in cleanup: %v", errs)
   251  			}
   252  		}()
   253  
   254  		ginkgo.By("creating service " + ns + "/" + serviceName + " with Service.Spec.IPFamilies not set nil policy")
   255  		service := createService(t.ServiceName, t.Namespace, t.Labels, nil, nil)
   256  
   257  		jig.Labels = t.Labels
   258  		err := jig.CreateServicePods(ctx, 2)
   259  		framework.ExpectNoError(err)
   260  		svc, err := t.CreateService(service)
   261  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
   262  
   263  		validateNumOfServicePorts(svc, 2)
   264  
   265  		expectedPolicy := v1.IPFamilyPolicySingleStack
   266  		expectedFamilies := []v1.IPFamily{v1.IPv4Protocol}
   267  		if framework.TestContext.ClusterIsIPv6() {
   268  			expectedFamilies = []v1.IPFamily{v1.IPv6Protocol}
   269  		}
   270  
   271  		// check the spec has been set to default ip family
   272  		validateServiceAndClusterIPFamily(svc, expectedFamilies, &expectedPolicy)
   273  
   274  		// ensure endpoint belong to same ipfamily as service
   275  		if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
   276  			endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{})
   277  			if err != nil {
   278  				return false, nil
   279  			}
   280  			validateEndpointsBelongToIPFamily(svc, endpoint, expectedFamilies[0] /*endpoint controller works on primary ip*/)
   281  
   282  			return true, nil
   283  		}); err != nil {
   284  			framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
   285  		}
   286  	})
   287  
   288  	ginkgo.It("should create service with ipv4 cluster ip", func(ctx context.Context) {
   289  		serviceName := "ipv4clusterip"
   290  		ns := f.Namespace.Name
   291  
   292  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   293  
   294  		t := NewServerTest(cs, ns, serviceName)
   295  		defer func() {
   296  			defer ginkgo.GinkgoRecover()
   297  			if errs := t.Cleanup(); len(errs) != 0 {
   298  				framework.Failf("errors in cleanup: %v", errs)
   299  			}
   300  		}()
   301  
   302  		ginkgo.By("creating service " + ns + "/" + serviceName + " with Service.Spec.IPFamily IPv4" + ns)
   303  
   304  		expectedPolicy := v1.IPFamilyPolicySingleStack
   305  		expectedFamilies := []v1.IPFamily{v1.IPv4Protocol}
   306  
   307  		service := createService(t.ServiceName, t.Namespace, t.Labels, nil, expectedFamilies)
   308  
   309  		jig.Labels = t.Labels
   310  		err := jig.CreateServicePods(ctx, 2)
   311  		framework.ExpectNoError(err)
   312  		svc, err := t.CreateService(service)
   313  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
   314  
   315  		validateNumOfServicePorts(svc, 2)
   316  
   317  		// check the spec has been set to IPv4 and cluster ip belong to IPv4 family
   318  		validateServiceAndClusterIPFamily(svc, expectedFamilies, &expectedPolicy)
   319  
   320  		// ensure endpoints belong to same ipfamily as service
   321  		if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
   322  			endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{})
   323  			if err != nil {
   324  				return false, nil
   325  			}
   326  			validateEndpointsBelongToIPFamily(svc, endpoint, expectedFamilies[0] /* endpoint controller operates on primary ip */)
   327  			return true, nil
   328  		}); err != nil {
   329  			framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
   330  		}
   331  	})
   332  
   333  	ginkgo.It("should create service with ipv6 cluster ip", func(ctx context.Context) {
   334  		serviceName := "ipv6clusterip"
   335  		ns := f.Namespace.Name
   336  		ipv6 := v1.IPv6Protocol
   337  
   338  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   339  
   340  		t := NewServerTest(cs, ns, serviceName)
   341  		defer func() {
   342  			defer ginkgo.GinkgoRecover()
   343  			if errs := t.Cleanup(); len(errs) != 0 {
   344  				framework.Failf("errors in cleanup: %v", errs)
   345  			}
   346  		}()
   347  
   348  		ginkgo.By("creating service " + ns + "/" + serviceName + " with Service.Spec.IPFamily IPv6" + ns)
   349  		expectedPolicy := v1.IPFamilyPolicySingleStack
   350  		expectedFamilies := []v1.IPFamily{v1.IPv6Protocol}
   351  
   352  		service := createService(t.ServiceName, t.Namespace, t.Labels, nil, expectedFamilies)
   353  
   354  		jig.Labels = t.Labels
   355  		err := jig.CreateServicePods(ctx, 2)
   356  		framework.ExpectNoError(err)
   357  		svc, err := t.CreateService(service)
   358  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
   359  
   360  		validateNumOfServicePorts(svc, 2)
   361  
   362  		// check the spec has been set to IPv6 and cluster ip belongs to IPv6 family
   363  		validateServiceAndClusterIPFamily(svc, expectedFamilies, &expectedPolicy)
   364  
   365  		// ensure endpoints belong to same ipfamily as service
   366  		if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
   367  			endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{})
   368  			if err != nil {
   369  				return false, nil
   370  			}
   371  			validateEndpointsBelongToIPFamily(svc, endpoint, ipv6)
   372  			return true, nil
   373  		}); err != nil {
   374  			framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
   375  		}
   376  	})
   377  
	// Requesting [IPv4, IPv6] with RequireDualStack must yield a dual-stack
	// service whose cluster IPs preserve the requested family order.
	ginkgo.It("should create service with ipv4,v6 cluster ip", func(ctx context.Context) {
		serviceName := "ipv4ipv6clusterip"
		ns := f.Namespace.Name

		jig := e2eservice.NewTestJig(cs, ns, serviceName)

		t := NewServerTest(cs, ns, serviceName)
		defer func() {
			defer ginkgo.GinkgoRecover()
			if errs := t.Cleanup(); len(errs) != 0 {
				framework.Failf("errors in cleanup: %v", errs)
			}
		}()

		ginkgo.By("creating service " + ns + "/" + serviceName + " with Service.Spec.IPFamily IPv4, IPv6" + ns)

		expectedPolicy := v1.IPFamilyPolicyRequireDualStack
		expectedFamilies := []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol}

		service := createService(t.ServiceName, t.Namespace, t.Labels, &expectedPolicy, expectedFamilies)

		jig.Labels = t.Labels
		err := jig.CreateServicePods(ctx, 2)
		framework.ExpectNoError(err)
		svc, err := t.CreateService(service)
		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)

		validateNumOfServicePorts(svc, 2)

		// check the spec kept both families in the requested order (IPv4
		// primary) and that cluster ips of both families were allocated
		validateServiceAndClusterIPFamily(svc, expectedFamilies, &expectedPolicy)

		// ensure endpoints belong to same ipfamily as service
		if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
			endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{})
			if err != nil {
				return false, nil
			}
			validateEndpointsBelongToIPFamily(svc, endpoint, expectedFamilies[0] /* endpoint controller operates on primary ip */)
			return true, nil
		}); err != nil {
			framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
		}
	})
   422  
   423  	ginkgo.It("should create service with ipv6,v4 cluster ip", func(ctx context.Context) {
   424  		serviceName := "ipv6ipv4clusterip"
   425  		ns := f.Namespace.Name
   426  
   427  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   428  
   429  		t := NewServerTest(cs, ns, serviceName)
   430  		defer func() {
   431  			defer ginkgo.GinkgoRecover()
   432  			if errs := t.Cleanup(); len(errs) != 0 {
   433  				framework.Failf("errors in cleanup: %v", errs)
   434  			}
   435  		}()
   436  
   437  		ginkgo.By("creating service " + ns + "/" + serviceName + " with Service.Spec.IPFamily IPv4, IPv6" + ns)
   438  
   439  		expectedPolicy := v1.IPFamilyPolicyRequireDualStack
   440  		expectedFamilies := []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol}
   441  
   442  		service := createService(t.ServiceName, t.Namespace, t.Labels, &expectedPolicy, expectedFamilies)
   443  
   444  		jig.Labels = t.Labels
   445  		err := jig.CreateServicePods(ctx, 2)
   446  		framework.ExpectNoError(err)
   447  		svc, err := t.CreateService(service)
   448  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
   449  
   450  		validateNumOfServicePorts(svc, 2)
   451  
   452  		// check the spec has been set to IPv4 and cluster ip belong to IPv4 family
   453  		validateServiceAndClusterIPFamily(svc, expectedFamilies, &expectedPolicy)
   454  
   455  		// ensure endpoints belong to same ipfamily as service
   456  		if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
   457  			endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{})
   458  			if err != nil {
   459  				return false, nil
   460  			}
   461  			validateEndpointsBelongToIPFamily(svc, endpoint, expectedFamilies[0] /* endpoint controller operates on primary ip */)
   462  			return true, nil
   463  		}); err != nil {
   464  			framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
   465  		}
   466  	})
   467  	// TODO (khenidak add slice validation logic, since endpoint controller only operates
   468  	// on primary ClusterIP
   469  
   470  	// Service Granular Checks as in k8s.io/kubernetes/test/e2e/network/networking.go
   471  	// but using the secondary IP, so we run the same tests for each ClusterIP family
   472  	ginkgo.Describe("Granular Checks: Services Secondary IP Family [LinuxOnly]", func() {
   473  
   474  		ginkgo.It("should function for pod-Service: http", func(ctx context.Context) {
   475  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   476  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   477  			err := config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   478  			if err != nil {
   479  				framework.Failf("failed dialing endpoint, %v", err)
   480  			}
   481  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeHTTPPort))
   482  			err = config.DialFromTestContainer(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   483  			if err != nil {
   484  				framework.Failf("failed dialing endpoint, %v", err)
   485  			}
   486  		})
   487  
   488  		ginkgo.It("should function for pod-Service: udp", func(ctx context.Context) {
   489  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   490  			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   491  			err := config.DialFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   492  			if err != nil {
   493  				framework.Failf("failed dialing endpoint, %v", err)
   494  			}
   495  			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeUDPPort))
   496  			err = config.DialFromTestContainer(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   497  			if err != nil {
   498  				framework.Failf("failed dialing endpoint, %v", err)
   499  			}
   500  		})
   501  
   502  		f.It("should function for pod-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) {
   503  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.EnableSCTP)
   504  			ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort))
   505  			err := config.DialFromTestContainer(ctx, "sctp", config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
   506  			if err != nil {
   507  				framework.Failf("failed dialing endpoint, %v", err)
   508  			}
   509  
   510  			ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeSCTPPort))
   511  			err = config.DialFromTestContainer(ctx, "sctp", config.SecondaryNodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
   512  			if err != nil {
   513  				framework.Failf("failed dialing endpoint, %v", err)
   514  			}
   515  		})
   516  
   517  		ginkgo.It("should function for node-Service: http", func(ctx context.Context) {
   518  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork)
   519  			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   520  			err := config.DialFromNode(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   521  			if err != nil {
   522  				framework.Failf("failed dialing endpoint, %v", err)
   523  			}
   524  
   525  			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeHTTPPort))
   526  			err = config.DialFromNode(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   527  			if err != nil {
   528  				framework.Failf("failed dialing endpoint, %v", err)
   529  			}
   530  		})
   531  
   532  		ginkgo.It("should function for node-Service: udp", func(ctx context.Context) {
   533  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork)
   534  			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   535  			err := config.DialFromNode(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   536  			if err != nil {
   537  				framework.Failf("failed dialing endpoint, %v", err)
   538  			}
   539  
   540  			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeUDPPort))
   541  			err = config.DialFromNode(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   542  			if err != nil {
   543  				framework.Failf("failed dialing endpoint, %v", err)
   544  			}
   545  		})
   546  
   547  		ginkgo.It("should function for endpoint-Service: http", func(ctx context.Context) {
   548  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   549  			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   550  			err := config.DialFromEndpointContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   551  			if err != nil {
   552  				framework.Failf("failed dialing endpoint, %v", err)
   553  			}
   554  			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.SecondaryNodeIP, config.NodeHTTPPort))
   555  			err = config.DialFromEndpointContainer(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   556  			if err != nil {
   557  				framework.Failf("failed dialing endpoint, %v", err)
   558  			}
   559  		})
   560  
   561  		ginkgo.It("should function for endpoint-Service: udp", func(ctx context.Context) {
   562  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   563  			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   564  			err := config.DialFromEndpointContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   565  			if err != nil {
   566  				framework.Failf("failed dialing endpoint, %v", err)
   567  			}
   568  			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.SecondaryNodeIP, config.NodeUDPPort))
   569  			err = config.DialFromEndpointContainer(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   570  			if err != nil {
   571  				framework.Failf("failed dialing endpoint, %v", err)
   572  			}
   573  		})
   574  
   575  		ginkgo.It("should update endpoints: http", func(ctx context.Context) {
   576  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   577  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   578  			err := config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   579  			if err != nil {
   580  				framework.Failf("failed dialing endpoint, %v", err)
   581  			}
   582  			config.DeleteNetProxyPod(ctx)
   583  
   584  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   585  			err = config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
   586  			if err != nil {
   587  				framework.Failf("failed dialing endpoint, %v", err)
   588  			}
   589  		})
   590  
   591  		ginkgo.It("should update endpoints: udp", func(ctx context.Context) {
   592  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   593  			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   594  			err := config.DialFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   595  			if err != nil {
   596  				framework.Failf("failed dialing endpoint, %v", err)
   597  			}
   598  
   599  			config.DeleteNetProxyPod(ctx)
   600  
   601  			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   602  			err = config.DialFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
   603  			if err != nil {
   604  				framework.Failf("failed dialing endpoint, %v", err)
   605  			}
   606  		})
   607  
   608  		// [LinuxOnly]: Windows does not support session affinity.
   609  		ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func(ctx context.Context) {
   610  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   611  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterHTTPPort))
   612  
   613  			// Check if number of endpoints returned are exactly one.
   614  			eps, err := config.GetEndpointsFromTestContainer(ctx, "http", config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks)
   615  			if err != nil {
   616  				framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
   617  			}
   618  			if len(eps) == 0 {
   619  				framework.Failf("Unexpected no endpoints return")
   620  			}
   621  			if len(eps) > 1 {
   622  				framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
   623  			}
   624  		})
   625  
   626  		// [LinuxOnly]: Windows does not support session affinity.
   627  		ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func(ctx context.Context) {
   628  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   629  			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterUDPPort))
   630  
   631  			// Check if number of endpoints returned are exactly one.
   632  			eps, err := config.GetEndpointsFromTestContainer(ctx, "udp", config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks)
   633  			if err != nil {
   634  				framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
   635  			}
   636  			if len(eps) == 0 {
   637  				framework.Failf("Unexpected no endpoints return")
   638  			}
   639  			if len(eps) > 1 {
   640  				framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
   641  			}
   642  		})
   643  
   644  		ginkgo.It("should be able to handle large requests: http", func(ctx context.Context) {
   645  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   646  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   647  			message := strings.Repeat("42", 1000)
   648  			config.DialEchoFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message)
   649  		})
   650  
   651  		ginkgo.It("should be able to handle large requests: udp", func(ctx context.Context) {
   652  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack)
   653  			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   654  			message := "n" + strings.Repeat("o", 1999)
   655  			config.DialEchoFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message)
   656  		})
   657  
   658  		// if the endpoints pods use hostNetwork, several tests can't run in parallel
   659  		// because the pods will try to acquire the same port in the host.
   660  		// We run the test in serial, to avoid port conflicts.
   661  		ginkgo.It("should function for service endpoints using hostNetwork", func(ctx context.Context) {
   662  			config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork)
   663  
   664  			ginkgo.By("pod-Service(hostNetwork): http")
   665  
   666  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   667  			err := config.DialFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   668  			if err != nil {
   669  				framework.Failf("failed dialing endpoint, %v", err)
   670  			}
   671  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.SecondaryNodeIP, config.NodeHTTPPort))
   672  			err = config.DialFromTestContainer(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   673  			if err != nil {
   674  				framework.Failf("failed dialing endpoint, %v", err)
   675  			}
   676  
   677  			ginkgo.By("node-Service(hostNetwork): http")
   678  
   679  			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   680  			err = config.DialFromNode(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   681  			if err != nil {
   682  				framework.Failf("failed dialing endpoint, %v", err)
   683  			}
   684  
   685  			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeHTTPPort))
   686  			err = config.DialFromNode(ctx, "http", config.SecondaryNodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
   687  			if err != nil {
   688  				framework.Failf("failed dialing endpoint, %v", err)
   689  			}
   690  
   691  			ginkgo.By("node-Service(hostNetwork): udp")
   692  
   693  			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   694  
   695  			err = config.DialFromNode(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   696  			if err != nil {
   697  				time.Sleep(10 * time.Hour)
   698  				framework.Failf("failed dialing endpoint, %v", err)
   699  			}
   700  
   701  			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.SecondaryNodeIP, config.SecondaryNodeIP, config.NodeUDPPort))
   702  			err = config.DialFromNode(ctx, "udp", config.SecondaryNodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
   703  			if err != nil {
   704  				framework.Failf("failed dialing endpoint, %v", err)
   705  			}
   706  
   707  			ginkgo.By("handle large requests: http(hostNetwork)")
   708  
   709  			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.SecondaryClusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort))
   710  			message := strings.Repeat("42", 1000)
   711  			err = config.DialEchoFromTestContainer(ctx, "http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message)
   712  			if err != nil {
   713  				framework.Failf("failed dialing endpoint, %v", err)
   714  			}
   715  
   716  			ginkgo.By("handle large requests: udp(hostNetwork)")
   717  
   718  			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.SecondaryClusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort))
   719  			message = "n" + strings.Repeat("o", 1999)
   720  			err = config.DialEchoFromTestContainer(ctx, "udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message)
   721  			if err != nil {
   722  				framework.Failf("failed dialing endpoint, %v", err)
   723  			}
   724  		})
   725  	})
   726  })
   727  
   728  func validateNumOfServicePorts(svc *v1.Service, expectedNumOfPorts int) {
   729  	if len(svc.Spec.Ports) != expectedNumOfPorts {
   730  		framework.Failf("got unexpected len(Spec.Ports) for service: %v", svc)
   731  	}
   732  }
   733  
   734  func validateServiceAndClusterIPFamily(svc *v1.Service, expectedIPFamilies []v1.IPFamily, expectedPolicy *v1.IPFamilyPolicy) {
   735  	if len(svc.Spec.IPFamilies) != len(expectedIPFamilies) {
   736  		framework.Failf("service ip family nil for service %s/%s", svc.Namespace, svc.Name)
   737  	}
   738  
   739  	for idx, family := range expectedIPFamilies {
   740  		if svc.Spec.IPFamilies[idx] != family {
   741  			framework.Failf("service %s/%s expected family %v at index[%v] got %v", svc.Namespace, svc.Name, family, idx, svc.Spec.IPFamilies[idx])
   742  		}
   743  	}
   744  
   745  	// validate ip assigned is from the family
   746  	if len(svc.Spec.ClusterIPs) != len(svc.Spec.IPFamilies) {
   747  		framework.Failf("service %s/%s assigned ips [%+v] does not match families [%+v]", svc.Namespace, svc.Name, svc.Spec.ClusterIPs, svc.Spec.IPFamilies)
   748  	}
   749  
   750  	for idx, family := range svc.Spec.IPFamilies {
   751  		if (family == v1.IPv6Protocol) != netutils.IsIPv6String(svc.Spec.ClusterIPs[idx]) {
   752  			framework.Failf("service %s/%s assigned ips at [%v]:%v does not match family:%v", svc.Namespace, svc.Name, idx, svc.Spec.ClusterIPs[idx], family)
   753  		}
   754  	}
   755  	// validate policy
   756  	if expectedPolicy == nil && svc.Spec.IPFamilyPolicy != nil {
   757  		framework.Failf("service %s/%s expected nil for IPFamilyPolicy", svc.Namespace, svc.Name)
   758  	}
   759  	if expectedPolicy != nil && svc.Spec.IPFamilyPolicy == nil {
   760  		framework.Failf("service %s/%s expected value %v for IPFamilyPolicy", svc.Namespace, svc.Name, expectedPolicy)
   761  	}
   762  
   763  	if expectedPolicy != nil && *(svc.Spec.IPFamilyPolicy) != *(expectedPolicy) {
   764  		framework.Failf("service %s/%s expected value %v for IPFamilyPolicy", svc.Namespace, svc.Name, expectedPolicy)
   765  	}
   766  }
   767  
   768  func validateEndpointsBelongToIPFamily(svc *v1.Service, endpoint *v1.Endpoints, expectedIPFamily v1.IPFamily) {
   769  	if len(endpoint.Subsets) == 0 {
   770  		framework.Failf("Endpoint has no subsets, cannot determine service ip family matches endpoints ip family for service %s/%s", svc.Namespace, svc.Name)
   771  	}
   772  	for _, ss := range endpoint.Subsets {
   773  		for _, e := range ss.Addresses {
   774  			if (expectedIPFamily == v1.IPv6Protocol) != netutils.IsIPv6String(e.IP) {
   775  				framework.Failf("service endpoint %s doesn't belong to %s ip family", e.IP, expectedIPFamily)
   776  			}
   777  		}
   778  	}
   779  }
   780  
   781  func assertNetworkConnectivity(ctx context.Context, f *framework.Framework, serverPods v1.PodList, clientPods v1.PodList, containerName, port string) {
   782  	// curl from each client pod to all server pods to assert connectivity
   783  	duration := "10s"
   784  	pollInterval := "1s"
   785  	timeout := 10
   786  
   787  	var serverIPs []string
   788  	for _, pod := range serverPods.Items {
   789  		if pod.Status.PodIPs == nil || len(pod.Status.PodIPs) != 2 {
   790  			framework.Failf("PodIPs list not expected value, got %v", pod.Status.PodIPs)
   791  		}
   792  		if netutils.IsIPv4String(pod.Status.PodIPs[0].IP) == netutils.IsIPv4String(pod.Status.PodIPs[1].IP) {
   793  			framework.Failf("PodIPs should belong to different families, got %v", pod.Status.PodIPs)
   794  		}
   795  		serverIPs = append(serverIPs, pod.Status.PodIPs[0].IP, pod.Status.PodIPs[1].IP)
   796  	}
   797  
   798  	for _, clientPod := range clientPods.Items {
   799  		for _, ip := range serverIPs {
   800  			gomega.Consistently(ctx, func() error {
   801  				ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %s", clientPod.Name, ip, port))
   802  				cmd := checkNetworkConnectivity(ip, port, timeout)
   803  				_, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, clientPod.Name, containerName, cmd...)
   804  				return err
   805  			}, duration, pollInterval).ShouldNot(gomega.HaveOccurred())
   806  		}
   807  	}
   808  }
   809  
   810  func checkNetworkConnectivity(ip, port string, timeout int) []string {
   811  	curl := fmt.Sprintf("curl -g --connect-timeout %v http://%s", timeout, net.JoinHostPort(ip, port))
   812  	cmd := []string{"/bin/sh", "-c", curl}
   813  	return cmd
   814  }
   815  
   816  // createService returns a service spec with defined arguments
   817  func createService(name, ns string, labels map[string]string, ipFamilyPolicy *v1.IPFamilyPolicy, ipFamilies []v1.IPFamily) *v1.Service {
   818  	return &v1.Service{
   819  		ObjectMeta: metav1.ObjectMeta{
   820  			Name:      name,
   821  			Namespace: ns,
   822  		},
   823  		Spec: v1.ServiceSpec{
   824  			Selector:       labels,
   825  			Type:           v1.ServiceTypeNodePort,
   826  			IPFamilyPolicy: ipFamilyPolicy,
   827  			IPFamilies:     ipFamilies,
   828  			Ports: []v1.ServicePort{
   829  				{
   830  					Name:     "tcp-port",
   831  					Port:     53,
   832  					Protocol: v1.ProtocolTCP,
   833  				},
   834  				{
   835  					Name:     "udp-port",
   836  					Port:     53,
   837  					Protocol: v1.ProtocolUDP,
   838  				},
   839  			},
   840  		},
   841  	}
   842  }