k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/network/service.go (about)

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package network
    18  
    19  import (
    20  	"context"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/rand"
    25  	"net"
    26  	"net/http"
    27  	"sort"
    28  	"strconv"
    29  	"strings"
    30  	"time"
    31  
    32  	appsv1 "k8s.io/api/apps/v1"
    33  	v1 "k8s.io/api/core/v1"
    34  	discoveryv1 "k8s.io/api/discovery/v1"
    35  
    36  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    37  	"k8s.io/apimachinery/pkg/api/resource"
    38  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    39  	"k8s.io/apimachinery/pkg/labels"
    40  	"k8s.io/apimachinery/pkg/runtime/schema"
    41  	"k8s.io/apimachinery/pkg/types"
    42  	"k8s.io/apimachinery/pkg/util/intstr"
    43  	utilnet "k8s.io/apimachinery/pkg/util/net"
    44  	utilrand "k8s.io/apimachinery/pkg/util/rand"
    45  	"k8s.io/apimachinery/pkg/util/sets"
    46  	"k8s.io/apimachinery/pkg/util/wait"
    47  	watch "k8s.io/apimachinery/pkg/watch"
    48  	admissionapi "k8s.io/pod-security-admission/api"
    49  
    50  	clientset "k8s.io/client-go/kubernetes"
    51  	"k8s.io/client-go/tools/cache"
    52  	watchtools "k8s.io/client-go/tools/watch"
    53  	"k8s.io/client-go/util/retry"
    54  
    55  	cloudprovider "k8s.io/cloud-provider"
    56  	netutils "k8s.io/utils/net"
    57  	utilpointer "k8s.io/utils/pointer"
    58  
    59  	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    60  	"k8s.io/kubernetes/test/e2e/framework"
    61  	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
    62  	e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
    63  	e2eendpointslice "k8s.io/kubernetes/test/e2e/framework/endpointslice"
    64  	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
    65  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    66  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    67  	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    68  	e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"
    69  	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
    70  	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
    71  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    72  	"k8s.io/kubernetes/test/e2e/network/common"
    73  	testutils "k8s.io/kubernetes/test/utils"
    74  	imageutils "k8s.io/kubernetes/test/utils/image"
    75  
    76  	"github.com/onsi/ginkgo/v2"
    77  	"github.com/onsi/gomega"
    78  )
    79  
    80  const (
    81  	defaultServeHostnameServicePort = 80
    82  	defaultServeHostnameServiceName = "svc-hostname"
    83  
    84  	// AffinityTimeout is the maximum time that CheckAffinity is allowed to take; this
    85  	// needs to be more than long enough for AffinityConfirmCount HTTP requests to
    86  	// complete in a busy CI cluster, but shouldn't be too long since we will end up
    87  	// waiting the entire time in the tests where affinity is not expected.
    88  	AffinityTimeout = 2 * time.Minute
    89  
    90  	// AffinityConfirmCount is the number of needed continuous requests to confirm that
    91  	// affinity is enabled.
    92  	AffinityConfirmCount = 15
    93  
    94  	// SessionAffinityTimeout is the number of seconds to wait between requests for
    95  	// session affinity to timeout before trying a load-balancer request again
    96  	SessionAffinityTimeout = 125
    97  
    98  	// label define which is used to find kube-proxy and kube-apiserver pod
    99  	kubeProxyLabelName     = "kube-proxy"
   100  	clusterAddonLabelKey   = "k8s-app"
   101  	kubeAPIServerLabelName = "kube-apiserver"
   102  	clusterComponentKey    = "component"
   103  
   104  	svcReadyTimeout = 1 * time.Minute
   105  )
   106  
   107  var (
   108  	defaultServeHostnameService = v1.Service{
   109  		ObjectMeta: metav1.ObjectMeta{
   110  			Name: defaultServeHostnameServiceName,
   111  		},
   112  		Spec: v1.ServiceSpec{
   113  			Ports: []v1.ServicePort{{
   114  				Port:       int32(defaultServeHostnameServicePort),
   115  				TargetPort: intstr.FromInt32(9376),
   116  				Protocol:   v1.ProtocolTCP,
   117  			}},
   118  			Selector: map[string]string{
   119  				"name": defaultServeHostnameServiceName,
   120  			},
   121  		},
   122  	}
   123  )
   124  
   125  // portsByPodName is a map that maps pod name to container ports.
   126  type portsByPodName map[string][]int
   127  
   128  // portsByPodUID is a map that maps pod name to container ports.
   129  type portsByPodUID map[types.UID][]int
   130  
   131  // fullPortsByPodName is a map that maps pod name to container ports including their protocols.
   132  type fullPortsByPodName map[string][]v1.ContainerPort
   133  
   134  // fullPortsByPodUID is a map that maps pod name to container ports.
   135  type fullPortsByPodUID map[types.UID][]v1.ContainerPort
   136  
   137  // affinityCheckFromPod returns interval, timeout and function pinging the service and
   138  // returning pinged hosts for pinging the service from execPod.
   139  func affinityCheckFromPod(execPod *v1.Pod, serviceIP string, servicePort int) (time.Duration, time.Duration, func() []string) {
   140  	timeout := AffinityTimeout
   141  	// interval considering a maximum of 2 seconds per connection
   142  	interval := 2 * AffinityConfirmCount * time.Second
   143  
   144  	serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
   145  	curl := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort)
   146  	cmd := fmt.Sprintf("for i in $(seq 0 %d); do echo; %s ; done", AffinityConfirmCount, curl)
   147  	getHosts := func() []string {
   148  		stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
   149  		if err != nil {
   150  			framework.Logf("Failed to get response from %s. Retry until timeout", serviceIPPort)
   151  			return nil
   152  		}
   153  		return strings.Split(stdout, "\n")
   154  	}
   155  
   156  	return interval, timeout, getHosts
   157  }
   158  
   159  // affinityCheckFromTest returns interval, timeout and function pinging the service and
   160  // returning pinged hosts for pinging the service from the test itself.
   161  func affinityCheckFromTest(ctx context.Context, cs clientset.Interface, serviceIP string, servicePort int) (time.Duration, time.Duration, func() []string) {
   162  	interval := 2 * time.Second
   163  	timeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, cs)
   164  
   165  	params := &e2enetwork.HTTPPokeParams{Timeout: 2 * time.Second}
   166  	getHosts := func() []string {
   167  		var hosts []string
   168  		for i := 0; i < AffinityConfirmCount; i++ {
   169  			result := e2enetwork.PokeHTTP(serviceIP, servicePort, "", params)
   170  			if result.Status == e2enetwork.HTTPSuccess {
   171  				hosts = append(hosts, string(result.Body))
   172  			}
   173  		}
   174  		return hosts
   175  	}
   176  
   177  	return interval, timeout, getHosts
   178  }
   179  
   180  // CheckAffinity function tests whether the service affinity works as expected.
   181  // If affinity is expected, the test will return true once affinityConfirmCount
   182  // number of same response observed in a row. If affinity is not expected, the
   183  // test will keep observe until different responses observed. The function will
   184  // return false only in case of unexpected errors.
   185  func checkAffinity(ctx context.Context, cs clientset.Interface, execPod *v1.Pod, serviceIP string, servicePort int, shouldHold bool) bool {
   186  	var interval, timeout time.Duration
   187  	var getHosts func() []string
   188  	if execPod != nil {
   189  		interval, timeout, getHosts = affinityCheckFromPod(execPod, serviceIP, servicePort)
   190  	} else {
   191  		interval, timeout, getHosts = affinityCheckFromTest(ctx, cs, serviceIP, servicePort)
   192  	}
   193  
   194  	var tracker affinityTracker
   195  	if pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
   196  		hosts := getHosts()
   197  		for _, host := range hosts {
   198  			if len(host) > 0 {
   199  				tracker.recordHost(strings.TrimSpace(host))
   200  			}
   201  		}
   202  
   203  		trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount)
   204  		if !trackerFulfilled {
   205  			return false, nil
   206  		}
   207  
   208  		if !shouldHold && !affinityHolds {
   209  			return true, nil
   210  		}
   211  		if shouldHold && affinityHolds {
   212  			return true, nil
   213  		}
   214  		return false, nil
   215  	}); pollErr != nil {
   216  		trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
   217  		if !wait.Interrupted(pollErr) {
   218  			checkAffinityFailed(tracker, pollErr.Error())
   219  			return false
   220  		}
   221  		if !trackerFulfilled {
   222  			checkAffinityFailed(tracker, fmt.Sprintf("Connection timed out or not enough responses."))
   223  		}
   224  		if shouldHold {
   225  			checkAffinityFailed(tracker, "Affinity should hold but didn't.")
   226  		} else {
   227  			checkAffinityFailed(tracker, "Affinity shouldn't hold but did.")
   228  		}
   229  		return true
   230  	}
   231  	return true
   232  }
   233  
   234  // affinityTracker tracks the destination of a request for the affinity tests.
   235  type affinityTracker struct {
   236  	hostTrace []string
   237  }
   238  
   239  // Record the response going to a given host.
   240  func (at *affinityTracker) recordHost(host string) {
   241  	at.hostTrace = append(at.hostTrace, host)
   242  	framework.Logf("Received response from host: %s", host)
   243  }
   244  
   245  // Check that we got a constant count requests going to the same host.
   246  func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds bool) {
   247  	fulfilled = (len(at.hostTrace) >= count)
   248  	if len(at.hostTrace) == 0 {
   249  		return fulfilled, true
   250  	}
   251  	last := at.hostTrace[0:]
   252  	if len(at.hostTrace)-count >= 0 {
   253  		last = at.hostTrace[len(at.hostTrace)-count:]
   254  	}
   255  	host := at.hostTrace[len(at.hostTrace)-1]
   256  	for _, h := range last {
   257  		if h != host {
   258  			return fulfilled, false
   259  		}
   260  	}
   261  	return fulfilled, true
   262  }
   263  
   264  func checkAffinityFailed(tracker affinityTracker, err string) {
   265  	framework.Logf("%v", tracker.hostTrace)
   266  	framework.Failf(err)
   267  }
   268  
   269  // StartServeHostnameService creates a replication controller that serves its
   270  // hostname and a service on top of it.
   271  func StartServeHostnameService(ctx context.Context, c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
   272  	podNames := make([]string, replicas)
   273  	name := svc.ObjectMeta.Name
   274  	ginkgo.By("creating service " + name + " in namespace " + ns)
   275  	_, err := c.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{})
   276  	if err != nil {
   277  		return podNames, "", err
   278  	}
   279  
   280  	var createdPods []*v1.Pod
   281  	maxContainerFailures := 0
   282  	config := testutils.RCConfig{
   283  		Client:               c,
   284  		Image:                imageutils.GetE2EImage(imageutils.Agnhost),
   285  		Command:              []string{"/agnhost", "serve-hostname"},
   286  		Name:                 name,
   287  		Namespace:            ns,
   288  		PollInterval:         3 * time.Second,
   289  		Timeout:              framework.PodReadyBeforeTimeout,
   290  		Replicas:             replicas,
   291  		CreatedPods:          &createdPods,
   292  		MaxContainerFailures: &maxContainerFailures,
   293  	}
   294  	err = e2erc.RunRC(ctx, config)
   295  	if err != nil {
   296  		return podNames, "", err
   297  	}
   298  
   299  	if len(createdPods) != replicas {
   300  		return podNames, "", fmt.Errorf("incorrect number of running pods: %v", len(createdPods))
   301  	}
   302  
   303  	for i := range createdPods {
   304  		podNames[i] = createdPods[i].ObjectMeta.Name
   305  	}
   306  	sort.StringSlice(podNames).Sort()
   307  
   308  	service, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
   309  	if err != nil {
   310  		return podNames, "", err
   311  	}
   312  	if service.Spec.ClusterIP == "" {
   313  		return podNames, "", fmt.Errorf("service IP is blank for %v", name)
   314  	}
   315  	serviceIP := service.Spec.ClusterIP
   316  	return podNames, serviceIP, nil
   317  }
   318  
   319  // StopServeHostnameService stops the given service.
   320  func StopServeHostnameService(ctx context.Context, clientset clientset.Interface, ns, name string) error {
   321  	if err := e2erc.DeleteRCAndWaitForGC(ctx, clientset, ns, name); err != nil {
   322  		return err
   323  	}
   324  	if err := clientset.CoreV1().Services(ns).Delete(ctx, name, metav1.DeleteOptions{}); err != nil {
   325  		return err
   326  	}
   327  	return nil
   328  }
   329  
   330  // verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the
   331  // host exec pod of host network type and from the exec pod of container network type.
   332  // Each pod in the service is expected to echo its name. These names are compared with the
   333  // given expectedPods list after a sort | uniq.
   334  func verifyServeHostnameServiceUp(ctx context.Context, c clientset.Interface, ns string, expectedPods []string, serviceIP string, servicePort int) error {
   335  	// to verify from host network
   336  	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-up-host-exec-pod")
   337  
   338  	// to verify from container's network
   339  	execPod := e2epod.CreateExecPodOrFail(ctx, c, ns, "verify-service-up-exec-pod-", nil)
   340  	defer func() {
   341  		e2epod.DeletePodOrFail(ctx, c, ns, hostExecPod.Name)
   342  		e2epod.DeletePodOrFail(ctx, c, ns, execPod.Name)
   343  	}()
   344  
   345  	// verify service from pod
   346  	cmdFunc := func(podName string) string {
   347  		wgetCmd := "wget -q -O -"
   348  		// Command 'wget' in Windows image may not support option 'T'
   349  		if !framework.NodeOSDistroIs("windows") {
   350  			wgetCmd += " -T 1"
   351  		}
   352  		serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
   353  		cmd := fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
   354  			50*len(expectedPods), wgetCmd, serviceIPPort)
   355  		framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, podName)
   356  		// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
   357  		output, err := e2eoutput.RunHostCmd(ns, podName, cmd)
   358  		if err != nil {
   359  			framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, podName, err, output)
   360  		}
   361  		return output
   362  	}
   363  
   364  	expectedEndpoints := sets.NewString(expectedPods...)
   365  	ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
   366  	for _, podName := range []string{hostExecPod.Name, execPod.Name} {
   367  		passed := false
   368  		gotEndpoints := sets.NewString()
   369  
   370  		// Retry cmdFunc for a while
   371  		for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
   372  			for _, endpoint := range strings.Split(cmdFunc(podName), "\n") {
   373  				trimmedEp := strings.TrimSpace(endpoint)
   374  				if trimmedEp != "" {
   375  					gotEndpoints.Insert(trimmedEp)
   376  				}
   377  			}
   378  			// TODO: simply checking that the retrieved endpoints is a superset
   379  			// of the expected allows us to ignore intermitten network flakes that
   380  			// result in output like "wget timed out", but these should be rare
   381  			// and we need a better way to track how often it occurs.
   382  			if gotEndpoints.IsSuperset(expectedEndpoints) {
   383  				if !gotEndpoints.Equal(expectedEndpoints) {
   384  					framework.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints))
   385  				}
   386  				passed = true
   387  				break
   388  			}
   389  			framework.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints))
   390  		}
   391  		if !passed {
   392  			// Sort the lists so they're easier to visually diff.
   393  			exp := expectedEndpoints.List()
   394  			got := gotEndpoints.List()
   395  			sort.StringSlice(exp).Sort()
   396  			sort.StringSlice(got).Sort()
   397  			return fmt.Errorf("service verification failed for: %s\nexpected %v\nreceived %v", serviceIP, exp, got)
   398  		}
   399  	}
   400  	return nil
   401  }
   402  
   403  // verifyServeHostnameServiceDown verifies that the given service isn't served.
   404  func verifyServeHostnameServiceDown(ctx context.Context, c clientset.Interface, ns string, serviceIP string, servicePort int) error {
   405  	// verify from host network
   406  	hostExecPod := launchHostExecPod(ctx, c, ns, "verify-service-down-host-exec-pod")
   407  	defer func() {
   408  		e2epod.DeletePodOrFail(ctx, c, ns, hostExecPod.Name)
   409  	}()
   410  
   411  	ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
   412  	// The current versions of curl included in CentOS and RHEL distros
   413  	// misinterpret square brackets around IPv6 as globbing, so use the -g
   414  	// argument to disable globbing to handle the IPv6 case.
   415  	command := fmt.Sprintf(
   416  		"curl -g -s --connect-timeout 2 http://%s && echo service-down-failed", ipPort)
   417  
   418  	for start := time.Now(); time.Since(start) < e2eservice.KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
   419  		output, err := e2eoutput.RunHostCmd(ns, hostExecPod.Name, command)
   420  		if err != nil {
   421  			framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", command, ns, hostExecPod.Name, err, output)
   422  		}
   423  		if !strings.Contains(output, "service-down-failed") {
   424  			return nil
   425  		}
   426  		framework.Logf("service still alive - still waiting")
   427  	}
   428  
   429  	return fmt.Errorf("waiting for service to be down timed out")
   430  }
   431  
   432  // testNotReachableHTTP tests that a HTTP request doesn't connect to the given host and port.
   433  func testNotReachableHTTP(ctx context.Context, host string, port int, timeout time.Duration) {
   434  	pollfn := func(ctx context.Context) (bool, error) {
   435  		result := e2enetwork.PokeHTTP(host, port, "/", nil)
   436  		if result.Code == 0 {
   437  			return true, nil
   438  		}
   439  		return false, nil // caller can retry
   440  	}
   441  
   442  	if err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, pollfn); err != nil {
   443  		framework.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
   444  	}
   445  }
   446  
   447  // UDPPokeParams is a struct for UDP poke parameters.
   448  type UDPPokeParams struct {
   449  	Timeout  time.Duration
   450  	Response string
   451  }
   452  
   453  // UDPPokeResult is a struct for UDP poke result.
   454  type UDPPokeResult struct {
   455  	Status   UDPPokeStatus
   456  	Error    error  // if there was any error
   457  	Response []byte // if code != 0
   458  }
   459  
   460  // UDPPokeStatus is string for representing UDP poke status.
   461  type UDPPokeStatus string
   462  
   463  const (
   464  	// UDPSuccess is UDP poke status which is success.
   465  	UDPSuccess UDPPokeStatus = "Success"
   466  	// UDPError is UDP poke status which is error.
   467  	UDPError UDPPokeStatus = "UnknownError"
   468  	// UDPTimeout is UDP poke status which is timeout.
   469  	UDPTimeout UDPPokeStatus = "TimedOut"
   470  	// UDPRefused is UDP poke status which is connection refused.
   471  	UDPRefused UDPPokeStatus = "ConnectionRefused"
   472  	// UDPBadResponse is UDP poke status which is bad response.
   473  	UDPBadResponse UDPPokeStatus = "BadResponse"
   474  	// Any time we add new errors, we should audit all callers of this.
   475  )
   476  
   477  // PokeUDP tries to connect to a host on a port and send the given request. Callers
   478  // can specify additional success parameters, if desired.
   479  //
   480  // The result status will be characterized as precisely as possible, given the
   481  // known users of this.
   482  //
   483  // The result error will be populated for any status other than Success.
   484  //
   485  // The result response will be populated if the UDP transaction was completed, even
   486  // if the other test params make this a failure).
   487  func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPokeResult {
   488  	hostPort := net.JoinHostPort(host, strconv.Itoa(port))
   489  	url := fmt.Sprintf("udp://%s", hostPort)
   490  
   491  	ret := UDPPokeResult{}
   492  
   493  	// Sanity check inputs, because it has happened.  These are the only things
   494  	// that should hard fail the test - they are basically ASSERT()s.
   495  	if host == "" {
   496  		framework.Failf("Got empty host for UDP poke (%s)", url)
   497  		return ret
   498  	}
   499  	if port == 0 {
   500  		framework.Failf("Got port==0 for UDP poke (%s)", url)
   501  		return ret
   502  	}
   503  
   504  	// Set default params.
   505  	if params == nil {
   506  		params = &UDPPokeParams{}
   507  	}
   508  
   509  	framework.Logf("Poking %v", url)
   510  
   511  	con, err := net.Dial("udp", hostPort)
   512  	if err != nil {
   513  		ret.Status = UDPError
   514  		ret.Error = err
   515  		framework.Logf("Poke(%q): %v", url, err)
   516  		return ret
   517  	}
   518  
   519  	_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
   520  	if err != nil {
   521  		ret.Error = err
   522  		var neterr net.Error
   523  		if errors.As(err, &neterr) && neterr.Timeout() {
   524  			ret.Status = UDPTimeout
   525  		} else if strings.Contains(err.Error(), "connection refused") {
   526  			ret.Status = UDPRefused
   527  		} else {
   528  			ret.Status = UDPError
   529  		}
   530  		framework.Logf("Poke(%q): %v", url, err)
   531  		return ret
   532  	}
   533  
   534  	if params.Timeout != 0 {
   535  		err = con.SetDeadline(time.Now().Add(params.Timeout))
   536  		if err != nil {
   537  			ret.Status = UDPError
   538  			ret.Error = err
   539  			framework.Logf("Poke(%q): %v", url, err)
   540  			return ret
   541  		}
   542  	}
   543  
   544  	bufsize := len(params.Response) + 1
   545  	if bufsize == 0 {
   546  		bufsize = 4096
   547  	}
   548  	var buf = make([]byte, bufsize)
   549  	n, err := con.Read(buf)
   550  	if err != nil {
   551  		ret.Error = err
   552  		var neterr net.Error
   553  		if errors.As(err, &neterr) && neterr.Timeout() {
   554  			ret.Status = UDPTimeout
   555  		} else if strings.Contains(err.Error(), "connection refused") {
   556  			ret.Status = UDPRefused
   557  		} else {
   558  			ret.Status = UDPError
   559  		}
   560  		framework.Logf("Poke(%q): %v", url, err)
   561  		return ret
   562  	}
   563  	ret.Response = buf[0:n]
   564  
   565  	if params.Response != "" && string(ret.Response) != params.Response {
   566  		ret.Status = UDPBadResponse
   567  		ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response))
   568  		framework.Logf("Poke(%q): %v", url, ret.Error)
   569  		return ret
   570  	}
   571  
   572  	ret.Status = UDPSuccess
   573  	framework.Logf("Poke(%q): success", url)
   574  	return ret
   575  }
   576  
   577  // testReachableUDP tests that the given host serves UDP on the given port.
   578  func testReachableUDP(ctx context.Context, host string, port int, timeout time.Duration) {
   579  	pollfn := func(ctx context.Context) (bool, error) {
   580  		result := PokeUDP(host, port, "echo hello", &UDPPokeParams{
   581  			Timeout:  3 * time.Second,
   582  			Response: "hello",
   583  		})
   584  		if result.Status == UDPSuccess {
   585  			return true, nil
   586  		}
   587  		return false, nil // caller can retry
   588  	}
   589  
   590  	if err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, pollfn); err != nil {
   591  		framework.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
   592  	}
   593  }
   594  
   595  // testNotReachableUDP tests that the given host doesn't serve UDP on the given port.
   596  func testNotReachableUDP(ctx context.Context, host string, port int, timeout time.Duration) {
   597  	pollfn := func(ctx context.Context) (bool, error) {
   598  		result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second})
   599  		if result.Status != UDPSuccess && result.Status != UDPError {
   600  			return true, nil
   601  		}
   602  		return false, nil // caller can retry
   603  	}
   604  	if err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, pollfn); err != nil {
   605  		framework.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
   606  	}
   607  }
   608  
   609  // TestHTTPHealthCheckNodePort tests a HTTP connection by the given request to the given host and port.
   610  func TestHTTPHealthCheckNodePort(ctx context.Context, host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error {
   611  	count := 0
   612  	condition := func(ctx context.Context) (bool, error) {
   613  		success, _ := testHTTPHealthCheckNodePort(host, port, request)
   614  		if success && expectSucceed ||
   615  			!success && !expectSucceed {
   616  			count++
   617  		}
   618  		if count >= threshold {
   619  			return true, nil
   620  		}
   621  		return false, nil
   622  	}
   623  
   624  	if err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, condition); err != nil {
   625  		return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v%v, got %d", threshold, expectSucceed, host, port, count)
   626  	}
   627  	return nil
   628  }
   629  
   630  func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, error) {
   631  	ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
   632  	url := fmt.Sprintf("http://%s%s", ipPort, request)
   633  	if ip == "" || port == 0 {
   634  		framework.Failf("Got empty IP for reachability check (%s)", url)
   635  		return false, fmt.Errorf("invalid input ip or port")
   636  	}
   637  	framework.Logf("Testing HTTP health check on %v", url)
   638  	resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second)
   639  	if err != nil {
   640  		framework.Logf("Got error testing for reachability of %s: %v", url, err)
   641  		return false, err
   642  	}
   643  	defer func() { _ = resp.Body.Close() }()
   644  	if err != nil {
   645  		framework.Logf("Got error reading response from %s: %v", url, err)
   646  		return false, err
   647  	}
   648  	// HealthCheck responder returns 503 for no local endpoints
   649  	if resp.StatusCode == 503 {
   650  		return false, nil
   651  	}
   652  	// HealthCheck responder returns 200 for non-zero local endpoints
   653  	if resp.StatusCode == 200 {
   654  		return true, nil
   655  	}
   656  	return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url)
   657  }
   658  
   659  func testHTTPHealthCheckNodePortFromTestContainer(ctx context.Context, config *e2enetwork.NetworkingTestConfig, host string, port int, timeout time.Duration, expectSucceed bool, threshold int) error {
   660  	count := 0
   661  	pollFn := func(ctx context.Context) (bool, error) {
   662  		statusCode, err := config.GetHTTPCodeFromTestContainer(ctx,
   663  			"/healthz",
   664  			host,
   665  			port)
   666  		if err != nil {
   667  			framework.Logf("Got error reading status code from http://%s:%d/healthz via test container: %v", host, port, err)
   668  			return false, nil
   669  		}
   670  		framework.Logf("Got status code from http://%s:%d/healthz via test container: %d", host, port, statusCode)
   671  		success := statusCode == 200
   672  		if (success && expectSucceed) ||
   673  			(!success && !expectSucceed) {
   674  			count++
   675  		}
   676  		return count >= threshold, nil
   677  	}
   678  	err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, pollFn)
   679  	if err != nil {
   680  		return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v:%v/healthz, got %d", threshold, expectSucceed, host, port, count)
   681  	}
   682  	return nil
   683  }
   684  
   685  // Does an HTTP GET, but does not reuse TCP connections
   686  // This masks problems where the iptables rule has changed, but we don't see it
   687  func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) {
   688  	tr := utilnet.SetTransportDefaults(&http.Transport{
   689  		DisableKeepAlives: true,
   690  	})
   691  	client := &http.Client{
   692  		Transport: tr,
   693  		Timeout:   timeout,
   694  	}
   695  	return client.Get(url)
   696  }
   697  
   698  func getServeHostnameService(name string) *v1.Service {
   699  	svc := defaultServeHostnameService.DeepCopy()
   700  	svc.ObjectMeta.Name = name
   701  	svc.Spec.Selector["name"] = name
   702  	return svc
   703  }
   704  
   705  // waitForAPIServerUp waits for the kube-apiserver to be up.
   706  func waitForAPIServerUp(ctx context.Context, c clientset.Interface) error {
   707  	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
   708  		body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(ctx).Raw()
   709  		if err == nil && string(body) == "ok" {
   710  			return nil
   711  		}
   712  	}
   713  	return fmt.Errorf("waiting for apiserver timed out")
   714  }
   715  
   716  // getEndpointNodesWithInternalIP returns a map of nodenames:internal-ip on which the
   717  // endpoints of the Service are running.
   718  func getEndpointNodesWithInternalIP(ctx context.Context, jig *e2eservice.TestJig) (map[string]string, error) {
   719  	nodesWithIPs, err := jig.GetEndpointNodesWithIP(ctx, v1.NodeInternalIP)
   720  	if err != nil {
   721  		return nil, err
   722  	}
   723  	endpointsNodeMap := make(map[string]string, len(nodesWithIPs))
   724  	for nodeName, internalIPs := range nodesWithIPs {
   725  		if len(internalIPs) < 1 {
   726  			return nil, fmt.Errorf("no internal ip found for node %s", nodeName)
   727  		}
   728  		endpointsNodeMap[nodeName] = internalIPs[0]
   729  	}
   730  	return endpointsNodeMap, nil
   731  }
   732  
   733  var _ = common.SIGDescribe("Services", func() {
   734  	f := framework.NewDefaultFramework("services")
   735  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   736  
   737  	var cs clientset.Interface
   738  
   739  	ginkgo.BeforeEach(func() {
   740  		cs = f.ClientSet
   741  	})
   742  
   743  	// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
   744  
   745  	/*
   746  		Release: v1.9
   747  		Testname: Kubernetes Service
   748  		Description: By default when a kubernetes cluster is running there MUST be a 'kubernetes' service running in the cluster.
   749  	*/
   750  	framework.ConformanceIt("should provide secure master service", func(ctx context.Context) {
   751  		_, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
   752  		framework.ExpectNoError(err, "failed to fetch the service object for the service named kubernetes")
   753  	})
   754  
   755  	/*
   756  		Release: v1.9
   757  		Testname: Service, endpoints
   758  		Description: Create a service with a endpoint without any Pods, the service MUST run and show empty endpoints. Add a pod to the service and the service MUST validate to show all the endpoints for the ports exposed by the Pod. Add another Pod then the list of all Ports exposed by both the Pods MUST be valid and have corresponding service endpoint. Once the second Pod is deleted then set of endpoint MUST be validated to show only ports from the first container that are exposed. Once both pods are deleted the endpoints from the service MUST be empty.
   759  	*/
   760  	framework.ConformanceIt("should serve a basic endpoint from pods", func(ctx context.Context) {
   761  		serviceName := "endpoint-test2"
   762  		ns := f.Namespace.Name
   763  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   764  
   765  		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
   766  		ginkgo.DeferCleanup(func(ctx context.Context) {
   767  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
   768  			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
   769  		})
   770  		svc, err := jig.CreateTCPServiceWithPort(ctx, nil, 80)
   771  		framework.ExpectNoError(err)
   772  
   773  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{})
   774  
   775  		names := map[string]bool{}
   776  		ginkgo.DeferCleanup(func(ctx context.Context) {
   777  			for name := range names {
   778  				err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
   779  				framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
   780  			}
   781  		})
   782  
   783  		name1 := "pod1"
   784  		name2 := "pod2"
   785  
   786  		createPodOrFail(ctx, f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 80}}, "netexec", "--http-port", "80")
   787  		names[name1] = true
   788  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name1: {80}})
   789  
   790  		ginkgo.By("Checking if the Service forwards traffic to pod1")
   791  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
   792  		err = jig.CheckServiceReachability(ctx, svc, execPod)
   793  		framework.ExpectNoError(err)
   794  
   795  		createPodOrFail(ctx, f, ns, name2, jig.Labels, []v1.ContainerPort{{ContainerPort: 80}}, "netexec", "--http-port", "80")
   796  		names[name2] = true
   797  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name1: {80}, name2: {80}})
   798  
   799  		ginkgo.By("Checking if the Service forwards traffic to pod1 and pod2")
   800  		err = jig.CheckServiceReachability(ctx, svc, execPod)
   801  		framework.ExpectNoError(err)
   802  
   803  		e2epod.DeletePodOrFail(ctx, cs, ns, name1)
   804  		delete(names, name1)
   805  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name2: {80}})
   806  
   807  		ginkgo.By("Checking if the Service forwards traffic to pod2")
   808  		err = jig.CheckServiceReachability(ctx, svc, execPod)
   809  		framework.ExpectNoError(err)
   810  
   811  		e2epod.DeletePodOrFail(ctx, cs, ns, name2)
   812  		delete(names, name2)
   813  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{})
   814  	})
   815  
   816  	/*
   817  		Release: v1.9
   818  		Testname: Service, endpoints with multiple ports
   819  		Description: Create a service with two ports but no Pods are added to the service yet.  The service MUST run and show empty set of endpoints. Add a Pod to the first port, service MUST list one endpoint for the Pod on that port. Add another Pod to the second port, service MUST list both the endpoints. Delete the first Pod and the service MUST list only the endpoint to the second Pod. Delete the second Pod and the service must now have empty set of endpoints.
   820  	*/
   821  	framework.ConformanceIt("should serve multiport endpoints from pods", func(ctx context.Context) {
   822  		// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
   823  		serviceName := "multi-endpoint-test"
   824  		ns := f.Namespace.Name
   825  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   826  
   827  		ginkgo.DeferCleanup(func(ctx context.Context) {
   828  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
   829  			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
   830  		})
   831  
   832  		svc1port := "svc1"
   833  		svc2port := "svc2"
   834  
   835  		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
   836  		svc, err := jig.CreateTCPService(ctx, func(service *v1.Service) {
   837  			service.Spec.Ports = []v1.ServicePort{
   838  				{
   839  					Name:       "portname1",
   840  					Port:       80,
   841  					TargetPort: intstr.FromString(svc1port),
   842  				},
   843  				{
   844  					Name:       "portname2",
   845  					Port:       81,
   846  					TargetPort: intstr.FromString(svc2port),
   847  				},
   848  			}
   849  		})
   850  		framework.ExpectNoError(err)
   851  
   852  		port1 := 100
   853  		port2 := 101
   854  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{})
   855  
   856  		names := map[string]bool{}
   857  		ginkgo.DeferCleanup(func(ctx context.Context) {
   858  			for name := range names {
   859  				err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
   860  				framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
   861  			}
   862  		})
   863  
   864  		containerPorts1 := []v1.ContainerPort{
   865  			{
   866  				Name:          svc1port,
   867  				ContainerPort: int32(port1),
   868  			},
   869  		}
   870  		containerPorts2 := []v1.ContainerPort{
   871  			{
   872  				Name:          svc2port,
   873  				ContainerPort: int32(port2),
   874  			},
   875  		}
   876  
   877  		podname1 := "pod1"
   878  		podname2 := "pod2"
   879  
   880  		createPodOrFail(ctx, f, ns, podname1, jig.Labels, containerPorts1, "netexec", "--http-port", strconv.Itoa(port1))
   881  		names[podname1] = true
   882  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}})
   883  
   884  		createPodOrFail(ctx, f, ns, podname2, jig.Labels, containerPorts2, "netexec", "--http-port", strconv.Itoa(port2))
   885  		names[podname2] = true
   886  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}, podname2: {port2}})
   887  
   888  		ginkgo.By("Checking if the Service forwards traffic to pods")
   889  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
   890  		err = jig.CheckServiceReachability(ctx, svc, execPod)
   891  		framework.ExpectNoError(err)
   892  
   893  		e2epod.DeletePodOrFail(ctx, cs, ns, podname1)
   894  		delete(names, podname1)
   895  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname2: {port2}})
   896  
   897  		e2epod.DeletePodOrFail(ctx, cs, ns, podname2)
   898  		delete(names, podname2)
   899  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{})
   900  	})
   901  
   902  	ginkgo.It("should be updated after adding or deleting ports ", func(ctx context.Context) {
   903  		serviceName := "edit-port-test"
   904  		ns := f.Namespace.Name
   905  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   906  
   907  		svc1port := "svc1"
   908  		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
   909  		svc, err := jig.CreateTCPService(ctx, func(service *v1.Service) {
   910  			service.Spec.Ports = []v1.ServicePort{
   911  				{
   912  					Name:       "portname1",
   913  					Port:       80,
   914  					TargetPort: intstr.FromString(svc1port),
   915  				},
   916  			}
   917  		})
   918  		framework.ExpectNoError(err)
   919  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{})
   920  
   921  		podname1 := "pod1"
   922  		port1 := 100
   923  		containerPorts1 := []v1.ContainerPort{
   924  			{
   925  				Name:          svc1port,
   926  				ContainerPort: int32(port1),
   927  			},
   928  		}
   929  		createPodOrFail(ctx, f, ns, podname1, jig.Labels, containerPorts1, "netexec", "--http-port", strconv.Itoa(port1))
   930  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}})
   931  
   932  		ginkgo.By("Checking if the Service " + serviceName + " forwards traffic to " + podname1)
   933  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
   934  		err = jig.CheckServiceReachability(ctx, svc, execPod)
   935  		framework.ExpectNoError(err)
   936  
   937  		ginkgo.By("Adding a new port to service " + serviceName)
   938  		svc2port := "svc2"
   939  		svc, err = jig.UpdateService(ctx, func(s *v1.Service) {
   940  			s.Spec.Ports = []v1.ServicePort{
   941  				{
   942  					Name:       "portname1",
   943  					Port:       80,
   944  					TargetPort: intstr.FromString(svc1port),
   945  				},
   946  				{
   947  					Name:       "portname2",
   948  					Port:       81,
   949  					TargetPort: intstr.FromString(svc2port),
   950  				},
   951  			}
   952  		})
   953  		framework.ExpectNoError(err)
   954  
   955  		ginkgo.By("Adding a new endpoint to the new port ")
   956  		podname2 := "pod2"
   957  		port2 := 101
   958  		containerPorts2 := []v1.ContainerPort{
   959  			{
   960  				Name:          svc2port,
   961  				ContainerPort: int32(port2),
   962  			},
   963  		}
   964  		createPodOrFail(ctx, f, ns, podname2, jig.Labels, containerPorts2, "netexec", "--http-port", strconv.Itoa(port2))
   965  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}, podname2: {port2}})
   966  
   967  		ginkgo.By("Checking if the Service forwards traffic to " + podname1 + " and " + podname2)
   968  		err = jig.CheckServiceReachability(ctx, svc, execPod)
   969  		framework.ExpectNoError(err)
   970  
   971  		ginkgo.By("Deleting a port from service " + serviceName)
   972  		svc, err = jig.UpdateService(ctx, func(s *v1.Service) {
   973  			s.Spec.Ports = []v1.ServicePort{
   974  				{
   975  					Name:       "portname1",
   976  					Port:       80,
   977  					TargetPort: intstr.FromString(svc1port),
   978  				},
   979  			}
   980  		})
   981  		framework.ExpectNoError(err)
   982  
   983  		ginkgo.By("Checking if the Service forwards traffic to " + podname1 + " and not forwards to " + podname2)
   984  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podname1: {port1}})
   985  		err = jig.CheckServiceReachability(ctx, svc, execPod)
   986  		framework.ExpectNoError(err)
   987  	})
   988  
   989  	ginkgo.It("should preserve source pod IP for traffic thru service cluster IP [LinuxOnly]", func(ctx context.Context) {
   990  		// this test is creating a pod with HostNetwork=true, which is not supported on Windows.
   991  		e2eskipper.SkipIfNodeOSDistroIs("windows")
   992  
   993  		serviceName := "sourceip-test"
   994  		ns := f.Namespace.Name
   995  
   996  		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
   997  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
   998  		jig.ExternalIPs = false
   999  		servicePort := 8080
  1000  		tcpService, err := jig.CreateTCPServiceWithPort(ctx, nil, int32(servicePort))
  1001  		framework.ExpectNoError(err)
  1002  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1003  			framework.Logf("Cleaning up the sourceip test service")
  1004  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  1005  			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  1006  		})
  1007  		serviceIP := tcpService.Spec.ClusterIP
  1008  		framework.Logf("sourceip-test cluster ip: %s", serviceIP)
  1009  
  1010  		ginkgo.By("Picking 2 Nodes to test whether source IP is preserved or not")
  1011  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  1012  		framework.ExpectNoError(err)
  1013  		nodeCounts := len(nodes.Items)
  1014  		if nodeCounts < 2 {
  1015  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  1016  		}
  1017  
  1018  		ginkgo.By("Creating a webserver pod to be part of the TCP service which echoes back source ip")
  1019  		serverPodName := "echo-sourceip"
  1020  		pod := e2epod.NewAgnhostPod(ns, serverPodName, nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
  1021  		pod.Labels = jig.Labels
  1022  		_, err = cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
  1023  		framework.ExpectNoError(err)
  1024  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout))
  1025  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1026  			framework.Logf("Cleaning up the echo server pod")
  1027  			err := cs.CoreV1().Pods(ns).Delete(ctx, serverPodName, metav1.DeleteOptions{})
  1028  			framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName)
  1029  		})
  1030  
  1031  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}})
  1032  
  1033  		ginkgo.By("Creating pause pod deployment")
  1034  		deployment := createPausePodDeployment(ctx, cs, "pause-pod", ns, nodeCounts)
  1035  
  1036  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1037  			framework.Logf("Deleting deployment")
  1038  			err = cs.AppsV1().Deployments(ns).Delete(ctx, deployment.Name, metav1.DeleteOptions{})
  1039  			framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
  1040  		})
  1041  
  1042  		framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
  1043  
  1044  		deployment, err = cs.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
  1045  		framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
  1046  		labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
  1047  
  1048  		pausePods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector.String()})
  1049  		framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments")
  1050  
  1051  		gomega.Expect(pausePods.Items[0].Spec.NodeName).NotTo(gomega.Equal(pausePods.Items[1].Spec.NodeName))
  1052  
  1053  		serviceAddress := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
  1054  
  1055  		for _, pausePod := range pausePods.Items {
  1056  			sourceIP, execPodIP := execSourceIPTest(pausePod, serviceAddress)
  1057  			ginkgo.By("Verifying the preserved source ip")
  1058  			gomega.Expect(sourceIP).To(gomega.Equal(execPodIP))
  1059  		}
  1060  	})
  1061  
  1062  	ginkgo.It("should allow pods to hairpin back to themselves through services", func(ctx context.Context) {
  1063  		serviceName := "hairpin-test"
  1064  		ns := f.Namespace.Name
  1065  
  1066  		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
  1067  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1068  		jig.ExternalIPs = false
  1069  		servicePort := 8080
  1070  		svc, err := jig.CreateTCPServiceWithPort(ctx, nil, int32(servicePort))
  1071  		framework.ExpectNoError(err)
  1072  		serviceIP := svc.Spec.ClusterIP
  1073  		framework.Logf("hairpin-test cluster ip: %s", serviceIP)
  1074  
  1075  		ginkgo.By("creating a client/server pod")
  1076  		serverPodName := "hairpin"
  1077  		podTemplate := e2epod.NewAgnhostPod(ns, serverPodName, nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
  1078  		podTemplate.Labels = jig.Labels
  1079  		pod, err := cs.CoreV1().Pods(ns).Create(ctx, podTemplate, metav1.CreateOptions{})
  1080  		framework.ExpectNoError(err)
  1081  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout))
  1082  
  1083  		ginkgo.By("waiting for the service to expose an endpoint")
  1084  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{serverPodName: {servicePort}})
  1085  
  1086  		ginkgo.By("Checking if the pod can reach itself")
  1087  		err = jig.CheckServiceReachability(ctx, svc, pod)
  1088  		framework.ExpectNoError(err)
  1089  	})
  1090  
  1091  	ginkgo.It("should be able to up and down services", func(ctx context.Context) {
  1092  		ns := f.Namespace.Name
  1093  		numPods, servicePort := 3, defaultServeHostnameServicePort
  1094  
  1095  		svc1 := "up-down-1"
  1096  		svc2 := "up-down-2"
  1097  		svc3 := "up-down-3"
  1098  
  1099  		ginkgo.By("creating " + svc1 + " in namespace " + ns)
  1100  		podNames1, svc1IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc1), ns, numPods)
  1101  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
  1102  		ginkgo.By("creating " + svc2 + " in namespace " + ns)
  1103  		podNames2, svc2IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc2), ns, numPods)
  1104  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
  1105  
  1106  		ginkgo.By("verifying service " + svc1 + " is up")
  1107  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort))
  1108  
  1109  		ginkgo.By("verifying service " + svc2 + " is up")
  1110  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort))
  1111  
  1112  		// Stop service 1 and make sure it is gone.
  1113  		ginkgo.By("stopping service " + svc1)
  1114  		framework.ExpectNoError(StopServeHostnameService(ctx, f.ClientSet, ns, svc1))
  1115  
  1116  		ginkgo.By("verifying service " + svc1 + " is not up")
  1117  		framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svc1IP, servicePort))
  1118  		ginkgo.By("verifying service " + svc2 + " is still up")
  1119  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort))
  1120  
  1121  		// Start another service and verify both are up.
  1122  		ginkgo.By("creating service " + svc3 + " in namespace " + ns)
  1123  		podNames3, svc3IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc3), ns, numPods)
  1124  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3, ns)
  1125  
  1126  		if svc2IP == svc3IP {
  1127  			framework.Failf("service IPs conflict: %v", svc2IP)
  1128  		}
  1129  
  1130  		ginkgo.By("verifying service " + svc2 + " is still up")
  1131  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort))
  1132  
  1133  		ginkgo.By("verifying service " + svc3 + " is up")
  1134  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames3, svc3IP, servicePort))
  1135  	})
  1136  
  1137  	ginkgo.It("should work after the service has been recreated", func(ctx context.Context) {
  1138  		serviceName := "service-deletion"
  1139  		ns := f.Namespace.Name
  1140  		numPods, servicePort := 1, defaultServeHostnameServicePort
  1141  
  1142  		ginkgo.By("creating the service " + serviceName + " in namespace " + ns)
  1143  		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, serviceName)
  1144  		podNames, svcIP, _ := StartServeHostnameService(ctx, cs, getServeHostnameService(serviceName), ns, numPods)
  1145  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames, svcIP, servicePort))
  1146  
  1147  		ginkgo.By("deleting the service " + serviceName + " in namespace " + ns)
  1148  		err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  1149  		framework.ExpectNoError(err)
  1150  
  1151  		ginkgo.By("Waiting for the service " + serviceName + " in namespace " + ns + " to disappear")
  1152  		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.RespondingTimeout, func() (bool, error) {
  1153  			_, err := cs.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{})
  1154  			if err != nil {
  1155  				if apierrors.IsNotFound(err) {
  1156  					framework.Logf("Service %s/%s is gone.", ns, serviceName)
  1157  					return true, nil
  1158  				}
  1159  				return false, err
  1160  			}
  1161  			framework.Logf("Service %s/%s still exists", ns, serviceName)
  1162  			return false, nil
  1163  		}); pollErr != nil {
  1164  			framework.Failf("Failed to wait for service to disappear: %v", pollErr)
  1165  		}
  1166  
  1167  		ginkgo.By("recreating the service " + serviceName + " in namespace " + ns)
  1168  		svc, err := cs.CoreV1().Services(ns).Create(ctx, getServeHostnameService(serviceName), metav1.CreateOptions{})
  1169  		framework.ExpectNoError(err)
  1170  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames, svc.Spec.ClusterIP, servicePort))
  1171  	})
  1172  
  1173  	f.It("should work after restarting kube-proxy", f.WithDisruptive(), func(ctx context.Context) {
  1174  		kubeProxyLabelSet := map[string]string{clusterAddonLabelKey: kubeProxyLabelName}
  1175  		e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeProxyLabelName, cs, metav1.NamespaceSystem, kubeProxyLabelSet)
  1176  
  1177  		// TODO: use the ServiceTestJig here
  1178  		ns := f.Namespace.Name
  1179  		numPods, servicePort := 3, defaultServeHostnameServicePort
  1180  
  1181  		svc1 := "restart-proxy-1"
  1182  		svc2 := "restart-proxy-2"
  1183  
  1184  		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1)
  1185  		podNames1, svc1IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc1), ns, numPods)
  1186  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
  1187  
  1188  		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2)
  1189  		podNames2, svc2IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc2), ns, numPods)
  1190  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
  1191  
  1192  		if svc1IP == svc2IP {
  1193  			framework.Failf("VIPs conflict: %v", svc1IP)
  1194  		}
  1195  
  1196  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort))
  1197  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort))
  1198  
  1199  		if err := restartComponent(ctx, cs, kubeProxyLabelName, metav1.NamespaceSystem, kubeProxyLabelSet); err != nil {
  1200  			framework.Failf("error restarting kube-proxy: %v", err)
  1201  		}
  1202  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort))
  1203  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort))
  1204  	})
  1205  
  1206  	f.It("should work after restarting apiserver", f.WithDisruptive(), func(ctx context.Context) {
  1207  
  1208  		if !framework.ProviderIs("gke") {
  1209  			e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
  1210  		}
  1211  
  1212  		// TODO: use the ServiceTestJig here
  1213  		ns := f.Namespace.Name
  1214  		numPods, servicePort := 3, defaultServeHostnameServicePort
  1215  
  1216  		svc1 := "restart-apiserver-1"
  1217  		svc2 := "restart-apiserver-2"
  1218  
  1219  		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc1)
  1220  		podNames1, svc1IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc1), ns, numPods)
  1221  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns)
  1222  
  1223  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort))
  1224  
  1225  		// Restart apiserver
  1226  		ginkgo.By("Restarting apiserver")
  1227  		if err := restartApiserver(ctx, ns, cs); err != nil {
  1228  			framework.Failf("error restarting apiserver: %v", err)
  1229  		}
  1230  		ginkgo.By("Waiting for apiserver to come up by polling /healthz")
  1231  		if err := waitForAPIServerUp(ctx, cs); err != nil {
  1232  			framework.Failf("error while waiting for apiserver up: %v", err)
  1233  		}
  1234  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort))
  1235  
  1236  		// Create a new service and check if it's not reusing IP.
  1237  		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, svc2)
  1238  		podNames2, svc2IP, err := StartServeHostnameService(ctx, cs, getServeHostnameService(svc2), ns, numPods)
  1239  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
  1240  
  1241  		if svc1IP == svc2IP {
  1242  			framework.Failf("VIPs conflict: %v", svc1IP)
  1243  		}
  1244  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames1, svc1IP, servicePort))
  1245  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort))
  1246  	})
  1247  
  1248  	/*
  1249  		Release: v1.16
  1250  		Testname: Service, NodePort Service
  1251  		Description: Create a TCP NodePort service, and test reachability from a client Pod.
  1252  		The client Pod MUST be able to access the NodePort service by service name and cluster
  1253  		IP on the service port, and on nodes' internal and external IPs on the NodePort.
  1254  	*/
  1255  	framework.ConformanceIt("should be able to create a functioning NodePort service", func(ctx context.Context) {
  1256  		serviceName := "nodeport-test"
  1257  		ns := f.Namespace.Name
  1258  
  1259  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1260  
  1261  		ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
  1262  		nodePortService, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  1263  			svc.Spec.Type = v1.ServiceTypeNodePort
  1264  			svc.Spec.Ports = []v1.ServicePort{
  1265  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(9376)},
  1266  			}
  1267  		})
  1268  		framework.ExpectNoError(err)
  1269  		err = jig.CreateServicePods(ctx, 2)
  1270  		framework.ExpectNoError(err)
  1271  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  1272  		err = jig.CheckServiceReachability(ctx, nodePortService, execPod)
  1273  		framework.ExpectNoError(err)
  1274  	})
  1275  
  1276  	/*
  1277  		Create a ClusterIP service with an External IP that is not assigned to an interface.
  1278  		The IP ranges here are reserved for documentation according to
  1279  		[RFC 5737](https://tools.ietf.org/html/rfc5737) Section 3 and should not be used by any host.
  1280  	*/
  1281  	ginkgo.It("should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node", func(ctx context.Context) {
  1282  		serviceName := "externalip-test"
  1283  		ns := f.Namespace.Name
  1284  		externalIP := "203.0.113.250"
  1285  		if framework.TestContext.ClusterIsIPv6() {
  1286  			externalIP = "2001:DB8::cb00:71fa"
  1287  		}
  1288  
  1289  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1290  		jig.ExternalIPs = false
  1291  
  1292  		ginkgo.By("creating service " + serviceName + " with type=clusterIP in namespace " + ns)
  1293  		clusterIPService, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  1294  			svc.Spec.Type = v1.ServiceTypeClusterIP
  1295  			svc.Spec.ExternalIPs = []string{externalIP}
  1296  			svc.Spec.Ports = []v1.ServicePort{
  1297  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(9376)},
  1298  			}
  1299  		})
  1300  		if err != nil && strings.Contains(err.Error(), "Use of external IPs is denied by admission control") {
  1301  			e2eskipper.Skipf("Admission controller to deny services with external IPs is enabled - skip.")
  1302  		}
  1303  		framework.ExpectNoError(err)
  1304  		err = jig.CreateServicePods(ctx, 2)
  1305  		framework.ExpectNoError(err)
  1306  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  1307  		err = jig.CheckServiceReachability(ctx, clusterIPService, execPod)
  1308  		framework.ExpectNoError(err)
  1309  	})
  1310  
  1311  	/*
  1312  		Testname: Service, update NodePort, same port different protocol
  1313  		Description: Create a service to accept TCP requests. By default, created service MUST be of type ClusterIP and an ClusterIP MUST be assigned to the service.
  1314  		When service type is updated to NodePort supporting TCP protocol, it MUST be reachable on nodeIP over allocated NodePort to serve TCP requests.
  1315  		When this NodePort service is updated to use two protocols i.e. TCP and UDP for same assigned service port 80, service update MUST be successful by allocating two NodePorts to the service and
  1316  		service MUST be able to serve both TCP and UDP requests over same service port 80.
  1317  	*/
  1318  	ginkgo.It("should be able to update service type to NodePort listening on same port number but different protocols", func(ctx context.Context) {
  1319  		serviceName := "nodeport-update-service"
  1320  		ns := f.Namespace.Name
  1321  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1322  		jig.ExternalIPs = false
  1323  
  1324  		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
  1325  		tcpService, err := jig.CreateTCPService(ctx, nil)
  1326  		framework.ExpectNoError(err)
  1327  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1328  			framework.Logf("Cleaning up the updating NodePorts test service")
  1329  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  1330  			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  1331  		})
  1332  		framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port)
  1333  
  1334  		ginkgo.By("changing the TCP service to type=NodePort")
  1335  		nodePortService, err := jig.UpdateService(ctx, func(s *v1.Service) {
  1336  			s.Spec.Type = v1.ServiceTypeNodePort
  1337  			s.Spec.Ports = []v1.ServicePort{
  1338  				{
  1339  					Name:       "tcp-port",
  1340  					Port:       80,
  1341  					Protocol:   v1.ProtocolTCP,
  1342  					TargetPort: intstr.FromInt32(9376),
  1343  				},
  1344  			}
  1345  		})
  1346  		framework.ExpectNoError(err)
  1347  
  1348  		err = jig.CreateServicePods(ctx, 2)
  1349  		framework.ExpectNoError(err)
  1350  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  1351  		err = jig.CheckServiceReachability(ctx, nodePortService, execPod)
  1352  		framework.ExpectNoError(err)
  1353  
  1354  		ginkgo.By("Updating NodePort service to listen TCP and UDP based requests over same Port")
  1355  		nodePortService, err = jig.UpdateService(ctx, func(s *v1.Service) {
  1356  			s.Spec.Type = v1.ServiceTypeNodePort
  1357  			s.Spec.Ports = []v1.ServicePort{
  1358  				{
  1359  					Name:       "tcp-port",
  1360  					Port:       80,
  1361  					Protocol:   v1.ProtocolTCP,
  1362  					TargetPort: intstr.FromInt32(9376),
  1363  				},
  1364  				{
  1365  					Name:       "udp-port",
  1366  					Port:       80,
  1367  					Protocol:   v1.ProtocolUDP,
  1368  					TargetPort: intstr.FromInt32(9376),
  1369  				},
  1370  			}
  1371  		})
  1372  		framework.ExpectNoError(err)
  1373  		err = jig.CheckServiceReachability(ctx, nodePortService, execPod)
  1374  		framework.ExpectNoError(err)
  1375  		nodePortCounts := len(nodePortService.Spec.Ports)
  1376  		gomega.Expect(nodePortCounts).To(gomega.Equal(2), "updated service should have two Ports but found %d Ports", nodePortCounts)
  1377  
  1378  		for _, port := range nodePortService.Spec.Ports {
  1379  			gomega.Expect(port.NodePort).ToNot(gomega.BeZero(), "NodePort service failed to allocate NodePort for Port %s", port.Name)
  1380  			framework.Logf("NodePort service allocates NodePort: %d for Port: %s over Protocol: %s", port.NodePort, port.Name, port.Protocol)
  1381  		}
  1382  	})
  1383  
  1384  	/*
  1385  		Release: v1.16
  1386  		Testname: Service, change type, ExternalName to ClusterIP
  1387  		Description: Create a service of type ExternalName, pointing to external DNS. ClusterIP MUST not be assigned to the service.
  1388  		Update the service from ExternalName to ClusterIP by removing ExternalName entry, assigning port 80 as service port and TCP as protocol.
  1389  		Service update MUST be successful by assigning ClusterIP to the service and it MUST be reachable over serviceName and ClusterIP on provided service port.
  1390  	*/
  1391  	framework.ConformanceIt("should be able to change the type from ExternalName to ClusterIP", func(ctx context.Context) {
  1392  		serviceName := "externalname-service"
  1393  		ns := f.Namespace.Name
  1394  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1395  
  1396  		ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
  1397  		_, err := jig.CreateExternalNameService(ctx, nil)
  1398  		framework.ExpectNoError(err)
  1399  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1400  			framework.Logf("Cleaning up the ExternalName to ClusterIP test service")
  1401  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  1402  			framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
  1403  		})
  1404  
  1405  		ginkgo.By("changing the ExternalName service to type=ClusterIP")
  1406  		clusterIPService, err := jig.UpdateService(ctx, func(s *v1.Service) {
  1407  			s.Spec.Type = v1.ServiceTypeClusterIP
  1408  			s.Spec.ExternalName = ""
  1409  			s.Spec.Ports = []v1.ServicePort{
  1410  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(9376)},
  1411  			}
  1412  		})
  1413  		framework.ExpectNoError(err)
  1414  
  1415  		err = jig.CreateServicePods(ctx, 2)
  1416  		framework.ExpectNoError(err)
  1417  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  1418  		err = jig.CheckServiceReachability(ctx, clusterIPService, execPod)
  1419  		framework.ExpectNoError(err)
  1420  	})
  1421  
  1422  	/*
  1423  		Release: v1.16
  1424  		Testname: Service, change type, ExternalName to NodePort
  1425  		Description: Create a service of type ExternalName, pointing to external DNS. ClusterIP MUST not be assigned to the service.
  1426  		Update the service from ExternalName to NodePort, assigning port 80 as service port and, TCP as protocol.
  1427  		service update MUST be successful by exposing service on every node's IP on dynamically assigned NodePort and, ClusterIP MUST be assigned to route service requests.
  1428  		Service MUST be reachable over serviceName and the ClusterIP on servicePort. Service MUST also be reachable over node's IP on NodePort.
  1429  	*/
  1430  	framework.ConformanceIt("should be able to change the type from ExternalName to NodePort", func(ctx context.Context) {
  1431  		serviceName := "externalname-service"
  1432  		ns := f.Namespace.Name
  1433  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1434  
  1435  		ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
  1436  		_, err := jig.CreateExternalNameService(ctx, nil)
  1437  		framework.ExpectNoError(err)
  1438  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1439  			framework.Logf("Cleaning up the ExternalName to NodePort test service")
  1440  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  1441  			framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
  1442  		})
  1443  
  1444  		ginkgo.By("changing the ExternalName service to type=NodePort")
  1445  		nodePortService, err := jig.UpdateService(ctx, func(s *v1.Service) {
  1446  			s.Spec.Type = v1.ServiceTypeNodePort
  1447  			s.Spec.ExternalName = ""
  1448  			s.Spec.Ports = []v1.ServicePort{
  1449  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(9376)},
  1450  			}
  1451  		})
  1452  		framework.ExpectNoError(err)
  1453  		err = jig.CreateServicePods(ctx, 2)
  1454  		framework.ExpectNoError(err)
  1455  
  1456  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  1457  		err = jig.CheckServiceReachability(ctx, nodePortService, execPod)
  1458  		framework.ExpectNoError(err)
  1459  	})
  1460  
  1461  	/*
  1462  		Release: v1.16
  1463  		Testname: Service, change type, ClusterIP to ExternalName
  1464  		Description: Create a service of type ClusterIP. Service creation MUST be successful by assigning ClusterIP to the service.
  1465  		Update service type from ClusterIP to ExternalName by setting CNAME entry as externalName. Service update MUST be successful and service MUST not has associated ClusterIP.
  1466  		Service MUST be able to resolve to IP address by returning A records ensuring service is pointing to provided externalName.
  1467  	*/
  1468  	framework.ConformanceIt("should be able to change the type from ClusterIP to ExternalName", func(ctx context.Context) {
  1469  		serviceName := "clusterip-service"
  1470  		ns := f.Namespace.Name
  1471  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1472  
  1473  		ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns)
  1474  		_, err := jig.CreateTCPService(ctx, nil)
  1475  		framework.ExpectNoError(err)
  1476  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1477  			framework.Logf("Cleaning up the ClusterIP to ExternalName test service")
  1478  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  1479  			framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
  1480  		})
  1481  
  1482  		ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service")
  1483  		externalServiceName := "externalsvc"
  1484  		externalServiceFQDN := createAndGetExternalServiceFQDN(ctx, cs, ns, externalServiceName)
  1485  		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName)
  1486  
  1487  		ginkgo.By("changing the ClusterIP service to type=ExternalName")
  1488  		externalNameService, err := jig.UpdateService(ctx, func(s *v1.Service) {
  1489  			s.Spec.Type = v1.ServiceTypeExternalName
  1490  			s.Spec.ExternalName = externalServiceFQDN
  1491  		})
  1492  		framework.ExpectNoError(err)
  1493  		if externalNameService.Spec.ClusterIP != "" {
  1494  			framework.Failf("Spec.ClusterIP was not cleared")
  1495  		}
  1496  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  1497  		err = jig.CheckServiceReachability(ctx, externalNameService, execPod)
  1498  		framework.ExpectNoError(err)
  1499  	})
  1500  
  1501  	/*
  1502  		Release: v1.16
  1503  		Testname: Service, change type, NodePort to ExternalName
  1504  		Description: Create a service of type NodePort. Service creation MUST be successful by exposing service on every node's IP on dynamically assigned NodePort and, ClusterIP MUST be assigned to route service requests.
  1505  		Update the service type from NodePort to ExternalName by setting CNAME entry as externalName. Service update MUST be successful and, MUST not has ClusterIP associated with the service and, allocated NodePort MUST be released.
  1506  		Service MUST be able to resolve to IP address by returning A records ensuring service is pointing to provided externalName.
  1507  	*/
  1508  	framework.ConformanceIt("should be able to change the type from NodePort to ExternalName", func(ctx context.Context) {
  1509  		serviceName := "nodeport-service"
  1510  		ns := f.Namespace.Name
  1511  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1512  
  1513  		ginkgo.By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns)
  1514  		_, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  1515  			svc.Spec.Type = v1.ServiceTypeNodePort
  1516  		})
  1517  		framework.ExpectNoError(err)
  1518  		ginkgo.DeferCleanup(func(ctx context.Context) {
  1519  			framework.Logf("Cleaning up the NodePort to ExternalName test service")
  1520  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  1521  			framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
  1522  		})
  1523  
  1524  		ginkgo.By("Creating active service to test reachability when its FQDN is referred as externalName for another service")
  1525  		externalServiceName := "externalsvc"
  1526  		externalServiceFQDN := createAndGetExternalServiceFQDN(ctx, cs, ns, externalServiceName)
  1527  		ginkgo.DeferCleanup(StopServeHostnameService, f.ClientSet, ns, externalServiceName)
  1528  
  1529  		ginkgo.By("changing the NodePort service to type=ExternalName")
  1530  		externalNameService, err := jig.UpdateService(ctx, func(s *v1.Service) {
  1531  			s.Spec.Type = v1.ServiceTypeExternalName
  1532  			s.Spec.ExternalName = externalServiceFQDN
  1533  		})
  1534  		framework.ExpectNoError(err)
  1535  		if externalNameService.Spec.ClusterIP != "" {
  1536  			framework.Failf("Spec.ClusterIP was not cleared")
  1537  		}
  1538  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  1539  		err = jig.CheckServiceReachability(ctx, externalNameService, execPod)
  1540  		framework.ExpectNoError(err)
  1541  	})
  1542  
  1543  	ginkgo.It("should prevent NodePort collisions", func(ctx context.Context) {
  1544  		// TODO: use the ServiceTestJig here
  1545  		baseName := "nodeport-collision-"
  1546  		serviceName1 := baseName + "1"
  1547  		serviceName2 := baseName + "2"
  1548  		ns := f.Namespace.Name
  1549  
  1550  		t := NewServerTest(cs, ns, serviceName1)
  1551  		defer func() {
  1552  			defer ginkgo.GinkgoRecover()
  1553  			errs := t.Cleanup()
  1554  			if len(errs) != 0 {
  1555  				framework.Failf("errors in cleanup: %v", errs)
  1556  			}
  1557  		}()
  1558  
  1559  		ginkgo.By("creating service " + serviceName1 + " with type NodePort in namespace " + ns)
  1560  		service := t.BuildServiceSpec()
  1561  		service.Spec.Type = v1.ServiceTypeNodePort
  1562  		result, err := t.CreateService(service)
  1563  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns)
  1564  
  1565  		if result.Spec.Type != v1.ServiceTypeNodePort {
  1566  			framework.Failf("got unexpected Spec.Type for new service: %v", result)
  1567  		}
  1568  		if len(result.Spec.Ports) != 1 {
  1569  			framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
  1570  		}
  1571  		port := result.Spec.Ports[0]
  1572  		if port.NodePort == 0 {
  1573  			framework.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result)
  1574  		}
  1575  
  1576  		ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort")
  1577  		service2 := t.BuildServiceSpec()
  1578  		service2.Name = serviceName2
  1579  		service2.Spec.Type = v1.ServiceTypeNodePort
  1580  		service2.Spec.Ports[0].NodePort = port.NodePort
  1581  		result2, err := t.CreateService(service2)
  1582  		if err == nil {
  1583  			framework.Failf("Created service with conflicting NodePort: %v", result2)
  1584  		}
  1585  		expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort)
  1586  		gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
  1587  
  1588  		ginkgo.By("deleting service " + serviceName1 + " to release NodePort")
  1589  		err = t.DeleteService(serviceName1)
  1590  		framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName1, ns)
  1591  
  1592  		ginkgo.By("creating service " + serviceName2 + " with no-longer-conflicting NodePort")
  1593  		_, err = t.CreateService(service2)
  1594  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns)
  1595  	})
  1596  
  1597  	ginkgo.It("should check NodePort out-of-range", func(ctx context.Context) {
  1598  		// TODO: use the ServiceTestJig here
  1599  		serviceName := "nodeport-range-test"
  1600  		ns := f.Namespace.Name
  1601  
  1602  		t := NewServerTest(cs, ns, serviceName)
  1603  		defer func() {
  1604  			defer ginkgo.GinkgoRecover()
  1605  			errs := t.Cleanup()
  1606  			if len(errs) != 0 {
  1607  				framework.Failf("errors in cleanup: %v", errs)
  1608  			}
  1609  		}()
  1610  
  1611  		service := t.BuildServiceSpec()
  1612  		service.Spec.Type = v1.ServiceTypeNodePort
  1613  
  1614  		ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns)
  1615  		service, err := t.CreateService(service)
  1616  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
  1617  
  1618  		if service.Spec.Type != v1.ServiceTypeNodePort {
  1619  			framework.Failf("got unexpected Spec.Type for new service: %v", service)
  1620  		}
  1621  		if len(service.Spec.Ports) != 1 {
  1622  			framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
  1623  		}
  1624  		port := service.Spec.Ports[0]
  1625  		if port.NodePort == 0 {
  1626  			framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
  1627  		}
  1628  		if !e2eservice.NodePortRange.Contains(int(port.NodePort)) {
  1629  			framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
  1630  		}
  1631  
  1632  		outOfRangeNodePort := 0
  1633  		for {
  1634  			outOfRangeNodePort = 1 + rand.Intn(65535)
  1635  			if !e2eservice.NodePortRange.Contains(outOfRangeNodePort) {
  1636  				break
  1637  			}
  1638  		}
  1639  		ginkgo.By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
  1640  		result, err := e2eservice.UpdateService(ctx, cs, ns, serviceName, func(s *v1.Service) {
  1641  			s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
  1642  		})
  1643  		if err == nil {
  1644  			framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
  1645  		}
  1646  		expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort)
  1647  		gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
  1648  
  1649  		ginkgo.By("deleting original service " + serviceName)
  1650  		err = t.DeleteService(serviceName)
  1651  		framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  1652  
  1653  		ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort))
  1654  		service = t.BuildServiceSpec()
  1655  		service.Spec.Type = v1.ServiceTypeNodePort
  1656  		service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
  1657  		service, err = t.CreateService(service)
  1658  		if err == nil {
  1659  			framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
  1660  		}
  1661  		gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
  1662  	})
  1663  
  1664  	ginkgo.It("should release NodePorts on delete", func(ctx context.Context) {
  1665  		// TODO: use the ServiceTestJig here
  1666  		serviceName := "nodeport-reuse"
  1667  		ns := f.Namespace.Name
  1668  
  1669  		t := NewServerTest(cs, ns, serviceName)
  1670  		defer func() {
  1671  			defer ginkgo.GinkgoRecover()
  1672  			errs := t.Cleanup()
  1673  			if len(errs) != 0 {
  1674  				framework.Failf("errors in cleanup: %v", errs)
  1675  			}
  1676  		}()
  1677  
  1678  		service := t.BuildServiceSpec()
  1679  		service.Spec.Type = v1.ServiceTypeNodePort
  1680  
  1681  		ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns)
  1682  		service, err := t.CreateService(service)
  1683  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
  1684  
  1685  		if service.Spec.Type != v1.ServiceTypeNodePort {
  1686  			framework.Failf("got unexpected Spec.Type for new service: %v", service)
  1687  		}
  1688  		if len(service.Spec.Ports) != 1 {
  1689  			framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
  1690  		}
  1691  		port := service.Spec.Ports[0]
  1692  		if port.NodePort == 0 {
  1693  			framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
  1694  		}
  1695  		if !e2eservice.NodePortRange.Contains(int(port.NodePort)) {
  1696  			framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
  1697  		}
  1698  		nodePort := port.NodePort
  1699  
  1700  		ginkgo.By("deleting original service " + serviceName)
  1701  		err = t.DeleteService(serviceName)
  1702  		framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  1703  
  1704  		hostExec := launchHostExecPod(ctx, f.ClientSet, f.Namespace.Name, "hostexec")
  1705  		cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
  1706  		var stdout string
  1707  		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
  1708  			var err error
  1709  			stdout, err = e2eoutput.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
  1710  			if err != nil {
  1711  				framework.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
  1712  				return false, nil
  1713  			}
  1714  			return true, nil
  1715  		}); pollErr != nil {
  1716  			framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, e2eservice.KubeProxyLagTimeout, stdout)
  1717  		}
  1718  
  1719  		ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))
  1720  		service = t.BuildServiceSpec()
  1721  		service.Spec.Type = v1.ServiceTypeNodePort
  1722  		service.Spec.Ports[0].NodePort = nodePort
  1723  		_, err = t.CreateService(service)
  1724  		framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
  1725  	})
  1726  
  1727  	ginkgo.It("should create endpoints for unready pods", func(ctx context.Context) {
  1728  		serviceName := "tolerate-unready"
  1729  		ns := f.Namespace.Name
  1730  
  1731  		t := NewServerTest(cs, ns, serviceName)
  1732  		defer func() {
  1733  			defer ginkgo.GinkgoRecover()
  1734  			errs := t.Cleanup()
  1735  			if len(errs) != 0 {
  1736  				framework.Failf("errors in cleanup: %v", errs)
  1737  			}
  1738  		}()
  1739  
  1740  		t.Name = "slow-terminating-unready-pod"
  1741  		t.Image = imageutils.GetE2EImage(imageutils.Agnhost)
  1742  		port := int32(80)
  1743  		terminateSeconds := int64(100)
  1744  
  1745  		service := &v1.Service{
  1746  			ObjectMeta: metav1.ObjectMeta{
  1747  				Name:      t.ServiceName,
  1748  				Namespace: t.Namespace,
  1749  			},
  1750  			Spec: v1.ServiceSpec{
  1751  				Selector: t.Labels,
  1752  				Ports: []v1.ServicePort{{
  1753  					Name:       "http",
  1754  					Port:       port,
  1755  					TargetPort: intstr.FromInt32(port),
  1756  				}},
  1757  				PublishNotReadyAddresses: true,
  1758  			},
  1759  		}
  1760  		rcSpec := e2erc.ByNameContainer(t.Name, 1, t.Labels, v1.Container{
  1761  			Args:  []string{"netexec", fmt.Sprintf("--http-port=%d", port)},
  1762  			Name:  t.Name,
  1763  			Image: t.Image,
  1764  			Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: v1.ProtocolTCP}},
  1765  			ReadinessProbe: &v1.Probe{
  1766  				ProbeHandler: v1.ProbeHandler{
  1767  					Exec: &v1.ExecAction{
  1768  						Command: []string{"/bin/false"},
  1769  					},
  1770  				},
  1771  			},
  1772  			Lifecycle: &v1.Lifecycle{
  1773  				PreStop: &v1.LifecycleHandler{
  1774  					Exec: &v1.ExecAction{
  1775  						Command: []string{"/bin/sleep", fmt.Sprintf("%d", terminateSeconds)},
  1776  					},
  1777  				},
  1778  			},
  1779  		}, nil)
  1780  		rcSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds
  1781  
  1782  		ginkgo.By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector))
  1783  		_, err := t.CreateRC(rcSpec)
  1784  		framework.ExpectNoError(err)
  1785  
  1786  		ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
  1787  		_, err = t.CreateService(service)
  1788  		framework.ExpectNoError(err)
  1789  
  1790  		ginkgo.By("Verifying pods for RC " + t.Name)
  1791  		framework.ExpectNoError(e2epod.VerifyPods(ctx, t.Client, t.Namespace, t.Name, false, 1))
  1792  
  1793  		svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
  1794  		ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName)
  1795  
  1796  		execPod := e2epod.CreateExecPodOrFail(ctx, f.ClientSet, f.Namespace.Name, "execpod-", nil)
  1797  		execPodName := execPod.Name
  1798  		cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/", svcName, port)
  1799  		var stdout string
  1800  		if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
  1801  			var err error
  1802  			stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd)
  1803  			if err != nil {
  1804  				framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
  1805  				return false, nil
  1806  			}
  1807  			return true, nil
  1808  		}); pollErr != nil {
  1809  			framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
  1810  		}
  1811  
  1812  		ginkgo.By("Scaling down replication controller to zero")
  1813  		e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
  1814  
  1815  		ginkgo.By("Update service to not tolerate unready services")
  1816  		_, err = e2eservice.UpdateService(ctx, f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
  1817  			s.Spec.PublishNotReadyAddresses = false
  1818  		})
  1819  		framework.ExpectNoError(err)
  1820  
  1821  		ginkgo.By("Check if pod is unreachable")
  1822  		cmd = fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/; test \"$?\" -ne \"0\"", svcName, port)
  1823  		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
  1824  			var err error
  1825  			stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd)
  1826  			if err != nil {
  1827  				framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
  1828  				return false, nil
  1829  			}
  1830  			return true, nil
  1831  		}); pollErr != nil {
  1832  			framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
  1833  		}
  1834  
  1835  		ginkgo.By("Update service to tolerate unready services again")
  1836  		_, err = e2eservice.UpdateService(ctx, f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
  1837  			s.Spec.PublishNotReadyAddresses = true
  1838  		})
  1839  		framework.ExpectNoError(err)
  1840  
  1841  		ginkgo.By("Check if terminating pod is available through service")
  1842  		cmd = fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/", svcName, port)
  1843  		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
  1844  			var err error
  1845  			stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd)
  1846  			if err != nil {
  1847  				framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
  1848  				return false, nil
  1849  			}
  1850  			return true, nil
  1851  		}); pollErr != nil {
  1852  			framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
  1853  		}
  1854  
  1855  		ginkgo.By("Remove pods immediately")
  1856  		label := labels.SelectorFromSet(labels.Set(t.Labels))
  1857  		options := metav1.ListOptions{LabelSelector: label.String()}
  1858  		podClient := t.Client.CoreV1().Pods(f.Namespace.Name)
  1859  		pods, err := podClient.List(ctx, options)
  1860  		if err != nil {
  1861  			framework.Logf("warning: error retrieving pods: %s", err)
  1862  		} else {
  1863  			for _, pod := range pods.Items {
  1864  				var gracePeriodSeconds int64 = 0
  1865  				err := podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
  1866  				if err != nil {
  1867  					framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
  1868  				}
  1869  			}
  1870  		}
  1871  	})
  1872  
  1873  	ginkgo.It("should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true", func(ctx context.Context) {
  1874  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  1875  		framework.ExpectNoError(err)
  1876  		nodeCounts := len(nodes.Items)
  1877  		if nodeCounts < 2 {
  1878  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  1879  		}
  1880  		node0 := nodes.Items[0]
  1881  		node1 := nodes.Items[1]
  1882  
  1883  		serviceName := "svc-tolerate-unready"
  1884  		ns := f.Namespace.Name
  1885  		servicePort := 80
  1886  
  1887  		ginkgo.By("creating a NodePort TCP service " + serviceName + " that PublishNotReadyAddresses on" + ns)
  1888  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  1889  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  1890  			svc.Spec.Ports = []v1.ServicePort{
  1891  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  1892  			}
  1893  			svc.Spec.Type = v1.ServiceTypeNodePort
  1894  			svc.Spec.PublishNotReadyAddresses = true
  1895  		})
  1896  		framework.ExpectNoError(err, "failed to create Service")
  1897  
  1898  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  1899  		gracePeriod := int64(300)
  1900  		webserverPod0 := &v1.Pod{
  1901  			ObjectMeta: metav1.ObjectMeta{
  1902  				Name: "webserver-pod",
  1903  			},
  1904  			Spec: v1.PodSpec{
  1905  				Containers: []v1.Container{
  1906  					{
  1907  						Name:  "agnhost",
  1908  						Image: imageutils.GetE2EImage(imageutils.Agnhost),
  1909  						Args:  []string{"netexec", "--http-port=80", fmt.Sprintf("--delay-shutdown=%d", gracePeriod)},
  1910  						Ports: []v1.ContainerPort{
  1911  							{
  1912  								ContainerPort: 80,
  1913  							},
  1914  						},
  1915  						ReadinessProbe: &v1.Probe{
  1916  							ProbeHandler: v1.ProbeHandler{
  1917  								HTTPGet: &v1.HTTPGetAction{
  1918  									Path: "/readyz",
  1919  									Port: intstr.IntOrString{
  1920  										IntVal: int32(80),
  1921  									},
  1922  									Scheme: v1.URISchemeHTTP,
  1923  								},
  1924  							},
  1925  						},
  1926  						LivenessProbe: &v1.Probe{
  1927  							ProbeHandler: v1.ProbeHandler{
  1928  								HTTPGet: &v1.HTTPGetAction{
  1929  									Path: "/healthz",
  1930  									Port: intstr.IntOrString{
  1931  										IntVal: int32(80),
  1932  									},
  1933  									Scheme: v1.URISchemeHTTP,
  1934  								},
  1935  							},
  1936  						},
  1937  					},
  1938  				},
  1939  			},
  1940  		}
  1941  		webserverPod0.Labels = jig.Labels
  1942  		webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(gracePeriod)
  1943  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  1944  
  1945  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  1946  		framework.ExpectNoError(err, "failed to create pod")
  1947  		err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)
  1948  		if err != nil {
  1949  			framework.Failf("error waiting for pod %s to be ready %v", webserverPod0.Name, err)
  1950  		}
  1951  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  1952  
  1953  		ginkgo.By("Creating 1 pause pods that will try to connect to the webservers")
  1954  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  1955  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  1956  
  1957  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  1958  		framework.ExpectNoError(err, "failed to create pod")
  1959  		err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)
  1960  		if err != nil {
  1961  			framework.Failf("error waiting for pod %s to be ready %v", pausePod1.Name, err)
  1962  		}
  1963  
  1964  		// webserver should continue to serve traffic through the Service after delete since:
  1965  		//  - it has a 100s termination grace period
  1966  		//  - it is unready but PublishNotReadyAddresses is true
  1967  		err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{})
  1968  		framework.ExpectNoError(err)
  1969  
  1970  		// Wait until the pod becomes unready
  1971  		err = e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, webserverPod0.Name, "pod not ready", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
  1972  			return !podutil.IsPodReady(pod), nil
  1973  		})
  1974  		if err != nil {
  1975  			framework.Failf("error waiting for pod %s to be unready %v", webserverPod0.Name, err)
  1976  		}
  1977  		// assert 5 times that the pause pod can connect to the Service
  1978  		nodeIPs0 := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
  1979  		nodeIPs1 := e2enode.GetAddresses(&node1, v1.NodeInternalIP)
  1980  		clusterIPAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
  1981  		nodePortAddress0 := net.JoinHostPort(nodeIPs0[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  1982  		nodePortAddress1 := net.JoinHostPort(nodeIPs1[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  1983  		// connect 3 times every 5 seconds to the Service with the unready and terminating endpoint
  1984  		for i := 0; i < 5; i++ {
  1985  			execHostnameTest(*pausePod1, clusterIPAddress, webserverPod0.Name)
  1986  			execHostnameTest(*pausePod1, nodePortAddress0, webserverPod0.Name)
  1987  			execHostnameTest(*pausePod1, nodePortAddress1, webserverPod0.Name)
  1988  			time.Sleep(5 * time.Second)
  1989  		}
  1990  	})
  1991  
  1992  	ginkgo.It("should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false", func(ctx context.Context) {
  1993  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  1994  		framework.ExpectNoError(err)
  1995  		nodeCounts := len(nodes.Items)
  1996  		if nodeCounts < 2 {
  1997  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  1998  		}
  1999  		node0 := nodes.Items[0]
  2000  		node1 := nodes.Items[1]
  2001  
  2002  		serviceName := "svc-not-tolerate-unready"
  2003  		ns := f.Namespace.Name
  2004  		servicePort := 80
  2005  
  2006  		ginkgo.By("creating a NodePort TCP service " + serviceName + " that PublishNotReadyAddresses on" + ns)
  2007  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2008  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2009  			svc.Spec.Ports = []v1.ServicePort{
  2010  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  2011  			}
  2012  			svc.Spec.Type = v1.ServiceTypeNodePort
  2013  			svc.Spec.PublishNotReadyAddresses = false
  2014  		})
  2015  		framework.ExpectNoError(err, "failed to create Service")
  2016  
  2017  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2018  		gracePeriod := int64(300)
  2019  		webserverPod0 := &v1.Pod{
  2020  			ObjectMeta: metav1.ObjectMeta{
  2021  				Name: "webserver-pod",
  2022  			},
  2023  			Spec: v1.PodSpec{
  2024  				Containers: []v1.Container{
  2025  					{
  2026  						Name:  "agnhost",
  2027  						Image: imageutils.GetE2EImage(imageutils.Agnhost),
  2028  						Args:  []string{"netexec", "--http-port=80", fmt.Sprintf("--delay-shutdown=%d", gracePeriod)},
  2029  						Ports: []v1.ContainerPort{
  2030  							{
  2031  								ContainerPort: 80,
  2032  							},
  2033  						},
  2034  						ReadinessProbe: &v1.Probe{
  2035  							ProbeHandler: v1.ProbeHandler{
  2036  								HTTPGet: &v1.HTTPGetAction{
  2037  									Path: "/readyz",
  2038  									Port: intstr.IntOrString{
  2039  										IntVal: int32(80),
  2040  									},
  2041  									Scheme: v1.URISchemeHTTP,
  2042  								},
  2043  							},
  2044  						},
  2045  						LivenessProbe: &v1.Probe{
  2046  							ProbeHandler: v1.ProbeHandler{
  2047  								HTTPGet: &v1.HTTPGetAction{
  2048  									Path: "/healthz",
  2049  									Port: intstr.IntOrString{
  2050  										IntVal: int32(80),
  2051  									},
  2052  									Scheme: v1.URISchemeHTTP,
  2053  								},
  2054  							},
  2055  						},
  2056  					},
  2057  				},
  2058  			},
  2059  		}
  2060  		webserverPod0.Labels = jig.Labels
  2061  		webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(gracePeriod)
  2062  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2063  
  2064  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2065  		framework.ExpectNoError(err, "failed to create pod")
  2066  		err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout)
  2067  		if err != nil {
  2068  			framework.Failf("error waiting for pod %s to be ready %v", webserverPod0.Name, err)
  2069  		}
  2070  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  2071  
  2072  		ginkgo.By("Creating 1 pause pods that will try to connect to the webservers")
  2073  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  2074  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  2075  
  2076  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  2077  		framework.ExpectNoError(err, "failed to create pod")
  2078  		err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)
  2079  		if err != nil {
  2080  			framework.Failf("error waiting for pod %s to be ready %v", pausePod1.Name, err)
  2081  		}
  2082  
  2083  		// webserver should stop to serve traffic through the Service after delete since:
  2084  		//  - it has a 100s termination grace period
  2085  		//  - it is unready but PublishNotReadyAddresses is false
  2086  		err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{})
  2087  		framework.ExpectNoError(err)
  2088  
  2089  		// Wait until the pod becomes unready
  2090  		err = e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, webserverPod0.Name, "pod not ready", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
  2091  			return !podutil.IsPodReady(pod), nil
  2092  		})
  2093  		if err != nil {
  2094  			framework.Failf("error waiting for pod %s to be unready %v", webserverPod0.Name, err)
  2095  		}
  2096  
  2097  		nodeIPs0 := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
  2098  		nodeIPs1 := e2enode.GetAddresses(&node1, v1.NodeInternalIP)
  2099  		nodePortAddress0 := net.JoinHostPort(nodeIPs0[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  2100  		nodePortAddress1 := net.JoinHostPort(nodeIPs1[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  2101  
  2102  		// Wait until the change has been propagated to both nodes.
  2103  		cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress0)
  2104  		if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, true, func(_ context.Context) (bool, error) {
  2105  			_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2106  			if err != nil {
  2107  				return true, nil
  2108  			}
  2109  			return false, nil
  2110  		}); pollErr != nil {
  2111  			framework.ExpectNoError(pollErr, "pod on node0 still serves traffic")
  2112  		}
  2113  		cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1)
  2114  		if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, true, func(_ context.Context) (bool, error) {
  2115  			_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2116  			if err != nil {
  2117  				return true, nil
  2118  			}
  2119  			return false, nil
  2120  		}); pollErr != nil {
  2121  			framework.ExpectNoError(pollErr, "pod on node1 still serves traffic")
  2122  		}
  2123  
  2124  		clusterIPAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
  2125  		// connect 3 times every 5 seconds to the Service and expect a failure
  2126  		for i := 0; i < 5; i++ {
  2127  			cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress)
  2128  			_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2129  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to cluster IP")
  2130  
  2131  			cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress0)
  2132  			_, err = e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2133  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to NodePort address")
  2134  
  2135  			cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1)
  2136  			_, err = e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2137  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to NodePort address")
  2138  
  2139  			time.Sleep(5 * time.Second)
  2140  		}
  2141  	})
  2142  
  2143  	/*
  2144  		Release: v1.19
  2145  		Testname: Service, ClusterIP type, session affinity to ClientIP
  2146  		Description: Create a service of type "ClusterIP". Service's sessionAffinity is set to "ClientIP". Service creation MUST be successful by assigning "ClusterIP" to the service.
  2147  		Create a Replication Controller to ensure that 3 pods are running and are targeted by the service to serve hostname of the pod when requests are sent to the service.
  2148  		Create another pod to make requests to the service. Service MUST serve the hostname from the same pod of the replica for all consecutive requests.
  2149  		Service MUST be reachable over serviceName and the ClusterIP on servicePort.
  2150  		[LinuxOnly]: Windows does not support session affinity.
  2151  	*/
  2152  	framework.ConformanceIt("should have session affinity work for service with type clusterIP [LinuxOnly]", func(ctx context.Context) {
  2153  		svc := getServeHostnameService("affinity-clusterip")
  2154  		svc.Spec.Type = v1.ServiceTypeClusterIP
  2155  		execAffinityTestForNonLBService(ctx, f, cs, svc)
  2156  	})
  2157  
  2158  	ginkgo.It("should have session affinity timeout work for service with type clusterIP [LinuxOnly]", func(ctx context.Context) {
  2159  		svc := getServeHostnameService("affinity-clusterip-timeout")
  2160  		svc.Spec.Type = v1.ServiceTypeClusterIP
  2161  		execAffinityTestForSessionAffinityTimeout(ctx, f, cs, svc)
  2162  	})
  2163  
  2164  	/*
  2165  		Release: v1.19
  2166  		Testname: Service, ClusterIP type, session affinity to None
  2167  		Description: Create a service of type "ClusterIP". Service's sessionAffinity is set to "ClientIP". Service creation MUST be successful by assigning "ClusterIP" to the service.
  2168  		Create a Replication Controller to ensure that 3 pods are running and are targeted by the service to serve hostname of the pod when requests are sent to the service.
  2169  		Create another pod to make requests to the service. Update the service's sessionAffinity to "None". Service update MUST be successful. When a requests are made to the service, it MUST be able serve the hostname from any pod of the replica.
  2170  		When service's sessionAffinily is updated back to "ClientIP", service MUST serve the hostname from the same pod of the replica for all consecutive requests.
  2171  		Service MUST be reachable over serviceName and the ClusterIP on servicePort.
  2172  		[LinuxOnly]: Windows does not support session affinity.
  2173  	*/
  2174  	framework.ConformanceIt("should be able to switch session affinity for service with type clusterIP [LinuxOnly]", func(ctx context.Context) {
  2175  		svc := getServeHostnameService("affinity-clusterip-transition")
  2176  		svc.Spec.Type = v1.ServiceTypeClusterIP
  2177  		execAffinityTestForNonLBServiceWithTransition(ctx, f, cs, svc)
  2178  	})
  2179  
  2180  	/*
  2181  		Release: v1.19
  2182  		Testname: Service, NodePort type, session affinity to ClientIP
  2183  		Description: Create a service of type "NodePort" and provide service port and protocol. Service's sessionAffinity is set to "ClientIP". Service creation MUST be successful by assigning a "ClusterIP" to service and allocating NodePort on all nodes.
  2184  		Create a Replication Controller to ensure that 3 pods are running and are targeted by the service to serve hostname of the pod when a requests are sent to the service.
  2185  		Create another pod to make requests to the service on node's IP and NodePort. Service MUST serve the hostname from the same pod of the replica for all consecutive requests.
  2186  		Service MUST be reachable over serviceName and the ClusterIP on servicePort. Service MUST also be reachable over node's IP on NodePort.
  2187  		[LinuxOnly]: Windows does not support session affinity.
  2188  	*/
  2189  	framework.ConformanceIt("should have session affinity work for NodePort service [LinuxOnly]", func(ctx context.Context) {
  2190  		svc := getServeHostnameService("affinity-nodeport")
  2191  		svc.Spec.Type = v1.ServiceTypeNodePort
  2192  		execAffinityTestForNonLBService(ctx, f, cs, svc)
  2193  	})
  2194  
  2195  	ginkgo.It("should have session affinity timeout work for NodePort service [LinuxOnly]", func(ctx context.Context) {
  2196  		svc := getServeHostnameService("affinity-nodeport-timeout")
  2197  		svc.Spec.Type = v1.ServiceTypeNodePort
  2198  		execAffinityTestForSessionAffinityTimeout(ctx, f, cs, svc)
  2199  	})
  2200  
  2201  	/*
  2202  		Release: v1.19
  2203  		Testname: Service, NodePort type, session affinity to None
  2204  		Description: Create a service of type "NodePort" and provide service port and protocol. Service's sessionAffinity is set to "ClientIP". Service creation MUST be successful by assigning a "ClusterIP" to the service and allocating NodePort on all the nodes.
  2205  		Create a Replication Controller to ensure that 3 pods are running and are targeted by the service to serve hostname of the pod when requests are sent to the service.
  2206  		Create another pod to make requests to the service. Update the service's sessionAffinity to "None". Service update MUST be successful. When a requests are made to the service on node's IP and NodePort, service MUST be able serve the hostname from any pod of the replica.
  2207  		When service's sessionAffinily is updated back to "ClientIP", service MUST serve the hostname from the same pod of the replica for all consecutive requests.
  2208  		Service MUST be reachable over serviceName and the ClusterIP on servicePort. Service MUST also be reachable over node's IP on NodePort.
  2209  		[LinuxOnly]: Windows does not support session affinity.
  2210  	*/
  2211  	framework.ConformanceIt("should be able to switch session affinity for NodePort service [LinuxOnly]", func(ctx context.Context) {
  2212  		svc := getServeHostnameService("affinity-nodeport-transition")
  2213  		svc.Spec.Type = v1.ServiceTypeNodePort
  2214  		execAffinityTestForNonLBServiceWithTransition(ctx, f, cs, svc)
  2215  	})
  2216  
  2217  	ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func(ctx context.Context) {
  2218  		ns := f.Namespace.Name
  2219  		numPods, servicePort := 3, defaultServeHostnameServicePort
  2220  		serviceProxyNameLabels := map[string]string{"service.kubernetes.io/service-proxy-name": "foo-bar"}
  2221  
  2222  		// We will create 2 services to test creating services in both states and also dynamic updates
  2223  		// svcDisabled: Created with the label, will always be disabled. We create this early and
  2224  		//              test again late to make sure it never becomes available.
  2225  		// svcToggled: Created without the label then the label is toggled verifying reachability at each step.
  2226  
  2227  		ginkgo.By("creating service-disabled in namespace " + ns)
  2228  		svcDisabled := getServeHostnameService("service-proxy-disabled")
  2229  		svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels
  2230  		_, svcDisabledIP, err := StartServeHostnameService(ctx, cs, svcDisabled, ns, numPods)
  2231  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns)
  2232  
  2233  		ginkgo.By("creating service in namespace " + ns)
  2234  		svcToggled := getServeHostnameService("service-proxy-toggled")
  2235  		podToggledNames, svcToggledIP, err := StartServeHostnameService(ctx, cs, svcToggled, ns, numPods)
  2236  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns)
  2237  
  2238  		jig := e2eservice.NewTestJig(cs, ns, svcToggled.ObjectMeta.Name)
  2239  
  2240  		ginkgo.By("verifying service is up")
  2241  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podToggledNames, svcToggledIP, servicePort))
  2242  
  2243  		ginkgo.By("verifying service-disabled is not up")
  2244  		framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcDisabledIP, servicePort))
  2245  
  2246  		ginkgo.By("adding service-proxy-name label")
  2247  		_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  2248  			svc.ObjectMeta.Labels = serviceProxyNameLabels
  2249  		})
  2250  		framework.ExpectNoError(err)
  2251  
  2252  		ginkgo.By("verifying service is not up")
  2253  		framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcToggledIP, servicePort))
  2254  
  2255  		ginkgo.By("removing service-proxy-name annotation")
  2256  		_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  2257  			svc.ObjectMeta.Labels = nil
  2258  		})
  2259  		framework.ExpectNoError(err)
  2260  
  2261  		ginkgo.By("verifying service is up")
  2262  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podToggledNames, svcToggledIP, servicePort))
  2263  
  2264  		ginkgo.By("verifying service-disabled is still not up")
  2265  		framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcDisabledIP, servicePort))
  2266  	})
  2267  
  2268  	ginkgo.It("should implement service.kubernetes.io/headless", func(ctx context.Context) {
  2269  		ns := f.Namespace.Name
  2270  		numPods, servicePort := 3, defaultServeHostnameServicePort
  2271  		serviceHeadlessLabels := map[string]string{v1.IsHeadlessService: ""}
  2272  
  2273  		// We will create 2 services to test creating services in both states and also dynamic updates
  2274  		// svcHeadless: Created with the label, will always be disabled. We create this early and
  2275  		//              test again late to make sure it never becomes available.
  2276  		// svcHeadlessToggled: Created without the label then the label is toggled verifying reachability at each step.
  2277  
  2278  		ginkgo.By("creating service-headless in namespace " + ns)
  2279  		svcHeadless := getServeHostnameService("service-headless")
  2280  		svcHeadless.ObjectMeta.Labels = serviceHeadlessLabels
  2281  		// This should be improved, as we do not want a Headlesss Service to contain an IP...
  2282  		_, svcHeadlessIP, err := StartServeHostnameService(ctx, cs, svcHeadless, ns, numPods)
  2283  		framework.ExpectNoError(err, "failed to create replication controller with headless service: %s in the namespace: %s", svcHeadlessIP, ns)
  2284  
  2285  		ginkgo.By("creating service in namespace " + ns)
  2286  		svcHeadlessToggled := getServeHostnameService("service-headless-toggled")
  2287  		podHeadlessToggledNames, svcHeadlessToggledIP, err := StartServeHostnameService(ctx, cs, svcHeadlessToggled, ns, numPods)
  2288  		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcHeadlessToggledIP, ns)
  2289  
  2290  		jig := e2eservice.NewTestJig(cs, ns, svcHeadlessToggled.ObjectMeta.Name)
  2291  
  2292  		ginkgo.By("verifying service is up")
  2293  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
  2294  
  2295  		ginkgo.By("verifying service-headless is not up")
  2296  		framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcHeadlessIP, servicePort))
  2297  
  2298  		ginkgo.By("adding service.kubernetes.io/headless label")
  2299  		_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  2300  			svc.ObjectMeta.Labels = serviceHeadlessLabels
  2301  		})
  2302  		framework.ExpectNoError(err)
  2303  
  2304  		ginkgo.By("verifying service is not up")
  2305  		framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcHeadlessToggledIP, servicePort))
  2306  
  2307  		ginkgo.By("removing service.kubernetes.io/headless annotation")
  2308  		_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  2309  			svc.ObjectMeta.Labels = nil
  2310  		})
  2311  		framework.ExpectNoError(err)
  2312  
  2313  		ginkgo.By("verifying service is up")
  2314  		framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podHeadlessToggledNames, svcHeadlessToggledIP, servicePort))
  2315  
  2316  		ginkgo.By("verifying service-headless is still not up")
  2317  		framework.ExpectNoError(verifyServeHostnameServiceDown(ctx, cs, ns, svcHeadlessIP, servicePort))
  2318  	})
  2319  
  2320  	ginkgo.It("should be rejected when no endpoints exist", func(ctx context.Context) {
  2321  		namespace := f.Namespace.Name
  2322  		serviceName := "no-pods"
  2323  		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
  2324  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, e2eservice.MaxNodesForEndpointsTests)
  2325  		framework.ExpectNoError(err)
  2326  		port := 80
  2327  
  2328  		ginkgo.By("creating a service with no endpoints")
  2329  		_, err = jig.CreateTCPServiceWithPort(ctx, nil, int32(port))
  2330  		framework.ExpectNoError(err)
  2331  
  2332  		nodeName := nodes.Items[0].Name
  2333  		podName := "execpod-noendpoints"
  2334  
  2335  		ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName))
  2336  		execPod := e2epod.CreateExecPodOrFail(ctx, f.ClientSet, namespace, podName, func(pod *v1.Pod) {
  2337  			nodeSelection := e2epod.NodeSelection{Name: nodeName}
  2338  			e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
  2339  		})
  2340  
  2341  		serviceAddress := net.JoinHostPort(serviceName, strconv.Itoa(port))
  2342  		framework.Logf("waiting up to %v to connect to %v", e2eservice.KubeProxyEndpointLagTimeout, serviceAddress)
  2343  		cmd := fmt.Sprintf("/agnhost connect --timeout=3s %s", serviceAddress)
  2344  
  2345  		ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName))
  2346  		expectedErr := "REFUSED"
  2347  		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) {
  2348  			_, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
  2349  
  2350  			if err != nil {
  2351  				if strings.Contains(err.Error(), expectedErr) {
  2352  					framework.Logf("error contained '%s', as expected: %s", expectedErr, err.Error())
  2353  					return true, nil
  2354  				}
  2355  				framework.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error())
  2356  				return false, nil
  2357  			}
  2358  			return true, errors.New("expected connect call to fail")
  2359  		}); pollErr != nil {
  2360  			framework.ExpectNoError(pollErr)
  2361  		}
  2362  	})
  2363  
  2364  	// regression test for https://issues.k8s.io/109414 and https://issues.k8s.io/109718
  2365  	ginkgo.It("should be rejected for evicted pods (no endpoints exist)", func(ctx context.Context) {
  2366  		namespace := f.Namespace.Name
  2367  		serviceName := "evicted-pods"
  2368  		jig := e2eservice.NewTestJig(cs, namespace, serviceName)
  2369  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, e2eservice.MaxNodesForEndpointsTests)
  2370  		framework.ExpectNoError(err)
  2371  		nodeName := nodes.Items[0].Name
  2372  
  2373  		port := 80
  2374  
  2375  		ginkgo.By("creating a service with no endpoints")
  2376  		_, err = jig.CreateTCPServiceWithPort(ctx, func(s *v1.Service) {
  2377  			// set publish not ready addresses to cover edge cases too
  2378  			s.Spec.PublishNotReadyAddresses = true
  2379  		}, int32(port))
  2380  		framework.ExpectNoError(err)
  2381  
  2382  		// Create a pod in one node to get evicted
  2383  		ginkgo.By("creating a client pod that is going to be evicted for the service " + serviceName)
  2384  		evictedPod := e2epod.NewAgnhostPod(namespace, "evicted-pod", nil, nil, nil)
  2385  		evictedPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "sleep 10; dd if=/dev/zero of=file bs=1M count=10; sleep 10000"}
  2386  		evictedPod.Spec.Containers[0].Name = "evicted-pod"
  2387  		evictedPod.Spec.Containers[0].Resources = v1.ResourceRequirements{
  2388  			Limits: v1.ResourceList{"ephemeral-storage": resource.MustParse("5Mi")},
  2389  		}
  2390  		e2epod.NewPodClient(f).Create(ctx, evictedPod)
  2391  		err = e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, evictedPod.Name, "Evicted", f.Namespace.Name)
  2392  		if err != nil {
  2393  			framework.Failf("error waiting for pod to be evicted: %v", err)
  2394  		}
  2395  
  2396  		podName := "execpod-evictedpods"
  2397  		ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName))
  2398  		execPod := e2epod.CreateExecPodOrFail(ctx, f.ClientSet, namespace, podName, func(pod *v1.Pod) {
  2399  			nodeSelection := e2epod.NodeSelection{Name: nodeName}
  2400  			e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
  2401  		})
  2402  
  2403  		if epErr := wait.PollImmediate(framework.Poll, e2eservice.ServiceEndpointsTimeout, func() (bool, error) {
  2404  			endpoints, err := cs.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{})
  2405  			if err != nil {
  2406  				framework.Logf("error fetching '%s/%s' Endpoints: %s", namespace, serviceName, err.Error())
  2407  				return false, err
  2408  			}
  2409  			if len(endpoints.Subsets) > 0 {
  2410  				framework.Logf("expected '%s/%s' Endpoints to be empty, got: %v", namespace, serviceName, endpoints.Subsets)
  2411  				return false, nil
  2412  			}
  2413  			epsList, err := cs.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, serviceName)})
  2414  			if err != nil {
  2415  				framework.Logf("error fetching '%s/%s' EndpointSlices: %s", namespace, serviceName, err.Error())
  2416  				return false, err
  2417  			}
  2418  			if len(epsList.Items) != 1 {
  2419  				framework.Logf("expected exactly 1 EndpointSlice, got: %d", len(epsList.Items))
  2420  				return false, nil
  2421  			}
  2422  			endpointSlice := epsList.Items[0]
  2423  			if len(endpointSlice.Endpoints) > 0 {
  2424  				framework.Logf("expected EndpointSlice to be empty, got %d endpoints", len(endpointSlice.Endpoints))
  2425  				return false, nil
  2426  			}
  2427  			return true, nil
  2428  		}); epErr != nil {
  2429  			framework.ExpectNoError(epErr)
  2430  		}
  2431  
  2432  		serviceAddress := net.JoinHostPort(serviceName, strconv.Itoa(port))
  2433  		framework.Logf("waiting up to %v to connect to %v", e2eservice.KubeProxyEndpointLagTimeout, serviceAddress)
  2434  		cmd := fmt.Sprintf("/agnhost connect --timeout=3s %s", serviceAddress)
  2435  
  2436  		ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v expected to be refused", serviceAddress, podName, nodeName))
  2437  		expectedErr := "REFUSED"
  2438  		if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) {
  2439  			_, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
  2440  
  2441  			if err != nil {
  2442  				if strings.Contains(err.Error(), expectedErr) {
  2443  					framework.Logf("error contained '%s', as expected: %s", expectedErr, err.Error())
  2444  					return true, nil
  2445  				}
  2446  				framework.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error())
  2447  				return false, nil
  2448  			}
  2449  			return true, errors.New("expected connect call to fail")
  2450  		}); pollErr != nil {
  2451  			framework.ExpectNoError(pollErr)
  2452  		}
  2453  	})
  2454  
  2455  	ginkgo.It("should respect internalTrafficPolicy=Local Pod to Pod", func(ctx context.Context) {
  2456  		// windows kube-proxy does not support this feature yet
  2457  		// TODO: remove this skip when windows-based proxies implement internalTrafficPolicy
  2458  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  2459  
  2460  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  2461  		framework.ExpectNoError(err)
  2462  		nodeCounts := len(nodes.Items)
  2463  		if nodeCounts < 2 {
  2464  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  2465  		}
  2466  		node0 := nodes.Items[0]
  2467  		node1 := nodes.Items[1]
  2468  
  2469  		serviceName := "svc-itp"
  2470  		ns := f.Namespace.Name
  2471  		servicePort := 80
  2472  
  2473  		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns)
  2474  		local := v1.ServiceInternalTrafficPolicyLocal
  2475  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2476  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2477  			svc.Spec.Ports = []v1.ServicePort{
  2478  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  2479  			}
  2480  			svc.Spec.InternalTrafficPolicy = &local
  2481  		})
  2482  		framework.ExpectNoError(err)
  2483  
  2484  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2485  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
  2486  		webserverPod0.Labels = jig.Labels
  2487  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2488  
  2489  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2490  		framework.ExpectNoError(err)
  2491  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2492  
  2493  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  2494  
  2495  		ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
  2496  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  2497  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2498  
  2499  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  2500  		framework.ExpectNoError(err)
  2501  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2502  
  2503  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  2504  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  2505  
  2506  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  2507  		framework.ExpectNoError(err)
  2508  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))
  2509  
  2510  		// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
  2511  		serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
  2512  		for i := 0; i < 5; i++ {
  2513  			// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
  2514  			execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)
  2515  
  2516  			// the second pause pod is on a different node, so it should see a connection error every time
  2517  			cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
  2518  			_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2519  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to cluster IP")
  2520  		}
  2521  	})
  2522  
  2523  	ginkgo.It("should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod", func(ctx context.Context) {
  2524  		// windows kube-proxy does not support this feature yet
  2525  		// TODO: remove this skip when windows-based proxies implement internalTrafficPolicy
  2526  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  2527  
  2528  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  2529  		framework.ExpectNoError(err)
  2530  		nodeCounts := len(nodes.Items)
  2531  		if nodeCounts < 2 {
  2532  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  2533  		}
  2534  		node0 := nodes.Items[0]
  2535  		node1 := nodes.Items[1]
  2536  
  2537  		serviceName := "svc-itp"
  2538  		ns := f.Namespace.Name
  2539  		servicePort := 8000
  2540  
  2541  		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns)
  2542  		local := v1.ServiceInternalTrafficPolicyLocal
  2543  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2544  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2545  			svc.Spec.Ports = []v1.ServicePort{
  2546  				{Port: 8000, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(8000)},
  2547  			}
  2548  			svc.Spec.InternalTrafficPolicy = &local
  2549  		})
  2550  		framework.ExpectNoError(err)
  2551  
  2552  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2553  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
  2554  		webserverPod0.Labels = jig.Labels
  2555  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2556  
  2557  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2558  		framework.ExpectNoError(err)
  2559  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2560  
  2561  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  2562  
  2563  		ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
  2564  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  2565  		pausePod0.Spec.HostNetwork = true
  2566  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2567  
  2568  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  2569  		framework.ExpectNoError(err)
  2570  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2571  
  2572  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  2573  		pausePod1.Spec.HostNetwork = true
  2574  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  2575  
  2576  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  2577  		framework.ExpectNoError(err)
  2578  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))
  2579  
  2580  		// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
  2581  		serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
  2582  		for i := 0; i < 5; i++ {
  2583  			// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
  2584  			execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)
  2585  
  2586  			// the second pause pod is on a different node, so it should see a connection error every time
  2587  			cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
  2588  			_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2589  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to cluster IP")
  2590  		}
  2591  	})
  2592  
  2593  	ginkgo.It("should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true)", func(ctx context.Context) {
  2594  		// windows kube-proxy does not support this feature yet
  2595  		// TODO: remove this skip when windows-based proxies implement internalTrafficPolicy
  2596  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  2597  
  2598  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  2599  		framework.ExpectNoError(err)
  2600  		nodeCounts := len(nodes.Items)
  2601  		if nodeCounts < 2 {
  2602  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  2603  		}
  2604  		node0 := nodes.Items[0]
  2605  		node1 := nodes.Items[1]
  2606  
  2607  		serviceName := "svc-itp"
  2608  		ns := f.Namespace.Name
  2609  		servicePort := 80
  2610  		// If the pod can't bind to this port, it will fail to start, and it will fail the test,
  2611  		// because is using hostNetwork. Using a not common port will reduce this possibility.
  2612  		endpointPort := 10180
  2613  
  2614  		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns)
  2615  		local := v1.ServiceInternalTrafficPolicyLocal
  2616  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2617  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2618  			svc.Spec.Ports = []v1.ServicePort{
  2619  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(endpointPort)},
  2620  			}
  2621  			svc.Spec.InternalTrafficPolicy = &local
  2622  		})
  2623  		framework.ExpectNoError(err)
  2624  
  2625  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2626  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(endpointPort), "--udp-port", strconv.Itoa(endpointPort))
  2627  		webserverPod0.Labels = jig.Labels
  2628  		webserverPod0.Spec.HostNetwork = true
  2629  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2630  
  2631  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2632  		framework.ExpectNoError(err)
  2633  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2634  
  2635  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {endpointPort}})
  2636  
  2637  		ginkgo.By("Creating 2 pause pods that will try to connect to the webserver")
  2638  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  2639  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2640  
  2641  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  2642  		framework.ExpectNoError(err)
  2643  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2644  
  2645  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  2646  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  2647  
  2648  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  2649  		framework.ExpectNoError(err)
  2650  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))
  2651  
  2652  		// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
  2653  		serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
  2654  		for i := 0; i < 5; i++ {
  2655  			// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
  2656  			// note that the expected hostname is the node name because the backend pod is on host network
  2657  			execHostnameTest(*pausePod0, serviceAddress, node0.Name)
  2658  
  2659  			// the second pause pod is on a different node, so it should see a connection error every time
  2660  			cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
  2661  			_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2662  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to cluster IP")
  2663  		}
  2664  
  2665  		ginkgo.By("Creating 2 pause hostNetwork pods that will try to connect to the webserver")
  2666  		pausePod2 := e2epod.NewAgnhostPod(ns, "pause-pod-2", nil, nil, nil)
  2667  		pausePod2.Spec.HostNetwork = true
  2668  		e2epod.SetNodeSelection(&pausePod2.Spec, e2epod.NodeSelection{Name: node0.Name})
  2669  
  2670  		pausePod2, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod2, metav1.CreateOptions{})
  2671  		framework.ExpectNoError(err)
  2672  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod2.Name, f.Namespace.Name, framework.PodStartTimeout))
  2673  
  2674  		pausePod3 := e2epod.NewAgnhostPod(ns, "pause-pod-3", nil, nil, nil)
  2675  		pausePod3.Spec.HostNetwork = true
  2676  		e2epod.SetNodeSelection(&pausePod3.Spec, e2epod.NodeSelection{Name: node1.Name})
  2677  
  2678  		pausePod3, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod3, metav1.CreateOptions{})
  2679  		framework.ExpectNoError(err)
  2680  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod3.Name, f.Namespace.Name, framework.PodStartTimeout))
  2681  
  2682  		// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
  2683  		for i := 0; i < 5; i++ {
  2684  			// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
  2685  			// note that the expected hostname is the node name because the backend pod is on host network
  2686  			execHostnameTest(*pausePod2, serviceAddress, node0.Name)
  2687  
  2688  			// the second pause pod is on a different node, so it should see a connection error every time
  2689  			cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
  2690  			_, err := e2eoutput.RunHostCmd(pausePod3.Namespace, pausePod3.Name, cmd)
  2691  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to cluster IP")
  2692  		}
  2693  	})
  2694  
  2695  	ginkgo.It("should fail health check node port if there are only terminating endpoints", func(ctx context.Context) {
  2696  		// windows kube-proxy does not support this feature yet
  2697  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  2698  
  2699  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  2700  		framework.ExpectNoError(err)
  2701  		nodeCounts := len(nodes.Items)
  2702  		if nodeCounts < 2 {
  2703  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  2704  		}
  2705  		node0 := nodes.Items[0]
  2706  
  2707  		serviceName := "svc-proxy-terminating"
  2708  		ns := f.Namespace.Name
  2709  		servicePort := 80
  2710  
  2711  		ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns)
  2712  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2713  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2714  			svc.Spec.Ports = []v1.ServicePort{
  2715  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  2716  			}
  2717  			svc.Spec.Type = v1.ServiceTypeLoadBalancer
  2718  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  2719  		})
  2720  		framework.ExpectNoError(err)
  2721  
  2722  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2723  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100")
  2724  		webserverPod0.Labels = jig.Labels
  2725  		webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100)
  2726  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2727  
  2728  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2729  		framework.ExpectNoError(err)
  2730  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2731  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  2732  
  2733  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  2734  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2735  
  2736  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  2737  		framework.ExpectNoError(err)
  2738  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2739  
  2740  		nodeIPs := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
  2741  		healthCheckNodePortAddr := net.JoinHostPort(nodeIPs[0], strconv.Itoa(int(svc.Spec.HealthCheckNodePort)))
  2742  		// validate that the health check node port from kube-proxy returns 200 when there are ready endpoints
  2743  		err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
  2744  			cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --max-time 5 http://%s/healthz`, healthCheckNodePortAddr)
  2745  			out, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
  2746  			if err != nil {
  2747  				framework.Logf("unexpected error trying to connect to nodeport %s : %v", healthCheckNodePortAddr, err)
  2748  				return false, nil
  2749  			}
  2750  
  2751  			expectedOut := "200"
  2752  			if out != expectedOut {
  2753  				framework.Logf("expected output: %s , got %s", expectedOut, out)
  2754  				return false, nil
  2755  			}
  2756  			return true, nil
  2757  		})
  2758  		framework.ExpectNoError(err)
  2759  
  2760  		// webserver should continue to serve traffic through the Service after deletion, even though the health check node port should return 503
  2761  		ginkgo.By("Terminating the webserver pod")
  2762  		err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{})
  2763  		framework.ExpectNoError(err)
  2764  
  2765  		// validate that the health check node port from kube-proxy returns 503 when there are no ready endpoints
  2766  		err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
  2767  			cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --max-time 5 http://%s/healthz`, healthCheckNodePortAddr)
  2768  			out, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
  2769  			if err != nil {
  2770  				framework.Logf("unexpected error trying to connect to nodeport %s : %v", healthCheckNodePortAddr, err)
  2771  				return false, nil
  2772  			}
  2773  
  2774  			expectedOut := "503"
  2775  			if out != expectedOut {
  2776  				framework.Logf("expected output: %s , got %s", expectedOut, out)
  2777  				return false, nil
  2778  			}
  2779  			return true, nil
  2780  		})
  2781  		framework.ExpectNoError(err)
  2782  
  2783  		// also verify that while health check node port indicates 0 endpoints and returns 503, the endpoint still serves traffic.
  2784  		nodePortAddress := net.JoinHostPort(nodeIPs[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  2785  		execHostnameTest(*pausePod0, nodePortAddress, webserverPod0.Name)
  2786  	})
  2787  
  2788  	ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster", func(ctx context.Context) {
  2789  		// windows kube-proxy does not support this feature yet
  2790  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  2791  
  2792  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  2793  		framework.ExpectNoError(err)
  2794  		nodeCounts := len(nodes.Items)
  2795  		if nodeCounts < 2 {
  2796  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  2797  		}
  2798  		node0 := nodes.Items[0]
  2799  		node1 := nodes.Items[1]
  2800  
  2801  		serviceName := "svc-proxy-terminating"
  2802  		ns := f.Namespace.Name
  2803  		servicePort := 80
  2804  
  2805  		ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns)
  2806  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2807  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2808  			svc.Spec.Ports = []v1.ServicePort{
  2809  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  2810  			}
  2811  		})
  2812  		framework.ExpectNoError(err)
  2813  
  2814  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2815  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100")
  2816  		webserverPod0.Labels = jig.Labels
  2817  		webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100)
  2818  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2819  
  2820  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2821  		framework.ExpectNoError(err)
  2822  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2823  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  2824  
  2825  		ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
  2826  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  2827  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2828  
  2829  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  2830  		framework.ExpectNoError(err)
  2831  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2832  
  2833  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  2834  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  2835  
  2836  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  2837  		framework.ExpectNoError(err)
  2838  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))
  2839  
  2840  		// webserver should continue to serve traffic through the Service after delete since:
  2841  		//  - it has a 100s termination grace period
  2842  		//  - it is the only ready endpoint
  2843  		err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{})
  2844  		framework.ExpectNoError(err)
  2845  
  2846  		// assert 5 times that both the local and remote pod can connect to the Service while all endpoints are terminating
  2847  		serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
  2848  		for i := 0; i < 5; i++ {
  2849  			// There's a Service with internalTrafficPolicy=Cluster,
  2850  			// with a single endpoint (which is terminating) called webserver0 running on node0.
  2851  			// pausePod0 and pausePod1 are on node0 and node1 respectively.
  2852  			// pausePod0 -> Service clusterIP succeeds because traffic policy is "Cluster"
  2853  			// pausePod1 -> Service clusterIP succeeds because traffic policy is "Cluster"
  2854  			execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)
  2855  			execHostnameTest(*pausePod1, serviceAddress, webserverPod0.Name)
  2856  
  2857  			time.Sleep(5 * time.Second)
  2858  		}
  2859  	})
  2860  
  2861  	ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local", func(ctx context.Context) {
  2862  		// windows kube-proxy does not support this feature yet
  2863  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  2864  
  2865  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  2866  		framework.ExpectNoError(err)
  2867  		nodeCounts := len(nodes.Items)
  2868  		if nodeCounts < 2 {
  2869  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  2870  		}
  2871  		node0 := nodes.Items[0]
  2872  		node1 := nodes.Items[1]
  2873  
  2874  		serviceName := "svc-proxy-terminating"
  2875  		ns := f.Namespace.Name
  2876  		servicePort := 80
  2877  
  2878  		ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns)
  2879  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2880  		local := v1.ServiceInternalTrafficPolicyLocal
  2881  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2882  			svc.Spec.Ports = []v1.ServicePort{
  2883  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  2884  			}
  2885  			svc.Spec.InternalTrafficPolicy = &local
  2886  		})
  2887  		framework.ExpectNoError(err)
  2888  
  2889  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2890  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100")
  2891  		webserverPod0.Labels = jig.Labels
  2892  		webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100)
  2893  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2894  
  2895  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2896  		framework.ExpectNoError(err)
  2897  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2898  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  2899  
  2900  		ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
  2901  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  2902  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2903  
  2904  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  2905  		framework.ExpectNoError(err)
  2906  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2907  
  2908  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  2909  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  2910  
  2911  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  2912  		framework.ExpectNoError(err)
  2913  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))
  2914  
  2915  		// webserver should continue to serve traffic through the Service after delete since:
  2916  		//  - it has a 100s termination grace period
  2917  		//  - it is the only ready endpoint
  2918  		err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{})
  2919  		framework.ExpectNoError(err)
  2920  
  2921  		// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
  2922  		serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
  2923  		for i := 0; i < 5; i++ {
  2924  			// There's a Service with internalTrafficPolicy=Local,
  2925  			// with a single endpoint (which is terminating) called webserver0 running on node0.
  2926  			// pausePod0 and pausePod1 are on node0 and node1 respectively.
  2927  			// pausePod0 -> Service clusterIP succeeds because webserver0 is running on node0 and traffic policy is "Local"
  2928  			// pausePod1 -> Service clusterIP fails because webserver0 is on a different node and traffic policy is "Local"
  2929  			execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)
  2930  
  2931  			cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
  2932  			_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
  2933  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to cluster IP")
  2934  
  2935  			time.Sleep(5 * time.Second)
  2936  		}
  2937  	})
  2938  
  2939  	ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster", func(ctx context.Context) {
  2940  		// windows kube-proxy does not support this feature yet
  2941  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  2942  
  2943  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  2944  		framework.ExpectNoError(err)
  2945  		nodeCounts := len(nodes.Items)
  2946  		if nodeCounts < 2 {
  2947  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  2948  		}
  2949  		node0 := nodes.Items[0]
  2950  		node1 := nodes.Items[1]
  2951  
  2952  		serviceName := "svc-proxy-terminating"
  2953  		ns := f.Namespace.Name
  2954  		servicePort := 80
  2955  
  2956  		ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns)
  2957  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  2958  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  2959  			svc.Spec.Ports = []v1.ServicePort{
  2960  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  2961  			}
  2962  			svc.Spec.Type = v1.ServiceTypeNodePort
  2963  		})
  2964  		framework.ExpectNoError(err)
  2965  
  2966  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  2967  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100")
  2968  		webserverPod0.Labels = jig.Labels
  2969  		webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100)
  2970  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2971  
  2972  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  2973  		framework.ExpectNoError(err)
  2974  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2975  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  2976  
  2977  		ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
  2978  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  2979  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  2980  
  2981  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  2982  		framework.ExpectNoError(err)
  2983  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  2984  
  2985  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  2986  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  2987  
  2988  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  2989  		framework.ExpectNoError(err)
  2990  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))
  2991  
  2992  		// webserver should continue to serve traffic through the Service after delete since:
  2993  		//  - it has a 100s termination grace period
  2994  		//  - it is the only ready endpoint
  2995  		err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{})
  2996  		framework.ExpectNoError(err)
  2997  
  2998  		// assert 5 times that both the local and remote pod can connect to the Service NodePort while all endpoints are terminating
  2999  		nodeIPs := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
  3000  		nodePortAddress := net.JoinHostPort(nodeIPs[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  3001  		for i := 0; i < 5; i++ {
  3002  			// There's a Service Type=NodePort with externalTrafficPolicy=Cluster,
  3003  			// with a single endpoint (which is terminating) called webserver0 running on node0.
  3004  			// pausePod0 and pausePod1 are on node0 and node1 respectively.
  3005  			// pausePod0 -> node0 node port succeeds because webserver0 is running on node0 and traffic policy is "Cluster"
  3006  			// pausePod1 -> node0 node port succeeds because webserver0 is running on node0 and traffic policy is "Cluster"
  3007  			execHostnameTest(*pausePod0, nodePortAddress, webserverPod0.Name)
  3008  			execHostnameTest(*pausePod1, nodePortAddress, webserverPod0.Name)
  3009  
  3010  			time.Sleep(5 * time.Second)
  3011  		}
  3012  	})
  3013  
  3014  	ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local", func(ctx context.Context) {
  3015  		// windows kube-proxy does not support this feature yet
  3016  		e2eskipper.SkipIfNodeOSDistroIs("windows")
  3017  
  3018  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  3019  		framework.ExpectNoError(err)
  3020  		nodeCounts := len(nodes.Items)
  3021  		if nodeCounts < 2 {
  3022  			e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
  3023  		}
  3024  		node0 := nodes.Items[0]
  3025  		node1 := nodes.Items[1]
  3026  
  3027  		serviceName := "svc-proxy-terminating"
  3028  		ns := f.Namespace.Name
  3029  		servicePort := 80
  3030  
  3031  		ginkgo.By("creating a TCP service " + serviceName + " where all pods are terminating" + ns)
  3032  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  3033  		svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
  3034  			svc.Spec.Ports = []v1.ServicePort{
  3035  				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(80)},
  3036  			}
  3037  			svc.Spec.Type = v1.ServiceTypeNodePort
  3038  			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
  3039  		})
  3040  		framework.ExpectNoError(err)
  3041  
  3042  		ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
  3043  		webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100")
  3044  		webserverPod0.Labels = jig.Labels
  3045  		webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100)
  3046  		e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  3047  
  3048  		_, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{})
  3049  		framework.ExpectNoError(err)
  3050  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  3051  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})
  3052  
  3053  		ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
  3054  		pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
  3055  		e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})
  3056  
  3057  		pausePod0, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod0, metav1.CreateOptions{})
  3058  		framework.ExpectNoError(err)
  3059  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))
  3060  
  3061  		pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
  3062  		e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})
  3063  
  3064  		pausePod1, err = cs.CoreV1().Pods(ns).Create(ctx, pausePod1, metav1.CreateOptions{})
  3065  		framework.ExpectNoError(err)
  3066  		framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))
  3067  
  3068  		// webserver should continue to serve traffic through the Service after delete since:
  3069  		//  - it has a 100s termination grace period
  3070  		//  - it is the only ready endpoint
  3071  		err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{})
  3072  		framework.ExpectNoError(err)
  3073  
  3074  		// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
  3075  		nodeIPs0 := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
  3076  		nodeIPs1 := e2enode.GetAddresses(&node1, v1.NodeInternalIP)
  3077  		nodePortAddress0 := net.JoinHostPort(nodeIPs0[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  3078  		nodePortAddress1 := net.JoinHostPort(nodeIPs1[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
  3079  		for i := 0; i < 5; i++ {
  3080  			// There's a Service Type=NodePort with externalTrafficPolicy=Local,
  3081  			// with a single endpoint (which is terminating) called webserver0 running on node0.
  3082  			// pausePod0 and pausePod1 are on node0 and node1 respectively.
  3083  			// pausePod0 -> node1 node port fails because it's "external" and there are no local endpoints
  3084  			// pausePod1 -> node0 node port succeeds because webserver0 is running on node0
  3085  			// pausePod0 -> node0 node port succeeds because webserver0 is running on node0
  3086  			//
  3087  			// NOTE: pausePod1 -> node1 will succeed for kube-proxy because kube-proxy considers pod-to-same-node-NodePort
  3088  			// connections as neither internal nor external and always get Cluster traffic policy. However, we do not test
  3089  			// this here because not all Network implementations follow kube-proxy's interpretation of "destination"
  3090  			// traffic policy. See: https://github.com/kubernetes/kubernetes/pull/123622
  3091  			cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1)
  3092  			_, err := e2eoutput.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
  3093  			gomega.Expect(err).To(gomega.HaveOccurred(), "expected error when trying to connect to node port for pausePod0")
  3094  
  3095  			execHostnameTest(*pausePod0, nodePortAddress0, webserverPod0.Name)
  3096  			execHostnameTest(*pausePod1, nodePortAddress0, webserverPod0.Name)
  3097  
  3098  			time.Sleep(5 * time.Second)
  3099  		}
  3100  	})
  3101  
  3102  	/*
  3103  	   Release: v1.18
  3104  	   Testname: Find Kubernetes Service in default Namespace
  3105  	   Description: List all Services in all Namespaces, response MUST include a Service named Kubernetes with the Namespace of default.
  3106  	*/
  3107  	framework.ConformanceIt("should find a service from listing all namespaces", func(ctx context.Context) {
  3108  		ginkgo.By("fetching services")
  3109  		svcs, _ := f.ClientSet.CoreV1().Services("").List(ctx, metav1.ListOptions{})
  3110  
  3111  		foundSvc := false
  3112  		for _, svc := range svcs.Items {
  3113  			if svc.ObjectMeta.Name == "kubernetes" && svc.ObjectMeta.Namespace == "default" {
  3114  				foundSvc = true
  3115  				break
  3116  			}
  3117  		}
  3118  
  3119  		if !foundSvc {
  3120  			framework.Fail("could not find service 'kubernetes' in service list in all namespaces")
  3121  		}
  3122  	})
  3123  
  3124  	/*
  3125  	   Release: v1.19
  3126  	   Testname: Endpoint resource lifecycle
  3127  	   Description: Create an endpoint, the endpoint MUST exist.
  3128  	   The endpoint is updated with a new label, a check after the update MUST find the changes.
  3129  	   The endpoint is then patched with a new IPv4 address and port, a check after the patch MUST the changes.
  3130  	   The endpoint is deleted by it's label, a watch listens for the deleted watch event.
  3131  	*/
  3132  	framework.ConformanceIt("should test the lifecycle of an Endpoint", func(ctx context.Context) {
  3133  		testNamespaceName := f.Namespace.Name
  3134  		testEndpointName := "testservice"
  3135  		testEndpoints := v1.Endpoints{
  3136  			ObjectMeta: metav1.ObjectMeta{
  3137  				Name: testEndpointName,
  3138  				Labels: map[string]string{
  3139  					"test-endpoint-static": "true",
  3140  				},
  3141  			},
  3142  			Subsets: []v1.EndpointSubset{{
  3143  				Addresses: []v1.EndpointAddress{{
  3144  					IP: "10.0.0.24",
  3145  				}},
  3146  				Ports: []v1.EndpointPort{{
  3147  					Name:     "http",
  3148  					Port:     80,
  3149  					Protocol: v1.ProtocolTCP,
  3150  				}},
  3151  			}},
  3152  		}
  3153  		w := &cache.ListWatch{
  3154  			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
  3155  				options.LabelSelector = "test-endpoint-static=true"
  3156  				return f.ClientSet.CoreV1().Endpoints(testNamespaceName).Watch(ctx, options)
  3157  			},
  3158  		}
  3159  		endpointsList, err := f.ClientSet.CoreV1().Endpoints("").List(ctx, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"})
  3160  		framework.ExpectNoError(err, "failed to list Endpoints")
  3161  
  3162  		ginkgo.By("creating an Endpoint")
  3163  		_, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Create(ctx, &testEndpoints, metav1.CreateOptions{})
  3164  		framework.ExpectNoError(err, "failed to create Endpoint")
  3165  		ginkgo.By("waiting for available Endpoint")
  3166  		ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
  3167  		defer cancel()
  3168  		_, err = watchtools.Until(ctxUntil, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3169  			switch event.Type {
  3170  			case watch.Added:
  3171  				if endpoints, ok := event.Object.(*v1.Endpoints); ok {
  3172  					found := endpoints.ObjectMeta.Name == endpoints.Name &&
  3173  						endpoints.Labels["test-endpoint-static"] == "true"
  3174  					return found, nil
  3175  				}
  3176  			default:
  3177  				framework.Logf("observed event type %v", event.Type)
  3178  			}
  3179  			return false, nil
  3180  		})
  3181  		framework.ExpectNoError(err, "failed to see %v event", watch.Added)
  3182  
  3183  		ginkgo.By("listing all Endpoints")
  3184  		endpointsList, err = f.ClientSet.CoreV1().Endpoints("").List(ctx, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"})
  3185  		framework.ExpectNoError(err, "failed to list Endpoints")
  3186  		eventFound := false
  3187  		var foundEndpoint v1.Endpoints
  3188  		for _, endpoint := range endpointsList.Items {
  3189  			if endpoint.ObjectMeta.Name == testEndpointName && endpoint.ObjectMeta.Namespace == testNamespaceName {
  3190  				eventFound = true
  3191  				foundEndpoint = endpoint
  3192  				break
  3193  			}
  3194  		}
  3195  		if !eventFound {
  3196  			framework.Fail("unable to find Endpoint Service in list of Endpoints")
  3197  		}
  3198  
  3199  		ginkgo.By("updating the Endpoint")
  3200  		foundEndpoint.ObjectMeta.Labels["test-service"] = "updated"
  3201  		_, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Update(ctx, &foundEndpoint, metav1.UpdateOptions{})
  3202  		framework.ExpectNoError(err, "failed to update Endpoint with new label")
  3203  
  3204  		ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
  3205  		defer cancel()
  3206  		_, err = watchtools.Until(ctxUntil, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3207  			switch event.Type {
  3208  			case watch.Modified:
  3209  				if endpoints, ok := event.Object.(*v1.Endpoints); ok {
  3210  					found := endpoints.ObjectMeta.Name == endpoints.Name &&
  3211  						endpoints.Labels["test-endpoint-static"] == "true"
  3212  					return found, nil
  3213  				}
  3214  			default:
  3215  				framework.Logf("observed event type %v", event.Type)
  3216  			}
  3217  			return false, nil
  3218  		})
  3219  		framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
  3220  
  3221  		ginkgo.By("fetching the Endpoint")
  3222  		endpoints, err := f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{})
  3223  		framework.ExpectNoError(err, "failed to fetch Endpoint")
  3224  		gomega.Expect(foundEndpoint.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-service", "updated"), "failed to update Endpoint %v in namespace %v label not updated", testEndpointName, testNamespaceName)
  3225  
  3226  		endpointPatch, err := json.Marshal(map[string]interface{}{
  3227  			"metadata": map[string]interface{}{
  3228  				"labels": map[string]string{
  3229  					"test-service": "patched",
  3230  				},
  3231  			},
  3232  			"subsets": []map[string]interface{}{
  3233  				{
  3234  					"addresses": []map[string]string{
  3235  						{
  3236  							"ip": "10.0.0.25",
  3237  						},
  3238  					},
  3239  					"ports": []map[string]interface{}{
  3240  						{
  3241  							"name": "http-test",
  3242  							"port": int32(8080),
  3243  						},
  3244  					},
  3245  				},
  3246  			},
  3247  		})
  3248  		framework.ExpectNoError(err, "failed to marshal JSON for WatchEvent patch")
  3249  		ginkgo.By("patching the Endpoint")
  3250  		_, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Patch(ctx, testEndpointName, types.StrategicMergePatchType, []byte(endpointPatch), metav1.PatchOptions{})
  3251  		framework.ExpectNoError(err, "failed to patch Endpoint")
  3252  		ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
  3253  		defer cancel()
  3254  		_, err = watchtools.Until(ctxUntil, endpoints.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3255  			switch event.Type {
  3256  			case watch.Modified:
  3257  				if endpoints, ok := event.Object.(*v1.Endpoints); ok {
  3258  					found := endpoints.ObjectMeta.Name == endpoints.Name &&
  3259  						endpoints.Labels["test-endpoint-static"] == "true"
  3260  					return found, nil
  3261  				}
  3262  			default:
  3263  				framework.Logf("observed event type %v", event.Type)
  3264  			}
  3265  			return false, nil
  3266  		})
  3267  		framework.ExpectNoError(err, "failed to see %v event", watch.Modified)
  3268  
  3269  		ginkgo.By("fetching the Endpoint")
  3270  		endpoints, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{})
  3271  		framework.ExpectNoError(err, "failed to fetch Endpoint")
  3272  		gomega.Expect(endpoints.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-service", "patched"), "failed to patch Endpoint with Label")
  3273  		endpointSubsetOne := endpoints.Subsets[0]
  3274  		endpointSubsetOneAddresses := endpointSubsetOne.Addresses[0]
  3275  		endpointSubsetOnePorts := endpointSubsetOne.Ports[0]
  3276  		gomega.Expect(endpointSubsetOneAddresses.IP).To(gomega.Equal("10.0.0.25"), "failed to patch Endpoint")
  3277  		gomega.Expect(endpointSubsetOnePorts.Name).To(gomega.Equal("http-test"), "failed to patch Endpoint")
  3278  		gomega.Expect(endpointSubsetOnePorts.Port).To(gomega.Equal(int32(8080)), "failed to patch Endpoint")
  3279  
  3280  		ginkgo.By("deleting the Endpoint by Collection")
  3281  		err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"})
  3282  		framework.ExpectNoError(err, "failed to delete Endpoint by Collection")
  3283  
  3284  		ginkgo.By("waiting for Endpoint deletion")
  3285  		ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
  3286  		defer cancel()
  3287  		_, err = watchtools.Until(ctxUntil, endpoints.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3288  			switch event.Type {
  3289  			case watch.Deleted:
  3290  				if endpoints, ok := event.Object.(*v1.Endpoints); ok {
  3291  					found := endpoints.ObjectMeta.Name == endpoints.Name &&
  3292  						endpoints.Labels["test-endpoint-static"] == "true"
  3293  					return found, nil
  3294  				}
  3295  			default:
  3296  				framework.Logf("observed event type %v", event.Type)
  3297  			}
  3298  			return false, nil
  3299  		})
  3300  		framework.ExpectNoError(err, "failed to see %v event", watch.Deleted)
  3301  
  3302  		ginkgo.By("fetching the Endpoint")
  3303  		_, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{})
  3304  		gomega.Expect(err).To(gomega.HaveOccurred(), "should not be able to fetch Endpoint")
  3305  	})
  3306  
  3307  	/*
  3308  		Release: v1.21
  3309  		Testname: Service, complete ServiceStatus lifecycle
  3310  		Description: Create a service, the service MUST exist.
  3311  		When retrieving /status the action MUST be validated.
  3312  		When patching /status the action MUST be validated.
  3313  		When updating /status the action MUST be validated.
  3314  		When patching a service the action MUST be validated.
  3315  	*/
  3316  	framework.ConformanceIt("should complete a service status lifecycle", func(ctx context.Context) {
  3317  
  3318  		ns := f.Namespace.Name
  3319  		svcResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}
  3320  		svcClient := f.ClientSet.CoreV1().Services(ns)
  3321  
  3322  		testSvcName := "test-service-" + utilrand.String(5)
  3323  		testSvcLabels := map[string]string{"test-service-static": "true"}
  3324  		testSvcLabelsFlat := "test-service-static=true"
  3325  
  3326  		w := &cache.ListWatch{
  3327  			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
  3328  				options.LabelSelector = testSvcLabelsFlat
  3329  				return cs.CoreV1().Services(ns).Watch(ctx, options)
  3330  			},
  3331  		}
  3332  
  3333  		svcList, err := cs.CoreV1().Services("").List(ctx, metav1.ListOptions{LabelSelector: testSvcLabelsFlat})
  3334  		framework.ExpectNoError(err, "failed to list Services")
  3335  
  3336  		ginkgo.By("creating a Service")
  3337  		testService := v1.Service{
  3338  			ObjectMeta: metav1.ObjectMeta{
  3339  				Name:   testSvcName,
  3340  				Labels: testSvcLabels,
  3341  			},
  3342  			Spec: v1.ServiceSpec{
  3343  				Type: "LoadBalancer",
  3344  				Ports: []v1.ServicePort{{
  3345  					Name:       "http",
  3346  					Protocol:   v1.ProtocolTCP,
  3347  					Port:       int32(80),
  3348  					TargetPort: intstr.FromInt32(80),
  3349  				}},
  3350  				LoadBalancerClass: utilpointer.String("example.com/internal-vip"),
  3351  			},
  3352  		}
  3353  		_, err = cs.CoreV1().Services(ns).Create(ctx, &testService, metav1.CreateOptions{})
  3354  		framework.ExpectNoError(err, "failed to create Service")
  3355  
  3356  		ginkgo.By("watching for the Service to be added")
  3357  		ctxUntil, cancel := context.WithTimeout(ctx, svcReadyTimeout)
  3358  		defer cancel()
  3359  		_, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3360  			if svc, ok := event.Object.(*v1.Service); ok {
  3361  				found := svc.ObjectMeta.Name == testService.ObjectMeta.Name &&
  3362  					svc.ObjectMeta.Namespace == ns &&
  3363  					svc.Labels["test-service-static"] == "true"
  3364  				if !found {
  3365  					framework.Logf("observed Service %v in namespace %v with labels: %v & ports %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Labels, svc.Spec.Ports)
  3366  					return false, nil
  3367  				}
  3368  				framework.Logf("Found Service %v in namespace %v with labels: %v & ports %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Labels, svc.Spec.Ports)
  3369  				return found, nil
  3370  			}
  3371  			framework.Logf("Observed event: %+v", event.Object)
  3372  			return false, nil
  3373  		})
  3374  		framework.ExpectNoError(err, "Failed to locate Service %v in namespace %v", testService.ObjectMeta.Name, ns)
  3375  		framework.Logf("Service %s created", testSvcName)
  3376  
  3377  		ginkgo.By("Getting /status")
  3378  		svcStatusUnstructured, err := f.DynamicClient.Resource(svcResource).Namespace(ns).Get(ctx, testSvcName, metav1.GetOptions{}, "status")
  3379  		framework.ExpectNoError(err, "Failed to fetch ServiceStatus of Service %s in namespace %s", testSvcName, ns)
  3380  		svcStatusBytes, err := json.Marshal(svcStatusUnstructured)
  3381  		framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)
  3382  
  3383  		var svcStatus v1.Service
  3384  		err = json.Unmarshal(svcStatusBytes, &svcStatus)
  3385  		framework.ExpectNoError(err, "Failed to unmarshal JSON bytes to a Service object type")
  3386  		framework.Logf("Service %s has LoadBalancer: %v", testSvcName, svcStatus.Status.LoadBalancer)
  3387  
  3388  		ginkgo.By("patching the ServiceStatus")
  3389  		lbStatus := v1.LoadBalancerStatus{
  3390  			Ingress: []v1.LoadBalancerIngress{{IP: "203.0.113.1"}},
  3391  		}
  3392  		lbStatusJSON, err := json.Marshal(lbStatus)
  3393  		framework.ExpectNoError(err, "Failed to marshal JSON. %v", err)
  3394  		_, err = svcClient.Patch(ctx, testSvcName, types.MergePatchType,
  3395  			[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"loadBalancer":`+string(lbStatusJSON)+`}}`),
  3396  			metav1.PatchOptions{}, "status")
  3397  		framework.ExpectNoError(err, "Could not patch service status", err)
  3398  
  3399  		ginkgo.By("watching for the Service to be patched")
  3400  		ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout)
  3401  		defer cancel()
  3402  
  3403  		_, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3404  			if svc, ok := event.Object.(*v1.Service); ok {
  3405  				found := svc.ObjectMeta.Name == testService.ObjectMeta.Name &&
  3406  					svc.ObjectMeta.Namespace == ns &&
  3407  					svc.Annotations["patchedstatus"] == "true"
  3408  				if !found {
  3409  					framework.Logf("observed Service %v in namespace %v with annotations: %v & LoadBalancer: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Annotations, svc.Status.LoadBalancer)
  3410  					return false, nil
  3411  				}
  3412  				framework.Logf("Found Service %v in namespace %v with annotations: %v & LoadBalancer: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Annotations, svc.Status.LoadBalancer)
  3413  				return found, nil
  3414  			}
  3415  			framework.Logf("Observed event: %+v", event.Object)
  3416  			return false, nil
  3417  		})
  3418  		framework.ExpectNoError(err, "failed to locate Service %v in namespace %v", testService.ObjectMeta.Name, ns)
  3419  		framework.Logf("Service %s has service status patched", testSvcName)
  3420  
  3421  		ginkgo.By("updating the ServiceStatus")
  3422  
  3423  		var statusToUpdate, updatedStatus *v1.Service
  3424  		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
  3425  			statusToUpdate, err = svcClient.Get(ctx, testSvcName, metav1.GetOptions{})
  3426  			framework.ExpectNoError(err, "Unable to retrieve service %s", testSvcName)
  3427  
  3428  			statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, metav1.Condition{
  3429  				Type:    "StatusUpdate",
  3430  				Status:  metav1.ConditionTrue,
  3431  				Reason:  "E2E",
  3432  				Message: "Set from e2e test",
  3433  			})
  3434  
  3435  			updatedStatus, err = svcClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
  3436  			return err
  3437  		})
  3438  		framework.ExpectNoError(err, "\n\n Failed to UpdateStatus. %v\n\n", err)
  3439  		framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
  3440  
  3441  		ginkgo.By("watching for the Service to be updated")
  3442  		ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout)
  3443  		defer cancel()
  3444  		_, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3445  			if svc, ok := event.Object.(*v1.Service); ok {
  3446  				found := svc.ObjectMeta.Name == testService.ObjectMeta.Name &&
  3447  					svc.ObjectMeta.Namespace == ns &&
  3448  					svc.Annotations["patchedstatus"] == "true"
  3449  				if !found {
  3450  					framework.Logf("Observed Service %v in namespace %v with annotations: %v & Conditions: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Annotations, svc.Status.LoadBalancer)
  3451  					return false, nil
  3452  				}
  3453  				for _, cond := range svc.Status.Conditions {
  3454  					if cond.Type == "StatusUpdate" &&
  3455  						cond.Reason == "E2E" &&
  3456  						cond.Message == "Set from e2e test" {
  3457  						framework.Logf("Found Service %v in namespace %v with annotations: %v & Conditions: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Annotations, svc.Status.Conditions)
  3458  						return found, nil
  3459  					} else {
  3460  						framework.Logf("Observed Service %v in namespace %v with annotations: %v & Conditions: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Annotations, svc.Status.LoadBalancer)
  3461  						return false, nil
  3462  					}
  3463  				}
  3464  			}
  3465  			framework.Logf("Observed event: %+v", event.Object)
  3466  			return false, nil
  3467  		})
  3468  		framework.ExpectNoError(err, "failed to locate Service %v in namespace %v", testService.ObjectMeta.Name, ns)
  3469  		framework.Logf("Service %s has service status updated", testSvcName)
  3470  
  3471  		ginkgo.By("patching the service")
  3472  		servicePatchPayload, err := json.Marshal(v1.Service{
  3473  			ObjectMeta: metav1.ObjectMeta{
  3474  				Labels: map[string]string{
  3475  					"test-service": "patched",
  3476  				},
  3477  			},
  3478  		})
  3479  
  3480  		_, err = svcClient.Patch(ctx, testSvcName, types.StrategicMergePatchType, []byte(servicePatchPayload), metav1.PatchOptions{})
  3481  		framework.ExpectNoError(err, "failed to patch service. %v", err)
  3482  
  3483  		ginkgo.By("watching for the Service to be patched")
  3484  		ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout)
  3485  		defer cancel()
  3486  		_, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3487  			if svc, ok := event.Object.(*v1.Service); ok {
  3488  				found := svc.ObjectMeta.Name == testService.ObjectMeta.Name &&
  3489  					svc.ObjectMeta.Namespace == ns &&
  3490  					svc.Labels["test-service"] == "patched"
  3491  				if !found {
  3492  					framework.Logf("observed Service %v in namespace %v with labels: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Labels)
  3493  					return false, nil
  3494  				}
  3495  				framework.Logf("Found Service %v in namespace %v with labels: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Labels)
  3496  				return found, nil
  3497  			}
  3498  			framework.Logf("Observed event: %+v", event.Object)
  3499  			return false, nil
  3500  		})
  3501  		framework.ExpectNoError(err, "failed to locate Service %v in namespace %v", testService.ObjectMeta.Name, ns)
  3502  		framework.Logf("Service %s patched", testSvcName)
  3503  
  3504  		ginkgo.By("deleting the service")
  3505  		err = cs.CoreV1().Services(ns).Delete(ctx, testSvcName, metav1.DeleteOptions{})
  3506  		framework.ExpectNoError(err, "failed to delete the Service. %v", err)
  3507  
  3508  		ginkgo.By("watching for the Service to be deleted")
  3509  		ctxUntil, cancel = context.WithTimeout(ctx, svcReadyTimeout)
  3510  		defer cancel()
  3511  		_, err = watchtools.Until(ctxUntil, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  3512  			switch event.Type {
  3513  			case watch.Deleted:
  3514  				if svc, ok := event.Object.(*v1.Service); ok {
  3515  					found := svc.ObjectMeta.Name == testService.ObjectMeta.Name &&
  3516  						svc.ObjectMeta.Namespace == ns &&
  3517  						svc.Labels["test-service-static"] == "true"
  3518  					if !found {
  3519  						framework.Logf("observed Service %v in namespace %v with labels: %v & annotations: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Labels, svc.Annotations)
  3520  						return false, nil
  3521  					}
  3522  					framework.Logf("Found Service %v in namespace %v with labels: %v & annotations: %v", svc.ObjectMeta.Name, svc.ObjectMeta.Namespace, svc.Labels, svc.Annotations)
  3523  					return found, nil
  3524  				}
  3525  			default:
  3526  				framework.Logf("Observed event: %+v", event.Type)
  3527  			}
  3528  			return false, nil
  3529  		})
  3530  		framework.ExpectNoError(err, "failed to delete Service %v in namespace %v", testService.ObjectMeta.Name, ns)
  3531  		framework.Logf("Service %s deleted", testSvcName)
  3532  	})
  3533  
  3534  	/*
  3535  		Release: v1.23
  3536  		Testname: Service, deletes a collection of services
  3537  		Description: Create three services with the required
  3538  		labels and ports. It MUST locate three services in the
  3539  		test namespace. It MUST succeed at deleting a collection
  3540  		of services via a label selector. It MUST locate only
  3541  		one service after deleting the service collection.
  3542  	*/
  3543  	framework.ConformanceIt("should delete a collection of services", func(ctx context.Context) {
  3544  
  3545  		ns := f.Namespace.Name
  3546  		svcClient := f.ClientSet.CoreV1().Services(ns)
  3547  		svcResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}
  3548  		svcDynamicClient := f.DynamicClient.Resource(svcResource).Namespace(ns)
  3549  
  3550  		svcLabel := map[string]string{"e2e-test-service": "delete"}
  3551  		deleteLabel := labels.SelectorFromSet(svcLabel).String()
  3552  
  3553  		ginkgo.By("creating a collection of services")
  3554  
  3555  		testServices := []struct {
  3556  			name  string
  3557  			label map[string]string
  3558  			port  int
  3559  		}{
  3560  			{
  3561  				name:  "e2e-svc-a-" + utilrand.String(5),
  3562  				label: map[string]string{"e2e-test-service": "delete"},
  3563  				port:  8001,
  3564  			},
  3565  			{
  3566  				name:  "e2e-svc-b-" + utilrand.String(5),
  3567  				label: map[string]string{"e2e-test-service": "delete"},
  3568  				port:  8002,
  3569  			},
  3570  			{
  3571  				name:  "e2e-svc-c-" + utilrand.String(5),
  3572  				label: map[string]string{"e2e-test-service": "keep"},
  3573  				port:  8003,
  3574  			},
  3575  		}
  3576  
  3577  		for _, testService := range testServices {
  3578  			func() {
  3579  				framework.Logf("Creating %s", testService.name)
  3580  
  3581  				svc := v1.Service{
  3582  					ObjectMeta: metav1.ObjectMeta{
  3583  						Name:   testService.name,
  3584  						Labels: testService.label,
  3585  					},
  3586  					Spec: v1.ServiceSpec{
  3587  						Type: "ClusterIP",
  3588  						Ports: []v1.ServicePort{{
  3589  							Name:       "http",
  3590  							Protocol:   v1.ProtocolTCP,
  3591  							Port:       int32(testService.port),
  3592  							TargetPort: intstr.FromInt(testService.port),
  3593  						}},
  3594  					},
  3595  				}
  3596  				_, err := svcClient.Create(ctx, &svc, metav1.CreateOptions{})
  3597  				framework.ExpectNoError(err, "failed to create Service")
  3598  
  3599  			}()
  3600  		}
  3601  
  3602  		svcList, err := cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
  3603  		framework.ExpectNoError(err, "failed to list Services")
  3604  		gomega.Expect(svcList.Items).To(gomega.HaveLen(3), "Required count of services out of sync")
  3605  
  3606  		ginkgo.By("deleting service collection")
  3607  		err = svcDynamicClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: deleteLabel})
  3608  		framework.ExpectNoError(err, "failed to delete service collection. %v", err)
  3609  
  3610  		svcList, err = cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
  3611  		framework.ExpectNoError(err, "failed to list Services")
  3612  		gomega.Expect(svcList.Items).To(gomega.HaveLen(1), "Required count of services out of sync")
  3613  
  3614  		framework.Logf("Collection of services has been deleted")
  3615  	})
  3616  
  3617  	/*
  3618  		Release: v1.29
  3619  		Testname: Service, should serve endpoints on same port and different protocols.
  3620  		Description: Create one service with two ports, same port number and different protocol TCP and UDP.
  3621  		It MUST be able to forward traffic to both ports.
  3622  		Update the Service to expose only the TCP port, it MUST succeed to connect to the TCP port and fail
  3623  		to connect to the UDP port.
  3624  		Update the Service to expose only the UDP port, it MUST succeed to connect to the UDP port and fail
  3625  		to connect to the TCP port.
  3626  	*/
  3627  	framework.ConformanceIt("should serve endpoints on same port and different protocols", func(ctx context.Context) {
  3628  		serviceName := "multiprotocol-test"
  3629  		testLabels := map[string]string{"app": "multiport"}
  3630  		ns := f.Namespace.Name
  3631  		containerPort := 80
  3632  
  3633  		svcTCPport := v1.ServicePort{
  3634  			Name:       "tcp-port",
  3635  			Port:       80,
  3636  			TargetPort: intstr.FromInt(containerPort),
  3637  			Protocol:   v1.ProtocolTCP,
  3638  		}
  3639  		svcUDPport := v1.ServicePort{
  3640  			Name:       "udp-port",
  3641  			Port:       80,
  3642  			TargetPort: intstr.FromInt(containerPort),
  3643  			Protocol:   v1.ProtocolUDP,
  3644  		}
  3645  
  3646  		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
  3647  
  3648  		testService := v1.Service{
  3649  			ObjectMeta: metav1.ObjectMeta{
  3650  				Name:   serviceName,
  3651  				Labels: testLabels,
  3652  			},
  3653  			Spec: v1.ServiceSpec{
  3654  				Type:     v1.ServiceTypeClusterIP,
  3655  				Selector: testLabels,
  3656  				Ports:    []v1.ServicePort{svcTCPport, svcUDPport},
  3657  			},
  3658  		}
  3659  		service, err := cs.CoreV1().Services(ns).Create(ctx, &testService, metav1.CreateOptions{})
  3660  		framework.ExpectNoError(err, "failed to create Service")
  3661  
  3662  		containerPorts := []v1.ContainerPort{{
  3663  			Name:          svcTCPport.Name,
  3664  			ContainerPort: int32(containerPort),
  3665  			Protocol:      v1.ProtocolTCP,
  3666  		}, {
  3667  			Name:          svcUDPport.Name,
  3668  			ContainerPort: int32(containerPort),
  3669  			Protocol:      v1.ProtocolUDP,
  3670  		}}
  3671  		podname1 := "pod1"
  3672  		ginkgo.By("creating pod " + podname1 + " in namespace " + ns)
  3673  		createPodOrFail(ctx, f, ns, podname1, testLabels, containerPorts, "netexec", "--http-port", strconv.Itoa(containerPort), "--udp-port", strconv.Itoa(containerPort))
  3674  		validateEndpointsPortsWithProtocolsOrFail(cs, ns, serviceName, fullPortsByPodName{podname1: containerPorts})
  3675  
  3676  		ginkgo.By("Checking if the Service forwards traffic to the TCP and UDP port")
  3677  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  3678  		err = testEndpointReachability(ctx, service.Spec.ClusterIP, 80, v1.ProtocolTCP, execPod, 30*time.Second)
  3679  		if err != nil {
  3680  			framework.Failf("Failed to connect to Service TCP port: %v", err)
  3681  		}
  3682  		err = testEndpointReachability(ctx, service.Spec.ClusterIP, 80, v1.ProtocolUDP, execPod, 30*time.Second)
  3683  		if err != nil {
  3684  			framework.Failf("Failed to connect to Service UDP port: %v", err)
  3685  		}
  3686  
  3687  		ginkgo.By("Checking if the Service forwards traffic to TCP only")
  3688  		service, err = cs.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{})
  3689  		if err != nil {
  3690  			framework.Failf("failed to get Service %q: %v", serviceName, err)
  3691  		}
  3692  		service.Spec.Ports = []v1.ServicePort{svcTCPport}
  3693  		_, err = cs.CoreV1().Services(ns).Update(ctx, service, metav1.UpdateOptions{})
  3694  		if err != nil {
  3695  			framework.Failf("failed to get Service %q: %v", serviceName, err)
  3696  		}
  3697  
  3698  		// test reachability
  3699  		err = testEndpointReachability(ctx, service.Spec.ClusterIP, 80, v1.ProtocolTCP, execPod, 30*time.Second)
  3700  		if err != nil {
  3701  			framework.Failf("Failed to connect to Service TCP port: %v", err)
  3702  		}
  3703  		// take into account the NetworkProgrammingLatency
  3704  		// testEndpointReachability tries 3 times every 3 second
  3705  		// we retry again during 30 seconds to check if the port stops forwarding
  3706  		gomega.Eventually(ctx, func() error {
  3707  			return testEndpointReachability(ctx, service.Spec.ClusterIP, 80, v1.ProtocolUDP, execPod, 6*time.Second)
  3708  		}).WithTimeout(30 * time.Second).WithPolling(5 * time.Second).ShouldNot(gomega.BeNil())
  3709  
  3710  		ginkgo.By("Checking if the Service forwards traffic to UDP only")
  3711  		service, err = cs.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{})
  3712  		if err != nil {
  3713  			framework.Failf("failed to get Service %q: %v", serviceName, err)
  3714  		}
  3715  		service.Spec.Ports = []v1.ServicePort{svcUDPport}
  3716  		_, err = cs.CoreV1().Services(ns).Update(ctx, service, metav1.UpdateOptions{})
  3717  		if err != nil {
  3718  			framework.Failf("failed to update Service %q: %v", serviceName, err)
  3719  		}
  3720  
  3721  		// test reachability
  3722  		err = testEndpointReachability(ctx, service.Spec.ClusterIP, 80, v1.ProtocolUDP, execPod, 30*time.Second)
  3723  		if err != nil {
  3724  			framework.Failf("Failed to connect to Service UDP port: %v", err)
  3725  		}
  3726  		// take into account the NetworkProgrammingLatency
  3727  		// testEndpointReachability tries 3 times every 3 second
  3728  		// we retry again during 30 seconds to check if the port stops forwarding
  3729  		gomega.Eventually(ctx, func() error {
  3730  			return testEndpointReachability(ctx, service.Spec.ClusterIP, 80, v1.ProtocolTCP, execPod, 6*time.Second)
  3731  		}).WithTimeout(30 * time.Second).WithPolling(5 * time.Second).ShouldNot(gomega.BeNil())
  3732  	})
  3733  
  3734  	/*
  3735  		Release: v1.26
  3736  		Testname: Service, same ports with different protocols on a Load Balancer Service
  3737  		Description: Create a LoadBalancer service with two ports that have the same value but use different protocols. Add a Pod that listens on both ports. The Pod must be reachable via the ClusterIP and both ports
  3738  	*/
  3739  	ginkgo.It("should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer ", func(ctx context.Context) {
  3740  		serviceName := "multiprotocol-lb-test"
  3741  		ns := f.Namespace.Name
  3742  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  3743  
  3744  		ginkgo.DeferCleanup(func(ctx context.Context) {
  3745  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  3746  			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  3747  		})
  3748  
  3749  		svc1port := "svc1"
  3750  		svc2port := "svc2"
  3751  
  3752  		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
  3753  		svc, err := jig.CreateLoadBalancerServiceWaitForClusterIPOnly(func(service *v1.Service) {
  3754  			service.Spec.Ports = []v1.ServicePort{
  3755  				{
  3756  					Name:       "portname1",
  3757  					Port:       80,
  3758  					TargetPort: intstr.FromString(svc1port),
  3759  					Protocol:   v1.ProtocolTCP,
  3760  				},
  3761  				{
  3762  					Name:       "portname2",
  3763  					Port:       80,
  3764  					TargetPort: intstr.FromString(svc2port),
  3765  					Protocol:   v1.ProtocolUDP,
  3766  				},
  3767  			}
  3768  		})
  3769  		framework.ExpectNoError(err)
  3770  
  3771  		containerPort := 100
  3772  
  3773  		names := map[string]bool{}
  3774  		ginkgo.DeferCleanup(func(ctx context.Context) {
  3775  			for name := range names {
  3776  				err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
  3777  				framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
  3778  			}
  3779  		})
  3780  
  3781  		containerPorts := []v1.ContainerPort{
  3782  			{
  3783  				Name:          svc1port,
  3784  				ContainerPort: int32(containerPort),
  3785  				Protocol:      v1.ProtocolTCP,
  3786  			},
  3787  			{
  3788  				Name:          svc2port,
  3789  				ContainerPort: int32(containerPort),
  3790  				Protocol:      v1.ProtocolUDP,
  3791  			},
  3792  		}
  3793  
  3794  		podname1 := "pod1"
  3795  
  3796  		createPodOrFail(ctx, f, ns, podname1, jig.Labels, containerPorts, "netexec", "--http-port", strconv.Itoa(containerPort), "--udp-port", strconv.Itoa(containerPort))
  3797  		validateEndpointsPortsWithProtocolsOrFail(cs, ns, serviceName, fullPortsByPodName{podname1: containerPorts})
  3798  
  3799  		ginkgo.By("Checking if the Service forwards traffic to pods")
  3800  		execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
  3801  		err = jig.CheckServiceReachability(ctx, svc, execPod)
  3802  		framework.ExpectNoError(err)
  3803  		e2epod.DeletePodOrFail(ctx, cs, ns, podname1)
  3804  	})
  3805  
  3806  	// These is [Serial] because it can't run at the same time as the
  3807  	// [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded.
  3808  	f.It("should allow creating a basic SCTP service with pod and endpoints [LinuxOnly]", f.WithSerial(), func(ctx context.Context) {
  3809  		serviceName := "sctp-endpoint-test"
  3810  		ns := f.Namespace.Name
  3811  		jig := e2eservice.NewTestJig(cs, ns, serviceName)
  3812  
  3813  		ginkgo.By("getting the state of the sctp module on nodes")
  3814  		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
  3815  		framework.ExpectNoError(err)
  3816  		sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes)
  3817  
  3818  		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
  3819  		_, err = jig.CreateSCTPServiceWithPort(ctx, nil, 5060)
  3820  		framework.ExpectNoError(err)
  3821  		ginkgo.DeferCleanup(func(ctx context.Context) {
  3822  			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
  3823  			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  3824  		})
  3825  
  3826  		err = e2enetwork.WaitForService(ctx, f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
  3827  		framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
  3828  
  3829  		ginkgo.By("validating endpoints do not exist yet")
  3830  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{})
  3831  
  3832  		ginkgo.By("creating a pod for the service")
  3833  		names := map[string]bool{}
  3834  
  3835  		name1 := "pod1"
  3836  
  3837  		createPodOrFail(ctx, f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}})
  3838  		names[name1] = true
  3839  		ginkgo.DeferCleanup(func(ctx context.Context) {
  3840  			for name := range names {
  3841  				err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
  3842  				framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
  3843  			}
  3844  		})
  3845  
  3846  		ginkgo.By("validating endpoints exists")
  3847  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{name1: {5060}})
  3848  
  3849  		ginkgo.By("deleting the pod")
  3850  		e2epod.DeletePodOrFail(ctx, cs, ns, name1)
  3851  		delete(names, name1)
  3852  		ginkgo.By("validating endpoints do not exist anymore")
  3853  		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{})
  3854  
  3855  		ginkgo.By("validating sctp module is still not loaded")
  3856  		sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(ctx, f, nodes)
  3857  		if !sctpLoadedAtStart && sctpLoadedAtEnd {
  3858  			framework.Failf("The state of the sctp module has changed due to the test case")
  3859  		}
  3860  	})
  3861  })
  3862  
  3863  // execAffinityTestForSessionAffinityTimeout is a helper function that wrap the logic of
  3864  // affinity test for non-load-balancer services. Session affinity will be
  3865  // enabled when the service is created and a short timeout will be configured so
  3866  // session affinity must change after the timeout expirese.
  3867  func execAffinityTestForSessionAffinityTimeout(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
  3868  	ns := f.Namespace.Name
  3869  	numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name
  3870  	ginkgo.By("creating service in namespace " + ns)
  3871  	serviceType := svc.Spec.Type
  3872  	// set an affinity timeout equal to the number of connection requests
  3873  	svcSessionAffinityTimeout := int32(SessionAffinityTimeout)
  3874  	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  3875  	svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
  3876  		ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
  3877  	}
  3878  	_, _, err := StartServeHostnameService(ctx, cs, svc, ns, numPods)
  3879  	framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
  3880  	ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName)
  3881  	jig := e2eservice.NewTestJig(cs, ns, serviceName)
  3882  	svc, err = jig.Client.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{})
  3883  	framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
  3884  	var svcIP string
  3885  	if serviceType == v1.ServiceTypeNodePort {
  3886  		nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs)
  3887  		framework.ExpectNoError(err)
  3888  		// The node addresses must have the same IP family as the ClusterIP
  3889  		family := v1.IPv4Protocol
  3890  		if netutils.IsIPv6String(svc.Spec.ClusterIP) {
  3891  			family = v1.IPv6Protocol
  3892  		}
  3893  		svcIP = e2enode.FirstAddressByTypeAndFamily(nodes, v1.NodeInternalIP, family)
  3894  		gomega.Expect(svcIP).NotTo(gomega.BeEmpty(), "failed to get Node internal IP for family: %s", family)
  3895  		servicePort = int(svc.Spec.Ports[0].NodePort)
  3896  	} else {
  3897  		svcIP = svc.Spec.ClusterIP
  3898  	}
  3899  
  3900  	execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod-affinity", nil)
  3901  	ginkgo.DeferCleanup(func(ctx context.Context) {
  3902  		framework.Logf("Cleaning up the exec pod")
  3903  		err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{})
  3904  		framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
  3905  	})
  3906  	err = jig.CheckServiceReachability(ctx, svc, execPod)
  3907  	framework.ExpectNoError(err)
  3908  
  3909  	// the service should be sticky until the timeout expires
  3910  	if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, true) {
  3911  		framework.Failf("the service %s (%s:%d) should be sticky until the timeout expires", svc.Name, svcIP, servicePort)
  3912  	}
  3913  	// but it should return different hostnames after the timeout expires
  3914  	// try several times to avoid the probability that we hit the same pod twice
  3915  	hosts := sets.NewString()
  3916  	cmd := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, net.JoinHostPort(svcIP, strconv.Itoa(servicePort)))
  3917  	for i := 0; i < 10; i++ {
  3918  		hostname, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
  3919  		if err == nil {
  3920  			hosts.Insert(hostname)
  3921  			if hosts.Len() > 1 {
  3922  				return
  3923  			}
  3924  			// In some case, ipvs didn't deleted the persistent connection after timeout expired,
  3925  			// use 'ipvsadm -lnc' command can found the expire time become '13171233:02' after '00:00'
  3926  			//
  3927  			// pro expire state       source             virtual            destination
  3928  			// TCP 00:00  NONE        10.105.253.160:0   10.105.253.160:80  10.244.1.25:9376
  3929  			//
  3930  			// pro expire state       source             virtual            destination
  3931  			// TCP 13171233:02 NONE        10.105.253.160:0   10.105.253.160:80  10.244.1.25:9376
  3932  			//
  3933  			// And 2 seconds later, the connection will be ensure deleted,
  3934  			// so we sleep 'svcSessionAffinityTimeout+5' seconds to avoid this issue.
  3935  			// TODO: figure out why the expired connection didn't be deleted and fix this issue in ipvs side.
  3936  			time.Sleep(time.Duration(svcSessionAffinityTimeout+5) * time.Second)
  3937  		}
  3938  	}
  3939  	framework.Fail("Session is sticky after reaching the timeout")
  3940  }
  3941  
  3942  func execAffinityTestForNonLBServiceWithTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
  3943  	execAffinityTestForNonLBServiceWithOptionalTransition(ctx, f, cs, svc, true)
  3944  }
  3945  
  3946  func execAffinityTestForNonLBService(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
  3947  	execAffinityTestForNonLBServiceWithOptionalTransition(ctx, f, cs, svc, false)
  3948  }
  3949  
  3950  // execAffinityTestForNonLBServiceWithOptionalTransition is a helper function that wrap the logic of
  3951  // affinity test for non-load-balancer services. Session affinity will be
  3952  // enabled when the service is created. If parameter isTransitionTest is true,
  3953  // session affinity will be switched off/on and test if the service converges
  3954  // to a stable affinity state.
  3955  func execAffinityTestForNonLBServiceWithOptionalTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
  3956  	ns := f.Namespace.Name
  3957  	numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name
  3958  	ginkgo.By("creating service in namespace " + ns)
  3959  	serviceType := svc.Spec.Type
  3960  	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  3961  	_, _, err := StartServeHostnameService(ctx, cs, svc, ns, numPods)
  3962  	framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
  3963  	ginkgo.DeferCleanup(StopServeHostnameService, cs, ns, serviceName)
  3964  	jig := e2eservice.NewTestJig(cs, ns, serviceName)
  3965  	svc, err = jig.Client.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{})
  3966  	framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
  3967  	var svcIP string
  3968  	if serviceType == v1.ServiceTypeNodePort {
  3969  		nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs)
  3970  		framework.ExpectNoError(err)
  3971  		// The node addresses must have the same IP family as the ClusterIP
  3972  		family := v1.IPv4Protocol
  3973  		if netutils.IsIPv6String(svc.Spec.ClusterIP) {
  3974  			family = v1.IPv6Protocol
  3975  		}
  3976  		svcIP = e2enode.FirstAddressByTypeAndFamily(nodes, v1.NodeInternalIP, family)
  3977  		gomega.Expect(svcIP).NotTo(gomega.BeEmpty(), "failed to get Node internal IP for family: %s", family)
  3978  		servicePort = int(svc.Spec.Ports[0].NodePort)
  3979  	} else {
  3980  		svcIP = svc.Spec.ClusterIP
  3981  	}
  3982  
  3983  	execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod-affinity", nil)
  3984  	ginkgo.DeferCleanup(func(ctx context.Context) {
  3985  		framework.Logf("Cleaning up the exec pod")
  3986  		err := cs.CoreV1().Pods(ns).Delete(ctx, execPod.Name, metav1.DeleteOptions{})
  3987  		framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
  3988  	})
  3989  	err = jig.CheckServiceReachability(ctx, svc, execPod)
  3990  	framework.ExpectNoError(err)
  3991  
  3992  	if !isTransitionTest {
  3993  		if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, true) {
  3994  			framework.Failf("Failed to check affinity for service %s/%s", ns, svc.Name)
  3995  		}
  3996  	}
  3997  	if isTransitionTest {
  3998  		_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  3999  			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
  4000  		})
  4001  		framework.ExpectNoError(err)
  4002  		if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, false) {
  4003  			framework.Failf("Failed to check affinity for service %s/%s without session affinity", ns, svc.Name)
  4004  		}
  4005  		_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  4006  			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  4007  		})
  4008  		framework.ExpectNoError(err)
  4009  		if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, true) {
  4010  			framework.Failf("Failed to check affinity for service %s/%s with session affinity", ns, svc.Name)
  4011  		}
  4012  	}
  4013  }
  4014  
  4015  func execAffinityTestForLBServiceWithTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
  4016  	execAffinityTestForLBServiceWithOptionalTransition(ctx, f, cs, svc, true)
  4017  }
  4018  
  4019  func execAffinityTestForLBService(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
  4020  	execAffinityTestForLBServiceWithOptionalTransition(ctx, f, cs, svc, false)
  4021  }
  4022  
  4023  // execAffinityTestForLBServiceWithOptionalTransition is a helper function that wrap the logic of
  4024  // affinity test for load balancer services, similar to
  4025  // execAffinityTestForNonLBServiceWithOptionalTransition.
  4026  func execAffinityTestForLBServiceWithOptionalTransition(ctx context.Context, f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
  4027  	numPods, ns, serviceName := 3, f.Namespace.Name, svc.ObjectMeta.Name
  4028  
  4029  	ginkgo.By("creating service in namespace " + ns)
  4030  	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  4031  	_, _, err := StartServeHostnameService(ctx, cs, svc, ns, numPods)
  4032  	framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
  4033  	jig := e2eservice.NewTestJig(cs, ns, serviceName)
  4034  	ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName)
  4035  	svc, err = jig.WaitForLoadBalancer(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs))
  4036  	framework.ExpectNoError(err)
  4037  	ginkgo.DeferCleanup(func(ctx context.Context) {
  4038  		podNodePairs, err := e2enode.PodNodePairs(ctx, cs, ns)
  4039  		framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
  4040  		_ = StopServeHostnameService(ctx, cs, ns, serviceName)
  4041  		lb := cloudprovider.DefaultLoadBalancerName(svc)
  4042  		framework.Logf("cleaning load balancer resource for %s", lb)
  4043  		e2eservice.CleanupServiceResources(ctx, cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
  4044  	})
  4045  	ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
  4046  	port := int(svc.Spec.Ports[0].Port)
  4047  
  4048  	if !isTransitionTest {
  4049  		if !checkAffinity(ctx, cs, nil, ingressIP, port, true) {
  4050  			framework.Failf("Failed to verify affinity for loadbalance service %s/%s", ns, serviceName)
  4051  		}
  4052  	}
  4053  	if isTransitionTest {
  4054  		svc, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  4055  			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
  4056  		})
  4057  		framework.ExpectNoError(err)
  4058  		if !checkAffinity(ctx, cs, nil, ingressIP, port, false) {
  4059  			framework.Failf("Failed to verify affinity for loadbalance service %s/%s without session affinity ", ns, serviceName)
  4060  		}
  4061  		svc, err = jig.UpdateService(ctx, func(svc *v1.Service) {
  4062  			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
  4063  		})
  4064  		framework.ExpectNoError(err)
  4065  		if !checkAffinity(ctx, cs, nil, ingressIP, port, true) {
  4066  			framework.Failf("Failed to verify affinity for loadbalance service %s/%s with session affinity ", ns, serviceName)
  4067  		}
  4068  	}
  4069  }
  4070  
  4071  func createAndGetExternalServiceFQDN(ctx context.Context, cs clientset.Interface, ns, serviceName string) string {
  4072  	_, _, err := StartServeHostnameService(ctx, cs, getServeHostnameService(serviceName), ns, 2)
  4073  	framework.ExpectNoError(err, "Expected Service %s to be running", serviceName)
  4074  	return fmt.Sprintf("%s.%s.svc.%s", serviceName, ns, framework.TestContext.ClusterDNSDomain)
  4075  }
  4076  
  4077  func createPausePodDeployment(ctx context.Context, cs clientset.Interface, name, ns string, replicas int) *appsv1.Deployment {
  4078  	labels := map[string]string{"deployment": "agnhost-pause"}
  4079  	pauseDeployment := e2edeployment.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
  4080  
  4081  	pauseDeployment.Spec.Template.Spec.Containers[0] = e2epod.NewAgnhostContainer("agnhost-pause", nil, nil, "pause")
  4082  	pauseDeployment.Spec.Template.Spec.Affinity = &v1.Affinity{
  4083  		PodAntiAffinity: &v1.PodAntiAffinity{
  4084  			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
  4085  				{
  4086  					LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
  4087  					TopologyKey:   "kubernetes.io/hostname",
  4088  					Namespaces:    []string{ns},
  4089  				},
  4090  			},
  4091  		},
  4092  	}
  4093  
  4094  	deployment, err := cs.AppsV1().Deployments(ns).Create(ctx, pauseDeployment, metav1.CreateOptions{})
  4095  	framework.ExpectNoError(err, "Error in creating deployment for pause pod")
  4096  	return deployment
  4097  }
  4098  
  4099  // createPodOrFail creates a pod with the specified containerPorts.
  4100  func createPodOrFail(ctx context.Context, f *framework.Framework, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort, args ...string) {
  4101  	ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
  4102  	pod := e2epod.NewAgnhostPod(ns, name, nil, nil, containerPorts, args...)
  4103  	pod.ObjectMeta.Labels = labels
  4104  	// Add a dummy environment variable to work around a docker issue.
  4105  	// https://github.com/docker/docker/issues/14203
  4106  	pod.Spec.Containers[0].Env = []v1.EnvVar{{Name: "FOO", Value: " "}}
  4107  	e2epod.NewPodClient(f).CreateSync(ctx, pod)
  4108  }
  4109  
  4110  // launchHostExecPod launches a hostexec pod in the given namespace and waits
  4111  // until it's Running
  4112  func launchHostExecPod(ctx context.Context, client clientset.Interface, ns, name string) *v1.Pod {
  4113  	framework.Logf("Creating new host exec pod")
  4114  	hostExecPod := e2epod.NewExecPodSpec(ns, name, true)
  4115  	pod, err := client.CoreV1().Pods(ns).Create(ctx, hostExecPod, metav1.CreateOptions{})
  4116  	framework.ExpectNoError(err)
  4117  	err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, client, name, ns, framework.PodStartTimeout)
  4118  	framework.ExpectNoError(err)
  4119  	return pod
  4120  }
  4121  
  4122  // checkReachabilityFromPod checks reachability from the specified pod.
  4123  func checkReachabilityFromPod(ctx context.Context, expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
  4124  	cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
  4125  	err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
  4126  		_, err := e2eoutput.RunHostCmd(namespace, pod, cmd)
  4127  		if expectToBeReachable && err != nil {
  4128  			framework.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
  4129  			return false, nil
  4130  		}
  4131  
  4132  		if !expectToBeReachable && err == nil {
  4133  			framework.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
  4134  			return false, nil
  4135  		}
  4136  		return true, nil
  4137  	})
  4138  	framework.ExpectNoError(err)
  4139  }
  4140  
  4141  func validatePorts(ep, expectedEndpoints portsByPodUID) error {
  4142  	if len(ep) != len(expectedEndpoints) {
  4143  		// should not happen because we check this condition before
  4144  		return fmt.Errorf("invalid number of endpoints got %v, expected %v", ep, expectedEndpoints)
  4145  	}
  4146  	for podUID := range expectedEndpoints {
  4147  		if _, ok := ep[podUID]; !ok {
  4148  			return fmt.Errorf("endpoint %v not found", podUID)
  4149  		}
  4150  		if len(ep[podUID]) != len(expectedEndpoints[podUID]) {
  4151  			return fmt.Errorf("invalid list of ports for uid %v. Got %v, expected %v", podUID, ep[podUID], expectedEndpoints[podUID])
  4152  		}
  4153  		sort.Ints(ep[podUID])
  4154  		sort.Ints(expectedEndpoints[podUID])
  4155  		for index := range ep[podUID] {
  4156  			if ep[podUID][index] != expectedEndpoints[podUID][index] {
  4157  				return fmt.Errorf("invalid list of ports for uid %v. Got %v, expected %v", podUID, ep[podUID], expectedEndpoints[podUID])
  4158  			}
  4159  		}
  4160  	}
  4161  	return nil
  4162  }
  4163  
  4164  func translatePodNameToUID(ctx context.Context, c clientset.Interface, ns string, expectedEndpoints portsByPodName) (portsByPodUID, error) {
  4165  	portsByUID := make(portsByPodUID)
  4166  	for name, portList := range expectedEndpoints {
  4167  		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
  4168  		if err != nil {
  4169  			return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %w", name, err)
  4170  		}
  4171  		portsByUID[pod.ObjectMeta.UID] = portList
  4172  	}
  4173  	return portsByUID, nil
  4174  }
  4175  
  4176  // validateEndpointsPortsOrFail validates that the given service exists and is served by the given expectedEndpoints.
  4177  func validateEndpointsPortsOrFail(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectedEndpoints portsByPodName) {
  4178  	ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
  4179  	expectedPortsByPodUID, err := translatePodNameToUID(ctx, c, namespace, expectedEndpoints)
  4180  	framework.ExpectNoError(err, "failed to translate pod name to UID, ns:%s, expectedEndpoints:%v", namespace, expectedEndpoints)
  4181  
  4182  	var (
  4183  		pollErr error
  4184  		i       = 0
  4185  	)
  4186  	if pollErr = wait.PollImmediate(time.Second, framework.ServiceStartTimeout, func() (bool, error) {
  4187  		i++
  4188  
  4189  		ep, err := c.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{})
  4190  		if err != nil {
  4191  			framework.Logf("Failed go get Endpoints object: %v", err)
  4192  			// Retry the error
  4193  			return false, nil
  4194  		}
  4195  		portsByUID := portsByPodUID(e2eendpoints.GetContainerPortsByPodUID(ep))
  4196  		if err := validatePorts(portsByUID, expectedPortsByPodUID); err != nil {
  4197  			if i%5 == 0 {
  4198  				framework.Logf("Unexpected endpoints: found %v, expected %v, will retry", portsByUID, expectedEndpoints)
  4199  			}
  4200  			return false, nil
  4201  		}
  4202  
  4203  		// If EndpointSlice API is enabled, then validate if appropriate EndpointSlice objects
  4204  		// were also create/updated/deleted.
  4205  		if _, err := c.Discovery().ServerResourcesForGroupVersion(discoveryv1.SchemeGroupVersion.String()); err == nil {
  4206  			opts := metav1.ListOptions{
  4207  				LabelSelector: "kubernetes.io/service-name=" + serviceName,
  4208  			}
  4209  			es, err := c.DiscoveryV1().EndpointSlices(namespace).List(ctx, opts)
  4210  			if err != nil {
  4211  				framework.Logf("Failed go list EndpointSlice objects: %v", err)
  4212  				// Retry the error
  4213  				return false, nil
  4214  			}
  4215  			portsByUID = portsByPodUID(e2eendpointslice.GetContainerPortsByPodUID(es.Items))
  4216  			if err := validatePorts(portsByUID, expectedPortsByPodUID); err != nil {
  4217  				if i%5 == 0 {
  4218  					framework.Logf("Unexpected endpoint slices: found %v, expected %v, will retry", portsByUID, expectedEndpoints)
  4219  				}
  4220  				return false, nil
  4221  			}
  4222  		}
  4223  		framework.Logf("successfully validated that service %s in namespace %s exposes endpoints %v",
  4224  			serviceName, namespace, expectedEndpoints)
  4225  		return true, nil
  4226  	}); pollErr != nil {
  4227  		if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}); err == nil {
  4228  			for _, pod := range pods.Items {
  4229  				framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
  4230  			}
  4231  		} else {
  4232  			framework.Logf("Can't list pod debug info: %v", err)
  4233  		}
  4234  	}
  4235  	framework.ExpectNoError(pollErr, "error waithing for service %s in namespace %s to expose endpoints %v: %v", serviceName, namespace, expectedEndpoints)
  4236  }
  4237  
  4238  func restartApiserver(ctx context.Context, namespace string, cs clientset.Interface) error {
  4239  	if framework.ProviderIs("gke") {
  4240  		// GKE use a same-version master upgrade to teardown/recreate master.
  4241  		v, err := cs.Discovery().ServerVersion()
  4242  		if err != nil {
  4243  			return err
  4244  		}
  4245  		return e2eproviders.MasterUpgradeGKE(ctx, namespace, v.GitVersion[1:]) // strip leading 'v'
  4246  	}
  4247  
  4248  	return restartComponent(ctx, cs, kubeAPIServerLabelName, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
  4249  }
  4250  
  4251  // restartComponent restarts component static pod
  4252  func restartComponent(ctx context.Context, cs clientset.Interface, cName, ns string, matchLabels map[string]string) error {
  4253  	pods, err := e2epod.GetPods(ctx, cs, ns, matchLabels)
  4254  	if err != nil {
  4255  		return fmt.Errorf("failed to get %s's pods, err: %w", cName, err)
  4256  	}
  4257  	if len(pods) == 0 {
  4258  		return fmt.Errorf("%s pod count is 0", cName)
  4259  	}
  4260  
  4261  	if err := e2epod.DeletePodsWithGracePeriod(ctx, cs, pods, 0); err != nil {
  4262  		return fmt.Errorf("failed to restart component: %s, err: %w", cName, err)
  4263  	}
  4264  
  4265  	_, err = e2epod.PodsCreatedByLabel(ctx, cs, ns, cName, int32(len(pods)), labels.SelectorFromSet(matchLabels))
  4266  	return err
  4267  }
  4268  
  4269  // validateEndpointsPortsWithProtocolsOrFail validates that the given service exists and is served by the given expectedEndpoints.
  4270  func validateEndpointsPortsWithProtocolsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints fullPortsByPodName) {
  4271  	ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
  4272  	expectedPortsByPodUID, err := translatePortsByPodNameToPortsByPodUID(c, namespace, expectedEndpoints)
  4273  	framework.ExpectNoError(err, "failed to translate pod name to UID, ns:%s, expectedEndpoints:%v", namespace, expectedEndpoints)
  4274  
  4275  	var (
  4276  		pollErr error
  4277  		i       = 0
  4278  	)
  4279  	if pollErr = wait.PollImmediate(time.Second, framework.ServiceStartTimeout, func() (bool, error) {
  4280  		i++
  4281  
  4282  		ep, err := c.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
  4283  		if err != nil {
  4284  			framework.Logf("Failed go get Endpoints object: %v", err)
  4285  			// Retry the error
  4286  			return false, nil
  4287  		}
  4288  		portsByUID := fullPortsByPodUID(e2eendpoints.GetFullContainerPortsByPodUID(ep))
  4289  		if err := validatePortsAndProtocols(portsByUID, expectedPortsByPodUID); err != nil {
  4290  			if i%5 == 0 {
  4291  				framework.Logf("Unexpected endpoints: found %v, expected %v, will retry", portsByUID, expectedEndpoints)
  4292  			}
  4293  			return false, nil
  4294  		}
  4295  
  4296  		// If EndpointSlice API is enabled, then validate if appropriate EndpointSlice objects
  4297  		// were also create/updated/deleted.
  4298  		if _, err := c.Discovery().ServerResourcesForGroupVersion(discoveryv1.SchemeGroupVersion.String()); err == nil {
  4299  			opts := metav1.ListOptions{
  4300  				LabelSelector: "kubernetes.io/service-name=" + serviceName,
  4301  			}
  4302  			es, err := c.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), opts)
  4303  			if err != nil {
  4304  				framework.Logf("Failed go list EndpointSlice objects: %v", err)
  4305  				// Retry the error
  4306  				return false, nil
  4307  			}
  4308  			portsByUID = fullPortsByPodUID(e2eendpointslice.GetFullContainerPortsByPodUID(es.Items))
  4309  			if err := validatePortsAndProtocols(portsByUID, expectedPortsByPodUID); err != nil {
  4310  				if i%5 == 0 {
  4311  					framework.Logf("Unexpected endpoint slices: found %v, expected %v, will retry", portsByUID, expectedEndpoints)
  4312  				}
  4313  				return false, nil
  4314  			}
  4315  		}
  4316  		framework.Logf("successfully validated that service %s in namespace %s exposes endpoints %v",
  4317  			serviceName, namespace, expectedEndpoints)
  4318  		return true, nil
  4319  	}); pollErr != nil {
  4320  		if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}); err == nil {
  4321  			for _, pod := range pods.Items {
  4322  				framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
  4323  			}
  4324  		} else {
  4325  			framework.Logf("Can't list pod debug info: %v", err)
  4326  		}
  4327  	}
  4328  	framework.ExpectNoError(pollErr, "error waithing for service %s in namespace %s to expose endpoints %v: %v", serviceName, namespace, expectedEndpoints)
  4329  }
  4330  
  4331  func translatePortsByPodNameToPortsByPodUID(c clientset.Interface, ns string, expectedEndpoints fullPortsByPodName) (fullPortsByPodUID, error) {
  4332  	portsByUID := make(fullPortsByPodUID)
  4333  	for name, portList := range expectedEndpoints {
  4334  		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
  4335  		if err != nil {
  4336  			return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %w", name, err)
  4337  		}
  4338  		portsByUID[pod.ObjectMeta.UID] = portList
  4339  	}
  4340  	return portsByUID, nil
  4341  }
  4342  
  4343  func validatePortsAndProtocols(ep, expectedEndpoints fullPortsByPodUID) error {
  4344  	if len(ep) != len(expectedEndpoints) {
  4345  		// should not happen because we check this condition before
  4346  		return fmt.Errorf("invalid number of endpoints got %v, expected %v", ep, expectedEndpoints)
  4347  	}
  4348  	for podUID := range expectedEndpoints {
  4349  		if _, ok := ep[podUID]; !ok {
  4350  			return fmt.Errorf("endpoint %v not found", podUID)
  4351  		}
  4352  		if len(ep[podUID]) != len(expectedEndpoints[podUID]) {
  4353  			return fmt.Errorf("invalid list of ports for uid %v. Got %v, expected %v", podUID, ep[podUID], expectedEndpoints[podUID])
  4354  		}
  4355  		var match bool
  4356  		for _, epPort := range ep[podUID] {
  4357  			match = false
  4358  			for _, expectedPort := range expectedEndpoints[podUID] {
  4359  				if epPort.ContainerPort == expectedPort.ContainerPort && epPort.Protocol == expectedPort.Protocol {
  4360  					match = true
  4361  				}
  4362  			}
  4363  			if !match {
  4364  				return fmt.Errorf("invalid list of ports for uid %v. Got %v, expected %v", podUID, ep[podUID], expectedEndpoints[podUID])
  4365  			}
  4366  		}
  4367  	}
  4368  	return nil
  4369  }