k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/common/node/pods.go

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package node
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"encoding/json"
    23  	"fmt"
    24  	"io"
    25  	"runtime/debug"
    26  	"strconv"
    27  	"strings"
    28  	"time"
    29  
    30  	"k8s.io/client-go/util/retry"
    31  
    32  	"golang.org/x/net/websocket"
    33  
    34  	v1 "k8s.io/api/core/v1"
    35  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    36  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    37  	"k8s.io/apimachinery/pkg/labels"
    38  	"k8s.io/apimachinery/pkg/runtime"
    39  	"k8s.io/apimachinery/pkg/runtime/schema"
    40  	"k8s.io/apimachinery/pkg/types"
    41  	"k8s.io/apimachinery/pkg/util/intstr"
    42  	utilrand "k8s.io/apimachinery/pkg/util/rand"
    43  	"k8s.io/apimachinery/pkg/util/uuid"
    44  	"k8s.io/apimachinery/pkg/util/wait"
    45  	"k8s.io/apimachinery/pkg/watch"
    46  	"k8s.io/client-go/dynamic"
    47  	"k8s.io/client-go/tools/cache"
    48  	watchtools "k8s.io/client-go/tools/watch"
    49  	"k8s.io/kubectl/pkg/util/podutils"
    50  	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    51  	"k8s.io/kubernetes/pkg/kubelet"
    52  	"k8s.io/kubernetes/test/e2e/framework"
    53  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    54  	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    55  	e2ewebsocket "k8s.io/kubernetes/test/e2e/framework/websocket"
    56  	imageutils "k8s.io/kubernetes/test/utils/image"
    57  	admissionapi "k8s.io/pod-security-admission/api"
    58  	"k8s.io/utils/pointer"
    59  
    60  	"github.com/onsi/ginkgo/v2"
    61  	"github.com/onsi/gomega"
    62  )
    63  
    64  const (
    65  	buildBackOffDuration = time.Minute
    66  	syncLoopFrequency    = 10 * time.Second
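         	// maxBackOffTolerance below gives ~30% headroom over the kubelet's restart
         	// back-off cap, absorbing sync-loop drift (see "syncloop cumulative drift" below).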
    67  	maxBackOffTolerance  = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
    68  	podRetryPeriod       = 1 * time.Second
    69  )
    70  
    71  // testHostIP tests that a pod gets a host IP
    72  func testHostIP(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod) {
    73  	ginkgo.By("creating pod")
    74  	podClient.CreateSync(ctx, pod)
    75  
    76  	// Try to make sure we get a hostIP for each pod.
    77  	hostIPTimeout := 2 * time.Minute
    78  	t := time.Now()
    79  	for {
    80  		p, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
    81  		framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
    82  		if p.Status.HostIP != "" {
    83  			framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
    84  			break
    85  		}
    86  		if time.Since(t) >= hostIPTimeout {
    87  			framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
    88  				p.Name, time.Since(t).Seconds())
    89  		}
    90  		framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
    91  		time.Sleep(5 * time.Second)
    92  	}
    93  }
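
         // A sketch of the same wait written with the wait helpers this file already
         // imports; the explicit loop above is kept only so it can log on every retry:
         //
         //	err := wait.PollUntilContextTimeout(ctx, 5*time.Second, hostIPTimeout, true,
         //		func(ctx context.Context) (bool, error) {
         //			p, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
         //			if err != nil {
         //				return false, err
         //			}
         //			return p.Status.HostIP != "", nil
         //		})
         //	framework.ExpectNoError(err, "pod %q never reported a hostIP", pod.Name)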
    94  
    95  func startPodAndGetBackOffs(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
    96  	podClient.CreateSync(ctx, pod)
    97  	time.Sleep(sleepAmount)
    98  	gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
    99  	podName := pod.Name
   100  	containerName := pod.Spec.Containers[0].Name
   101  
   102  	ginkgo.By("getting restart delay-0")
   103  	_, err := getRestartDelay(ctx, podClient, podName, containerName)
   104  	if err != nil {
   105  		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
   106  	}
   107  
   108  	ginkgo.By("getting restart delay-1")
   109  	delay1, err := getRestartDelay(ctx, podClient, podName, containerName)
   110  	if err != nil {
   111  		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
   112  	}
   113  
   114  	ginkgo.By("getting restart delay-2")
   115  	delay2, err := getRestartDelay(ctx, podClient, podName, containerName)
   116  	if err != nil {
   117  		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
   118  	}
   119  	return delay1, delay2
   120  }
   121  
   122  func getRestartDelay(ctx context.Context, podClient *e2epod.PodClient, podName string, containerName string) (time.Duration, error) {
   123  	beginTime := time.Now()
   124  	var previousRestartCount int32 = -1
   125  	var previousFinishedAt time.Time
   126  	for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
   127  		time.Sleep(time.Second)
   128  		pod, err := podClient.Get(ctx, podName, metav1.GetOptions{})
    129  		framework.ExpectNoError(err, "getting pod %s", podName)
   130  		status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
   131  		if !ok {
   132  			framework.Logf("getRestartDelay: status missing")
   133  			continue
   134  		}
   135  
    136  		// The only case where this happens is the first time the Pod runs: there is no "Last State" yet.
   137  		if status.LastTerminationState.Terminated == nil {
   138  			framework.Logf("Container's last state is not \"Terminated\".")
   139  			continue
   140  		}
   141  
   142  		if previousRestartCount == -1 {
   143  			if status.State.Running != nil {
   144  				// container is still Running, there is no "FinishedAt" time.
   145  				continue
   146  			} else if status.State.Terminated != nil {
   147  				previousFinishedAt = status.State.Terminated.FinishedAt.Time
   148  			} else {
   149  				previousFinishedAt = status.LastTerminationState.Terminated.FinishedAt.Time
   150  			}
   151  			previousRestartCount = status.RestartCount
   152  		}
   153  
    154  		// When the RestartCount changes, the container will be in one of the following states:
    155  		// Running, Terminated, or Waiting (already waiting for the back-off period to expire, with the last state details stored in status.LastTerminationState).
   156  		if status.RestartCount > previousRestartCount {
   157  			var startedAt time.Time
   158  			if status.State.Running != nil {
   159  				startedAt = status.State.Running.StartedAt.Time
   160  			} else if status.State.Terminated != nil {
   161  				startedAt = status.State.Terminated.StartedAt.Time
   162  			} else {
   163  				startedAt = status.LastTerminationState.Terminated.StartedAt.Time
   164  			}
   165  			framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, previousFinishedAt, startedAt, startedAt.Sub(previousFinishedAt))
   166  			return startedAt.Sub(previousFinishedAt), nil
   167  		}
   168  	}
   169  	return 0, fmt.Errorf("timeout getting pod restart delay")
   170  }
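
         // Note: the kubelet doubles a crashing container's restart delay (10s, 20s,
         // 40s, ...) up to kubelet.MaxContainerBackOff (5m at the time of writing), so
         // successive values returned by getRestartDelay should roughly double until
         // they reach that cap.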
   171  
    172  // expectNoErrorWithRetries calls fn up to maxRetries times and fails the test if it still returns an error.
   173  func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
    174  	// TODO (pohly): replace the entire function with gomega.Eventually.
   175  	var err error
   176  	for i := 0; i < maxRetries; i++ {
   177  		err = fn()
   178  		if err == nil {
   179  			return
   180  		}
   181  		framework.Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
   182  	}
   183  	if err != nil {
   184  		debug.PrintStack()
   185  	}
   186  	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
   187  }
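
         // A sketch of the gomega.Eventually form mentioned in the TODO above, assuming
         // callers can supply a context and that one attempt per second is acceptable:
         //
         //	gomega.Eventually(ctx, fn).
         //		WithTimeout(time.Duration(maxRetries) * time.Second).
         //		WithPolling(time.Second).
         //		Should(gomega.Succeed(), explain...)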
   188  
   189  var _ = SIGDescribe("Pods", func() {
   190  	f := framework.NewDefaultFramework("pods")
   191  	f.NamespacePodSecurityLevel = admissionapi.LevelRestricted
   192  	var podClient *e2epod.PodClient
   193  	var dc dynamic.Interface
   194  
   195  	ginkgo.BeforeEach(func() {
   196  		podClient = e2epod.NewPodClient(f)
   197  		dc = f.DynamicClient
   198  	})
   199  
   200  	/*
   201  		Release: v1.9
   202  		Testname: Pods, assigned hostip
    203  		Description: Create a Pod. Pod status MUST return successfully and contain a valid IP address.
   204  	*/
   205  	framework.ConformanceIt("should get a host IP", f.WithNodeConformance(), func(ctx context.Context) {
   206  		name := "pod-hostip-" + string(uuid.NewUUID())
   207  		testHostIP(ctx, podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   208  			ObjectMeta: metav1.ObjectMeta{
   209  				Name: name,
   210  			},
   211  			Spec: v1.PodSpec{
   212  				Containers: []v1.Container{
   213  					{
   214  						Name:  "test",
   215  						Image: imageutils.GetPauseImageName(),
   216  					},
   217  				},
   218  			},
   219  		}))
   220  	})
   221  
   222  	/*
   223  		Release: v1.9
   224  		Testname: Pods, lifecycle
    225  		Description: A Pod is created with a unique label. The Pod MUST be accessible when queried using the label selector upon creation. Add a watch and check that the Pod is running. The Pod is then deleted and the deletion timestamp is observed. The watch MUST return the pod deleted event. A query with the original selector for the Pod MUST return an empty list.
   226  	*/
   227  	framework.ConformanceIt("should be submitted and removed", f.WithNodeConformance(), func(ctx context.Context) {
   228  		ginkgo.By("creating the pod")
   229  		name := "pod-submit-remove-" + string(uuid.NewUUID())
   230  		value := strconv.Itoa(time.Now().Nanosecond())
   231  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   232  			ObjectMeta: metav1.ObjectMeta{
   233  				Name: name,
   234  				Labels: map[string]string{
   235  					"name": "foo",
   236  					"time": value,
   237  				},
   238  			},
   239  			Spec: v1.PodSpec{
   240  				Containers: []v1.Container{
   241  					{
   242  						Name:  "pause",
   243  						Image: imageutils.GetPauseImageName(),
   244  					},
   245  				},
   246  			},
   247  		})
   248  
   249  		ginkgo.By("setting up watch")
   250  		selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
   251  		options := metav1.ListOptions{LabelSelector: selector.String()}
   252  		pods, err := podClient.List(ctx, options)
   253  		framework.ExpectNoError(err, "failed to query for pods")
   254  		gomega.Expect(pods.Items).To(gomega.BeEmpty())
   255  
   256  		lw := &cache.ListWatch{
   257  			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
   258  				options.LabelSelector = selector.String()
   259  				podList, err := podClient.List(ctx, options)
   260  				return podList, err
   261  			},
   262  			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
   263  				options.LabelSelector = selector.String()
   264  				return podClient.Watch(ctx, options)
   265  			},
   266  		}
   267  		_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
   268  		defer w.Stop()
   269  
   270  		ctxUntil, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
   271  		defer cancelCtx()
   272  		if !cache.WaitForCacheSync(ctxUntil.Done(), informer.HasSynced) {
    273  			framework.Failf("Timeout while waiting for Pod informer to sync")
   274  		}
   275  
   276  		ginkgo.By("submitting the pod to kubernetes")
   277  		podClient.Create(ctx, pod)
   278  
   279  		ginkgo.By("verifying the pod is in kubernetes")
   280  		selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
   281  		options = metav1.ListOptions{LabelSelector: selector.String()}
   282  		pods, err = podClient.List(ctx, options)
   283  		framework.ExpectNoError(err, "failed to query for pods")
   284  		gomega.Expect(pods.Items).To(gomega.HaveLen(1))
   285  
   286  		ginkgo.By("verifying pod creation was observed")
   287  		select {
   288  		case event := <-w.ResultChan():
   289  			if event.Type != watch.Added {
   290  				framework.Failf("Failed to observe pod creation: %v", event)
   291  			}
   292  		case <-time.After(framework.PodStartTimeout):
   293  			framework.Failf("Timeout while waiting for pod creation")
   294  		}
   295  
   296  		// We need to wait for the pod to be running, otherwise the deletion
   297  		// may be carried out immediately rather than gracefully.
   298  		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
   299  		// save the running pod
   300  		pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
   301  		framework.ExpectNoError(err, "failed to GET scheduled pod")
   302  
   303  		ginkgo.By("deleting the pod gracefully")
   304  		err = podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(30))
   305  		framework.ExpectNoError(err, "failed to delete pod")
   306  
   307  		ginkgo.By("verifying pod deletion was observed")
   308  		deleted := false
   309  		var lastPod *v1.Pod
   310  		timer := time.After(e2epod.DefaultPodDeletionTimeout)
   311  		for !deleted {
   312  			select {
   313  			case event := <-w.ResultChan():
   314  				switch event.Type {
   315  				case watch.Deleted:
   316  					lastPod = event.Object.(*v1.Pod)
   317  					deleted = true
   318  				case watch.Error:
   319  					framework.Logf("received a watch error: %v", event.Object)
   320  					framework.Failf("watch closed with error")
   321  				}
   322  			case <-timer:
   323  				framework.Failf("timed out waiting for pod deletion")
   324  			}
   325  		}
   326  		if !deleted {
   327  			framework.Failf("Failed to observe pod deletion")
   328  		}
   329  
   330  		gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
   331  		gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero())
   332  
   333  		selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
   334  		options = metav1.ListOptions{LabelSelector: selector.String()}
   335  		pods, err = podClient.List(ctx, options)
   336  		framework.ExpectNoError(err, "failed to query for pods")
   337  		gomega.Expect(pods.Items).To(gomega.BeEmpty())
   338  	})
   339  
   340  	/*
   341  		Release: v1.9
   342  		Testname: Pods, update
   343  		Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful.
   344  	*/
   345  	framework.ConformanceIt("should be updated", f.WithNodeConformance(), func(ctx context.Context) {
   346  		ginkgo.By("creating the pod")
   347  		name := "pod-update-" + string(uuid.NewUUID())
   348  		value := strconv.Itoa(time.Now().Nanosecond())
   349  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   350  			ObjectMeta: metav1.ObjectMeta{
   351  				Name: name,
   352  				Labels: map[string]string{
   353  					"name": "foo",
   354  					"time": value,
   355  				},
   356  			},
   357  			Spec: v1.PodSpec{
   358  				Containers: []v1.Container{
   359  					{
   360  						Name:  "pause",
   361  						Image: imageutils.GetPauseImageName(),
   362  					},
   363  				},
   364  			},
   365  		})
   366  
   367  		ginkgo.By("submitting the pod to kubernetes")
   368  		pod = podClient.CreateSync(ctx, pod)
   369  
   370  		ginkgo.By("verifying the pod is in kubernetes")
   371  		selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
   372  		options := metav1.ListOptions{LabelSelector: selector.String()}
   373  		pods, err := podClient.List(ctx, options)
   374  		framework.ExpectNoError(err, "failed to query for pods")
   375  		gomega.Expect(pods.Items).To(gomega.HaveLen(1))
   376  
   377  		ginkgo.By("updating the pod")
   378  		podClient.Update(ctx, name, func(pod *v1.Pod) {
   379  			value = strconv.Itoa(time.Now().Nanosecond())
   380  			pod.Labels["time"] = value
   381  		})
   382  
   383  		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
   384  
   385  		ginkgo.By("verifying the updated pod is in kubernetes")
   386  		selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
   387  		options = metav1.ListOptions{LabelSelector: selector.String()}
   388  		pods, err = podClient.List(ctx, options)
   389  		framework.ExpectNoError(err, "failed to query for pods")
   390  		gomega.Expect(pods.Items).To(gomega.HaveLen(1))
   391  		framework.Logf("Pod update OK")
   392  	})
   393  
   394  	/*
   395  		Release: v1.9
   396  		Testname: Pods, ActiveDeadlineSeconds
    397  		Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. Pod MUST terminate once the specified deadline elapses.
   398  	*/
   399  	framework.ConformanceIt("should allow activeDeadlineSeconds to be updated", f.WithNodeConformance(), func(ctx context.Context) {
   400  		ginkgo.By("creating the pod")
   401  		name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
   402  		value := strconv.Itoa(time.Now().Nanosecond())
   403  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   404  			ObjectMeta: metav1.ObjectMeta{
   405  				Name: name,
   406  				Labels: map[string]string{
   407  					"name": "foo",
   408  					"time": value,
   409  				},
   410  			},
   411  			Spec: v1.PodSpec{
   412  				Containers: []v1.Container{
   413  					{
   414  						Name:  "pause",
   415  						Image: imageutils.GetPauseImageName(),
   416  					},
   417  				},
   418  			},
   419  		})
   420  
   421  		ginkgo.By("submitting the pod to kubernetes")
   422  		podClient.CreateSync(ctx, pod)
   423  
   424  		ginkgo.By("verifying the pod is in kubernetes")
   425  		selector := labels.SelectorFromSet(labels.Set{"time": value})
   426  		options := metav1.ListOptions{LabelSelector: selector.String()}
   427  		pods, err := podClient.List(ctx, options)
   428  		framework.ExpectNoError(err, "failed to query for pods")
   429  		gomega.Expect(pods.Items).To(gomega.HaveLen(1))
   430  
   431  		ginkgo.By("updating the pod")
   432  		podClient.Update(ctx, name, func(pod *v1.Pod) {
   433  			newDeadline := int64(5)
   434  			pod.Spec.ActiveDeadlineSeconds = &newDeadline
   435  		})
   436  
   437  		framework.ExpectNoError(e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, pod.Name, "DeadlineExceeded", f.Namespace.Name))
   438  	})
   439  
   440  	/*
   441  		Release: v1.9
   442  		Testname: Pods, service environment variables
    443  		Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod listening on port 8765 targeting port 8080. When a new Pod is created in the cluster, the fooservice environment variables MUST be available from this new Pod. The newly created Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values.
   444  	*/
   445  	framework.ConformanceIt("should contain environment variables for services", f.WithNodeConformance(), func(ctx context.Context) {
   446  		// Make a pod that will be a service.
   447  		// This pod serves its hostname via HTTP.
   448  		serverName := "server-envvars-" + string(uuid.NewUUID())
   449  		serverPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   450  			ObjectMeta: metav1.ObjectMeta{
   451  				Name:   serverName,
   452  				Labels: map[string]string{"name": serverName},
   453  			},
   454  			Spec: v1.PodSpec{
   455  				Containers: []v1.Container{
   456  					{
   457  						Name:  "srv",
   458  						Image: imageutils.GetE2EImage(imageutils.Agnhost),
   459  						Ports: []v1.ContainerPort{{ContainerPort: 9376}},
   460  					},
   461  				},
   462  			},
   463  		})
   464  		podClient.CreateSync(ctx, serverPod)
   465  
   466  		// This service exposes port 8080 of the test pod as a service on port 8765
   467  		// TODO(filbranden): We would like to use a unique service name such as:
   468  		//   svcName := "svc-envvars-" + randomSuffix()
   469  		// However, that affects the name of the environment variables which are the capitalized
   470  		// service name, so that breaks this test.  One possibility is to tweak the variable names
   471  		// to match the service.  Another is to rethink environment variable names and possibly
   472  		// allow overriding the prefix in the service manifest.
   473  		svcName := "fooservice"
   474  		svc := &v1.Service{
   475  			ObjectMeta: metav1.ObjectMeta{
   476  				Name: svcName,
   477  				Labels: map[string]string{
   478  					"name": svcName,
   479  				},
   480  			},
   481  			Spec: v1.ServiceSpec{
   482  				Ports: []v1.ServicePort{{
   483  					Port:       8765,
   484  					TargetPort: intstr.FromInt32(8080),
   485  				}},
   486  				Selector: map[string]string{
   487  					"name": serverName,
   488  				},
   489  			},
   490  		}
   491  		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{})
   492  		framework.ExpectNoError(err, "failed to create service")
   493  
   494  		// Make a client pod that verifies that it has the service environment variables.
   495  		podName := "client-envvars-" + string(uuid.NewUUID())
   496  		const containerName = "env3cont"
   497  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   498  			ObjectMeta: metav1.ObjectMeta{
   499  				Name:   podName,
   500  				Labels: map[string]string{"name": podName},
   501  			},
   502  			Spec: v1.PodSpec{
   503  				Containers: []v1.Container{
   504  					{
   505  						Name:    containerName,
   506  						Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   507  						Command: []string{"sh", "-c", "env"},
   508  					},
   509  				},
   510  				RestartPolicy: v1.RestartPolicyNever,
   511  			},
   512  		})
   513  
   514  		// It's possible for the Pod to be created before the Kubelet is updated with the new
   515  		// service. In that case, we just retry.
   516  		const maxRetries = 3
   517  		expectedVars := []string{
   518  			"FOOSERVICE_SERVICE_HOST=",
   519  			"FOOSERVICE_SERVICE_PORT=",
   520  			"FOOSERVICE_PORT=",
   521  			"FOOSERVICE_PORT_8765_TCP_PORT=",
   522  			"FOOSERVICE_PORT_8765_TCP_PROTO=",
   523  			"FOOSERVICE_PORT_8765_TCP=",
   524  			"FOOSERVICE_PORT_8765_TCP_ADDR=",
   525  		}
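         		// When populated, these look like, e.g., FOOSERVICE_SERVICE_HOST=10.0.171.239
         		// and FOOSERVICE_PORT_8765_TCP=tcp://10.0.171.239:8765 (addresses are
         		// cluster-dependent).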
   526  		expectNoErrorWithRetries(func() error {
   527  			return e2epodoutput.MatchContainerOutput(ctx, f, pod, containerName, expectedVars, gomega.ContainSubstring)
   528  		}, maxRetries, "Container should have service environment variables set")
   529  	})
   530  
   531  	/*
   532  		Release: v1.13
   533  		Testname: Pods, remote command execution over websocket
   534  		Description: A Pod is created. Websocket is created to retrieve exec command output from this pod.
    535  		Message retrieved from the Websocket MUST match the expected exec command output.
   536  	*/
   537  	framework.ConformanceIt("should support remote command execution over websockets", f.WithNodeConformance(), func(ctx context.Context) {
   538  		config, err := framework.LoadConfig()
   539  		framework.ExpectNoError(err, "unable to get base config")
   540  
   541  		ginkgo.By("creating the pod")
   542  		name := "pod-exec-websocket-" + string(uuid.NewUUID())
   543  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   544  			ObjectMeta: metav1.ObjectMeta{
   545  				Name: name,
   546  			},
   547  			Spec: v1.PodSpec{
   548  				Containers: []v1.Container{
   549  					{
   550  						Name:    "main",
   551  						Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   552  						Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
   553  					},
   554  				},
   555  			},
   556  		})
   557  
   558  		ginkgo.By("submitting the pod to kubernetes")
   559  		pod = podClient.CreateSync(ctx, pod)
   560  
   561  		req := f.ClientSet.CoreV1().RESTClient().Get().
   562  			Namespace(f.Namespace.Name).
   563  			Resource("pods").
   564  			Name(pod.Name).
   565  			Suffix("exec").
   566  			Param("stderr", "1").
   567  			Param("stdout", "1").
   568  			Param("container", pod.Spec.Containers[0].Name).
   569  			Param("command", "echo").
   570  			Param("command", "remote execution test")
   571  
   572  		url := req.URL()
   573  		ws, err := e2ewebsocket.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
   574  		if err != nil {
   575  			framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
   576  		}
   577  		defer ws.Close()
   578  
   579  		buf := &bytes.Buffer{}
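         		// Note: the websocket is read to EOF on the first pass below, so later
         		// Eventually attempts effectively re-validate the bytes already accumulated
         		// in buf rather than reading new data.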
   580  		gomega.Eventually(ctx, func() error {
   581  			for {
   582  				var msg []byte
   583  				if err := websocket.Message.Receive(ws, &msg); err != nil {
   584  					if err == io.EOF {
   585  						break
   586  					}
   587  					framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
   588  				}
   589  				if len(msg) == 0 {
   590  					continue
   591  				}
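         				// channel.k8s.io framing: the first byte of every message is the
         				// stream index (0 stdin, 1 stdout, 2 stderr); the rest is payload.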
    592  				if msg[0] != 1 {
    593  					if len(msg) == 1 {
    594  						// skip an empty message on a stream other than stdout
    595  						continue
    596  					}
    597  					// framework.Failf panics, so no else branch is needed here.
    598  					framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
    599  				}
    600  
    601  				buf.Write(msg[1:])
   602  			}
   603  			if buf.Len() == 0 {
    604  				return fmt.Errorf("no stdout data received from server yet")
   605  			}
   606  			if !strings.Contains(buf.String(), "remote execution test") {
   607  				return fmt.Errorf("expected to find 'remote execution test' in %q", buf.String())
   608  			}
   609  			return nil
    610  		}, time.Minute, 10*time.Second).Should(gomega.Succeed())
   611  	})
   612  
   613  	/*
   614  		Release: v1.13
   615  		Testname: Pods, logs from websockets
   616  		Description: A Pod is created. Websocket is created to retrieve log of a container from this pod.
    617  		Message retrieved from the Websocket MUST match the container's output.
   618  	*/
   619  	framework.ConformanceIt("should support retrieving logs from the container over websockets", f.WithNodeConformance(), func(ctx context.Context) {
   620  		config, err := framework.LoadConfig()
   621  		framework.ExpectNoError(err, "unable to get base config")
   622  
   623  		ginkgo.By("creating the pod")
   624  		name := "pod-logs-websocket-" + string(uuid.NewUUID())
   625  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   626  			ObjectMeta: metav1.ObjectMeta{
   627  				Name: name,
   628  			},
   629  			Spec: v1.PodSpec{
   630  				Containers: []v1.Container{
   631  					{
   632  						Name:    "main",
   633  						Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   634  						Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
   635  					},
   636  				},
   637  			},
   638  		})
   639  
   640  		ginkgo.By("submitting the pod to kubernetes")
   641  		podClient.CreateSync(ctx, pod)
   642  
   643  		req := f.ClientSet.CoreV1().RESTClient().Get().
   644  			Namespace(f.Namespace.Name).
   645  			Resource("pods").
   646  			Name(pod.Name).
   647  			Suffix("log").
   648  			Param("container", pod.Spec.Containers[0].Name)
   649  
   650  		url := req.URL()
   651  
   652  		ws, err := e2ewebsocket.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
   653  		if err != nil {
   654  			framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
   655  		}
   656  		defer ws.Close()
   657  		buf := &bytes.Buffer{}
   658  		for {
   659  			var msg []byte
   660  			if err := websocket.Message.Receive(ws, &msg); err != nil {
   661  				if err == io.EOF {
   662  					break
   663  				}
   664  				framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
   665  			}
   666  			if len(strings.TrimSpace(string(msg))) == 0 {
   667  				continue
   668  			}
   669  			buf.Write(msg)
   670  		}
   671  		if buf.String() != "container is alive\n" {
   672  			framework.Failf("Unexpected websocket logs:\n%s", buf.String())
   673  		}
   674  	})
   675  
   676  	// Slow (~7 mins)
   677  	f.It("should have their auto-restart back-off timer reset on image update", f.WithSlow(), f.WithNodeConformance(), func(ctx context.Context) {
   678  		podName := "pod-back-off-image"
   679  		containerName := "back-off"
   680  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   681  			ObjectMeta: metav1.ObjectMeta{
   682  				Name:   podName,
   683  				Labels: map[string]string{"test": "back-off-image"},
   684  			},
   685  			Spec: v1.PodSpec{
   686  				Containers: []v1.Container{
   687  					{
   688  						Name:    containerName,
   689  						Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   690  						Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
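         					// Note: with "sh -c", the trailing "/crash/missing" argument becomes
         					// the shell's $0 rather than a command, so the container simply sleeps
         					// 5s and exits; that quick exit drives the kubelet's restart back-off.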
   691  					},
   692  				},
   693  			},
   694  		})
   695  
   696  		delay1, delay2 := startPodAndGetBackOffs(ctx, podClient, pod, buildBackOffDuration)
   697  
   698  		ginkgo.By("updating the image")
   699  		podClient.Update(ctx, podName, func(pod *v1.Pod) {
   700  			pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
   701  		})
   702  
   703  		time.Sleep(syncLoopFrequency)
   704  		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
   705  
   706  		ginkgo.By("get restart delay after image update")
   707  		delayAfterUpdate, err := getRestartDelay(ctx, podClient, podName, containerName)
   708  		if err != nil {
   709  			framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
   710  		}
   711  
   712  		if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
    713  			framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay2, delay1)
   714  		}
   715  	})
   716  
   717  	// Slow by design (~27 mins) issue #19027
   718  	f.It("should cap back-off at MaxContainerBackOff", f.WithSlow(), f.WithNodeConformance(), func(ctx context.Context) {
   719  		podName := "back-off-cap"
   720  		containerName := "back-off-cap"
   721  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   722  			ObjectMeta: metav1.ObjectMeta{
   723  				Name:   podName,
   724  				Labels: map[string]string{"test": "liveness"},
   725  			},
   726  			Spec: v1.PodSpec{
   727  				Containers: []v1.Container{
   728  					{
   729  						Name:    containerName,
   730  						Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   731  						Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
   732  					},
   733  				},
   734  			},
   735  		})
   736  
   737  		podClient.CreateSync(ctx, pod)
   738  		time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
   739  
   740  		// wait for a delay == capped delay of MaxContainerBackOff
   741  		ginkgo.By("getting restart delay when capped")
   742  		var (
   743  			delay1 time.Duration
   744  			err    error
   745  		)
    746  		for i := 0; i < 3; i++ {
    747  			delay1, err = getRestartDelay(ctx, podClient, podName, containerName)
    748  			if err != nil {
    749  				framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
    750  			}
    751  			// stop sampling once the delay has reached the cap
    752  			if delay1 >= kubelet.MaxContainerBackOff {
    753  				break
    754  			}
    755  		}
   756  
   757  		if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
   758  			framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
   759  		}
   760  
   761  		ginkgo.By("getting restart delay after a capped delay")
   762  		delay2, err := getRestartDelay(ctx, podClient, podName, containerName)
   763  		if err != nil {
   764  			framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
   765  		}
   766  
   767  		if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
   768  			framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
   769  		}
   770  	})
   771  
   772  	f.It("should support pod readiness gates", f.WithNodeConformance(), func(ctx context.Context) {
   773  		podName := "pod-ready"
   774  		readinessGate1 := "k8s.io/test-condition1"
   775  		readinessGate2 := "k8s.io/test-condition2"
   776  		patchStatusFmt := `{"status":{"conditions":[{"type":%q, "status":%q}]}}`
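         		// e.g. fmt.Sprintf(patchStatusFmt, readinessGate1, "True") produces:
         		//   {"status":{"conditions":[{"type":"k8s.io/test-condition1", "status":"True"}]}}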
   777  		pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   778  			ObjectMeta: metav1.ObjectMeta{
   779  				Name:   podName,
   780  				Labels: map[string]string{"test": "pod-readiness-gate"},
   781  			},
   782  			Spec: v1.PodSpec{
   783  				Containers: []v1.Container{
   784  					{
   785  						Name:    "pod-readiness-gate",
   786  						Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   787  						Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
   788  					},
   789  				},
   790  				ReadinessGates: []v1.PodReadinessGate{
   791  					{ConditionType: v1.PodConditionType(readinessGate1)},
   792  					{ConditionType: v1.PodConditionType(readinessGate2)},
   793  				},
   794  			},
   795  		})
   796  
   797  		validatePodReadiness := func(expectReady bool) {
    798  			err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
   799  				pod, err := podClient.Get(ctx, podName, metav1.GetOptions{})
   800  				framework.ExpectNoError(err)
   801  				podReady := podutils.IsPodReady(pod)
   802  				res := expectReady == podReady
   803  				if !res {
   804  					framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v (pod status %#v)", podName, expectReady, podReady, pod.Status)
   805  				}
   806  				return res, nil
   807  			})
   808  			framework.ExpectNoError(err)
   809  		}
   810  
   811  		ginkgo.By("submitting the pod to kubernetes")
   812  		e2epod.NewPodClient(f).Create(ctx, pod)
   813  		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
   814  		if podClient.PodIsReady(ctx, podName) {
   815  			framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name)
   816  		}
   817  
   818  		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
   819  		_, err := podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status")
   820  		framework.ExpectNoError(err)
    821  		// Sleep for one kubelet sync period so the patched condition is observed.
   822  		time.Sleep(syncLoopFrequency)
   823  		// Verify the pod is still not ready
   824  		if podClient.PodIsReady(ctx, podName) {
   825  			framework.Failf("Expect pod(%s/%s)'s Ready condition to be false with only one condition in readinessGates equal to True", f.Namespace.Name, pod.Name)
   826  		}
   827  
   828  		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
   829  		_, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status")
   830  		framework.ExpectNoError(err)
   831  		validatePodReadiness(true)
   832  
   833  		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
   834  		_, err = podClient.Patch(ctx, podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), metav1.PatchOptions{}, "status")
   835  		framework.ExpectNoError(err)
   836  		validatePodReadiness(false)
   837  
   838  	})
   839  
   840  	/*
   841  		Release: v1.19
   842  		Testname: Pods, delete a collection
    843  		Description: A set of pods is created with a common label and MUST be found when listed using that label selector.
    844  		The set of pods is deleted via DeleteCollection and MUST NOT show up when listed by the same label selector.
   845  	*/
   846  	framework.ConformanceIt("should delete a collection of pods", func(ctx context.Context) {
   847  		podTestNames := []string{"test-pod-1", "test-pod-2", "test-pod-3"}
   848  
   849  		one := int64(1)
   850  
   851  		ginkgo.By("Create set of pods")
   852  		// create a set of pods in test namespace
   853  		for _, podTestName := range podTestNames {
   854  			_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx,
   855  				e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   856  					ObjectMeta: metav1.ObjectMeta{
   857  						Name: podTestName,
   858  						Labels: map[string]string{
   859  							"type": "Testing",
   860  						},
   861  					},
   862  					Spec: v1.PodSpec{
   863  						TerminationGracePeriodSeconds: &one,
   864  						Containers: []v1.Container{{
   865  							Image: imageutils.GetE2EImage(imageutils.Agnhost),
   866  							Name:  "token-test",
   867  						}},
   868  						RestartPolicy: v1.RestartPolicyNever,
   869  					}}), metav1.CreateOptions{})
   870  			framework.ExpectNoError(err, "failed to create pod")
   871  			framework.Logf("created %v", podTestName)
   872  		}
   873  
    874  		// wait for all 3 pods to be running
   875  		ginkgo.By("waiting for all 3 pods to be running")
   876  		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, f.Timeouts.PodStart)
   877  		framework.ExpectNoError(err, "3 pods not found running.")
   878  
   879  		// delete Collection of pods with a label in the current namespace
   880  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{
   881  			LabelSelector: "type=Testing"})
   882  		framework.ExpectNoError(err, "failed to delete collection of pods")
   883  
   884  		// wait for all pods to be deleted
   885  		ginkgo.By("waiting for all pods to be deleted")
   886  		err = wait.PollUntilContextTimeout(ctx, podRetryPeriod, f.Timeouts.PodDelete, true, checkPodListQuantity(f, "type=Testing", 0))
    887  		framework.ExpectNoError(err, "timed out waiting for all pods to be deleted")
   888  	})
   889  
   890  	/*
   891  		Release: v1.20
   892  		Testname: Pods, completes the lifecycle of a Pod and the PodStatus
    893  		Description: Creating a Pod with a static label MUST succeed. Patching the label and the
    894  		pod data MUST succeed. Checking and replacing the PodStatus MUST succeed. Deleting the
    895  		Pod MUST succeed.
   896  	*/
   897  	framework.ConformanceIt("should run through the lifecycle of Pods and PodStatus", func(ctx context.Context) {
   898  		podResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
   899  		testNamespaceName := f.Namespace.Name
   900  		testPodName := "pod-test"
   901  		testPodImage := imageutils.GetE2EImage(imageutils.Agnhost)
   902  		testPodImage2 := imageutils.GetE2EImage(imageutils.Httpd)
   903  		testPodLabels := map[string]string{"test-pod-static": "true"}
   904  		testPodLabelsFlat := "test-pod-static=true"
   905  		one := int64(1)
   906  
   907  		w := &cache.ListWatch{
   908  			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
   909  				options.LabelSelector = testPodLabelsFlat
   910  				return f.ClientSet.CoreV1().Pods(testNamespaceName).Watch(ctx, options)
   911  			},
   912  		}
   913  		podsList, err := f.ClientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
   914  		framework.ExpectNoError(err, "failed to list Pods")
   915  
   916  		testPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
   917  			ObjectMeta: metav1.ObjectMeta{
   918  				Name:   testPodName,
   919  				Labels: testPodLabels,
   920  			},
   921  			Spec: v1.PodSpec{
   922  				TerminationGracePeriodSeconds: &one,
   923  				Containers: []v1.Container{
   924  					{
   925  						Name:  testPodName,
   926  						Image: testPodImage,
   927  					},
   928  				},
   929  			},
   930  		})
   931  		ginkgo.By("creating a Pod with a static label")
   932  		_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(ctx, testPod, metav1.CreateOptions{})
   933  		framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
   934  
   935  		ginkgo.By("watching for Pod to be ready")
   936  		ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
   937  		defer cancel()
   938  		_, err = watchtools.Until(ctxUntil, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
   939  			if pod, ok := event.Object.(*v1.Pod); ok {
   940  				found := pod.ObjectMeta.Name == testPod.ObjectMeta.Name &&
   941  					pod.ObjectMeta.Namespace == testNamespaceName &&
   942  					pod.Labels["test-pod-static"] == "true" &&
   943  					pod.Status.Phase == v1.PodRunning
   944  				if !found {
   945  					framework.Logf("observed Pod %v in namespace %v in phase %v with labels: %v & conditions %v", pod.ObjectMeta.Name, pod.ObjectMeta.Namespace, pod.Status.Phase, pod.Labels, pod.Status.Conditions)
   946  					return false, nil
   947  				}
   948  				framework.Logf("Found Pod %v in namespace %v in phase %v with labels: %v & conditions %v", pod.ObjectMeta.Name, pod.ObjectMeta.Namespace, pod.Status.Phase, pod.Labels, pod.Status.Conditions)
   949  				return found, nil
   950  			}
   951  			framework.Logf("Observed event: %+v", event.Object)
   952  			return false, nil
   953  		})
   954  		if err != nil {
    955  			framework.Logf("failed to see event that pod was created: %v", err)
   956  		}
   957  		p, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
   958  		framework.ExpectNoError(err, "failed to get Pod %v in namespace %v", testPodName, testNamespaceName)
   959  		gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodRunning), "failed to see Pod %v in namespace %v running", p.ObjectMeta.Name, testNamespaceName)
   960  
   961  		ginkgo.By("patching the Pod with a new Label and updated data")
   962  		prePatchResourceVersion := p.ResourceVersion
   963  		podPatch, err := json.Marshal(v1.Pod{
   964  			ObjectMeta: metav1.ObjectMeta{
   965  				Labels: map[string]string{"test-pod": "patched"},
   966  			},
   967  			Spec: v1.PodSpec{
   968  				TerminationGracePeriodSeconds: &one,
   969  				Containers: []v1.Container{{
   970  					Name:  testPodName,
   971  					Image: testPodImage2,
   972  				}},
   973  			},
   974  		})
   975  		framework.ExpectNoError(err, "failed to marshal JSON patch for Pod")
    976  		_, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Patch(ctx, testPodName, types.StrategicMergePatchType, podPatch, metav1.PatchOptions{})
   977  		framework.ExpectNoError(err, "failed to patch Pod %s in namespace %s", testPodName, testNamespaceName)
   978  		ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
   979  		defer cancel()
   980  		_, err = watchtools.Until(ctxUntil, prePatchResourceVersion, w, func(event watch.Event) (bool, error) {
   981  			switch event.Type {
   982  			case watch.Modified:
   983  				if pod, ok := event.Object.(*v1.Pod); ok {
    984  					found := pod.ObjectMeta.Name == testPodName &&
   985  						pod.Labels["test-pod-static"] == "true"
   986  					return found, nil
   987  				}
   988  			default:
   989  				framework.Logf("observed event type %v", event.Type)
   990  			}
   991  			return false, nil
   992  		})
   993  		if err != nil {
   994  			framework.Logf("failed to see %v event: %v", watch.Modified, err)
   995  		}
   996  
   997  		ginkgo.By("getting the Pod and ensuring that it's patched")
   998  		pod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
   999  		framework.ExpectNoError(err, "failed to fetch Pod %s in namespace %s", testPodName, testNamespaceName)
  1000  		gomega.Expect(pod.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-pod", "patched"), "failed to patch Pod - missing label")
  1001  		gomega.Expect(pod.Spec.Containers[0].Image).To(gomega.Equal(testPodImage2), "failed to patch Pod - wrong image")
  1002  
  1003  		ginkgo.By("replacing the Pod's status Ready condition to False")
  1004  		var podStatusUpdate *v1.Pod
  1005  
  1006  		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
  1007  			podStatusUnstructured, err := dc.Resource(podResource).Namespace(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{}, "status")
  1008  			framework.ExpectNoError(err, "failed to fetch PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
  1009  			podStatusBytes, err := json.Marshal(podStatusUnstructured)
  1010  			framework.ExpectNoError(err, "failed to marshal unstructured response")
  1011  			var podStatus v1.Pod
  1012  			err = json.Unmarshal(podStatusBytes, &podStatus)
  1013  			framework.ExpectNoError(err, "failed to unmarshal JSON bytes to a Pod object type")
  1014  			podStatusUpdated := podStatus
  1015  			podStatusFieldPatchCount := 0
  1016  			podStatusFieldPatchCountTotal := 2
  1017  			for pos, cond := range podStatusUpdated.Status.Conditions {
  1018  				if (cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue) || (cond.Type == v1.ContainersReady && cond.Status == v1.ConditionTrue) {
  1019  					podStatusUpdated.Status.Conditions[pos].Status = v1.ConditionFalse
  1020  					podStatusFieldPatchCount++
  1021  				}
  1022  			}
  1023  			gomega.Expect(podStatusFieldPatchCount).To(gomega.Equal(podStatusFieldPatchCountTotal), "failed to patch all relevant Pod conditions")
  1024  			podStatusUpdate, err = f.ClientSet.CoreV1().Pods(testNamespaceName).UpdateStatus(ctx, &podStatusUpdated, metav1.UpdateOptions{})
  1025  			return err
  1026  		})
  1027  		framework.ExpectNoError(err, "failed to update PodStatus of Pod %s in namespace %s", testPodName, testNamespaceName)
  1028  
  1029  		ginkgo.By("check the Pod again to ensure its Ready conditions are False")
  1030  		podStatusFieldPatchCount := 0
  1031  		podStatusFieldPatchCountTotal := 2
  1032  		for _, cond := range podStatusUpdate.Status.Conditions {
  1033  			if (cond.Type == v1.PodReady && cond.Status == v1.ConditionFalse) || (cond.Type == v1.ContainersReady && cond.Status == v1.ConditionFalse) {
  1034  				podStatusFieldPatchCount++
  1035  			}
  1036  		}
  1037  		gomega.Expect(podStatusFieldPatchCount).To(gomega.Equal(podStatusFieldPatchCountTotal), "failed to update PodStatus - field patch count doesn't match the total")
  1038  
  1039  		ginkgo.By("deleting the Pod via a Collection with a LabelSelector")
  1040  		preDeleteResourceVersion := podStatusUpdate.ResourceVersion
  1041  		err = f.ClientSet.CoreV1().Pods(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testPodLabelsFlat})
  1042  		framework.ExpectNoError(err, "failed to delete Pod by collection")
  1043  
  1044  		ginkgo.By("watching for the Pod to be deleted")
  1045  		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodDelete)
  1046  		defer cancel()
  1047  		_, err = watchtools.Until(ctxUntil, preDeleteResourceVersion, w, func(event watch.Event) (bool, error) {
  1048  			switch event.Type {
  1049  			case watch.Deleted:
  1050  				if pod, ok := event.Object.(*v1.Pod); ok {
   1051  					found := pod.ObjectMeta.Name == testPodName &&
  1052  						pod.Labels["test-pod-static"] == "true"
  1053  					return found, nil
  1054  				} else {
  1055  					framework.Logf("observed event type %v that was not a pod: %T", event.Type, event.Object)
  1056  				}
  1057  			default:
  1058  				framework.Logf("observed event type %v", event.Type)
  1059  			}
  1060  			return false, nil
  1061  		})
  1062  		if err != nil {
  1063  			framework.Logf("failed to see %v event: %v", watch.Deleted, err)
  1064  		}
  1065  		postDeletePod, err := f.ClientSet.CoreV1().Pods(testNamespaceName).Get(ctx, testPodName, metav1.GetOptions{})
  1066  		var postDeletePodJSON []byte
  1067  		if postDeletePod != nil {
  1068  			postDeletePodJSON, _ = json.Marshal(postDeletePod)
  1069  		}
  1070  		gomega.Expect(err).To(gomega.HaveOccurred(), "pod %v found in namespace %v, but it should be deleted: %s", testPodName, testNamespaceName, string(postDeletePodJSON))
  1071  		if !apierrors.IsNotFound(err) {
  1072  			framework.Failf("expected IsNotFound error, got %#v", err)
  1073  		}
  1074  	})
  1075  
  1076  	/*
  1077  		Release: v1.25
  1078  		Testname: Pods, patching status
   1079  		Description: Creating a pod MUST succeed, and the pod MUST
   1080  		be found running. Patching the pod status MUST succeed.
   1081  		After the patch, the status fields MUST equal
   1082  		the new values.
  1083  	*/
  1084  	framework.ConformanceIt("should patch a pod status", func(ctx context.Context) {
  1085  		ns := f.Namespace.Name
  1086  		podClient := f.ClientSet.CoreV1().Pods(ns)
  1087  		podName := "pod-" + utilrand.String(5)
  1088  		label := map[string]string{"e2e": podName}
  1089  
  1090  		ginkgo.By("Create a pod")
  1091  		testPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
  1092  			ObjectMeta: metav1.ObjectMeta{
  1093  				Name:   podName,
  1094  				Labels: label,
  1095  			},
  1096  			Spec: v1.PodSpec{
  1097  				TerminationGracePeriodSeconds: pointer.Int64(1),
  1098  				Containers: []v1.Container{
  1099  					{
  1100  						Name:  "agnhost",
  1101  						Image: imageutils.GetE2EImage(imageutils.Agnhost),
  1102  					},
  1103  				},
  1104  			},
  1105  		})
  1106  		pod, err := podClient.Create(ctx, testPod, metav1.CreateOptions{})
  1107  		framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, ns)
   1108  		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod), "Pod didn't start within the timeout period")
  1109  
  1110  		ginkgo.By("patching /status")
  1111  		podStatus := v1.PodStatus{
  1112  			Message: "Patched by e2e test",
  1113  			Reason:  "E2E",
  1114  		}
  1115  		pStatusJSON, err := json.Marshal(podStatus)
   1116  		framework.ExpectNoError(err, "Failed to marshal PodStatus %v", podStatus)
  1117  
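         		// The assembled merge patch body sent below is:
         		//   {"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"message":"Patched by e2e test","reason":"E2E"}}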
  1118  		pStatus, err := podClient.Patch(ctx, podName, types.MergePatchType,
  1119  			[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(pStatusJSON)+`}`),
  1120  			metav1.PatchOptions{}, "status")
  1121  		framework.ExpectNoError(err, "failed to patch pod: %q", podName)
  1122  		gomega.Expect(pStatus.Status.Message).To(gomega.Equal("Patched by e2e test"), "Status.Message for %q was %q but expected it to be \"Patched by e2e test\"", podName, pStatus.Status.Message)
  1123  		gomega.Expect(pStatus.Status.Reason).To(gomega.Equal("E2E"), "Status.Reason for %q was %q but expected it to be \"E2E\"", podName, pStatus.Status.Reason)
  1124  		framework.Logf("Status Message: %q and Reason: %q", pStatus.Status.Message, pStatus.Status.Reason)
  1125  	})
  1126  })
  1127  
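         // checkPodListQuantity returns a condition func, suitable for polling with
         // wait.PollUntilContextTimeout above, that reports whether exactly quantity
         // pods matching label exist in the test namespace.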
  1128  func checkPodListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) {
  1129  	return func(ctx context.Context) (bool, error) {
  1132  		list, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{
  1133  			LabelSelector: label})
  1134  
  1135  		if err != nil {
  1136  			return false, err
  1137  		}
  1138  
  1139  		if len(list.Items) != quantity {
  1140  			framework.Logf("Pod quantity %d is different from expected quantity %d", len(list.Items), quantity)
   1141  			return false, nil
  1142  		}
  1143  		return true, nil
  1144  	}
  1145  }